Diffstat (limited to 'drivers')
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile1
-rw-r--r--drivers/acpi/numa.c4
-rw-r--r--drivers/acpi/processor_core.c2
-rw-r--r--drivers/acpi/processor_perflib.c2
-rw-r--r--drivers/atm/fore200e.c11
-rw-r--r--drivers/atm/idt77252.c5
-rw-r--r--drivers/atm/lanai.c14
-rw-r--r--drivers/atm/nicstar.c4
-rw-r--r--drivers/base/power/Makefile1
-rw-r--r--drivers/base/power/generic_ops.c233
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/bcm203x.c2
-rw-r--r--drivers/bluetooth/bfusb.c2
-rw-r--r--drivers/bluetooth/bluecard_cs.c2
-rw-r--r--drivers/bluetooth/bpa10x.c2
-rw-r--r--drivers/bluetooth/bt3c_cs.c2
-rw-r--r--drivers/bluetooth/btmrvl_debugfs.c14
-rw-r--r--drivers/bluetooth/btmrvl_main.c2
-rw-r--r--drivers/bluetooth/btmrvl_sdio.c4
-rw-r--r--drivers/bluetooth/btsdio.c2
-rw-r--r--drivers/bluetooth/btuart_cs.c2
-rw-r--r--drivers/bluetooth/btusb.c2
-rw-r--r--drivers/bluetooth/dtl1_cs.c2
-rw-r--r--drivers/bluetooth/hci_ldisc.c2
-rw-r--r--drivers/bluetooth/hci_vhci.c2
-rw-r--r--drivers/char/agp/intel-agp.c123
-rw-r--r--drivers/char/cyclades.c16
-rw-r--r--drivers/char/hvc_console.c2
-rw-r--r--drivers/char/hw_random/Kconfig2
-rw-r--r--drivers/char/ip2/ip2main.c26
-rw-r--r--drivers/char/isicom.c54
-rw-r--r--drivers/char/moxa.c20
-rw-r--r--drivers/char/mxser.c3
-rw-r--r--drivers/char/nozomi.c157
-rw-r--r--drivers/char/serial167.c3
-rw-r--r--drivers/char/specialix.c2
-rw-r--r--drivers/char/synclink.c4
-rw-r--r--drivers/char/synclink_gt.c186
-rw-r--r--drivers/char/tty_buffer.c17
-rw-r--r--drivers/char/tty_ldisc.c50
-rw-r--r--drivers/char/vt_ioctl.c39
-rw-r--r--drivers/clocksource/Kconfig9
-rw-r--r--drivers/cpuidle/governors/menu.c2
-rw-r--r--drivers/dma/Kconfig23
-rw-r--r--drivers/dma/Makefile8
-rw-r--r--drivers/dma/coh901318.c182
-rw-r--r--drivers/dma/coh901318_lli.c23
-rw-r--r--drivers/dma/dmaengine.c2
-rw-r--r--drivers/dma/dmatest.c8
-rw-r--r--drivers/dma/fsldma.c1177
-rw-r--r--drivers/dma/fsldma.h35
-rw-r--r--drivers/dma/ioat/dma.c48
-rw-r--r--drivers/dma/ioat/dma.h11
-rw-r--r--drivers/dma/ioat/dma_v2.c70
-rw-r--r--drivers/dma/ioat/dma_v2.h6
-rw-r--r--drivers/dma/ioat/dma_v3.c64
-rw-r--r--drivers/dma/ioat/registers.h2
-rw-r--r--drivers/dma/ipu/ipu_idmac.c15
-rw-r--r--drivers/dma/mpc512x_dma.c800
-rw-r--r--drivers/dma/ppc4xx/adma.c2
-rw-r--r--drivers/edac/amd64_edac.c39
-rw-r--r--drivers/edac/amd64_edac.h3
-rw-r--r--drivers/eisa/eisa-bus.c240
-rw-r--r--drivers/firewire/core-cdev.c368
-rw-r--r--drivers/firewire/core-device.c198
-rw-r--r--drivers/firewire/core-transaction.c17
-rw-r--r--drivers/firewire/core.h2
-rw-r--r--drivers/firewire/ohci.c364
-rw-r--r--drivers/firewire/sbp2.c5
-rw-r--r--drivers/firmware/iscsi_ibft.c6
-rw-r--r--drivers/firmware/memmap.c57
-rw-r--r--drivers/gpio/Kconfig26
-rw-r--r--drivers/gpio/Makefile3
-rw-r--r--drivers/gpio/cs5535-gpio.c4
-rw-r--r--drivers/gpio/gpiolib.c58
-rw-r--r--drivers/gpio/it8761e_gpio.c231
-rw-r--r--drivers/gpio/max7300.c94
-rw-r--r--drivers/gpio/max7301.c293
-rw-r--r--drivers/gpio/max730x.c244
-rw-r--r--drivers/gpio/pca953x.c249
-rw-r--r--drivers/gpio/pl061.c2
-rw-r--r--drivers/gpio/timbgpio.c35
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/drm_buffer.c184
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c44
-rw-r--r--drivers/gpu/drm/drm_edid.c30
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c26
-rw-r--r--drivers/gpu/drm/drm_gem.c70
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c253
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c326
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c27
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h69
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c430
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c169
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c313
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h170
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c10
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c3
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c14
-rw-r--r--drivers/gpu/drm/i915/intel_display.c216
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c6
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h2
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c2
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c4
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c2
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c41
-rw-r--r--drivers/gpu/drm/i915/intel_overlay.c29
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c23
-rw-r--r--drivers/gpu/drm/nouveau/Makefile2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c160
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c339
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h126
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_calc.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_channel.c39
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c167
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_debugfs.c24
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c7
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c108
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.h21
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h53
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c508
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_hw.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_i2c.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_notifier.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c40
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dac.c8
-rw-r--r--drivers/gpu/drm/nouveau/nv04_dfp.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv04_display.c49
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fifo.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_tv.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv17_tv.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv40_fifo.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv50_crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_dac.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c54
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fifo.c13
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c74
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c2367
-rw-r--r--drivers/gpu/drm/nouveau/nv50_instmem.c2
-rw-r--r--drivers/gpu/drm/radeon/Makefile9
-rw-r--r--drivers/gpu/drm/radeon/atom.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios.h7300
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c456
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c64
-rw-r--r--drivers/gpu/drm/radeon/avivod.h2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c767
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h176
-rw-r--r--drivers/gpu/drm/radeon/r100.c176
-rw-r--r--drivers/gpu/drm/radeon/r200.c46
-rw-r--r--drivers/gpu/drm/radeon/r300.c157
-rw-r--r--drivers/gpu/drm/radeon/r300_cmdbuf.c280
-rw-r--r--drivers/gpu/drm/radeon/r300_reg.h2
-rw-r--r--drivers/gpu/drm/radeon/r420.c49
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h100
-rw-r--r--drivers/gpu/drm/radeon/r520.c21
-rw-r--r--drivers/gpu/drm/radeon/r600.c190
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c21
-rw-r--r--drivers/gpu/drm/radeon/r600_blit.c2
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_kms.c17
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c10
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c262
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c831
-rw-r--r--drivers/gpu/drm/radeon/r600d.h467
-rw-r--r--drivers/gpu/drm/radeon/radeon.h167
-rw-r--r--drivers/gpu/drm/radeon/radeon_agp.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h172
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c435
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c257
-rw-r--r--drivers/gpu/drm/radeon/radeon_bios.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_clocks.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c290
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_cp.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_cursor.c50
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c235
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c332
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c14
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h46
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c354
-rw-r--r--drivers/gpu/drm/radeon/radeon_family.h5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c12
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c32
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c36
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c768
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_encoders.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h55
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c399
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h50
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c67
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c203
-rw-r--r--drivers/gpu/drm/radeon/radeon_test.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c12
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r600837
-rw-r--r--drivers/gpu/drm/radeon/rs400.c39
-rw-r--r--drivers/gpu/drm/radeon/rs600.c56
-rw-r--r--drivers/gpu/drm/radeon/rs690.c41
-rw-r--r--drivers/gpu/drm/radeon/rv515.c21
-rw-r--r--drivers/gpu/drm/radeon/rv770.c259
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c18
-rw-r--r--drivers/gpu/vga/Kconfig11
-rw-r--r--drivers/gpu/vga/Makefile1
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c450
-rw-r--r--drivers/hid/usbhid/hiddev.c7
-rw-r--r--drivers/hwmon/Kconfig45
-rw-r--r--drivers/hwmon/Makefile3
-rw-r--r--drivers/hwmon/adcxx.c15
-rw-r--r--drivers/hwmon/adt7411.c366
-rw-r--r--drivers/hwmon/adt7473.c1180
-rw-r--r--drivers/hwmon/asc7621.c1255
-rw-r--r--drivers/hwmon/fschmd.c15
-rw-r--r--drivers/hwmon/g760a.c2
-rw-r--r--drivers/hwmon/it87.c939
-rw-r--r--drivers/hwmon/lm90.c89
-rw-r--r--drivers/hwmon/tmp401.c7
-rw-r--r--drivers/hwmon/tmp421.c24
-rw-r--r--drivers/hwmon/vt8231.c3
-rw-r--r--drivers/hwmon/w83793.c482
-rw-r--r--drivers/i2c/Kconfig10
-rw-r--r--drivers/i2c/Makefile1
-rw-r--r--drivers/i2c/algos/i2c-algo-pca.c2
-rw-r--r--drivers/i2c/busses/Kconfig7
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c2
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c2
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c2
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c2
-rw-r--r--drivers/i2c/busses/i2c-designware.c4
-rw-r--r--drivers/i2c/busses/i2c-hydra.c2
-rw-r--r--drivers/i2c/busses/i2c-i801.c7
-rw-r--r--drivers/i2c/busses/i2c-isch.c2
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c2
-rw-r--r--drivers/i2c/busses/i2c-parport-light.c48
-rw-r--r--drivers/i2c/busses/i2c-parport.c43
-rw-r--r--drivers/i2c/busses/i2c-parport.h4
-rw-r--r--drivers/i2c/busses/i2c-pasemi.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c2
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c2
-rw-r--r--drivers/i2c/busses/i2c-sis630.c2
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-tiny-usb.c10
-rw-r--r--drivers/i2c/busses/i2c-via.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/i2c/i2c-core.c54
-rw-r--r--drivers/i2c/i2c-smbus.c263
-rw-r--r--drivers/ide/aec62xx.c13
-rw-r--r--drivers/ide/ali14xx.c3
-rw-r--r--drivers/ide/alim15x3.c171
-rw-r--r--drivers/ide/amd74xx.c18
-rw-r--r--drivers/ide/at91_ide.c5
-rw-r--r--drivers/ide/atiixp.c14
-rw-r--r--drivers/ide/au1xxx-ide.c13
-rw-r--r--drivers/ide/cmd640.c5
-rw-r--r--drivers/ide/cmd64x.c114
-rw-r--r--drivers/ide/cs5520.c9
-rw-r--r--drivers/ide/cs5530.c13
-rw-r--r--drivers/ide/cs5535.c14
-rw-r--r--drivers/ide/cs5536.c16
-rw-r--r--drivers/ide/cy82c693.c146
-rw-r--r--drivers/ide/dtc2278.c4
-rw-r--r--drivers/ide/hpt366.c9
-rw-r--r--drivers/ide/ht6560b.c3
-rw-r--r--drivers/ide/icside.c67
-rw-r--r--drivers/ide/ide-cs.c23
-rw-r--r--drivers/ide/ide-devsets.c6
-rw-r--r--drivers/ide/ide-iops.c2
-rw-r--r--drivers/ide/ide-probe.c2
-rw-r--r--drivers/ide/ide-tape.c14
-rw-r--r--drivers/ide/ide-timings.c18
-rw-r--r--drivers/ide/ide-xfer-mode.c18
-rw-r--r--drivers/ide/it8172.c14
-rw-r--r--drivers/ide/it8213.c20
-rw-r--r--drivers/ide/it821x.c14
-rw-r--r--drivers/ide/jmicron.c6
-rw-r--r--drivers/ide/opti621.c77
-rw-r--r--drivers/ide/palm_bk3710.c12
-rw-r--r--drivers/ide/pdc202xx_new.c8
-rw-r--r--drivers/ide/pdc202xx_old.c31
-rw-r--r--drivers/ide/piix.c20
-rw-r--r--drivers/ide/pmac.c13
-rw-r--r--drivers/ide/qd65xx.c10
-rw-r--r--drivers/ide/sc1200.c8
-rw-r--r--drivers/ide/scc_pata.c24
-rw-r--r--drivers/ide/serverworks.c50
-rw-r--r--drivers/ide/sgiioc4.c2
-rw-r--r--drivers/ide/siimage.c14
-rw-r--r--drivers/ide/sis5513.c8
-rw-r--r--drivers/ide/sl82c105.c8
-rw-r--r--drivers/ide/slc90e66.c17
-rw-r--r--drivers/ide/tc86c001.c9
-rw-r--r--drivers/ide/triflex.c10
-rw-r--r--drivers/ide/tx4938ide.c7
-rw-r--r--drivers/ide/tx4939ide.c10
-rw-r--r--drivers/ide/umc8672.c5
-rw-r--r--drivers/ide/via82cxxx.c132
-rw-r--r--drivers/infiniband/Kconfig1
-rw-r--r--drivers/infiniband/core/mad.c21
-rw-r--r--drivers/infiniband/core/ucm.c63
-rw-r--r--drivers/infiniband/core/ud_header.c14
-rw-r--r--drivers/infiniband/core/umem.c2
-rw-r--r--drivers/infiniband/core/user_mad.c173
-rw-r--r--drivers/infiniband/core/uverbs.h13
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c25
-rw-r--r--drivers/infiniband/core/uverbs_main.c256
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.c15
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_hal.h4
-rw-r--r--drivers/infiniband/hw/cxgb3/cxio_wr.h17
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.c80
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch.h2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_cm.c11
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_qp.c9
-rw-r--r--drivers/infiniband/hw/ehca/ehca_irq.c5
-rw-r--r--drivers/infiniband/hw/ehca/ehca_qp.c4
-rw-r--r--drivers/infiniband/hw/ehca/ehca_sqp.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_user_pages.c3
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c4
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c2
-rw-r--r--drivers/infiniband/hw/nes/nes.c1
-rw-r--r--drivers/infiniband/hw/nes/nes.h9
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c11
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.c484
-rw-r--r--drivers/infiniband/hw/nes/nes_hw.h2
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c146
-rw-r--r--drivers/infiniband/hw/nes/nes_verbs.c6
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_ethtool.c10
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c2
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.c47
-rw-r--r--drivers/infiniband/ulp/iser/iscsi_iser.h97
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c506
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c64
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c281
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.c91
-rw-r--r--drivers/infiniband/ulp/srp/ib_srp.h6
-rw-r--r--drivers/input/touchscreen/mc13783_ts.c4
-rw-r--r--drivers/isdn/Kconfig43
-rw-r--r--drivers/isdn/capi/Kconfig16
-rw-r--r--drivers/isdn/capi/capi.c1203
-rw-r--r--drivers/isdn/capi/capidrv.c103
-rw-r--r--drivers/isdn/capi/capifs.c126
-rw-r--r--drivers/isdn/capi/capifs.h21
-rw-r--r--drivers/isdn/capi/kcapi.c817
-rw-r--r--drivers/isdn/capi/kcapi.h13
-rw-r--r--drivers/isdn/capi/kcapi_proc.c41
-rw-r--r--drivers/isdn/gigaset/asyncdata.c6
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c18
-rw-r--r--drivers/isdn/gigaset/capi.c106
-rw-r--r--drivers/isdn/gigaset/common.c49
-rw-r--r--drivers/isdn/gigaset/ev-layer.c63
-rw-r--r--drivers/isdn/gigaset/gigaset.h11
-rw-r--r--drivers/isdn/gigaset/i4l.c52
-rw-r--r--drivers/isdn/gigaset/interface.c12
-rw-r--r--drivers/isdn/gigaset/isocdata.c44
-rw-r--r--drivers/isdn/gigaset/proc.c2
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c2
-rw-r--r--drivers/isdn/hardware/avm/avmcard.h6
-rw-r--r--drivers/isdn/hardware/avm/b1.c54
-rw-r--r--drivers/isdn/hardware/avm/b1dma.c71
-rw-r--r--drivers/isdn/hardware/avm/b1isa.c2
-rw-r--r--drivers/isdn/hardware/avm/b1pci.c4
-rw-r--r--drivers/isdn/hardware/avm/b1pcmcia.c2
-rw-r--r--drivers/isdn/hardware/avm/c4.c53
-rw-r--r--drivers/isdn/hardware/avm/t1isa.c2
-rw-r--r--drivers/isdn/hardware/avm/t1pci.c2
-rw-r--r--drivers/isdn/hardware/eicon/capimain.c40
-rw-r--r--drivers/isdn/hardware/eicon/diva_didd.c45
-rw-r--r--drivers/isdn/hardware/eicon/divasi.c48
-rw-r--r--drivers/isdn/hardware/eicon/divasproc.c198
-rw-r--r--drivers/isdn/hardware/mISDN/hfcmulti.c2
-rw-r--r--drivers/isdn/hardware/mISDN/mISDNinfineon.c1
-rw-r--r--drivers/isdn/hardware/mISDN/w6692.c2
-rw-r--r--drivers/isdn/hisax/isar.c2
-rw-r--r--drivers/isdn/hysdn/hycapi.c56
-rw-r--r--drivers/isdn/i4l/Kconfig7
-rw-r--r--drivers/md/dm-crypt.c3
-rw-r--r--drivers/md/dm-delay.c8
-rw-r--r--drivers/md/dm-ioctl.c24
-rw-r--r--drivers/md/dm-linear.c3
-rw-r--r--drivers/md/dm-log.c3
-rw-r--r--drivers/md/dm-mpath.c111
-rw-r--r--drivers/md/dm-raid1.c53
-rw-r--r--drivers/md/dm-snap.c34
-rw-r--r--drivers/md/dm-stripe.c3
-rw-r--r--drivers/md/dm-table.c12
-rw-r--r--drivers/md/dm-uevent.c7
-rw-r--r--drivers/md/dm.c25
-rw-r--r--drivers/md/dm.h4
-rw-r--r--drivers/md/raid5.c2
-rw-r--r--drivers/md/raid5.h2
-rw-r--r--drivers/media/dvb/dvb-core/dvb_net.c14
-rw-r--r--drivers/media/dvb/firewire/firedtv-fw.c39
-rw-r--r--drivers/media/video/dabusb.c8
-rw-r--r--drivers/message/i2o/i2o_proc.c11
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/htc-egpio.c2
-rw-r--r--drivers/mfd/mc13783-core.c73
-rw-r--r--drivers/misc/Kconfig9
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c50
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h4
-rw-r--r--drivers/misc/iwmc3200top/log.h31
-rw-r--r--drivers/misc/iwmc3200top/main.c61
-rw-r--r--drivers/misc/lkdtm.c472
-rw-r--r--drivers/misc/sgi-xp/xpnet.c2
-rw-r--r--drivers/mmc/card/sdio_uart.c93
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/core/sdio.c64
-rw-r--r--drivers/mmc/core/sdio_io.c56
-rw-r--r--drivers/mmc/host/Kconfig13
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/at91_mci.c224
-rw-r--r--drivers/mmc/host/bfin_sdh.c10
-rw-r--r--drivers/mmc/host/davinci_mmc.c45
-rw-r--r--drivers/mmc/host/omap_hsmmc.c400
-rw-r--r--drivers/mmc/host/ricoh_mmc.c262
-rw-r--r--drivers/mmc/host/s3cmci.c4
-rw-r--r--drivers/mmc/host/sdhci-pci.c24
-rw-r--r--drivers/mmc/host/sdhci.c76
-rw-r--r--drivers/mtd/maps/Kconfig9
-rw-r--r--drivers/mtd/maps/Makefile1
-rw-r--r--drivers/mtd/maps/omap_nor.c188
-rw-r--r--drivers/mtd/nand/omap2.c35
-rw-r--r--drivers/mtd/ubi/build.c133
-rw-r--r--drivers/mtd/ubi/debug.h4
-rw-r--r--drivers/mtd/ubi/io.c120
-rw-r--r--drivers/mtd/ubi/scan.c11
-rw-r--r--drivers/mtd/ubi/wl.c17
-rw-r--r--drivers/net/3c501.c2
-rw-r--r--drivers/net/3c505.c13
-rw-r--r--drivers/net/3c509.c10
-rw-r--r--drivers/net/3c515.c2
-rw-r--r--drivers/net/3c523.c13
-rw-r--r--drivers/net/3c527.c17
-rw-r--r--drivers/net/3c59x.c4
-rw-r--r--drivers/net/7990.c6
-rw-r--r--drivers/net/8139cp.c81
-rw-r--r--drivers/net/8139too.c196
-rw-r--r--drivers/net/82596.c15
-rw-r--r--drivers/net/Kconfig82
-rw-r--r--drivers/net/Makefile5
-rw-r--r--drivers/net/a2065.c6
-rw-r--r--drivers/net/acenic.c4
-rw-r--r--drivers/net/amd8111e.c21
-rw-r--r--drivers/net/amd8111e.h1
-rw-r--r--drivers/net/appletalk/ltpc.c1
-rw-r--r--drivers/net/arcnet/com20020-pci.c2
-rw-r--r--drivers/net/ariadne.c4
-rw-r--r--drivers/net/arm/am79c961a.c12
-rw-r--r--drivers/net/arm/at91_ether.c9
-rw-r--r--drivers/net/arm/ep93xx_eth.c140
-rw-r--r--drivers/net/arm/ether3.c2
-rw-r--r--drivers/net/arm/ixp4xx_eth.c13
-rw-r--r--drivers/net/arm/ks8695net.c23
-rw-r--r--drivers/net/arm/w90p910_ether.c8
-rw-r--r--drivers/net/at1700.c8
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atl1c/atl1c.h11
-rw-r--r--drivers/net/atl1c/atl1c_ethtool.c2
-rw-r--r--drivers/net/atl1c/atl1c_hw.c83
-rw-r--r--drivers/net/atl1c/atl1c_hw.h5
-rw-r--r--drivers/net/atl1c/atl1c_main.c126
-rw-r--r--drivers/net/atl1e/atl1e_hw.c23
-rw-r--r--drivers/net/atl1e/atl1e_main.c160
-rw-r--r--drivers/net/atl1e/atl1e_param.c35
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/atlx/atl2.c4
-rw-r--r--drivers/net/atlx/atlx.c2
-rw-r--r--drivers/net/atp.c9
-rw-r--r--drivers/net/au1000_eth.c7
-rw-r--r--drivers/net/b44.c94
-rw-r--r--drivers/net/bcm63xx_enet.c15
-rw-r--r--drivers/net/benet/Kconfig4
-rw-r--r--drivers/net/benet/be.h19
-rw-r--r--drivers/net/benet/be_cmds.c118
-rw-r--r--drivers/net/benet/be_cmds.h29
-rw-r--r--drivers/net/benet/be_ethtool.c65
-rw-r--r--drivers/net/benet/be_hw.h121
-rw-r--r--drivers/net/benet/be_main.c540
-rw-r--r--drivers/net/bfin_mac.c8
-rw-r--r--drivers/net/bmac.c13
-rw-r--r--drivers/net/bnx2.c410
-rw-r--r--drivers/net/bnx2.h3
-rw-r--r--drivers/net/bnx2x.h53
-rw-r--r--drivers/net/bnx2x_fw_defs.h7
-rw-r--r--drivers/net/bnx2x_hsi.h10
-rw-r--r--drivers/net/bnx2x_init_ops.h13
-rw-r--r--drivers/net/bnx2x_link.c21
-rw-r--r--drivers/net/bnx2x_main.c284
-rw-r--r--drivers/net/bonding/bond_main.c27
-rw-r--r--drivers/net/bonding/bonding.h1
-rw-r--r--drivers/net/can/at91_can.c4
-rw-r--r--drivers/net/can/bfin_can.c4
-rw-r--r--drivers/net/can/dev.c8
-rw-r--r--drivers/net/can/mcp251x.c426
-rw-r--r--drivers/net/can/mscan/Kconfig7
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c248
-rw-r--r--drivers/net/can/mscan/mscan.c58
-rw-r--r--drivers/net/can/mscan/mscan.h86
-rw-r--r--drivers/net/can/sja1000/Kconfig12
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/ems_pci.c2
-rw-r--r--drivers/net/can/sja1000/kvaser_pci.c2
-rw-r--r--drivers/net/can/sja1000/plx_pci.c472
-rw-r--r--drivers/net/can/sja1000/sja1000.c27
-rw-r--r--drivers/net/can/ti_hecc.c73
-rw-r--r--drivers/net/can/usb/Kconfig2
-rw-r--r--drivers/net/can/usb/ems_usb.c6
-rw-r--r--drivers/net/can/vcan.c12
-rw-r--r--drivers/net/cassini.c437
-rw-r--r--drivers/net/chelsio/common.h44
-rw-r--r--drivers/net/chelsio/cxgb2.c20
-rw-r--r--drivers/net/chelsio/espi.c4
-rw-r--r--drivers/net/chelsio/pm3393.c22
-rw-r--r--drivers/net/chelsio/sge.c12
-rw-r--r--drivers/net/chelsio/subr.c34
-rw-r--r--drivers/net/chelsio/vsc7326.c24
-rw-r--r--drivers/net/cnic.c206
-rw-r--r--drivers/net/cnic.h13
-rw-r--r--drivers/net/cnic_defs.h2
-rw-r--r--drivers/net/cnic_if.h6
-rw-r--r--drivers/net/cpmac.c4
-rw-r--r--drivers/net/cris/eth_v10.c8
-rw-r--r--drivers/net/cs89x0.c2
-rw-r--r--drivers/net/cxgb3/adapter.h5
-rw-r--r--drivers/net/cxgb3/common.h28
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c67
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c2
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.h5
-rw-r--r--drivers/net/cxgb3/regs.h16
-rw-r--r--drivers/net/cxgb3/sge.c26
-rw-r--r--drivers/net/cxgb3/t3_hw.c8
-rw-r--r--drivers/net/cxgb3/xgmac.c18
-rw-r--r--drivers/net/davinci_emac.c12
-rw-r--r--drivers/net/de620.c2
-rw-r--r--drivers/net/declance.c6
-rw-r--r--drivers/net/defxx.c24
-rw-r--r--drivers/net/depca.c5
-rw-r--r--drivers/net/dl2k.c9
-rw-r--r--drivers/net/dl2k.h2
-rw-r--r--drivers/net/dm9000.c5
-rw-r--r--drivers/net/e100.c18
-rw-r--r--drivers/net/e1000/e1000.h1
-rw-r--r--drivers/net/e1000/e1000_ethtool.c19
-rw-r--r--drivers/net/e1000/e1000_main.c46
-rw-r--r--drivers/net/e1000e/82571.c68
-rw-r--r--drivers/net/e1000e/defines.h2
-rw-r--r--drivers/net/e1000e/e1000.h19
-rw-r--r--drivers/net/e1000e/es2lan.c32
-rw-r--r--drivers/net/e1000e/ethtool.c2
-rw-r--r--drivers/net/e1000e/hw.h12
-rw-r--r--drivers/net/e1000e/ich8lan.c1
-rw-r--r--drivers/net/e1000e/lib.c230
-rw-r--r--drivers/net/e1000e/netdev.c45
-rw-r--r--drivers/net/eepro.c21
-rw-r--r--drivers/net/eexpress.c22
-rw-r--r--drivers/net/ehea/ehea_main.c9
-rw-r--r--drivers/net/enc28j60.c2
-rw-r--r--drivers/net/enic/enic.h5
-rw-r--r--drivers/net/enic/enic_main.c208
-rw-r--r--drivers/net/enic/enic_res.c16
-rw-r--r--drivers/net/enic/vnic_dev.c1
-rw-r--r--drivers/net/enic/vnic_enet.h5
-rw-r--r--drivers/net/enic/vnic_intr.c8
-rw-r--r--drivers/net/enic/vnic_intr.h3
-rw-r--r--drivers/net/enic/vnic_nic.h12
-rw-r--r--drivers/net/epic100.c9
-rw-r--r--drivers/net/eth16i.c2
-rw-r--r--drivers/net/ethoc.c14
-rw-r--r--drivers/net/ewrk3.c5
-rw-r--r--drivers/net/fealnx.c8
-rw-r--r--drivers/net/fec.c84
-rw-r--r--drivers/net/fec_mpc52xx.c5
-rw-r--r--drivers/net/forcedeth.c8
-rw-r--r--drivers/net/fs_enet/Kconfig10
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c93
-rw-r--r--drivers/net/fs_enet/fs_enet.h49
-rw-r--r--drivers/net/fs_enet/mac-fcc.c9
-rw-r--r--drivers/net/fs_enet/mac-fec.c62
-rw-r--r--drivers/net/fs_enet/mac-scc.c13
-rw-r--r--drivers/net/fs_enet/mii-fec.c4
-rw-r--r--drivers/net/gianfar.c16
-rw-r--r--drivers/net/greth.c1634
-rw-r--r--drivers/net/greth.h143
-rw-r--r--drivers/net/hamachi.c13
-rw-r--r--drivers/net/hp100.c13
-rw-r--r--drivers/net/ibm_newemac/core.c8
-rw-r--r--drivers/net/ibmlana.c2
-rw-r--r--drivers/net/ibmveth.c8
-rw-r--r--drivers/net/igb/e1000_82575.c65
-rw-r--r--drivers/net/igb/e1000_82575.h5
-rw-r--r--drivers/net/igb/e1000_defines.h7
-rw-r--r--drivers/net/igb/e1000_hw.h7
-rw-r--r--drivers/net/igb/e1000_mac.c70
-rw-r--r--drivers/net/igb/e1000_mac.h2
-rw-r--r--drivers/net/igb/e1000_phy.c35
-rw-r--r--drivers/net/igb/e1000_phy.h2
-rw-r--r--drivers/net/igb/e1000_regs.h1
-rw-r--r--drivers/net/igb/igb.h16
-rw-r--r--drivers/net/igb/igb_ethtool.c93
-rw-r--r--drivers/net/igb/igb_main.c414
-rw-r--r--drivers/net/igbvf/netdev.c31
-rw-r--r--drivers/net/ioc3-eth.c11
-rw-r--r--drivers/net/ipg.c13
-rw-r--r--drivers/net/irda/Kconfig10
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/donauboe.c2
-rw-r--r--drivers/net/irda/sh_sir.c823
-rw-r--r--drivers/net/irda/via-ircc.c2
-rw-r--r--drivers/net/irda/vlsi_ir.c2
-rw-r--r--drivers/net/isa-skeleton.c718
-rw-r--r--drivers/net/iseries_veth.c8
-rw-r--r--drivers/net/ixgb/ixgb.h11
-rw-r--r--drivers/net/ixgb/ixgb_main.c104
-rw-r--r--drivers/net/ixgbe/Makefile3
-rw-r--r--drivers/net/ixgbe/ixgbe.h54
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c233
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c19
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c195
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c4
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c678
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c479
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h96
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c362
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.h47
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h66
-rw-r--r--drivers/net/ixgbevf/Makefile38
-rw-r--r--drivers/net/ixgbevf/defines.h292
-rw-r--r--drivers/net/ixgbevf/ethtool.c716
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h318
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3578
-rw-r--r--drivers/net/ixgbevf/mbx.c341
-rw-r--r--drivers/net/ixgbevf/mbx.h100
-rw-r--r--drivers/net/ixgbevf/regs.h85
-rw-r--r--drivers/net/ixgbevf/vf.c387
-rw-r--r--drivers/net/ixgbevf/vf.h168
-rw-r--r--drivers/net/jme.c62
-rw-r--r--drivers/net/jme.h41
-rw-r--r--drivers/net/korina.c10
-rw-r--r--drivers/net/ks8851.c7
-rw-r--r--drivers/net/ks8851_mll.c7
-rw-r--r--drivers/net/ksz884x.c7335
-rw-r--r--drivers/net/lance.c2
-rw-r--r--drivers/net/lib82596.c21
-rw-r--r--drivers/net/lib8390.c15
-rw-r--r--drivers/net/ll_temac_main.c25
-rw-r--r--drivers/net/loopback.c16
-rw-r--r--drivers/net/lp486e.c16
-rw-r--r--drivers/net/mac8390.c632
-rw-r--r--drivers/net/mac89x0.c4
-rw-r--r--drivers/net/macb.c38
-rw-r--r--drivers/net/mace.c11
-rw-r--r--drivers/net/macmace.c12
-rw-r--r--drivers/net/macvlan.c117
-rw-r--r--drivers/net/macvtap.c803
-rw-r--r--drivers/net/meth.c3
-rw-r--r--drivers/net/mlx4/en_rx.c8
-rw-r--r--drivers/net/mlx4/main.c2
-rw-r--r--drivers/net/mv643xx_eth.c6
-rw-r--r--drivers/net/myri10ge/myri10ge.c198
-rw-r--r--drivers/net/myri_sbus.c6
-rw-r--r--drivers/net/natsemi.c8
-rw-r--r--drivers/net/ne2k-pci.c2
-rw-r--r--drivers/net/netxen/Makefile2
-rw-r--r--drivers/net/netxen/netxen_nic.h8
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c2
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c2
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h5
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c46
-rw-r--r--drivers/net/netxen/netxen_nic_hw.h2
-rw-r--r--drivers/net/netxen/netxen_nic_init.c5
-rw-r--r--drivers/net/netxen/netxen_nic_main.c213
-rw-r--r--drivers/net/ni5010.c3
-rw-r--r--drivers/net/ni52.c10
-rw-r--r--drivers/net/ni65.c2
-rw-r--r--drivers/net/niu.c699
-rw-r--r--drivers/net/ns83820.c4
-rw-r--r--drivers/net/octeon/octeon_mgmt.c18
-rw-r--r--drivers/net/pasemi_mac.c2
-rw-r--r--drivers/net/pci-skeleton.c1029
-rw-r--r--drivers/net/pcmcia/3c574_cs.c2
-rw-r--r--drivers/net/pcmcia/3c589_cs.c2
-rw-r--r--drivers/net/pcmcia/axnet_cs.c11
-rw-r--r--drivers/net/pcmcia/fmvj18x_cs.c10
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c18
-rw-r--r--drivers/net/pcmcia/smc91c92_cs.c35
-rw-r--r--drivers/net/pcmcia/xirc2ps_cs.c88
-rw-r--r--drivers/net/pcnet32.c507
-rw-r--r--drivers/net/phy/broadcom.c5
-rw-r--r--drivers/net/phy/marvell.c38
-rw-r--r--drivers/net/phy/phy_device.c16
-rw-r--r--drivers/net/phy/smsc.c21
-rw-r--r--drivers/net/ppp_generic.c122
-rw-r--r--drivers/net/ps3_gelic_net.c4
-rw-r--r--drivers/net/ps3_gelic_wireless.c149
-rw-r--r--drivers/net/qla3xxx.c3
-rw-r--r--drivers/net/qlcnic/Makefile8
-rw-r--r--drivers/net/qlcnic/qlcnic.h1126
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c534
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c1015
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h937
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c1274
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c1541
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c2720
-rw-r--r--drivers/net/qlge/qlge.h446
-rw-r--r--drivers/net/qlge/qlge_dbg.c1183
-rw-r--r--drivers/net/qlge/qlge_ethtool.c56
-rw-r--r--drivers/net/qlge/qlge_main.c1189
-rw-r--r--drivers/net/qlge/qlge_mpi.c340
-rw-r--r--drivers/net/r6040.c37
-rw-r--r--drivers/net/r8169.c166
-rw-r--r--drivers/net/rrunner.c2
-rw-r--r--drivers/net/s2io.c13
-rw-r--r--drivers/net/sb1250-mac.c6
-rw-r--r--drivers/net/sc92031.c6
-rw-r--r--drivers/net/sfc/efx.c8
-rw-r--r--drivers/net/sfc/efx.h2
-rw-r--r--drivers/net/sfc/ethtool.c10
-rw-r--r--drivers/net/sfc/falcon.c6
-rw-r--r--drivers/net/sfc/mcdi.c109
-rw-r--r--drivers/net/sfc/mcdi.h1
-rw-r--r--drivers/net/sfc/mcdi_pcol.h202
-rw-r--r--drivers/net/sfc/mcdi_phy.c36
-rw-r--r--drivers/net/sfc/mdio_10g.c24
-rw-r--r--drivers/net/sfc/mdio_10g.h3
-rw-r--r--drivers/net/sfc/net_driver.h17
-rw-r--r--drivers/net/sfc/nic.c13
-rw-r--r--drivers/net/sfc/qt202x_phy.c1
-rw-r--r--drivers/net/sfc/selftest.c42
-rw-r--r--drivers/net/sfc/selftest.h4
-rw-r--r--drivers/net/sfc/siena.c16
-rw-r--r--drivers/net/sfc/tenxpress.c2
-rw-r--r--drivers/net/sgiseeq.c2
-rw-r--r--drivers/net/sh_eth.c10
-rw-r--r--drivers/net/sis190.c221
-rw-r--r--drivers/net/sis900.c9
-rw-r--r--drivers/net/skfp/skfddi.c35
-rw-r--r--drivers/net/skge.c218
-rw-r--r--drivers/net/sky2.c726
-rw-r--r--drivers/net/sky2.h10
-rw-r--r--drivers/net/smc911x.c14
-rw-r--r--drivers/net/smc911x.h4
-rw-r--r--drivers/net/smc9194.c12
-rw-r--r--drivers/net/smc91x.c11
-rw-r--r--drivers/net/smsc911x.c53
-rw-r--r--drivers/net/smsc9420.c9
-rw-r--r--drivers/net/sonic.c13
-rw-r--r--drivers/net/spider_net.c4
-rw-r--r--drivers/net/starfire.c13
-rw-r--r--drivers/net/stmmac/Kconfig8
-rw-r--r--drivers/net/stmmac/Makefile5
-rw-r--r--drivers/net/stmmac/common.h279
-rw-r--r--drivers/net/stmmac/descs.h4
-rw-r--r--drivers/net/stmmac/dwmac100.c (renamed from drivers/net/stmmac/mac100.c)212
-rw-r--r--drivers/net/stmmac/dwmac100.h (renamed from drivers/net/stmmac/mac100.h)0
-rw-r--r--drivers/net/stmmac/dwmac1000.h (renamed from drivers/net/stmmac/gmac.h)18
-rw-r--r--drivers/net/stmmac/dwmac1000_core.c243
-rw-r--r--drivers/net/stmmac/dwmac1000_dma.c (renamed from drivers/net/stmmac/gmac.c)351
-rw-r--r--drivers/net/stmmac/dwmac_dma.h107
-rw-r--r--drivers/net/stmmac/dwmac_lib.c263
-rw-r--r--drivers/net/stmmac/stmmac.h28
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c11
-rw-r--r--drivers/net/stmmac/stmmac_main.c436
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c11
-rw-r--r--drivers/net/sun3_82586.c10
-rw-r--r--drivers/net/sun3lance.c2
-rw-r--r--drivers/net/sunbmac.c7
-rw-r--r--drivers/net/sundance.c9
-rw-r--r--drivers/net/sungem.c14
-rw-r--r--drivers/net/sunhme.c26
-rw-r--r--drivers/net/sunlance.c6
-rw-r--r--drivers/net/sunqe.c11
-rw-r--r--drivers/net/sunvnet.c7
-rw-r--r--drivers/net/tc35815.c27
-rw-r--r--drivers/net/tehuti.c159
-rw-r--r--drivers/net/tehuti.h30
-rw-r--r--drivers/net/tg3.c965
-rw-r--r--drivers/net/tg3.h162
-rw-r--r--drivers/net/tlan.c37
-rw-r--r--drivers/net/tlan.h3
-rw-r--r--drivers/net/tokenring/3c359.c7
-rw-r--r--drivers/net/tokenring/abyss.c2
-rw-r--r--drivers/net/tokenring/ibmtr.c4
-rw-r--r--drivers/net/tokenring/lanstreamer.c6
-rw-r--r--drivers/net/tokenring/olympic.c7
-rw-r--r--drivers/net/tokenring/tms380tr.c8
-rw-r--r--drivers/net/tokenring/tmspci.c2
-rw-r--r--drivers/net/tsi108_eth.c22
-rw-r--r--drivers/net/tulip/21142.c76
-rw-r--r--drivers/net/tulip/de2104x.c163
-rw-r--r--drivers/net/tulip/de4x5.c16
-rw-r--r--drivers/net/tulip/dmfe.c103
-rw-r--r--drivers/net/tulip/eeprom.c47
-rw-r--r--drivers/net/tulip/interrupt.c100
-rw-r--r--drivers/net/tulip/media.c74
-rw-r--r--drivers/net/tulip/pnic.c33
-rw-r--r--drivers/net/tulip/pnic2.c59
-rw-r--r--drivers/net/tulip/timer.c52
-rw-r--r--drivers/net/tulip/tulip_core.c187
-rw-r--r--drivers/net/tulip/uli526x.c64
-rw-r--r--drivers/net/tulip/winbond-840.c186
-rw-r--r--drivers/net/tulip/xircom_cb.c46
-rw-r--r--drivers/net/tun.c127
-rw-r--r--drivers/net/typhoon.c253
-rw-r--r--drivers/net/ucc_geth.c29
-rw-r--r--drivers/net/usb/asix.c117
-rw-r--r--drivers/net/usb/catc.c9
-rw-r--r--drivers/net/usb/cdc_eem.c10
-rw-r--r--drivers/net/usb/cdc_ether.c22
-rw-r--r--drivers/net/usb/dm9601.c59
-rw-r--r--drivers/net/usb/int51x1.c17
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/mcs7830.c256
-rw-r--r--drivers/net/usb/net1080.c109
-rw-r--r--drivers/net/usb/pegasus.c172
-rw-r--r--drivers/net/usb/rndis_host.c24
-rw-r--r--drivers/net/usb/rtl8150.c9
-rw-r--r--drivers/net/usb/smsc95xx.c245
-rw-r--r--drivers/net/usb/usbnet.c238
-rw-r--r--drivers/net/veth.c19
-rw-r--r--drivers/net/via-rhine.c9
-rw-r--r--drivers/net/via-velocity.c16
-rw-r--r--drivers/net/virtio_net.c474
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c19
-rw-r--r--drivers/net/vxge/vxge-main.c16
-rw-r--r--drivers/net/wan/dscc4.c2
-rw-r--r--drivers/net/wan/farsync.c2
-rw-r--r--drivers/net/wan/lmc/lmc_main.c2
-rw-r--r--drivers/net/wan/pc300_drv.c2
-rw-r--r--drivers/net/wan/pc300too.c2
-rw-r--r--drivers/net/wan/pci200syn.c2
-rw-r--r--drivers/net/wan/wanxl.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c17
-rw-r--r--drivers/net/wimax/i2400m/fw.c11
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/adm8211.c27
-rw-r--r--drivers/net/wireless/airo.c39
-rw-r--r--drivers/net/wireless/at76c50x-usb.c6
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h17
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c195
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c2
-rw-r--r--drivers/net/wireless/ath/ath.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h27
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c108
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h1
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c121
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c25
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c14
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile2
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c31
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h80
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c199
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h32
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c442
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c178
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h10
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c863
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h34
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c1451
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c75
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c40
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c62
-rw-r--r--drivers/net/wireless/ath/debug.h8
-rw-r--r--drivers/net/wireless/ath/regd.c5
-rw-r--r--drivers/net/wireless/atmel_pci.c2
-rw-r--r--drivers/net/wireless/b43/Kconfig6
-rw-r--r--drivers/net/wireless/b43/Makefile2
-rw-r--r--drivers/net/wireless/b43/b43.h20
-rw-r--r--drivers/net/wireless/b43/dma.c19
-rw-r--r--drivers/net/wireless/b43/dma.h5
-rw-r--r--drivers/net/wireless/b43/main.c98
-rw-r--r--drivers/net/wireless/b43/phy_common.c45
-rw-r--r--drivers/net/wireless/b43/phy_common.h10
-rw-r--r--drivers/net/wireless/b43/phy_lp.c76
-rw-r--r--drivers/net/wireless/b43/phy_n.c3035
-rw-r--r--drivers/net/wireless/b43/phy_n.h98
-rw-r--r--drivers/net/wireless/b43/pio.c17
-rw-r--r--drivers/net/wireless/b43/pio.h45
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c744
-rw-r--r--drivers/net/wireless/b43/tables_nphy.h100
-rw-r--r--drivers/net/wireless/b43legacy/dma.c20
-rw-r--r--drivers/net/wireless/b43legacy/dma.h10
-rw-r--r--drivers/net/wireless/b43legacy/leds.h2
-rw-r--r--drivers/net/wireless/b43legacy/main.c61
-rw-r--r--drivers/net/wireless/b43legacy/pio.c13
-rw-r--r--drivers/net/wireless/b43legacy/pio.h11
-rw-r--r--drivers/net/wireless/hostap/hostap_cs.c17
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c9
-rw-r--r--drivers/net/wireless/hostap/hostap_pci.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_plx.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c2
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c2
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c84
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-fh.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c125
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000-hw.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c116
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c354
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h68
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c388
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h55
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c1461
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h97
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h44
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c17
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c9
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c136
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c258
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c198
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c150
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c77
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c197
-rw-r--r--drivers/net/wireless/iwmc3200wifi/debugfs.c2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h2
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c73
-rw-r--r--drivers/net/wireless/libertas/Kconfig6
-rw-r--r--drivers/net/wireless/libertas/Makefile2
-rw-r--r--drivers/net/wireless/libertas/assoc.c95
-rw-r--r--drivers/net/wireless/libertas/cmd.c22
-rw-r--r--drivers/net/wireless/libertas/cmd.h12
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c21
-rw-r--r--drivers/net/wireless/libertas/defs.h7
-rw-r--r--drivers/net/wireless/libertas/dev.h8
-rw-r--r--drivers/net/wireless/libertas/ethtool.c2
-rw-r--r--drivers/net/wireless/libertas/if_spi.c1
-rw-r--r--drivers/net/wireless/libertas/main.c81
-rw-r--r--drivers/net/wireless/libertas/mesh.c29
-rw-r--r--drivers/net/wireless/libertas/mesh.h32
-rw-r--r--drivers/net/wireless/libertas/scan.c2
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/wext.c26
-rw-r--r--drivers/net/wireless/libertas_tf/main.c13
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c198
-rw-r--r--drivers/net/wireless/mwl8k.c2084
-rw-r--r--drivers/net/wireless/orinoco/hw.c22
-rw-r--r--drivers/net/wireless/orinoco/hw.h2
-rw-r--r--drivers/net/wireless/orinoco/main.c7
-rw-r--r--drivers/net/wireless/orinoco/orinoco_cs.c9
-rw-r--r--drivers/net/wireless/orinoco/orinoco_nortel.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_pci.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_plx.c2
-rw-r--r--drivers/net/wireless/orinoco/orinoco_tmd.c2
-rw-r--r--drivers/net/wireless/p54/main.c51
-rw-r--r--drivers/net/wireless/p54/p54.h8
-rw-r--r--drivers/net/wireless/p54/p54pci.c76
-rw-r--r--drivers/net/wireless/p54/p54pci.h6
-rw-r--r--drivers/net/wireless/p54/p54usb.c2
-rw-r--r--drivers/net/wireless/p54/txrx.c4
-rw-r--r--drivers/net/wireless/prism54/islpci_hotplug.c2
-rw-r--r--drivers/net/wireless/ray_cs.c10
-rw-r--r--drivers/net/wireless/rndis_wlan.c388
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig71
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c12
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c48
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c39
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h14
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c203
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c104
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c373
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h90
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h96
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c42
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c79
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h5
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h10
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c46
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h9
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c41
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180_dev.c37
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c26
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.c6
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_leds.h2
-rw-r--r--drivers/net/wireless/wl12xx/Makefile4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.c69
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_acx.h87
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.c83
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_cmd.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_debugfs.c23
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.c5
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_init.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c375
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_ps.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.c9
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_tx.h17
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c196
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h50
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c102
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c137
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h67
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h174
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c62
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c50
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.c213
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_io.h68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c823
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.h3
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h99
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c11
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c158
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h30
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.c283
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_testmode.h31
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c71
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h36
-rw-r--r--drivers/net/wireless/zd1201.c14
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c10
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c14
-rw-r--r--drivers/net/xilinx_emaclite.c384
-rw-r--r--drivers/net/yellowfin.c173
-rw-r--r--drivers/net/znet.c3
-rw-r--r--drivers/parisc/eisa_enumerator.c2
-rw-r--r--drivers/parisc/superio.c2
-rw-r--r--drivers/parport/parport_pc.c6
-rw-r--r--drivers/pci/Makefile2
-rw-r--r--drivers/pci/hotplug/ibmphp_res.c14
-rw-r--r--drivers/pci/iov.c15
-rw-r--r--drivers/pci/quirks.c86
-rw-r--r--drivers/pci/vpd.c61
-rw-r--r--drivers/pcmcia/Kconfig14
-rw-r--r--drivers/pcmcia/cardbus.c2
-rw-r--r--drivers/pcmcia/cistpl.c1908
-rw-r--r--drivers/pcmcia/db1xxx_ss.c19
-rw-r--r--drivers/pcmcia/pd6729.c18
-rw-r--r--drivers/pcmcia/rsrc_mgr.c3
-rw-r--r--drivers/pcmcia/xxs1500_ss.c16
-rw-r--r--drivers/pcmcia/yenta_socket.c8
-rw-r--r--drivers/platform/x86/Kconfig13
-rw-r--r--drivers/platform/x86/asus-laptop.c1741
-rw-r--r--drivers/platform/x86/asus_acpi.c3
-rw-r--r--drivers/platform/x86/classmate-laptop.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c9
-rw-r--r--drivers/platform/x86/eeepc-laptop.c21
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c116
-rw-r--r--drivers/platform/x86/toshiba_acpi.c30
-rw-r--r--drivers/power/Kconfig4
-rw-r--r--drivers/power/bq27x00_battery.c177
-rw-r--r--drivers/power/da9030_battery.c2
-rw-r--r--drivers/power/wm97xx_battery.c4
-rw-r--r--drivers/regulator/Kconfig29
-rw-r--r--drivers/regulator/Makefile3
-rw-r--r--drivers/regulator/ab3100.c6
-rw-r--r--drivers/regulator/core.c79
-rw-r--r--drivers/regulator/dummy.c66
-rw-r--r--drivers/regulator/dummy.h31
-rw-r--r--drivers/regulator/fixed.c30
-rw-r--r--drivers/regulator/lp3971.c68
-rw-r--r--drivers/regulator/max1586.c9
-rw-r--r--drivers/regulator/max8649.c408
-rw-r--r--drivers/regulator/max8660.c11
-rw-r--r--drivers/regulator/mc13783-regulator.c465
-rw-r--r--drivers/regulator/pcap-regulator.c8
-rw-r--r--drivers/regulator/tps65023-regulator.c35
-rw-r--r--drivers/regulator/tps6507x-regulator.c34
-rw-r--r--drivers/regulator/twl-regulator.c22
-rw-r--r--drivers/regulator/virtual.c64
-rw-r--r--drivers/regulator/wm831x-dcdc.c12
-rw-r--r--drivers/regulator/wm831x-isink.c3
-rw-r--r--drivers/regulator/wm831x-ldo.c5
-rw-r--r--drivers/regulator/wm8350-regulator.c46
-rw-r--r--drivers/regulator/wm8400-regulator.c7
-rw-r--r--drivers/regulator/wm8994-regulator.c307
-rw-r--r--drivers/rtc/class.c1
-rw-r--r--drivers/rtc/rtc-at91sam9.c2
-rw-r--r--drivers/rtc/rtc-coh901331.c5
-rw-r--r--drivers/rtc/rtc-ep93xx.c71
-rw-r--r--drivers/rtc/rtc-mc13783.c214
-rw-r--r--drivers/rtc/rtc-mxc.c7
-rw-r--r--drivers/rtc/rtc-pcf2123.c2
-rw-r--r--drivers/rtc/rtc-twl.c4
-rw-r--r--drivers/s390/net/qeth_core.h5
-rw-r--r--drivers/s390/net/qeth_core_main.c169
-rw-r--r--drivers/s390/net/qeth_core_mpc.h44
-rw-r--r--drivers/s390/net/qeth_core_sys.c14
-rw-r--r--drivers/s390/net/qeth_l2_main.c30
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c176
-rw-r--r--drivers/s390/net/qeth_l3_sys.c56
-rw-r--r--drivers/serial/68328serial.c8
-rw-r--r--drivers/serial/8250.c21
-rw-r--r--drivers/serial/8250_pci.c31
-rw-r--r--drivers/serial/Kconfig53
-rw-r--r--drivers/serial/atmel_serial.c22
-rw-r--r--drivers/serial/bcm63xx_uart.c7
-rw-r--r--drivers/serial/bfin_5xx.c22
-rw-r--r--drivers/serial/bfin_sport_uart.c701
-rw-r--r--drivers/serial/bfin_sport_uart.h38
-rw-r--r--drivers/serial/icom.c5
-rw-r--r--drivers/serial/imx.c6
-rw-r--r--drivers/serial/ioc3_serial.c3
-rw-r--r--drivers/serial/jsm/jsm_driver.c1
-rw-r--r--drivers/serial/jsm/jsm_tty.c9
-rw-r--r--drivers/serial/msm_serial.c6
-rw-r--r--drivers/serial/timbuart.c7
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/omap2_mcspi.c2
-rw-r--r--drivers/ssb/driver_chipcommon_pmu.c7
-rw-r--r--drivers/ssb/driver_mipscore.c5
-rw-r--r--drivers/ssb/ssb_private.h4
-rw-r--r--drivers/staging/arlan/arlan-main.c25
-rw-r--r--drivers/staging/et131x/et131x_netdev.c19
-rw-r--r--drivers/staging/netwave/netwave_cs.c8
-rw-r--r--drivers/staging/octeon/ethernet.c2
-rw-r--r--drivers/staging/pohmelfs/inode.c2
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211.h9
-rw-r--r--drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c24
-rw-r--r--drivers/staging/rtl8187se/r8180_core.c4
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211.h12
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h2
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c18
-rw-r--r--drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c38
-rw-r--r--drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c4
-rw-r--r--drivers/staging/rtl8192su/r8192U_core.c4
-rw-r--r--drivers/staging/slicoss/slicoss.c16
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c2
-rw-r--r--drivers/staging/vt6655/device_main.c8
-rw-r--r--drivers/staging/vt6656/main_usb.c8
-rw-r--r--drivers/staging/wavelan/wavelan.c14
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c20
-rw-r--r--drivers/staging/wlags49_h2/wl_netdev.c30
-rw-r--r--drivers/usb/Kconfig4
-rw-r--r--drivers/usb/Makefile1
-rw-r--r--drivers/usb/atm/cxacru.c192
-rw-r--r--drivers/usb/atm/usbatm.c3
-rw-r--r--drivers/usb/atm/usbatm.h15
-rw-r--r--drivers/usb/c67x00/c67x00-drv.c8
-rw-r--r--drivers/usb/class/cdc-acm.c82
-rw-r--r--drivers/usb/class/cdc-acm.h2
-rw-r--r--drivers/usb/class/cdc-wdm.c2
-rw-r--r--drivers/usb/class/usblp.c22
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/core/Kconfig4
-rw-r--r--drivers/usb/core/devices.c83
-rw-r--r--drivers/usb/core/devio.c127
-rw-r--r--drivers/usb/core/driver.c918
-rw-r--r--drivers/usb/core/file.c2
-rw-r--r--drivers/usb/core/hcd.c27
-rw-r--r--drivers/usb/core/hcd.h13
-rw-r--r--drivers/usb/core/hub.c120
-rw-r--r--drivers/usb/core/message.c5
-rw-r--r--drivers/usb/core/quirks.c18
-rw-r--r--drivers/usb/core/sysfs.c85
-rw-r--r--drivers/usb/core/urb.c13
-rw-r--r--drivers/usb/core/usb.c38
-rw-r--r--drivers/usb/core/usb.h43
-rw-r--r--drivers/usb/early/ehci-dbgp.c68
-rw-r--r--drivers/usb/gadget/Kconfig10
-rw-r--r--drivers/usb/gadget/Makefile2
-rw-r--r--drivers/usb/gadget/at91_udc.c10
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c9
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.h1
-rw-r--r--drivers/usb/gadget/epautoconf.c24
-rw-r--r--drivers/usb/gadget/ether.c2
-rw-r--r--drivers/usb/gadget/f_acm.c8
-rw-r--r--drivers/usb/gadget/f_ecm.c7
-rw-r--r--drivers/usb/gadget/f_mass_storage.c52
-rw-r--r--drivers/usb/gadget/f_rndis.c4
-rw-r--r--drivers/usb/gadget/file_storage.c10
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/gadget_chips.h59
-rw-r--r--drivers/usb/gadget/gmidi.c5
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/inode.c39
-rw-r--r--drivers/usb/gadget/mass_storage.c8
-rw-r--r--drivers/usb/gadget/nokia.c259
-rw-r--r--drivers/usb/gadget/printer.c18
-rw-r--r--drivers/usb/gadget/pxa27x_udc.c135
-rw-r--r--drivers/usb/gadget/pxa27x_udc.h6
-rw-r--r--drivers/usb/gadget/s3c-hsotg.c11
-rw-r--r--drivers/usb/gadget/u_ether.c5
-rw-r--r--drivers/usb/gadget/u_ether.h7
-rw-r--r--drivers/usb/gadget/zero.c6
-rw-r--r--drivers/usb/host/Kconfig11
-rw-r--r--drivers/usb/host/Makefile2
-rw-r--r--drivers/usb/host/ehci-atmel.c2
-rw-r--r--drivers/usb/host/ehci-au1xxx.c6
-rw-r--r--drivers/usb/host/ehci-fsl.c97
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-mxc.c23
-rw-r--r--drivers/usb/host/ehci-omap.c47
-rw-r--r--drivers/usb/host/ehci-orion.c8
-rw-r--r--drivers/usb/host/ehci-ppc-of.c14
-rw-r--r--drivers/usb/host/ehci-sched.c12
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c8
-rw-r--r--drivers/usb/host/fhci-hcd.c4
-rw-r--r--drivers/usb/host/imx21-dbg.c527
-rw-r--r--drivers/usb/host/imx21-hcd.c1789
-rw-r--r--drivers/usb/host/imx21-hcd.h436
-rw-r--r--drivers/usb/host/isp1362-hcd.c15
-rw-r--r--drivers/usb/host/isp1760-hcd.c10
-rw-r--r--drivers/usb/host/isp1760-if.c2
-rw-r--r--drivers/usb/host/ohci-da8xx.c456
-rw-r--r--drivers/usb/host/ohci-dbg.c4
-rw-r--r--drivers/usb/host/ohci-hcd.c5
-rw-r--r--drivers/usb/host/ohci-lh7a404.c11
-rw-r--r--drivers/usb/host/ohci-pnx4008.c6
-rw-r--r--drivers/usb/host/ohci-ppc-of.c10
-rw-r--r--drivers/usb/host/ohci-ppc-soc.c8
-rw-r--r--drivers/usb/host/ohci-sa1111.c8
-rw-r--r--drivers/usb/host/sl811-hcd.c5
-rw-r--r--drivers/usb/host/uhci-hcd.c1
-rw-r--r--drivers/usb/host/xhci-dbg.c19
-rw-r--r--drivers/usb/host/xhci-ext-caps.h7
-rw-r--r--drivers/usb/host/xhci-hcd.c150
-rw-r--r--drivers/usb/host/xhci-hub.c65
-rw-r--r--drivers/usb/host/xhci-mem.c47
-rw-r--r--drivers/usb/host/xhci-pci.c1
-rw-r--r--drivers/usb/host/xhci-ring.c41
-rw-r--r--drivers/usb/host/xhci.h11
-rw-r--r--drivers/usb/image/mdc800.c2
-rw-r--r--drivers/usb/image/microtek.c4
-rw-r--r--drivers/usb/misc/Kconfig25
-rw-r--r--drivers/usb/misc/Makefile2
-rw-r--r--drivers/usb/misc/adutux.c8
-rw-r--r--drivers/usb/misc/appledisplay.c5
-rw-r--r--drivers/usb/misc/berry_charge.c183
-rw-r--r--drivers/usb/misc/cypress_cy7c63.c2
-rw-r--r--drivers/usb/misc/cytherm.c2
-rw-r--r--drivers/usb/misc/emi26.c2
-rw-r--r--drivers/usb/misc/emi62.c2
-rw-r--r--drivers/usb/misc/ftdi-elan.c11
-rw-r--r--drivers/usb/misc/idmouse.c2
-rw-r--r--drivers/usb/misc/iowarrior.c6
-rw-r--r--drivers/usb/misc/isight_firmware.c4
-rw-r--r--drivers/usb/misc/ldusb.c4
-rw-r--r--drivers/usb/misc/legousbtower.c13
-rw-r--r--drivers/usb/misc/rio500.c11
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb.c20
-rw-r--r--drivers/usb/misc/trancevibrator.c2
-rw-r--r--drivers/usb/misc/usblcd.c7
-rw-r--r--drivers/usb/misc/usbled.c2
-rw-r--r--drivers/usb/misc/usbsevseg.c2
-rw-r--r--drivers/usb/misc/usbtest.c6
-rw-r--r--drivers/usb/misc/uss720.c2
-rw-r--r--drivers/usb/misc/vstusb.c783
-rw-r--r--drivers/usb/mon/mon_bin.c7
-rw-r--r--drivers/usb/mon/mon_text.c6
-rw-r--r--drivers/usb/musb/Kconfig6
-rw-r--r--drivers/usb/musb/blackfin.c28
-rw-r--r--drivers/usb/musb/cppi_dma.c33
-rw-r--r--drivers/usb/musb/musb_core.c564
-rw-r--r--drivers/usb/musb/musb_core.h74
-rw-r--r--drivers/usb/musb/musb_gadget.c20
-rw-r--r--drivers/usb/musb/musb_host.c34
-rw-r--r--drivers/usb/musb/musb_regs.h101
-rw-r--r--drivers/usb/musb/musbhsdma.c25
-rw-r--r--drivers/usb/musb/musbhsdma.h17
-rw-r--r--drivers/usb/musb/omap2430.c48
-rw-r--r--drivers/usb/musb/omap2430.h32
-rw-r--r--drivers/usb/musb/tusb6010.c2
-rw-r--r--drivers/usb/musb/tusb6010_omap.c2
-rw-r--r--drivers/usb/otg/twl4030-usb.c45
-rw-r--r--drivers/usb/serial/Kconfig19
-rw-r--r--drivers/usb/serial/Makefile2
-rw-r--r--drivers/usb/serial/aircable.c36
-rw-r--r--drivers/usb/serial/ark3116.c3
-rw-r--r--drivers/usb/serial/belkin_sa.c2
-rw-r--r--drivers/usb/serial/ch341.c27
-rw-r--r--drivers/usb/serial/cp210x.c7
-rw-r--r--drivers/usb/serial/cyberjack.c5
-rw-r--r--drivers/usb/serial/cypress_m8.c82
-rw-r--r--drivers/usb/serial/digi_acceleport.c38
-rw-r--r--drivers/usb/serial/empeg.c3
-rw-r--r--drivers/usb/serial/ftdi_sio.c195
-rw-r--r--drivers/usb/serial/ftdi_sio.h6
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h39
-rw-r--r--drivers/usb/serial/funsoft.c2
-rw-r--r--drivers/usb/serial/garmin_gps.c3
-rw-r--r--drivers/usb/serial/generic.c7
-rw-r--r--drivers/usb/serial/hp4x.c2
-rw-r--r--drivers/usb/serial/io_edgeport.c69
-rw-r--r--drivers/usb/serial/io_tables.h10
-rw-r--r--drivers/usb/serial/io_ti.c75
-rw-r--r--drivers/usb/serial/ipaq.c1
-rw-r--r--drivers/usb/serial/ipw.c3
-rw-r--r--drivers/usb/serial/ir-usb.c13
-rw-r--r--drivers/usb/serial/iuu_phoenix.c2
-rw-r--r--drivers/usb/serial/keyspan.c57
-rw-r--r--drivers/usb/serial/keyspan.h10
-rw-r--r--drivers/usb/serial/keyspan_pda.c60
-rw-r--r--drivers/usb/serial/kl5kusb105.c66
-rw-r--r--drivers/usb/serial/kobil_sct.c25
-rw-r--r--drivers/usb/serial/mct_u232.c57
-rw-r--r--drivers/usb/serial/mct_u232.h2
-rw-r--r--drivers/usb/serial/mos7720.c185
-rw-r--r--drivers/usb/serial/mos7840.c27
-rw-r--r--drivers/usb/serial/moto_modem.c2
-rw-r--r--drivers/usb/serial/navman.c3
-rw-r--r--drivers/usb/serial/omninet.c8
-rw-r--r--drivers/usb/serial/opticon.c17
-rw-r--r--drivers/usb/serial/option.c71
-rw-r--r--drivers/usb/serial/oti6858.c36
-rw-r--r--drivers/usb/serial/pl2303.c38
-rw-r--r--drivers/usb/serial/qcaux.c96
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/siemens_mpi.c2
-rw-r--r--drivers/usb/serial/sierra.c59
-rw-r--r--drivers/usb/serial/spcp8x5.c27
-rw-r--r--drivers/usb/serial/symbolserial.c12
-rw-r--r--drivers/usb/serial/ti_usb_3410_5052.c3
-rw-r--r--drivers/usb/serial/usb-serial.c15
-rw-r--r--drivers/usb/serial/usb_debug.c2
-rw-r--r--drivers/usb/serial/visor.c40
-rw-r--r--drivers/usb/serial/vivopay-serial.c76
-rw-r--r--drivers/usb/serial/whiteheat.c24
-rw-r--r--drivers/usb/storage/onetouch.c2
-rw-r--r--drivers/usb/storage/scsiglue.c10
-rw-r--r--drivers/usb/storage/shuttle_usbat.c15
-rw-r--r--drivers/usb/storage/transport.c6
-rw-r--r--drivers/usb/storage/unusual_devs.h88
-rw-r--r--drivers/usb/usb-skeleton.c2
-rw-r--r--drivers/usb/wusbcore/cbaf.c2
-rw-r--r--drivers/usb/wusbcore/devconnect.c2
-rw-r--r--drivers/usb/wusbcore/mmc.c2
-rw-r--r--drivers/vhost/Kconfig11
-rw-r--r--drivers/vhost/Makefile2
-rw-r--r--drivers/vhost/net.c669
-rw-r--r--drivers/vhost/vhost.c1104
-rw-r--r--drivers/vhost/vhost.h161
-rw-r--r--drivers/video/console/Kconfig1
-rw-r--r--drivers/video/console/fbcon.c18
-rw-r--r--drivers/video/console/vgacon.c2
-rw-r--r--drivers/video/fbmem.c1
-rw-r--r--drivers/video/omap/lcd_ams_delta.c93
-rw-r--r--drivers/video/omap/omapfb_main.c7
-rw-r--r--drivers/video/omap2/displays/Kconfig18
-rw-r--r--drivers/video/omap2/displays/Makefile3
-rw-r--r--drivers/video/omap2/displays/panel-generic.c56
-rw-r--r--drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c159
-rw-r--r--drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c77
-rw-r--r--drivers/video/omap2/displays/panel-taal.c253
-rw-r--r--drivers/video/omap2/displays/panel-toppoly-tdo35s.c154
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c528
-rw-r--r--drivers/video/omap2/dss/Kconfig26
-rw-r--r--drivers/video/omap2/dss/core.c117
-rw-r--r--drivers/video/omap2/dss/dispc.c42
-rw-r--r--drivers/video/omap2/dss/display.c119
-rw-r--r--drivers/video/omap2/dss/dpi.c144
-rw-r--r--drivers/video/omap2/dss/dsi.c1031
-rw-r--r--drivers/video/omap2/dss/dss.c42
-rw-r--r--drivers/video/omap2/dss/dss.h23
-rw-r--r--drivers/video/omap2/dss/manager.c48
-rw-r--r--drivers/video/omap2/dss/overlay.c2
-rw-r--r--drivers/video/omap2/dss/rfbi.c321
-rw-r--r--drivers/video/omap2/dss/sdi.c115
-rw-r--r--drivers/video/omap2/dss/venc.c296
-rw-r--r--drivers/video/omap2/omapfb/Kconfig11
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c68
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c133
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h9
-rw-r--r--drivers/video/sunxvr500.c1
-rw-r--r--drivers/virtio/virtio_pci.c1
-rw-r--r--drivers/w1/masters/Kconfig2
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/xen/Kconfig12
-rw-r--r--drivers/xen/events.c8
1427 files changed, 103066 insertions, 41211 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 368ae6d3a09..a2b902f4d43 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -96,8 +96,6 @@ source "drivers/edac/Kconfig"
source "drivers/rtc/Kconfig"
-source "drivers/clocksource/Kconfig"
-
source "drivers/dma/Kconfig"
source "drivers/dca/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 6ee53c7a57a..81e36596b1e 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -106,6 +106,7 @@ obj-$(CONFIG_HID) += hid/
obj-$(CONFIG_PPC_PS3) += ps3/
obj-$(CONFIG_OF) += of/
obj-$(CONFIG_SSB) += ssb/
+obj-$(CONFIG_VHOST_NET) += vhost/
obj-$(CONFIG_VIRTIO) += virtio/
obj-$(CONFIG_VLYNQ) += vlynq/
obj-$(CONFIG_STAGING) += staging/
diff --git a/drivers/acpi/numa.c b/drivers/acpi/numa.c
index 7ad48dfc12d..b8725461d88 100644
--- a/drivers/acpi/numa.c
+++ b/drivers/acpi/numa.c
@@ -279,9 +279,9 @@ int __init acpi_numa_init(void)
/* SRAT: Static Resource Affinity Table */
if (!acpi_table_parse(ACPI_SIG_SRAT, acpi_parse_srat)) {
acpi_table_parse_srat(ACPI_SRAT_TYPE_X2APIC_CPU_AFFINITY,
- acpi_parse_x2apic_affinity, NR_CPUS);
+ acpi_parse_x2apic_affinity, nr_cpu_ids);
acpi_table_parse_srat(ACPI_SRAT_TYPE_CPU_AFFINITY,
- acpi_parse_processor_affinity, NR_CPUS);
+ acpi_parse_processor_affinity, nr_cpu_ids);
ret = acpi_table_parse_srat(ACPI_SRAT_TYPE_MEMORY_AFFINITY,
acpi_parse_memory_affinity,
NR_NODE_MEMBLKS);
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 9863c98c81b..e9b7b402dbf 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -123,6 +123,8 @@ static const struct file_operations acpi_processor_info_fops = {
#endif
DEFINE_PER_CPU(struct acpi_processor *, processors);
+EXPORT_PER_CPU_SYMBOL(processors);
+
struct acpi_processor_errata errata __read_mostly;
/* --------------------------------------------------------------------------
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index a959f6a0750..d648a9860b8 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -561,7 +561,7 @@ end:
}
int acpi_processor_preregister_performance(
- struct acpi_processor_performance *performance)
+ struct acpi_processor_performance __percpu *performance)
{
int count, count_target;
int retval = 0;
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index bc53fed89b1..f7d6ebaa041 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2064,12 +2064,10 @@ fore200e_get_esi(struct fore200e* fore200e)
return -EBUSY;
}
- printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %02x:%02x:%02x:%02x:%02x:%02x\n",
+ printk(FORE200E "device %s, rev. %c, S/N: %d, ESI: %pM\n",
fore200e->name,
(prom->hw_revision & 0xFF) + '@', /* probably meaningless with SBA boards */
- prom->serial_number & 0xFFFF,
- prom->mac_addr[ 2 ], prom->mac_addr[ 3 ], prom->mac_addr[ 4 ],
- prom->mac_addr[ 5 ], prom->mac_addr[ 6 ], prom->mac_addr[ 7 ]);
+ prom->serial_number & 0xFFFF, &prom->mac_addr[2]);
for (i = 0; i < ESI_LEN; i++) {
fore200e->esi[ i ] = fore200e->atm_dev->esi[ i ] = prom->mac_addr[ i + 2 ];
@@ -2845,13 +2843,12 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
" interrupt line:\t\t%s\n"
" physical base address:\t0x%p\n"
" virtual base address:\t0x%p\n"
- " factory address (ESI):\t%02x:%02x:%02x:%02x:%02x:%02x\n"
+ " factory address (ESI):\t%pM\n"
" board serial number:\t\t%d\n\n",
fore200e_irq_itoa(fore200e->irq),
(void*)fore200e->phys_base,
fore200e->virt_base,
- fore200e->esi[0], fore200e->esi[1], fore200e->esi[2],
- fore200e->esi[3], fore200e->esi[4], fore200e->esi[5],
+ fore200e->esi,
fore200e->esi[4] * 256 + fore200e->esi[5]);
return len;
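The %pM conversions in this and the following ATM hunks lean on printk's MAC-address format extension, which reads six consecutive bytes at the pointer it is given; a minimal sketch of the idiom (illustrative only, not part of the patch):

	u8 esi[6] = { 0x00, 0x20, 0x48, 0x11, 0x22, 0x33 };   /* any six-byte buffer */
	printk(KERN_INFO "ESI %pM\n", esi);                   /* prints 00:20:48:11:22:33 */
	/* passing &prom->mac_addr[2] works the same way: %pM reads 6 bytes from that address */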
diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
index e33ae0025b1..01f36c08cb5 100644
--- a/drivers/atm/idt77252.c
+++ b/drivers/atm/idt77252.c
@@ -3557,10 +3557,7 @@ init_card(struct atm_dev *dev)
if (tmp) {
memcpy(card->atmdev->esi, tmp->dev_addr, 6);
- printk("%s: ESI %02x:%02x:%02x:%02x:%02x:%02x\n",
- card->name, card->atmdev->esi[0], card->atmdev->esi[1],
- card->atmdev->esi[2], card->atmdev->esi[3],
- card->atmdev->esi[4], card->atmdev->esi[5]);
+ printk("%s: ESI %pM\n", card->name, card->atmdev->esi);
}
/*
* XXX: </hack>
diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
index cf97c34cbaf..7fe7c324e7e 100644
--- a/drivers/atm/lanai.c
+++ b/drivers/atm/lanai.c
@@ -998,9 +998,7 @@ static int __devinit eeprom_validate(struct lanai_dev *lanai)
(unsigned int) e[EEPROM_MAC_REV + i]);
return -EIO;
}
- DPRINTK("eeprom: MAC address = %02X:%02X:%02X:%02X:%02X:%02X\n",
- e[EEPROM_MAC + 0], e[EEPROM_MAC + 1], e[EEPROM_MAC + 2],
- e[EEPROM_MAC + 3], e[EEPROM_MAC + 4], e[EEPROM_MAC + 5]);
+ DPRINTK("eeprom: MAC address = %pM\n", &e[EEPROM_MAC]);
/* Verify serial number */
lanai->serialno = eeprom_be4(lanai, EEPROM_SERIAL);
v = eeprom_be4(lanai, EEPROM_SERIAL_REV);
@@ -2483,14 +2481,8 @@ static int lanai_proc_read(struct atm_dev *atmdev, loff_t *pos, char *page)
return sprintf(page, "revision: board=%d, pci_if=%d\n",
lanai->board_rev, (int) lanai->pci->revision);
if (left-- == 0)
- return sprintf(page, "EEPROM ESI: "
- "%02X:%02X:%02X:%02X:%02X:%02X\n",
- lanai->eeprom[EEPROM_MAC + 0],
- lanai->eeprom[EEPROM_MAC + 1],
- lanai->eeprom[EEPROM_MAC + 2],
- lanai->eeprom[EEPROM_MAC + 3],
- lanai->eeprom[EEPROM_MAC + 4],
- lanai->eeprom[EEPROM_MAC + 5]);
+ return sprintf(page, "EEPROM ESI: %pM\n",
+ &lanai->eeprom[EEPROM_MAC]);
if (left-- == 0)
return sprintf(page, "status: SOOL=%d, LOCD=%d, LED=%d, "
"GPIN=%d\n", (lanai->status & STATUS_SOOL) ? 1 : 0,
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 3da804b1627..50838407b11 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -807,9 +807,7 @@ static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
}
}
- printk("nicstar%d: MAC address %02X:%02X:%02X:%02X:%02X:%02X\n", i,
- card->atmdev->esi[0], card->atmdev->esi[1], card->atmdev->esi[2],
- card->atmdev->esi[3], card->atmdev->esi[4], card->atmdev->esi[5]);
+ printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi);
card->atmdev->dev_data = card;
card->atmdev->ci_range.vpi_bits = card->vpibits;
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 3ce3519e8f3..89de75325ce 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,7 @@
obj-$(CONFIG_PM) += sysfs.o
obj-$(CONFIG_PM_SLEEP) += main.o
obj-$(CONFIG_PM_RUNTIME) += runtime.o
+obj-$(CONFIG_PM_OPS) += generic_ops.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
new file mode 100644
index 00000000000..4b29d498125
--- /dev/null
+++ b/drivers/base/power/generic_ops.c
@@ -0,0 +1,233 @@
+/*
+ * drivers/base/power/generic_ops.c - Generic PM callbacks for subsystems
+ *
+ * Copyright (c) 2010 Rafael J. Wysocki <rjw@sisk.pl>, Novell Inc.
+ *
+ * This file is released under the GPLv2.
+ */
+
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
+
+#ifdef CONFIG_PM_RUNTIME
+/**
+ * pm_generic_runtime_idle - Generic runtime idle callback for subsystems.
+ * @dev: Device to handle.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_idle(), execute it and return its error code, if nonzero.
+ * Otherwise, execute pm_runtime_suspend() for the device and return 0.
+ */
+int pm_generic_runtime_idle(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pm && pm->runtime_idle) {
+ int ret = pm->runtime_idle(dev);
+ if (ret)
+ return ret;
+ }
+
+ pm_runtime_suspend(dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_idle);
+
+/**
+ * pm_generic_runtime_suspend - Generic runtime suspend callback for subsystems.
+ * @dev: Device to suspend.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_suspend(), execute it and return its error code. Otherwise,
+ * return -EINVAL.
+ */
+int pm_generic_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_suspend ? pm->runtime_suspend(dev) : -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_suspend);
+
+/**
+ * pm_generic_runtime_resume - Generic runtime resume callback for subsystems.
+ * @dev: Device to resume.
+ *
+ * If PM operations are defined for the @dev's driver and they include
+ * ->runtime_resume(), execute it and return its error code. Otherwise,
+ * return -EINVAL.
+ */
+int pm_generic_runtime_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int ret;
+
+ ret = pm && pm->runtime_resume ? pm->runtime_resume(dev) : -EINVAL;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_generic_runtime_resume);
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+/**
+ * __pm_generic_call - Generic suspend/freeze/poweroff/thaw subsystem callback.
+ * @dev: Device to handle.
+ * @event: PM transition of the system under way.
+ *
+ * If the device has not been suspended at run time, execute the
+ * suspend/freeze/poweroff/thaw callback provided by its driver, if defined, and
+ * return its error code. Otherwise, return zero.
+ */
+static int __pm_generic_call(struct device *dev, int event)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int (*callback)(struct device *);
+
+ if (!pm || pm_runtime_suspended(dev))
+ return 0;
+
+ switch (event) {
+ case PM_EVENT_SUSPEND:
+ callback = pm->suspend;
+ break;
+ case PM_EVENT_FREEZE:
+ callback = pm->freeze;
+ break;
+ case PM_EVENT_HIBERNATE:
+ callback = pm->poweroff;
+ break;
+ case PM_EVENT_THAW:
+ callback = pm->thaw;
+ break;
+ default:
+ callback = NULL;
+ break;
+ }
+
+ return callback ? callback(dev) : 0;
+}
+
+/**
+ * pm_generic_suspend - Generic suspend callback for subsystems.
+ * @dev: Device to suspend.
+ */
+int pm_generic_suspend(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_SUSPEND);
+}
+EXPORT_SYMBOL_GPL(pm_generic_suspend);
+
+/**
+ * pm_generic_freeze - Generic freeze callback for subsystems.
+ * @dev: Device to freeze.
+ */
+int pm_generic_freeze(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_FREEZE);
+}
+EXPORT_SYMBOL_GPL(pm_generic_freeze);
+
+/**
+ * pm_generic_poweroff - Generic poweroff callback for subsystems.
+ * @dev: Device to handle.
+ */
+int pm_generic_poweroff(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_HIBERNATE);
+}
+EXPORT_SYMBOL_GPL(pm_generic_poweroff);
+
+/**
+ * pm_generic_thaw - Generic thaw callback for subsystems.
+ * @dev: Device to thaw.
+ */
+int pm_generic_thaw(struct device *dev)
+{
+ return __pm_generic_call(dev, PM_EVENT_THAW);
+}
+EXPORT_SYMBOL_GPL(pm_generic_thaw);
+
+/**
+ * __pm_generic_resume - Generic resume/restore callback for subsystems.
+ * @dev: Device to handle.
+ * @event: PM transition of the system under way.
+ *
+ * Execute the resume/restore callback provided by the @dev's driver, if
+ * defined. If it returns 0, change the device's runtime PM status to 'active'.
+ * Return the callback's error code.
+ */
+static int __pm_generic_resume(struct device *dev, int event)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int (*callback)(struct device *);
+ int ret;
+
+ if (!pm)
+ return 0;
+
+ switch (event) {
+ case PM_EVENT_RESUME:
+ callback = pm->resume;
+ break;
+ case PM_EVENT_RESTORE:
+ callback = pm->restore;
+ break;
+ default:
+ callback = NULL;
+ break;
+ }
+
+ if (!callback)
+ return 0;
+
+ ret = callback(dev);
+ if (!ret) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ }
+
+ return ret;
+}
+
+/**
+ * pm_generic_resume - Generic resume callback for subsystems.
+ * @dev: Device to resume.
+ */
+int pm_generic_resume(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESUME);
+}
+EXPORT_SYMBOL_GPL(pm_generic_resume);
+
+/**
+ * pm_generic_restore - Generic restore callback for subsystems.
+ * @dev: Device to restore.
+ */
+int pm_generic_restore(struct device *dev)
+{
+ return __pm_generic_resume(dev, PM_EVENT_RESTORE);
+}
+EXPORT_SYMBOL_GPL(pm_generic_restore);
+#endif /* CONFIG_PM_SLEEP */
+
+struct dev_pm_ops generic_subsys_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+ .suspend = pm_generic_suspend,
+ .resume = pm_generic_resume,
+ .freeze = pm_generic_freeze,
+ .thaw = pm_generic_thaw,
+ .poweroff = pm_generic_poweroff,
+ .restore = pm_generic_restore,
+#endif
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = pm_generic_runtime_suspend,
+ .runtime_resume = pm_generic_runtime_resume,
+ .runtime_idle = pm_generic_runtime_idle,
+#endif
+};
+EXPORT_SYMBOL_GPL(generic_subsys_pm_ops);
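A subsystem is expected to hand these callbacks to the driver core rather than open-code them; a minimal sketch of a bus type reusing the table (hypothetical bus name, and it assumes the matching generic_subsys_pm_ops declaration is visible from <linux/pm.h>):

	#include <linux/device.h>
	#include <linux/pm.h>

	static struct bus_type example_bus_type = {
		.name	= "example",
		.pm	= &generic_subsys_pm_ops,	/* system sleep + runtime PM callbacks above */
	};

Individual helpers such as pm_generic_runtime_suspend() can also be picked out of the table when a subsystem only wants part of the behaviour.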
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index add9485ca5b..128cae4e862 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -143,6 +143,8 @@ static int ath3k_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
if (ath3k_load_firmware(data, data->fw_data, data->fw_size)) {
usb_set_intfdata(intf, NULL);
+ kfree(data->fw_data);
+ kfree(data);
return -EIO;
}
diff --git a/drivers/bluetooth/bcm203x.c b/drivers/bluetooth/bcm203x.c
index eafd4af0746..b0c84c19f44 100644
--- a/drivers/bluetooth/bcm203x.c
+++ b/drivers/bluetooth/bcm203x.c
@@ -39,7 +39,7 @@
#define VERSION "1.2"
-static struct usb_device_id bcm203x_table[] = {
+static const struct usb_device_id bcm203x_table[] = {
/* Broadcom Blutonium (BCM2033) */
{ USB_DEVICE(0x0a5c, 0x2033) },
diff --git a/drivers/bluetooth/bfusb.c b/drivers/bluetooth/bfusb.c
index 2a00707aba3..005919ab043 100644
--- a/drivers/bluetooth/bfusb.c
+++ b/drivers/bluetooth/bfusb.c
@@ -703,7 +703,7 @@ static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *i
data->hdev = hdev;
- hdev->type = HCI_USB;
+ hdev->bus = HCI_USB;
hdev->driver_data = data;
SET_HCIDEV_DEV(hdev, &intf->dev);
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index c2cf8114471..d9bf87ca9e8 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -736,7 +736,7 @@ static int bluecard_open(bluecard_info_t *info)
info->hdev = hdev;
- hdev->type = HCI_PCCARD;
+ hdev->bus = HCI_PCCARD;
hdev->driver_data = info;
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
diff --git a/drivers/bluetooth/bpa10x.c b/drivers/bluetooth/bpa10x.c
index c115285867c..d945cd12433 100644
--- a/drivers/bluetooth/bpa10x.c
+++ b/drivers/bluetooth/bpa10x.c
@@ -469,7 +469,7 @@ static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *
return -ENOMEM;
}
- hdev->type = HCI_USB;
+ hdev->bus = HCI_USB;
hdev->driver_data = data;
data->hdev = hdev;
diff --git a/drivers/bluetooth/bt3c_cs.c b/drivers/bluetooth/bt3c_cs.c
index 9f5926aaf57..027cb8bf650 100644
--- a/drivers/bluetooth/bt3c_cs.c
+++ b/drivers/bluetooth/bt3c_cs.c
@@ -582,7 +582,7 @@ static int bt3c_open(bt3c_info_t *info)
info->hdev = hdev;
- hdev->type = HCI_PCCARD;
+ hdev->bus = HCI_PCCARD;
hdev->driver_data = info;
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
diff --git a/drivers/bluetooth/btmrvl_debugfs.c b/drivers/bluetooth/btmrvl_debugfs.c
index d43b5cb864e..3126a3d0c45 100644
--- a/drivers/bluetooth/btmrvl_debugfs.c
+++ b/drivers/bluetooth/btmrvl_debugfs.c
@@ -26,7 +26,8 @@
#include "btmrvl_drv.h"
struct btmrvl_debugfs_data {
- struct dentry *root_dir, *config_dir, *status_dir;
+ struct dentry *config_dir;
+ struct dentry *status_dir;
/* config */
struct dentry *psmode;
@@ -363,6 +364,9 @@ void btmrvl_debugfs_init(struct hci_dev *hdev)
struct btmrvl_private *priv = hdev->driver_data;
struct btmrvl_debugfs_data *dbg;
+ if (!hdev->debugfs)
+ return;
+
dbg = kzalloc(sizeof(*dbg), GFP_KERNEL);
priv->debugfs_data = dbg;
@@ -371,9 +375,7 @@ void btmrvl_debugfs_init(struct hci_dev *hdev)
return;
}
- dbg->root_dir = debugfs_create_dir("btmrvl", NULL);
-
- dbg->config_dir = debugfs_create_dir("config", dbg->root_dir);
+ dbg->config_dir = debugfs_create_dir("config", hdev->debugfs);
dbg->psmode = debugfs_create_file("psmode", 0644, dbg->config_dir,
hdev->driver_data, &btmrvl_psmode_fops);
@@ -388,7 +390,7 @@ void btmrvl_debugfs_init(struct hci_dev *hdev)
dbg->hscfgcmd = debugfs_create_file("hscfgcmd", 0644, dbg->config_dir,
hdev->driver_data, &btmrvl_hscfgcmd_fops);
- dbg->status_dir = debugfs_create_dir("status", dbg->root_dir);
+ dbg->status_dir = debugfs_create_dir("status", hdev->debugfs);
dbg->curpsmode = debugfs_create_file("curpsmode", 0444,
dbg->status_dir,
hdev->driver_data,
@@ -425,7 +427,5 @@ void btmrvl_debugfs_remove(struct hci_dev *hdev)
debugfs_remove(dbg->txdnldready);
debugfs_remove(dbg->status_dir);
- debugfs_remove(dbg->root_dir);
-
kfree(dbg);
}
diff --git a/drivers/bluetooth/btmrvl_main.c b/drivers/bluetooth/btmrvl_main.c
index f97771ce432..53a43adf2e2 100644
--- a/drivers/bluetooth/btmrvl_main.c
+++ b/drivers/bluetooth/btmrvl_main.c
@@ -563,7 +563,7 @@ struct btmrvl_private *btmrvl_add_card(void *card)
priv->btmrvl_dev.tx_dnld_rdy = true;
- hdev->type = HCI_SDIO;
+ hdev->bus = HCI_SDIO;
hdev->open = btmrvl_open;
hdev->close = btmrvl_close;
hdev->flush = btmrvl_flush;
diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c
index 57d965b7f52..94f1f55f81f 100644
--- a/drivers/bluetooth/btmrvl_sdio.c
+++ b/drivers/bluetooth/btmrvl_sdio.c
@@ -976,7 +976,7 @@ static struct sdio_driver bt_mrvl_sdio = {
.remove = btmrvl_sdio_remove,
};
-static int btmrvl_sdio_init_module(void)
+static int __init btmrvl_sdio_init_module(void)
{
if (sdio_register_driver(&bt_mrvl_sdio) != 0) {
BT_ERR("SDIO Driver Registration Failed");
@@ -989,7 +989,7 @@ static int btmrvl_sdio_init_module(void)
return 0;
}
-static void btmrvl_sdio_exit_module(void)
+static void __exit btmrvl_sdio_exit_module(void)
{
/* Set the flag as user is removing this module. */
user_rmmod = 1;
diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c
index 7e298275c8f..76e5127884f 100644
--- a/drivers/bluetooth/btsdio.c
+++ b/drivers/bluetooth/btsdio.c
@@ -326,7 +326,7 @@ static int btsdio_probe(struct sdio_func *func,
return -ENOMEM;
}
- hdev->type = HCI_SDIO;
+ hdev->bus = HCI_SDIO;
hdev->driver_data = data;
data->hdev = hdev;
diff --git a/drivers/bluetooth/btuart_cs.c b/drivers/bluetooth/btuart_cs.c
index 91c52309980..60c0953d7d0 100644
--- a/drivers/bluetooth/btuart_cs.c
+++ b/drivers/bluetooth/btuart_cs.c
@@ -500,7 +500,7 @@ static int btuart_open(btuart_info_t *info)
info->hdev = hdev;
- hdev->type = HCI_PCCARD;
+ hdev->bus = HCI_PCCARD;
hdev->driver_data = info;
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a699f09ddf7..5d9cc53bd64 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -939,7 +939,7 @@ static int btusb_probe(struct usb_interface *intf,
return -ENOMEM;
}
- hdev->type = HCI_USB;
+ hdev->bus = HCI_USB;
hdev->driver_data = data;
data->hdev = hdev;
diff --git a/drivers/bluetooth/dtl1_cs.c b/drivers/bluetooth/dtl1_cs.c
index 697591941e1..17788317c51 100644
--- a/drivers/bluetooth/dtl1_cs.c
+++ b/drivers/bluetooth/dtl1_cs.c
@@ -485,7 +485,7 @@ static int dtl1_open(dtl1_info_t *info)
info->hdev = hdev;
- hdev->type = HCI_PCCARD;
+ hdev->bus = HCI_PCCARD;
hdev->driver_data = info;
SET_HCIDEV_DEV(hdev, &info->p_dev->dev);
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index aa0919386b8..76a1abb8f21 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -383,7 +383,7 @@ static int hci_uart_register_dev(struct hci_uart *hu)
hu->hdev = hdev;
- hdev->type = HCI_UART;
+ hdev->bus = HCI_UART;
hdev->driver_data = hu;
hdev->open = hci_uart_open;
diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c
index 7595274103f..bb0aefdb426 100644
--- a/drivers/bluetooth/hci_vhci.c
+++ b/drivers/bluetooth/hci_vhci.c
@@ -236,7 +236,7 @@ static int vhci_open(struct inode *inode, struct file *file)
data->hdev = hdev;
- hdev->type = HCI_VIRTUAL;
+ hdev->bus = HCI_VIRTUAL;
hdev->driver_data = data;
hdev->open = vhci_open_dev;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index 8a713f1e965..919a28558d3 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -11,6 +11,9 @@
#include <asm/smp.h>
#include "agp.h"
+int intel_agp_enabled;
+EXPORT_SYMBOL(intel_agp_enabled);
+
/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -65,6 +68,10 @@
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB 0x006a
#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG 0x0046
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB 0x0100
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG 0x0102
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB 0x0104
+#define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG 0x0106
/* cover 915 and 945 variants */
#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -99,7 +106,9 @@
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
extern int agp_memory_reserved;
@@ -148,6 +157,25 @@ extern int agp_memory_reserved;
#define INTEL_I7505_AGPCTRL 0x70
#define INTEL_I7505_MCHCFG 0x50
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
{64, 16384, 4},
@@ -294,6 +322,13 @@ static void intel_agp_insert_sg_entries(struct agp_memory *mem,
off_t pg_start, int mask_type)
{
int i, j;
+ u32 cache_bits = 0;
+
+ if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB)
+ {
+ cache_bits = I830_PTE_SYSTEM_CACHED;
+ }
for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
writel(agp_bridge->driver->mask_memory(agp_bridge,
@@ -614,7 +649,7 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
static void intel_i830_init_gtt_entries(void)
{
u16 gmch_ctrl;
- int gtt_entries;
+ int gtt_entries = 0;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
@@ -706,6 +741,63 @@ static void intel_i830_init_gtt_entries(void)
gtt_entries = 0;
break;
}
+ } else if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB ||
+ agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB) {
+ /*
+ * SandyBridge has new memory control reg at 0x50.w
+ */
+ u16 snb_gmch_ctl;
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
+ case SNB_GMCH_GMS_STOLEN_32M:
+ gtt_entries = MB(32) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ gtt_entries = MB(64) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ gtt_entries = MB(96) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ gtt_entries = MB(128) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ gtt_entries = MB(160) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ gtt_entries = MB(192) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ gtt_entries = MB(224) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ gtt_entries = MB(256) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ gtt_entries = MB(288) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ gtt_entries = MB(320) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ gtt_entries = MB(352) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ gtt_entries = MB(384) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ gtt_entries = MB(416) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ gtt_entries = MB(448) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ gtt_entries = MB(480) - KB(size);
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ gtt_entries = MB(512) - KB(size);
+ break;
+ }
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
@@ -1357,6 +1449,8 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB:
+ case PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB:
*gtt_offset = *gtt_size = MB(2);
break;
default:
@@ -2338,9 +2432,9 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "GMA3150",
NULL, &intel_g33_driver },
- { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
+ { PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "GMA3150",
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
"GM45", NULL, &intel_i965_driver },
@@ -2355,13 +2449,17 @@ static const struct intel_driver_description {
{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
"G41", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
- "Ironlake/D", NULL, &intel_i965_driver },
+ "HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
- "Ironlake/M", NULL, &intel_i965_driver },
+ "HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
- "Ironlake/MA", NULL, &intel_i965_driver },
+ "HD Graphics", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
- "Ironlake/MC2", NULL, &intel_i965_driver },
+ "HD Graphics", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_IG, 0,
+ "Sandybridge", NULL, &intel_i965_driver },
+ { PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_IG, 0,
+ "Sandybridge", NULL, &intel_i965_driver },
{ 0, 0, 0, NULL, NULL, NULL }
};
@@ -2371,7 +2469,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
struct agp_bridge_data *bridge;
u8 cap_ptr = 0;
struct resource *r;
- int i;
+ int i, err;
cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
@@ -2463,7 +2561,10 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
}
pci_set_drvdata(pdev, bridge);
- return agp_add_bridge(bridge);
+ err = agp_add_bridge(bridge);
+ if (!err)
+ intel_agp_enabled = 1;
+ return err;
}
static void __devexit agp_intel_remove(struct pci_dev *pdev)
@@ -2568,6 +2669,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB),
+ ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB),
{ }
};
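The sixteen SNB_GMCH_GMS_STOLEN_* cases added above all encode the same relationship: the field in bits 7:3 counts stolen graphics memory in 32 MB units. As a sketch of the arithmetic only (the patch keeps the explicit switch), the lookup is equivalent to:

	u16 snb_gmch_ctl;
	pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	/* codes 1..16 correspond to 32..512 MB; code 0 keeps the default gtt_entries = 0 */
	if (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK)
		gtt_entries = MB(32 * ((snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) >> 3)) - KB(size);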
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index 4254457d391..b861c08263a 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -158,13 +158,11 @@ static unsigned int cy_isa_addresses[] = {
#define NR_ISA_ADDRS ARRAY_SIZE(cy_isa_addresses)
-#ifdef MODULE
static long maddr[NR_CARDS];
static int irq[NR_CARDS];
module_param_array(maddr, long, NULL, 0);
module_param_array(irq, int, NULL, 0);
-#endif
#endif /* CONFIG_ISA */
@@ -598,12 +596,6 @@ static void cyy_chip_tx(struct cyclades_card *cinfo, unsigned int chip,
save_car = readb(base_addr + (CyCAR << index));
cy_writeb(base_addr + (CyCAR << index), save_xir);
- /* validate the port# (as configured and open) */
- if (channel + chip * 4 >= cinfo->nports) {
- cy_writeb(base_addr + (CySRER << index),
- readb(base_addr + (CySRER << index)) & ~CyTxRdy);
- goto end;
- }
info = &cinfo->ports[channel + chip * 4];
tty = tty_port_tty_get(&info->port);
if (tty == NULL) {
@@ -3316,13 +3308,10 @@ static int __init cy_detect_isa(void)
unsigned short cy_isa_irq, nboard;
void __iomem *cy_isa_address;
unsigned short i, j, cy_isa_nchan;
-#ifdef MODULE
int isparam = 0;
-#endif
nboard = 0;
-#ifdef MODULE
/* Check for module parameters */
for (i = 0; i < NR_CARDS; i++) {
if (maddr[i] || i) {
@@ -3332,7 +3321,6 @@ static int __init cy_detect_isa(void)
if (!maddr[i])
break;
}
-#endif
/* scan the address table probing for Cyclom-Y/ISA boards */
for (i = 0; i < NR_ISA_ADDRS; i++) {
@@ -3353,11 +3341,10 @@ static int __init cy_detect_isa(void)
iounmap(cy_isa_address);
continue;
}
-#ifdef MODULE
+
if (isparam && i < NR_CARDS && irq[i])
cy_isa_irq = irq[i];
else
-#endif
/* find out the board's irq by probing */
cy_isa_irq = detect_isa_irq(cy_isa_address);
if (cy_isa_irq == 0) {
@@ -4208,3 +4195,4 @@ module_exit(cy_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_VERSION(CY_VERSION);
MODULE_ALIAS_CHARDEV_MAJOR(CYCLADES_MAJOR);
+MODULE_FIRMWARE("cyzfirm.bin");
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 4c3b59be286..465185fc0f5 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -146,7 +146,7 @@ static void hvc_console_print(struct console *co, const char *b,
return;
/* This console adapter was removed so it is not usable. */
- if (vtermnos[index] < 0)
+ if (vtermnos[index] == -1)
return;
while (count > 0 || i > 0) {
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 6ea1014697d..d31483c5488 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -114,7 +114,7 @@ config HW_RANDOM_IXP4XX
config HW_RANDOM_OMAP
tristate "OMAP Random Number Generator support"
- depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP24XX)
+ depends on HW_RANDOM && (ARCH_OMAP16XX || ARCH_OMAP2)
default HW_RANDOM
---help---
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c
index 517271c762e..911e1da6def 100644
--- a/drivers/char/ip2/ip2main.c
+++ b/drivers/char/ip2/ip2main.c
@@ -208,6 +208,7 @@ static int DumpFifoBuffer( char __user *, int);
static void ip2_init_board(int, const struct firmware *);
static unsigned short find_eisa_board(int);
+static int ip2_setup(char *str);
/***************/
/* Static Data */
@@ -263,7 +264,7 @@ static int tracewrap;
/* Macros */
/**********/
-#if defined(MODULE) && defined(IP2DEBUG_OPEN)
+#ifdef IP2DEBUG_OPEN
#define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] ttyc=%d, modc=%x -> %s\n", \
tty->name,(pCh->flags), \
tty->count,/*GET_USE_COUNT(module)*/0,s)
@@ -285,7 +286,10 @@ MODULE_AUTHOR("Doug McNash");
MODULE_DESCRIPTION("Computone IntelliPort Plus Driver");
MODULE_LICENSE("GPL");
+#define MAX_CMD_STR 50
+
static int poll_only;
+static char cmd[MAX_CMD_STR];
static int Eisa_irq;
static int Eisa_slot;
@@ -309,6 +313,8 @@ module_param_array(io, int, NULL, 0);
MODULE_PARM_DESC(io, "I/O ports for IntelliPort Cards");
module_param(poll_only, bool, 0);
MODULE_PARM_DESC(poll_only, "Do not use card interrupts");
+module_param_string(ip2, cmd, MAX_CMD_STR, 0);
+MODULE_PARM_DESC(ip2, "Contains module parameter passed with 'ip2='");
/* for sysfs class support */
static struct class *ip2_class;
@@ -487,7 +493,6 @@ static const struct firmware *ip2_request_firmware(void)
return fw;
}
-#ifndef MODULE
/******************************************************************************
* ip2_setup:
* str: kernel command line string
@@ -531,7 +536,6 @@ static int __init ip2_setup(char *str)
return 1;
}
__setup("ip2=", ip2_setup);
-#endif /* !MODULE */
static int __init ip2_loadmain(void)
{
@@ -539,14 +543,20 @@ static int __init ip2_loadmain(void)
int err = 0;
i2eBordStrPtr pB = NULL;
int rc = -1;
- struct pci_dev *pdev = NULL;
const struct firmware *fw = NULL;
+ char *str;
+
+ str = cmd;
if (poll_only) {
/* Hard lock the interrupts to zero */
irq[0] = irq[1] = irq[2] = irq[3] = poll_only = 0;
}
+ /* Check whether the 'ip2=' module parameter has been passed */
+ if (!poll_only && (!strncmp(str, "ip2=", 4)))
+ ip2_setup(str);
+
ip2trace(ITRC_NO_PORT, ITRC_INIT, ITRC_ENTER, 0);
/* process command line arguments to modprobe or
@@ -612,6 +622,7 @@ static int __init ip2_loadmain(void)
case PCI:
#ifdef CONFIG_PCI
{
+ struct pci_dev *pdev = NULL;
u32 addr;
int status;
@@ -626,7 +637,7 @@ static int __init ip2_loadmain(void)
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev, "can't enable device\n");
- break;
+ goto out;
}
ip2config.type[i] = PCI;
ip2config.pci_dev[i] = pci_dev_get(pdev);
@@ -638,6 +649,8 @@ static int __init ip2_loadmain(void)
dev_err(&pdev->dev, "I/O address error\n");
ip2config.irq[i] = pdev->irq;
+out:
+ pci_dev_put(pdev);
}
#else
printk(KERN_ERR "IP2: PCI card specified but PCI "
@@ -656,7 +669,6 @@ static int __init ip2_loadmain(void)
break;
} /* switch */
} /* for */
- pci_dev_put(pdev);
for (i = 0; i < IP2_MAX_BOARDS; ++i) {
if (ip2config.addr[i]) {
@@ -3197,3 +3209,5 @@ static struct pci_device_id ip2main_pci_tbl[] __devinitdata = {
};
MODULE_DEVICE_TABLE(pci, ip2main_pci_tbl);
+
+MODULE_FIRMWARE("intelliport2.bin");
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c
index 300d5bd6cd0..be2e8f9a27c 100644
--- a/drivers/char/isicom.c
+++ b/drivers/char/isicom.c
@@ -113,6 +113,8 @@
* 64-bit verification
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/kernel.h>
@@ -140,7 +142,6 @@
#define InterruptTheCard(base) outw(0, (base) + 0xc)
#define ClearInterrupt(base) inw((base) + 0x0a)
-#define pr_dbg(str...) pr_debug("ISICOM: " str)
#ifdef DEBUG
#define isicom_paranoia_check(a, b, c) __isicom_paranoia_check((a), (b), (c))
#else
@@ -249,8 +250,7 @@ static int lock_card(struct isi_board *card)
spin_unlock_irqrestore(&card->card_lock, card->flags);
msleep(10);
}
- printk(KERN_WARNING "ISICOM: Failed to lock Card (0x%lx)\n",
- card->base);
+ pr_warning("Failed to lock Card (0x%lx)\n", card->base);
return 0; /* Failed to acquire the card! */
}
@@ -379,13 +379,13 @@ static inline int __isicom_paranoia_check(struct isi_port const *port,
char *name, const char *routine)
{
if (!port) {
- printk(KERN_WARNING "ISICOM: Warning: bad isicom magic for "
- "dev %s in %s.\n", name, routine);
+ pr_warning("Warning: bad isicom magic for dev %s in %s.\n",
+ name, routine);
return 1;
}
if (port->magic != ISICOM_MAGIC) {
- printk(KERN_WARNING "ISICOM: Warning: NULL isicom port for "
- "dev %s in %s.\n", name, routine);
+ pr_warning("Warning: NULL isicom port for dev %s in %s.\n",
+ name, routine);
return 1;
}
@@ -450,8 +450,8 @@ static void isicom_tx(unsigned long _data)
if (!(inw(base + 0x02) & (1 << port->channel)))
continue;
- pr_dbg("txing %d bytes, port%d.\n", txcount,
- port->channel + 1);
+ pr_debug("txing %d bytes, port%d.\n",
+ txcount, port->channel + 1);
outw((port->channel << isi_card[card].shift_count) | txcount,
base);
residue = NO;
@@ -547,8 +547,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
byte_count = header & 0xff;
if (channel + 1 > card->port_count) {
- printk(KERN_WARNING "ISICOM: isicom_interrupt(0x%lx): "
- "%d(channel) > port_count.\n", base, channel+1);
+ pr_warning("%s(0x%lx): %d(channel) > port_count.\n",
+ __func__, base, channel+1);
outw(0x0000, base+0x04); /* enable interrupts */
spin_unlock(&card->card_lock);
return IRQ_HANDLED;
@@ -582,14 +582,15 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
if (port->status & ISI_DCD) {
if (!(header & ISI_DCD)) {
/* Carrier has been lost */
- pr_dbg("interrupt: DCD->low.\n"
- );
+ pr_debug("%s: DCD->low.\n",
+ __func__);
port->status &= ~ISI_DCD;
tty_hangup(tty);
}
} else if (header & ISI_DCD) {
/* Carrier has been detected */
- pr_dbg("interrupt: DCD->high.\n");
+ pr_debug("%s: DCD->high.\n",
+ __func__);
port->status |= ISI_DCD;
wake_up_interruptible(&port->port.open_wait);
}
@@ -641,17 +642,19 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
break;
case 2: /* Statistics */
- pr_dbg("isicom_interrupt: stats!!!.\n");
+ pr_debug("%s: stats!!!\n", __func__);
break;
default:
- pr_dbg("Intr: Unknown code in status packet.\n");
+ pr_debug("%s: Unknown code in status packet.\n",
+ __func__);
break;
}
} else { /* Data Packet */
count = tty_prepare_flip_string(tty, &rp, byte_count & ~1);
- pr_dbg("Intr: Can rx %d of %d bytes.\n", count, byte_count);
+ pr_debug("%s: Can rx %d of %d bytes.\n",
+ __func__, count, byte_count);
word_count = count >> 1;
insw(base, rp, word_count);
byte_count -= (word_count << 1);
@@ -661,8 +664,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id)
byte_count -= 2;
}
if (byte_count > 0) {
- pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping "
- "bytes...\n", base, channel + 1);
+ pr_debug("%s(0x%lx:%d): Flip buffer overflow! dropping bytes...\n",
+ __func__, base, channel + 1);
/* drain out unread xtra data */
while (byte_count > 0) {
inw(base);
@@ -888,8 +891,8 @@ static void isicom_shutdown_port(struct isi_port *port)
struct isi_board *card = port->card;
if (--card->count < 0) {
- pr_dbg("isicom_shutdown_port: bad board(0x%lx) count %d.\n",
- card->base, card->count);
+ pr_debug("%s: bad board(0x%lx) count %d.\n",
+ __func__, card->base, card->count);
card->count = 0;
}
/* last port was closed, shutdown that board too */
@@ -1681,13 +1684,13 @@ static int __init isicom_init(void)
retval = tty_register_driver(isicom_normal);
if (retval) {
- pr_dbg("Couldn't register the dialin driver\n");
+ pr_debug("Couldn't register the dialin driver\n");
goto err_puttty;
}
retval = pci_register_driver(&isicom_driver);
if (retval < 0) {
- printk(KERN_ERR "ISICOM: Unable to register pci driver.\n");
+ pr_err("Unable to register pci driver.\n");
goto err_unrtty;
}
@@ -1717,3 +1720,8 @@ module_exit(isicom_exit);
MODULE_AUTHOR("MultiTech");
MODULE_DESCRIPTION("Driver for the ISI series of cards by MultiTech");
MODULE_LICENSE("GPL");
+MODULE_FIRMWARE("isi608.bin");
+MODULE_FIRMWARE("isi608em.bin");
+MODULE_FIRMWARE("isi616em.bin");
+MODULE_FIRMWARE("isi4608.bin");
+MODULE_FIRMWARE("isi4616.bin");
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 63ee3bbc1ce..166495d6a1d 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -164,24 +164,25 @@ static unsigned int moxaFuncTout = HZ / 2;
static unsigned int moxaLowWaterChk;
static DEFINE_MUTEX(moxa_openlock);
static DEFINE_SPINLOCK(moxa_lock);
-/* Variables for insmod */
-#ifdef MODULE
+
static unsigned long baseaddr[MAX_BOARDS];
static unsigned int type[MAX_BOARDS];
static unsigned int numports[MAX_BOARDS];
-#endif
MODULE_AUTHOR("William Chen");
MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver");
MODULE_LICENSE("GPL");
-#ifdef MODULE
+MODULE_FIRMWARE("c218tunx.cod");
+MODULE_FIRMWARE("cp204unx.cod");
+MODULE_FIRMWARE("c320tunx.cod");
+
module_param_array(type, uint, NULL, 0);
MODULE_PARM_DESC(type, "card type: C218=2, C320=4");
module_param_array(baseaddr, ulong, NULL, 0);
MODULE_PARM_DESC(baseaddr, "base address");
module_param_array(numports, uint, NULL, 0);
MODULE_PARM_DESC(numports, "numports (ignored for C218)");
-#endif
+
module_param(ttymajor, int, 0);
/*
@@ -1024,6 +1025,8 @@ static int __init moxa_init(void)
{
unsigned int isabrds = 0;
int retval = 0;
+ struct moxa_board_conf *brd = moxa_boards;
+ unsigned int i;
printk(KERN_INFO "MOXA Intellio family driver version %s\n",
MOXA_VERSION);
@@ -1051,10 +1054,7 @@ static int __init moxa_init(void)
}
/* Find the boards defined from module args. */
-#ifdef MODULE
- {
- struct moxa_board_conf *brd = moxa_boards;
- unsigned int i;
+
for (i = 0; i < MAX_BOARDS; i++) {
if (!baseaddr[i])
break;
@@ -1087,8 +1087,6 @@ static int __init moxa_init(void)
isabrds++;
}
}
- }
-#endif
#ifdef CONFIG_PCI
retval = pci_register_driver(&moxa_pci_driver);
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index 3d923065d9a..e0c5d2a6904 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -895,8 +895,7 @@ static int mxser_activate(struct tty_port *port, struct tty_struct *tty)
if (inb(info->ioaddr + UART_LSR) == 0xff) {
spin_unlock_irqrestore(&info->slock, flags);
if (capable(CAP_SYS_ADMIN)) {
- if (tty)
- set_bit(TTY_IO_ERROR, &tty->flags);
+ set_bit(TTY_IO_ERROR, &tty->flags);
return 0;
} else
return -ENODEV;
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 2ad7d37afbd..a3f32a15fde 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -136,10 +136,6 @@ static int debug;
#define RECEIVE_BUF_MAX 4
-/* Define all types of vendors and devices to support */
-#define VENDOR1 0x1931 /* Vendor Option */
-#define DEVICE1 0x000c /* HSDPA card */
-
#define R_IIR 0x0000 /* Interrupt Identity Register */
#define R_FCR 0x0000 /* Flow Control Register */
#define R_IER 0x0004 /* Interrupt Enable Register */
@@ -371,6 +367,8 @@ struct port {
struct mutex tty_sem;
wait_queue_head_t tty_wait;
struct async_icount tty_icount;
+
+ struct nozomi *dc;
};
/* Private data one for each card in the system */
@@ -405,7 +403,7 @@ struct buffer {
/* Global variables */
static const struct pci_device_id nozomi_pci_tbl[] __devinitconst = {
- {PCI_DEVICE(VENDOR1, DEVICE1)},
+ {PCI_DEVICE(0x1931, 0x000c)}, /* Nozomi HSDPA */
{},
};
@@ -414,6 +412,8 @@ MODULE_DEVICE_TABLE(pci, nozomi_pci_tbl);
static struct nozomi *ndevs[NOZOMI_MAX_CARDS];
static struct tty_driver *ntty_driver;
+static const struct tty_port_operations noz_tty_port_ops;
+
/*
* find card by tty_index
*/
@@ -853,8 +853,6 @@ static int receive_data(enum port_type index, struct nozomi *dc)
goto put;
}
- tty_buffer_request_room(tty, size);
-
while (size > 0) {
read_mem32((u32 *) buf, addr + offset, RECEIVE_BUF_MAX);
@@ -1473,9 +1471,11 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev,
for (i = 0; i < MAX_PORT; i++) {
struct device *tty_dev;
-
- mutex_init(&dc->port[i].tty_sem);
- tty_port_init(&dc->port[i].port);
+ struct port *port = &dc->port[i];
+ port->dc = dc;
+ mutex_init(&port->tty_sem);
+ tty_port_init(&port->port);
+ port->port.ops = &noz_tty_port_ops;
tty_dev = tty_register_device(ntty_driver, dc->index_start + i,
&pdev->dev);
@@ -1600,67 +1600,74 @@ static void set_dtr(const struct tty_struct *tty, int dtr)
* ----------------------------------------------------------------------------
*/
-/* Called when the userspace process opens the tty, /dev/noz*. */
-static int ntty_open(struct tty_struct *tty, struct file *file)
+static int ntty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct port *port = get_port_by_tty(tty);
struct nozomi *dc = get_dc_by_tty(tty);
- unsigned long flags;
-
+ int ret;
if (!port || !dc || dc->state != NOZOMI_STATE_READY)
return -ENODEV;
-
- if (mutex_lock_interruptible(&port->tty_sem))
- return -ERESTARTSYS;
-
- port->port.count++;
- dc->open_ttys++;
-
- /* Enable interrupt downlink for channel */
- if (port->port.count == 1) {
- tty->driver_data = port;
- tty_port_tty_set(&port->port, tty);
- DBG1("open: %d", port->token_dl);
- spin_lock_irqsave(&dc->spin_mutex, flags);
- dc->last_ier = dc->last_ier | port->token_dl;
- writew(dc->last_ier, dc->reg_ier);
- spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ ret = tty_init_termios(tty);
+ if (ret == 0) {
+ tty_driver_kref_get(driver);
+ driver->ttys[tty->index] = tty;
}
- mutex_unlock(&port->tty_sem);
- return 0;
+ return ret;
}
-/* Called when the userspace process close the tty, /dev/noz*. Also
- called immediately if ntty_open fails in which case tty->driver_data
- will be NULL an we exit by the first return */
+static void ntty_cleanup(struct tty_struct *tty)
+{
+ tty->driver_data = NULL;
+}
-static void ntty_close(struct tty_struct *tty, struct file *file)
+static int ntty_activate(struct tty_port *tport, struct tty_struct *tty)
{
- struct nozomi *dc = get_dc_by_tty(tty);
- struct port *nport = tty->driver_data;
- struct tty_port *port = &nport->port;
+ struct port *port = container_of(tport, struct port, port);
+ struct nozomi *dc = port->dc;
unsigned long flags;
- if (!dc || !nport)
- return;
+ DBG1("open: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier = dc->last_ier | port->token_dl;
+ writew(dc->last_ier, dc->reg_ier);
+ dc->open_ttys++;
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ printk("noz: activated %d: %p\n", tty->index, tport);
+ return 0;
+}
- /* Users cannot interrupt a close */
- mutex_lock(&nport->tty_sem);
+static int ntty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct port *port = get_port_by_tty(tty);
+ return tty_port_open(&port->port, tty, filp);
+}
- WARN_ON(!port->count);
+static void ntty_shutdown(struct tty_port *tport)
+{
+ struct port *port = container_of(tport, struct port, port);
+ struct nozomi *dc = port->dc;
+ unsigned long flags;
+ DBG1("close: %d", port->token_dl);
+ spin_lock_irqsave(&dc->spin_mutex, flags);
+ dc->last_ier &= ~(port->token_dl);
+ writew(dc->last_ier, dc->reg_ier);
dc->open_ttys--;
- port->count--;
+ spin_unlock_irqrestore(&dc->spin_mutex, flags);
+ printk("noz: shutdown %p\n", tport);
+}
- if (port->count == 0) {
- DBG1("close: %d", nport->token_dl);
- tty_port_tty_set(port, NULL);
- spin_lock_irqsave(&dc->spin_mutex, flags);
- dc->last_ier &= ~(nport->token_dl);
- writew(dc->last_ier, dc->reg_ier);
- spin_unlock_irqrestore(&dc->spin_mutex, flags);
- }
- mutex_unlock(&nport->tty_sem);
+static void ntty_close(struct tty_struct *tty, struct file *filp)
+{
+ struct port *port = tty->driver_data;
+ if (port)
+ tty_port_close(&port->port, tty, filp);
+}
+
+static void ntty_hangup(struct tty_struct *tty)
+{
+ struct port *port = tty->driver_data;
+ tty_port_hangup(&port->port);
}
/*
@@ -1680,15 +1687,7 @@ static int ntty_write(struct tty_struct *tty, const unsigned char *buffer,
if (!dc || !port)
return -ENODEV;
- if (unlikely(!mutex_trylock(&port->tty_sem))) {
- /*
- * must test lock as tty layer wraps calls
- * to this function with BKL
- */
- dev_err(&dc->pdev->dev, "Would have deadlocked - "
- "return EAGAIN\n");
- return -EAGAIN;
- }
+ mutex_lock(&port->tty_sem);
if (unlikely(!port->port.count)) {
DBG1(" ");
@@ -1728,25 +1727,23 @@ exit:
* This method is called by the upper tty layer.
* #according to sources N_TTY.c it expects a value >= 0 and
* does not check for negative values.
+ *
+ * If the port is unplugged report lots of room and let the bits
+ * dribble away so we don't block anything.
*/
static int ntty_write_room(struct tty_struct *tty)
{
struct port *port = tty->driver_data;
- int room = 0;
+ int room = 4096;
const struct nozomi *dc = get_dc_by_tty(tty);
- if (!dc || !port)
- return 0;
- if (!mutex_trylock(&port->tty_sem))
- return 0;
-
- if (!port->port.count)
- goto exit;
-
- room = port->fifo_ul.size - kfifo_len(&port->fifo_ul);
-
-exit:
- mutex_unlock(&port->tty_sem);
+ if (dc) {
+ mutex_lock(&port->tty_sem);
+ if (port->port.count)
+ room = port->fifo_ul.size -
+ kfifo_len(&port->fifo_ul);
+ mutex_unlock(&port->tty_sem);
+ }
return room;
}
@@ -1906,10 +1903,16 @@ exit_in_buffer:
return rval;
}
+static const struct tty_port_operations noz_tty_port_ops = {
+ .activate = ntty_activate,
+ .shutdown = ntty_shutdown,
+};
+
static const struct tty_operations tty_ops = {
.ioctl = ntty_ioctl,
.open = ntty_open,
.close = ntty_close,
+ .hangup = ntty_hangup,
.write = ntty_write,
.write_room = ntty_write_room,
.unthrottle = ntty_unthrottle,
@@ -1917,6 +1920,8 @@ static const struct tty_operations tty_ops = {
.chars_in_buffer = ntty_chars_in_buffer,
.tiocmget = ntty_tiocmget,
.tiocmset = ntty_tiocmset,
+ .install = ntty_install,
+ .cleanup = ntty_cleanup,
};
/* Module initialization */
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 452370af95d..986aa606a6b 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -658,8 +658,7 @@ static irqreturn_t cd2401_rx_interrupt(int irq, void *dev_id)
info->mon.char_max = char_count;
info->mon.char_last = char_count;
#endif
- len = tty_buffer_request_room(tty, char_count);
- while (len--) {
+ while (char_count--) {
data = base_addr[CyRDR];
tty_insert_flip_char(tty, data, TTY_NORMAL);
#ifdef CYCLOM_16Y_HACK
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c
index 268e17f9ec3..07ac14d949c 100644
--- a/drivers/char/specialix.c
+++ b/drivers/char/specialix.c
@@ -646,8 +646,6 @@ static void sx_receive(struct specialix_board *bp)
dprintk(SX_DEBUG_RX, "port: %p: count: %d\n", port, count);
port->hits[count > 8 ? 9 : count]++;
- tty_buffer_request_room(tty, count);
-
while (count--)
tty_insert_flip_char(tty, sx_in(bp, CD186x_RDR), TTY_NORMAL);
tty_flip_buffer_push(tty);
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 4846b73ef28..0658fc54822 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -2031,7 +2031,7 @@ static int mgsl_put_char(struct tty_struct *tty, unsigned char ch)
if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char"))
return 0;
- if (!tty || !info->xmit_buf)
+ if (!info->xmit_buf)
return 0;
spin_lock_irqsave(&info->irq_spinlock, flags);
@@ -2121,7 +2121,7 @@ static int mgsl_write(struct tty_struct * tty,
if (mgsl_paranoia_check(info, tty->name, "mgsl_write"))
goto cleanup;
- if (!tty || !info->xmit_buf)
+ if (!info->xmit_buf)
goto cleanup;
if ( info->params.mode == MGSL_MODE_HDLC ||
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c
index 8678f0c8699..4561ce2fba6 100644
--- a/drivers/char/synclink_gt.c
+++ b/drivers/char/synclink_gt.c
@@ -468,7 +468,7 @@ static unsigned int free_tbuf_count(struct slgt_info *info);
static unsigned int tbuf_bytes(struct slgt_info *info);
static void reset_tbufs(struct slgt_info *info);
static void tdma_reset(struct slgt_info *info);
-static void tx_load(struct slgt_info *info, const char *buf, unsigned int count);
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int count);
static void get_signals(struct slgt_info *info);
static void set_signals(struct slgt_info *info);
@@ -813,59 +813,32 @@ static int write(struct tty_struct *tty,
int ret = 0;
struct slgt_info *info = tty->driver_data;
unsigned long flags;
- unsigned int bufs_needed;
if (sanity_check(info, tty->name, "write"))
- goto cleanup;
+ return -EIO;
+
DBGINFO(("%s write count=%d\n", info->device_name, count));
- if (!info->tx_buf)
- goto cleanup;
+ if (!info->tx_buf || (count > info->max_frame_size))
+ return -EIO;
- if (count > info->max_frame_size) {
- ret = -EIO;
- goto cleanup;
- }
+ if (!count || tty->stopped || tty->hw_stopped)
+ return 0;
- if (!count)
- goto cleanup;
+ spin_lock_irqsave(&info->lock, flags);
- if (!info->tx_active && info->tx_count) {
+ if (info->tx_count) {
/* send accumulated data from send_char() */
- tx_load(info, info->tx_buf, info->tx_count);
- goto start;
+ if (!tx_load(info, info->tx_buf, info->tx_count))
+ goto cleanup;
+ info->tx_count = 0;
}
- bufs_needed = (count/DMABUFSIZE);
- if (count % DMABUFSIZE)
- ++bufs_needed;
- if (bufs_needed > free_tbuf_count(info))
- goto cleanup;
- ret = info->tx_count = count;
- tx_load(info, buf, count);
- goto start;
-
-start:
- if (info->tx_count && !tty->stopped && !tty->hw_stopped) {
- spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active)
- tx_start(info);
- else if (!(rd_reg32(info, TDCSR) & BIT0)) {
- /* transmit still active but transmit DMA stopped */
- unsigned int i = info->tbuf_current;
- if (!i)
- i = info->tbuf_count;
- i--;
- /* if DMA buf unsent must try later after tx idle */
- if (desc_count(info->tbufs[i]))
- ret = 0;
- }
- if (ret > 0)
- update_tx_timer(info);
- spin_unlock_irqrestore(&info->lock,flags);
- }
+ if (tx_load(info, buf, count))
+ ret = count;
cleanup:
+ spin_unlock_irqrestore(&info->lock, flags);
DBGINFO(("%s write rc=%d\n", info->device_name, ret));
return ret;
}
@@ -882,7 +855,7 @@ static int put_char(struct tty_struct *tty, unsigned char ch)
if (!info->tx_buf)
return 0;
spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active && (info->tx_count < info->max_frame_size)) {
+ if (info->tx_count < info->max_frame_size) {
info->tx_buf[info->tx_count++] = ch;
ret = 1;
}
@@ -981,10 +954,8 @@ static void flush_chars(struct tty_struct *tty)
DBGINFO(("%s flush_chars start transmit\n", info->device_name));
spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active && info->tx_count) {
- tx_load(info, info->tx_buf,info->tx_count);
- tx_start(info);
- }
+ if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+ info->tx_count = 0;
spin_unlock_irqrestore(&info->lock,flags);
}
@@ -997,10 +968,9 @@ static void flush_buffer(struct tty_struct *tty)
return;
DBGINFO(("%s flush_buffer\n", info->device_name));
- spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active)
- info->tx_count = 0;
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock, flags);
tty_wakeup(tty);
}
@@ -1033,12 +1003,10 @@ static void tx_release(struct tty_struct *tty)
if (sanity_check(info, tty->name, "tx_release"))
return;
DBGINFO(("%s tx_release\n", info->device_name));
- spin_lock_irqsave(&info->lock,flags);
- if (!info->tx_active && info->tx_count) {
- tx_load(info, info->tx_buf, info->tx_count);
- tx_start(info);
- }
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ if (info->tx_count && tx_load(info, info->tx_buf, info->tx_count))
+ info->tx_count = 0;
+ spin_unlock_irqrestore(&info->lock, flags);
}
/*
@@ -1506,27 +1474,25 @@ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb,
DBGINFO(("%s hdlc_xmit\n", dev->name));
+ if (!skb->len)
+ return NETDEV_TX_OK;
+
/* stop sending until this frame completes */
netif_stop_queue(dev);
- /* copy data to device buffers */
- info->tx_count = skb->len;
- tx_load(info, skb->data, skb->len);
-
/* update network statistics */
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
- /* done with socket buffer, so free it */
- dev_kfree_skb(skb);
-
/* save start time for transmit timeout detection */
dev->trans_start = jiffies;
- spin_lock_irqsave(&info->lock,flags);
- tx_start(info);
- update_tx_timer(info);
- spin_unlock_irqrestore(&info->lock,flags);
+ spin_lock_irqsave(&info->lock, flags);
+ tx_load(info, skb->data, skb->len);
+ spin_unlock_irqrestore(&info->lock, flags);
+
+ /* done with socket buffer, so free it */
+ dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -2180,7 +2146,7 @@ static void isr_serial(struct slgt_info *info)
if (info->params.mode == MGSL_MODE_ASYNC) {
if (status & IRQ_TXIDLE) {
- if (info->tx_count)
+ if (info->tx_active)
isr_txeom(info, status);
}
if (info->rx_pio && (status & IRQ_RXDATA))
@@ -2276,13 +2242,42 @@ static void isr_tdma(struct slgt_info *info)
}
}
+/*
+ * return true if there are unsent tx DMA buffers, otherwise false
+ *
+ * if there are unsent buffers then info->tbuf_start
+ * is set to the index of the first unsent buffer
+ */
+static bool unsent_tbufs(struct slgt_info *info)
+{
+ unsigned int i = info->tbuf_current;
+ bool rc = false;
+
+ /*
+ * search backwards from last loaded buffer (precedes tbuf_current)
+ * for first unsent buffer (desc_count > 0)
+ */
+
+ do {
+ if (i)
+ i--;
+ else
+ i = info->tbuf_count - 1;
+ if (!desc_count(info->tbufs[i]))
+ break;
+ info->tbuf_start = i;
+ rc = true;
+ } while (i != info->tbuf_current);
+
+ return rc;
+}
+
static void isr_txeom(struct slgt_info *info, unsigned short status)
{
DBGISR(("%s txeom status=%04x\n", info->device_name, status));
slgt_irq_off(info, IRQ_TXDATA + IRQ_TXIDLE + IRQ_TXUNDER);
tdma_reset(info);
- reset_tbufs(info);
if (status & IRQ_TXUNDER) {
unsigned short val = rd_reg16(info, TCR);
wr_reg16(info, TCR, (unsigned short)(val | BIT2)); /* set reset bit */
@@ -2297,8 +2292,12 @@ static void isr_txeom(struct slgt_info *info, unsigned short status)
info->icount.txok++;
}
+ if (unsent_tbufs(info)) {
+ tx_start(info);
+ update_tx_timer(info);
+ return;
+ }
info->tx_active = false;
- info->tx_count = 0;
del_timer(&info->tx_timer);
@@ -3949,7 +3948,7 @@ static void tx_start(struct slgt_info *info)
info->tx_enabled = true;
}
- if (info->tx_count) {
+ if (desc_count(info->tbufs[info->tbuf_start])) {
info->drop_rts_on_tx_done = false;
if (info->params.mode != MGSL_MODE_ASYNC) {
@@ -4772,25 +4771,36 @@ static unsigned int tbuf_bytes(struct slgt_info *info)
}
/*
- * load transmit DMA buffer(s) with data
+ * load data into transmit DMA buffer ring and start transmitter if needed
+ * return true if data accepted, otherwise false (buffers full)
*/
-static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
+static bool tx_load(struct slgt_info *info, const char *buf, unsigned int size)
{
unsigned short count;
unsigned int i;
struct slgt_desc *d;
- if (size == 0)
- return;
+ /* check required buffer space */
+ if (DIV_ROUND_UP(size, DMABUFSIZE) > free_tbuf_count(info))
+ return false;
DBGDATA(info, buf, size, "tx");
+ /*
+ * copy data to one or more DMA buffers in circular ring
+ * tbuf_start = first buffer for this data
+ * tbuf_current = next free buffer
+ *
+ * Copy all data before making data visible to DMA controller by
+ * setting descriptor count of the first buffer.
+ * This prevents an active DMA controller from reading the first DMA
+ * buffers of a frame and stopping before the final buffers are filled.
+ */
+
info->tbuf_start = i = info->tbuf_current;
while (size) {
d = &info->tbufs[i];
- if (++i == info->tbuf_count)
- i = 0;
count = (unsigned short)((size > DMABUFSIZE) ? DMABUFSIZE : size);
memcpy(d->buf, buf, count);
@@ -4808,11 +4818,27 @@ static void tx_load(struct slgt_info *info, const char *buf, unsigned int size)
else
set_desc_eof(*d, 0);
- set_desc_count(*d, count);
+ /* set descriptor count for all but first buffer */
+ if (i != info->tbuf_start)
+ set_desc_count(*d, count);
d->buf_count = count;
+
+ if (++i == info->tbuf_count)
+ i = 0;
}
info->tbuf_current = i;
+
+ /* set first buffer count to make new data visible to DMA controller */
+ d = &info->tbufs[info->tbuf_start];
+ set_desc_count(*d, d->buf_count);
+
+ /* start transmitter if needed and update transmit timeout */
+ if (!info->tx_active)
+ tx_start(info);
+ update_tx_timer(info);
+
+ return true;
}
static int register_test(struct slgt_info *info)
@@ -4934,9 +4960,7 @@ static int loopback_test(struct slgt_info *info)
spin_lock_irqsave(&info->lock,flags);
async_mode(info);
rx_start(info);
- info->tx_count = count;
tx_load(info, buf, count);
- tx_start(info);
spin_unlock_irqrestore(&info->lock, flags);
/* wait for receive complete */
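Review note: the key idea in the reworked tx_load() above is ordering, not just the new return value. Every buffer of a frame is copied first, and only then is the first descriptor's count written, so a DMA engine that is already running never sees a partially loaded frame. Below is a stripped-down, self-contained sketch of that "fill everything, publish the first count last" pattern; the names (ring_load, RING_BUFSIZE, struct ring) are illustrative and are not the driver's API, and the free-space check done by the real tx_load() is omitted.

#include <stdbool.h>
#include <string.h>

#define RING_BUFSIZE 256u
#define RING_NBUFS   8u

struct ring_desc {
        unsigned char buf[RING_BUFSIZE];
        unsigned int  count;     /* nonzero = visible to the DMA engine */
};

struct ring {
        struct ring_desc desc[RING_NBUFS];
        unsigned int current;    /* next free buffer index */
};

static bool ring_load(struct ring *r, const unsigned char *buf, unsigned int size)
{
        unsigned int first = r->current, i = first, first_count = 0;

        while (size) {
                unsigned int count = size > RING_BUFSIZE ? RING_BUFSIZE : size;

                memcpy(r->desc[i].buf, buf, count);
                if (i == first) {
                        first_count = count;         /* publish this one last */
                        r->desc[i].count = 0;        /* keep hidden for now   */
                } else {
                        r->desc[i].count = count;    /* publish immediately   */
                }
                buf  += count;
                size -= count;
                if (++i == RING_NBUFS)
                        i = 0;
        }
        r->current = i;

        /* writing the first count last makes the whole frame visible at once */
        r->desc[first].count = first_count;
        return true;
}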
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index 66fa4e10d76..af8d9771572 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -231,9 +231,10 @@ int tty_buffer_request_room(struct tty_struct *tty, size_t size)
EXPORT_SYMBOL_GPL(tty_buffer_request_room);
/**
- * tty_insert_flip_string - Add characters to the tty buffer
+ * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
* @tty: tty structure
* @chars: characters
+ * @flag: flag value for each character
* @size: size
*
* Queue a series of bytes to the tty buffering. All the characters
@@ -242,18 +243,19 @@ EXPORT_SYMBOL_GPL(tty_buffer_request_room);
* Locking: Called functions may take tty->buf.lock
*/
-int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
- size_t size)
+int tty_insert_flip_string_fixed_flag(struct tty_struct *tty,
+ const unsigned char *chars, char flag, size_t size)
{
int copied = 0;
do {
- int space = tty_buffer_request_room(tty, size - copied);
+ int goal = min(size - copied, TTY_BUFFER_PAGE);
+ int space = tty_buffer_request_room(tty, goal);
struct tty_buffer *tb = tty->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0))
break;
memcpy(tb->char_buf_ptr + tb->used, chars, space);
- memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space);
+ memset(tb->flag_buf_ptr + tb->used, flag, space);
tb->used += space;
copied += space;
chars += space;
@@ -262,7 +264,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars,
} while (unlikely(size > copied));
return copied;
}
-EXPORT_SYMBOL(tty_insert_flip_string);
+EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);
/**
* tty_insert_flip_string_flags - Add characters to the tty buffer
@@ -283,7 +285,8 @@ int tty_insert_flip_string_flags(struct tty_struct *tty,
{
int copied = 0;
do {
- int space = tty_buffer_request_room(tty, size - copied);
+ int goal = min(size - copied, TTY_BUFFER_PAGE);
+ int space = tty_buffer_request_room(tty, goal);
struct tty_buffer *tb = tty->buf.tail;
/* If there is no space then tb may be NULL */
if (unlikely(space == 0))
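Review note: since tty_insert_flip_string() is generalised above into tty_insert_flip_string_fixed_flag(), existing callers presumably keep the old entry point as a thin wrapper that passes TTY_NORMAL as the flag. The exact header placement is an assumption (the corresponding include/linux change is not part of this excerpt), but the wrapper would look roughly like this:

/* Sketch: keep the old entry point as a wrapper that fills TTY_NORMAL flags. */
static inline int tty_insert_flip_string(struct tty_struct *tty,
                                         const unsigned char *chars,
                                         size_t size)
{
        return tty_insert_flip_string_fixed_flag(tty, chars, TTY_NORMAL, size);
}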
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index 3f653f7d849..500e740ec5e 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -706,12 +706,13 @@ static void tty_reset_termios(struct tty_struct *tty)
/**
* tty_ldisc_reinit - reinitialise the tty ldisc
* @tty: tty to reinit
+ * @ldisc: line discipline to reinitialize
*
- * Switch the tty back to N_TTY line discipline and leave the
- * ldisc state closed
+ * Switch the tty to a line discipline and leave the ldisc
+ * state closed
*/
-static void tty_ldisc_reinit(struct tty_struct *tty)
+static void tty_ldisc_reinit(struct tty_struct *tty, int ldisc)
{
struct tty_ldisc *ld;
@@ -721,10 +722,10 @@ static void tty_ldisc_reinit(struct tty_struct *tty)
/*
* Switch the line discipline back
*/
- ld = tty_ldisc_get(N_TTY);
+ ld = tty_ldisc_get(ldisc);
BUG_ON(IS_ERR(ld));
tty_ldisc_assign(tty, ld);
- tty_set_termios_ldisc(tty, N_TTY);
+ tty_set_termios_ldisc(tty, ldisc);
}
/**
@@ -745,6 +746,8 @@ static void tty_ldisc_reinit(struct tty_struct *tty)
void tty_ldisc_hangup(struct tty_struct *tty)
{
struct tty_ldisc *ld;
+ int reset = tty->driver->flags & TTY_DRIVER_RESET_TERMIOS;
+ int err = 0;
/*
* FIXME! What are the locking issues here? This may me overdoing
@@ -772,25 +775,32 @@ void tty_ldisc_hangup(struct tty_struct *tty)
wake_up_interruptible_poll(&tty->read_wait, POLLIN);
/*
* Shutdown the current line discipline, and reset it to
- * N_TTY.
+ * N_TTY if need be.
+ *
+ * Avoid racing set_ldisc or tty_ldisc_release
*/
- if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
- /* Avoid racing set_ldisc or tty_ldisc_release */
- mutex_lock(&tty->ldisc_mutex);
- tty_ldisc_halt(tty);
- if (tty->ldisc) { /* Not yet closed */
- /* Switch back to N_TTY */
- tty_ldisc_reinit(tty);
- /* At this point we have a closed ldisc and we want to
- reopen it. We could defer this to the next open but
- it means auditing a lot of other paths so this is
- a FIXME */
+ mutex_lock(&tty->ldisc_mutex);
+ tty_ldisc_halt(tty);
+ /* At this point we have a closed ldisc and we want to
+ reopen it. We could defer this to the next open but
+ it means auditing a lot of other paths so this is
+ a FIXME */
+ if (tty->ldisc) { /* Not yet closed */
+ if (reset == 0) {
+ tty_ldisc_reinit(tty, tty->termios->c_line);
+ err = tty_ldisc_open(tty, tty->ldisc);
+ }
+ /* If the re-open fails or we reset then go to N_TTY. The
+ N_TTY open cannot fail */
+ if (reset || err) {
+ tty_ldisc_reinit(tty, N_TTY);
WARN_ON(tty_ldisc_open(tty, tty->ldisc));
- tty_ldisc_enable(tty);
}
- mutex_unlock(&tty->ldisc_mutex);
- tty_reset_termios(tty);
+ tty_ldisc_enable(tty);
}
+ mutex_unlock(&tty->ldisc_mutex);
+ if (reset)
+ tty_reset_termios(tty);
}
/**
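Review note: the hangup hunk above is hard to read interleaved with the removed lines. Pieced together, the new behaviour is: keep the current line discipline across the hangup when the driver does not set TTY_DRIVER_RESET_TERMIOS, and fall back to N_TTY when it does or when the re-open fails. A simplified, hypothetical helper showing that control flow (locking and the halt/wakeup steps are omitted; tty_ldisc_reinit() is actually static to tty_ldisc.c):

/* Pieced-together view of the new hangup re-open logic (locking omitted). */
static void hangup_reopen_ldisc(struct tty_struct *tty, bool reset)
{
        int err = 0;

        if (!tty->ldisc)        /* already closed, nothing to re-open */
                return;

        if (!reset) {
                /* keep the line discipline the tty was using */
                tty_ldisc_reinit(tty, tty->termios->c_line);
                err = tty_ldisc_open(tty, tty->ldisc);
        }
        if (reset || err) {
                /* fall back to N_TTY; its open cannot fail */
                tty_ldisc_reinit(tty, N_TTY);
                WARN_ON(tty_ldisc_open(tty, tty->ldisc));
        }
        tty_ldisc_enable(tty);
}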
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 6aa10284104..87778dcf872 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -888,7 +888,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
ret = -EFAULT;
goto out;
}
- if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) {
+ if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) {
ret = -EINVAL;
goto out;
}
@@ -1622,7 +1622,7 @@ static void complete_change_console(struct vc_data *vc)
* telling it that it has acquired. Also check if it has died and
* clean up (similar to logic employed in change_console())
*/
- if (vc->vt_mode.mode == VT_PROCESS) {
+ if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
@@ -1682,7 +1682,7 @@ void change_console(struct vc_data *new_vc)
* vt to auto control.
*/
vc = vc_cons[fg_console].d;
- if (vc->vt_mode.mode == VT_PROCESS) {
+ if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
/*
* Send the signal as privileged - kill_pid() will
* tell us if the process has gone or something else
@@ -1693,27 +1693,28 @@ void change_console(struct vc_data *new_vc)
*/
vc->vt_newvt = new_vc->vc_num;
if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
+ if (vc->vt_mode.mode == VT_PROCESS)
+ /*
+ * It worked. Mark the vt to switch to and
+ * return. The process needs to send us a
+ * VT_RELDISP ioctl to complete the switch.
+ */
+ return;
+ } else {
/*
- * It worked. Mark the vt to switch to and
- * return. The process needs to send us a
- * VT_RELDISP ioctl to complete the switch.
+ * The controlling process has died, so we revert back to
+ * normal operation. In this case, we'll also change back
+ * to KD_TEXT mode. I'm not sure if this is strictly correct
+ * but it saves the agony when the X server dies and the screen
+ * remains blanked due to KD_GRAPHICS! It would be nice to do
+ * this outside of VT_PROCESS but there is no single process
+ * to account for and tracking tty count may be undesirable.
*/
- return;
+ reset_vc(vc);
}
/*
- * The controlling process has died, so we revert back to
- * normal operation. In this case, we'll also change back
- * to KD_TEXT mode. I'm not sure if this is strictly correct
- * but it saves the agony when the X server dies and the screen
- * remains blanked due to KD_GRAPHICS! It would be nice to do
- * this outside of VT_PROCESS but there is no single process
- * to account for and tracking tty count may be undesirable.
- */
- reset_vc(vc);
-
- /*
- * Fall through to normal (VT_AUTO) handling of the switch...
+ * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch...
*/
}
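Review note: with VT_PROCESS_AUTO accepted by VT_SETMODE above, a controlling process (an X server, for example) can be signalled about console switches without being required to acknowledge them with VT_RELDISP. A hedged userspace sketch follows; the VT_PROCESS_AUTO constant is assumed to come from the matching include/linux/vt.h change elsewhere in this series, and the signal choices and error handling are illustrative only.

#include <fcntl.h>
#include <signal.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/vt.h>   /* VT_SETMODE, VT_PROCESS_AUTO (added by this series) */

/* Ask to be told about console switches without having to ack them. */
int watch_console_switches(const char *console /* e.g. "/dev/tty0" */)
{
        struct vt_mode vtm = {
                .mode   = VT_PROCESS_AUTO, /* signal us, but switch without VT_RELDISP */
                .relsig = SIGUSR1,         /* delivered when the console is released  */
                .acqsig = SIGUSR2,         /* delivered when the console is acquired  */
        };
        int fd = open(console, O_RDWR);

        if (fd < 0)
                return -1;
        if (ioctl(fd, VT_SETMODE, &vtm) < 0) {
                close(fd);
                return -1;
        }
        return fd;
}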
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
deleted file mode 100644
index 08f726c5fee..00000000000
--- a/drivers/clocksource/Kconfig
+++ /dev/null
@@ -1,9 +0,0 @@
-config CS5535_CLOCK_EVENT_SRC
- tristate "CS5535/CS5536 high-res timer (MFGPT) events"
- depends on GENERIC_TIME && GENERIC_CLOCKEVENTS && CS5535_MFGPT
- help
- This driver provides a clock event source based on the MFGPT
- timer(s) in the CS5535 and CS5536 companion chips.
- MFGPTs have a better resolution and max interval than the
- generic PIT, and are suitable for use as high-res timers.
-
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 73655aeb3a6..1aea7157d8f 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -100,8 +100,8 @@ struct menu_device {
int needs_update;
unsigned int expected_us;
- u64 predicted_us;
unsigned int measured_us;
+ u64 predicted_us;
unsigned int exit_us;
unsigned int bucket;
u64 correction_factor[BUCKETS];
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index e02d74b1e89..c27f80e5d53 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -13,6 +13,22 @@ menuconfig DMADEVICES
DMA Device drivers supported by the configured arch, it may
be empty in some cases.
+config DMADEVICES_DEBUG
+ bool "DMA Engine debugging"
+ depends on DMADEVICES != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables DMA engine core and driver debugging.
+
+config DMADEVICES_VDEBUG
+ bool "DMA Engine verbose debugging"
+ depends on DMADEVICES_DEBUG != n
+ help
+ This is an option for use by developers; most people should
+ say N here. This enables deeper (more verbose) debugging of
+ the DMA engine core and drivers.
+
+
if DMADEVICES
comment "DMA Devices"
@@ -69,6 +85,13 @@ config FSL_DMA
The Elo is the DMA controller on some 82xx and 83xx parts, and the
Elo Plus is the DMA controller on 85xx and 86xx parts.
+config MPC512X_DMA
+ tristate "Freescale MPC512x built-in DMA engine support"
+ depends on PPC_MPC512x
+ select DMA_ENGINE
+ ---help---
+ Enable support for the Freescale MPC512x built-in DMA engine.
+
config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 807053d4823..22bba3d5e2b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -1,9 +1,17 @@
+ifeq ($(CONFIG_DMADEVICES_DEBUG),y)
+ EXTRA_CFLAGS += -DDEBUG
+endif
+ifeq ($(CONFIG_DMADEVICES_VDEBUG),y)
+ EXTRA_CFLAGS += -DVERBOSE_DEBUG
+endif
+
obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
obj-$(CONFIG_NET_DMA) += iovlock.o
obj-$(CONFIG_DMATEST) += dmatest.o
obj-$(CONFIG_INTEL_IOATDMA) += ioat/
obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
+obj-$(CONFIG_MPC512X_DMA) += mpc512x_dma.o
obj-$(CONFIG_MV_XOR) += mv_xor.o
obj-$(CONFIG_DW_DMAC) += dw_dmac.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
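Review note: the two new Kconfig symbols only add -DDEBUG and -DVERBOSE_DEBUG to the compile flags for drivers/dma/. Their effect is that dev_dbg()/pr_debug() calls in these drivers are compiled in when CONFIG_DMADEVICES_DEBUG=y, and dev_vdbg() calls additionally when CONFIG_DMADEVICES_VDEBUG=y; otherwise both compile away. A small illustrative function (not from the patch) showing the pattern this enables:

static void example_issue(struct device *dev, int cookie)
{
        /* printed only when DEBUG is defined (or dynamic debug enables it) */
        dev_dbg(dev, "issuing cookie %d\n", cookie);
        /* printed only when VERBOSE_DEBUG is defined */
        dev_vdbg(dev, "verbose: descriptor dump would follow here\n");
}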
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 64a937262a4..1656fdcdb6c 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -39,7 +39,6 @@ struct coh901318_desc {
unsigned int sg_len;
struct coh901318_lli *data;
enum dma_data_direction dir;
- int pending_irqs;
unsigned long flags;
};
@@ -72,7 +71,6 @@ struct coh901318_chan {
unsigned long nbr_active_done;
unsigned long busy;
- int pending_irqs;
struct coh901318_base *base;
};
@@ -80,18 +78,16 @@ struct coh901318_chan {
static void coh901318_list_print(struct coh901318_chan *cohc,
struct coh901318_lli *lli)
{
- struct coh901318_lli *l;
- dma_addr_t addr = virt_to_phys(lli);
+ struct coh901318_lli *l = lli;
int i = 0;
- while (addr) {
- l = phys_to_virt(addr);
+ while (l) {
dev_vdbg(COHC_2_DEV(cohc), "i %d, lli %p, ctrl 0x%x, src 0x%x"
- ", dst 0x%x, link 0x%x link_virt 0x%p\n",
+ ", dst 0x%x, link 0x%x virt_link_addr 0x%p\n",
i, l, l->control, l->src_addr, l->dst_addr,
- l->link_addr, phys_to_virt(l->link_addr));
+ l->link_addr, l->virt_link_addr);
i++;
- addr = l->link_addr;
+ l = l->virt_link_addr;
}
}
@@ -125,7 +121,7 @@ static int coh901318_debugfs_read(struct file *file, char __user *buf,
goto err_kmalloc;
tmp = dev_buf;
- tmp += sprintf(tmp, "DMA -- enable dma channels\n");
+ tmp += sprintf(tmp, "DMA -- enabled dma channels\n");
for (i = 0; i < debugfs_dma_base->platform->max_channels; i++)
if (started_channels & (1 << i))
@@ -337,16 +333,22 @@ coh901318_desc_get(struct coh901318_chan *cohc)
* TODO: alloc a pile of descs instead of just one,
* avoid many small allocations.
*/
- desc = kmalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
+ desc = kzalloc(sizeof(struct coh901318_desc), GFP_NOWAIT);
if (desc == NULL)
goto out;
INIT_LIST_HEAD(&desc->node);
+ dma_async_tx_descriptor_init(&desc->desc, &cohc->chan);
} else {
/* Reuse an old desc. */
desc = list_first_entry(&cohc->free,
struct coh901318_desc,
node);
list_del(&desc->node);
+ /* Initialize it a bit so it's not insane */
+ desc->sg = NULL;
+ desc->sg_len = 0;
+ desc->desc.callback = NULL;
+ desc->desc.callback_param = NULL;
}
out:
@@ -364,10 +366,6 @@ static void
coh901318_desc_submit(struct coh901318_chan *cohc, struct coh901318_desc *desc)
{
list_add_tail(&desc->node, &cohc->active);
-
- BUG_ON(cohc->pending_irqs != 0);
-
- cohc->pending_irqs = desc->pending_irqs;
}
static struct coh901318_desc *
@@ -592,6 +590,10 @@ static struct coh901318_desc *coh901318_queue_start(struct coh901318_chan *cohc)
return cohd_que;
}
+/*
+ * This tasklet is called from the interrupt handler to
+ * handle each descriptor (DMA job) that is sent to a channel.
+ */
static void dma_tasklet(unsigned long data)
{
struct coh901318_chan *cohc = (struct coh901318_chan *) data;
@@ -600,55 +602,58 @@ static void dma_tasklet(unsigned long data)
dma_async_tx_callback callback;
void *callback_param;
+ dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d"
+ " nbr_active_done %ld\n", __func__,
+ cohc->id, cohc->nbr_active_done);
+
spin_lock_irqsave(&cohc->lock, flags);
- /* get first active entry from list */
+ /* get first active descriptor entry from list */
cohd_fin = coh901318_first_active_get(cohc);
- BUG_ON(cohd_fin->pending_irqs == 0);
-
if (cohd_fin == NULL)
goto err;
- cohd_fin->pending_irqs--;
- cohc->completed = cohd_fin->desc.cookie;
+ /* locate callback to client */
+ callback = cohd_fin->desc.callback;
+ callback_param = cohd_fin->desc.callback_param;
- if (cohc->nbr_active_done == 0)
- return;
+ /* sign this job as completed on the channel */
+ cohc->completed = cohd_fin->desc.cookie;
- if (!cohd_fin->pending_irqs) {
- /* release the lli allocation*/
- coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
- }
+ /* release the lli allocation and remove the descriptor */
+ coh901318_lli_free(&cohc->base->pool, &cohd_fin->data);
- dev_vdbg(COHC_2_DEV(cohc), "[%s] chan_id %d pending_irqs %d"
- " nbr_active_done %ld\n", __func__,
- cohc->id, cohc->pending_irqs, cohc->nbr_active_done);
+ /* return desc to free-list */
+ coh901318_desc_remove(cohd_fin);
+ coh901318_desc_free(cohc, cohd_fin);
- /* callback to client */
- callback = cohd_fin->desc.callback;
- callback_param = cohd_fin->desc.callback_param;
-
- if (!cohd_fin->pending_irqs) {
- coh901318_desc_remove(cohd_fin);
+ spin_unlock_irqrestore(&cohc->lock, flags);
- /* return desc to free-list */
- coh901318_desc_free(cohc, cohd_fin);
- }
+ /* Call the callback when we're done */
+ if (callback)
+ callback(callback_param);
- if (cohc->nbr_active_done)
- cohc->nbr_active_done--;
+ spin_lock_irqsave(&cohc->lock, flags);
+ /*
+ * If another interrupt fired while the tasklet was scheduling,
+ * we don't get called twice, so we keep this counter of active
+ * jobs to track the number of IRQs expected to be handled for
+ * this channel. If there happens to be more than one IRQ to be
+ * acked, we simply schedule this tasklet again.
+ */
+ cohc->nbr_active_done--;
if (cohc->nbr_active_done) {
+ dev_dbg(COHC_2_DEV(cohc), "scheduling tasklet again, new IRQs "
+ "came in while we were scheduling this tasklet\n");
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
tasklet_schedule(&cohc->tasklet);
}
- spin_unlock_irqrestore(&cohc->lock, flags);
- if (callback)
- callback(callback_param);
+ spin_unlock_irqrestore(&cohc->lock, flags);
return;
@@ -667,16 +672,17 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
if (!cohc->allocated)
return;
- BUG_ON(cohc->pending_irqs == 0);
+ spin_lock(&cohc->lock);
- cohc->pending_irqs--;
cohc->nbr_active_done++;
- if (cohc->pending_irqs == 0 && coh901318_queue_start(cohc) == NULL)
+ if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
BUG_ON(list_empty(&cohc->active));
+ spin_unlock(&cohc->lock);
+
if (cohc_chan_conf(cohc)->priority_high)
tasklet_hi_schedule(&cohc->tasklet);
else
@@ -870,6 +876,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
struct coh901318_chan *cohc = to_coh901318_chan(chan);
int lli_len;
u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+ int ret;
spin_lock_irqsave(&cohc->lock, flg);
@@ -890,22 +897,19 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
if (data == NULL)
goto err;
- cohd = coh901318_desc_get(cohc);
- cohd->sg = NULL;
- cohd->sg_len = 0;
- cohd->data = data;
-
- cohd->pending_irqs =
- coh901318_lli_fill_memcpy(
- &cohc->base->pool, data, src, size, dest,
- cohc_chan_param(cohc)->ctrl_lli_chained,
- ctrl_last);
- cohd->flags = flags;
+ ret = coh901318_lli_fill_memcpy(
+ &cohc->base->pool, data, src, size, dest,
+ cohc_chan_param(cohc)->ctrl_lli_chained,
+ ctrl_last);
+ if (ret)
+ goto err;
COH_DBG(coh901318_list_print(cohc, data));
- dma_async_tx_descriptor_init(&cohd->desc, chan);
-
+ /* Pick a descriptor to handle this transfer */
+ cohd = coh901318_desc_get(cohc);
+ cohd->data = data;
+ cohd->flags = flags;
cohd->desc.tx_submit = coh901318_tx_submit;
spin_unlock_irqrestore(&cohc->lock, flg);
@@ -924,6 +928,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct coh901318_chan *cohc = to_coh901318_chan(chan);
struct coh901318_lli *data;
struct coh901318_desc *cohd;
+ const struct coh901318_params *params;
struct scatterlist *sg;
int len = 0;
int size;
@@ -931,7 +936,9 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 ctrl_chained = cohc_chan_param(cohc)->ctrl_lli_chained;
u32 ctrl = cohc_chan_param(cohc)->ctrl_lli;
u32 ctrl_last = cohc_chan_param(cohc)->ctrl_lli_last;
+ u32 config;
unsigned long flg;
+ int ret;
if (!sgl)
goto out;
@@ -947,15 +954,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
/* Trigger interrupt after last lli */
ctrl_last |= COH901318_CX_CTRL_TC_IRQ_ENABLE;
- cohd = coh901318_desc_get(cohc);
- cohd->sg = NULL;
- cohd->sg_len = 0;
- cohd->dir = direction;
+ params = cohc_chan_param(cohc);
+ config = params->config;
if (direction == DMA_TO_DEVICE) {
u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
+ config |= COH901318_CX_CFG_RM_MEMORY_TO_PRIMARY;
ctrl_chained |= tx_flags;
ctrl_last |= tx_flags;
ctrl |= tx_flags;
@@ -963,16 +969,14 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
+ config |= COH901318_CX_CFG_RM_PRIMARY_TO_MEMORY;
ctrl_chained |= rx_flags;
ctrl_last |= rx_flags;
ctrl |= rx_flags;
} else
goto err_direction;
- dma_async_tx_descriptor_init(&cohd->desc, chan);
-
- cohd->desc.tx_submit = coh901318_tx_submit;
-
+ coh901318_set_conf(cohc, config);
/* The dma only supports transmitting packages up to
* MAX_DMA_PACKET_SIZE. Calculate to total number of
@@ -994,32 +998,37 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
len += factor;
}
+ pr_debug("Allocate %d lli:s for this transfer\n", len);
data = coh901318_lli_alloc(&cohc->base->pool, len);
if (data == NULL)
goto err_dma_alloc;
/* initiate allocated data list */
- cohd->pending_irqs =
- coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
- cohc_dev_addr(cohc),
- ctrl_chained,
- ctrl,
- ctrl_last,
- direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
- cohd->data = data;
-
- cohd->flags = flags;
+ ret = coh901318_lli_fill_sg(&cohc->base->pool, data, sgl, sg_len,
+ cohc_dev_addr(cohc),
+ ctrl_chained,
+ ctrl,
+ ctrl_last,
+ direction, COH901318_CX_CTRL_TC_IRQ_ENABLE);
+ if (ret)
+ goto err_lli_fill;
COH_DBG(coh901318_list_print(cohc, data));
+ /* Pick a descriptor to handle this transfer */
+ cohd = coh901318_desc_get(cohc);
+ cohd->dir = direction;
+ cohd->flags = flags;
+ cohd->desc.tx_submit = coh901318_tx_submit;
+ cohd->data = data;
+
spin_unlock_irqrestore(&cohc->lock, flg);
return &cohd->desc;
+ err_lli_fill:
err_dma_alloc:
err_direction:
- coh901318_desc_remove(cohd);
- coh901318_desc_free(cohc, cohd);
spin_unlock_irqrestore(&cohc->lock, flg);
out:
return NULL;
@@ -1092,9 +1101,8 @@ coh901318_terminate_all(struct dma_chan *chan)
/* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data);
- coh901318_desc_remove(cohd);
-
/* return desc to free-list */
+ coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd);
}
@@ -1102,16 +1110,14 @@ coh901318_terminate_all(struct dma_chan *chan)
/* release the lli allocation*/
coh901318_lli_free(&cohc->base->pool, &cohd->data);
- coh901318_desc_remove(cohd);
-
/* return desc to free-list */
+ coh901318_desc_remove(cohd);
coh901318_desc_free(cohc, cohd);
}
cohc->nbr_active_done = 0;
cohc->busy = 0;
- cohc->pending_irqs = 0;
spin_unlock_irqrestore(&cohc->lock, flags);
}
@@ -1138,7 +1144,6 @@ void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
spin_lock_init(&cohc->lock);
- cohc->pending_irqs = 0;
cohc->nbr_active_done = 0;
cohc->busy = 0;
INIT_LIST_HEAD(&cohc->free);
@@ -1254,12 +1259,17 @@ static int __init coh901318_probe(struct platform_device *pdev)
base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
base->dma_memcpy.dev = &pdev->dev;
+ /*
+ * This controller can only access addresses at even 32-bit
+ * boundaries, i.e. a copy alignment of 2^2 bytes
+ */
+ base->dma_memcpy.copy_align = 2;
err = dma_async_device_register(&base->dma_memcpy);
if (err)
goto err_register_memcpy;
- dev_dbg(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
+ dev_info(&pdev->dev, "Initialized COH901318 DMA on virtual base 0x%08x\n",
(u32) base->virtbase);
return err;
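Review note: the coh901318 rework above replaces the per-descriptor pending_irqs bookkeeping with one completed job per interrupt: the IRQ handler bumps nbr_active_done and schedules the tasklet, and the tasklet completes exactly one descriptor per run, rescheduling itself while the counter stays non-zero. A stripped-down sketch of that producer/consumer pattern; struct chan, complete_one_descriptor() and the field names are generic placeholders, not the driver's types, and the locking is reduced to a comment.

/* Illustrative only: IRQs may outpace the tasklet, so they are counted. */
static void example_irq(struct chan *c)
{
        /* runs under c->lock in the real driver */
        c->nbr_active_done++;
        tasklet_schedule(&c->tasklet);
}

static void example_tasklet(unsigned long data)
{
        struct chan *c = (struct chan *)data;

        /* complete exactly one finished descriptor per run ... */
        complete_one_descriptor(c);

        /* ... then re-run if more IRQs arrived in the meantime */
        if (--c->nbr_active_done)
                tasklet_schedule(&c->tasklet);
}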
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index f5120f238a4..71d58c1a1e8 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -74,6 +74,8 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
lli = head;
lli->phy_this = phy;
+ lli->link_addr = 0x00000000;
+ lli->virt_link_addr = 0x00000000U;
for (i = 1; i < len; i++) {
lli_prev = lli;
@@ -85,13 +87,13 @@ coh901318_lli_alloc(struct coh901318_pool *pool, unsigned int len)
DEBUGFS_POOL_COUNTER_ADD(pool, 1);
lli->phy_this = phy;
+ lli->link_addr = 0x00000000;
+ lli->virt_link_addr = 0x00000000U;
lli_prev->link_addr = phy;
lli_prev->virt_link_addr = lli;
}
- lli->link_addr = 0x00000000U;
-
spin_unlock(&pool->lock);
return head;
@@ -166,8 +168,7 @@ coh901318_lli_fill_memcpy(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- /* One irq per single transfer */
- return 1;
+ return 0;
}
int
@@ -223,8 +224,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
lli->src_addr = src;
lli->dst_addr = dst;
- /* One irq per single transfer */
- return 1;
+ return 0;
}
int
@@ -240,7 +240,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
u32 ctrl_sg;
dma_addr_t src = 0;
dma_addr_t dst = 0;
- int nbr_of_irq = 0;
u32 bytes_to_transfer;
u32 elem_size;
@@ -269,15 +268,12 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
ctrl_sg = ctrl ? ctrl : ctrl_last;
- if ((ctrl_sg & ctrl_irq_mask))
- nbr_of_irq++;
-
if (dir == DMA_TO_DEVICE)
/* increment source address */
- src = sg_dma_address(sg);
+ src = sg_phys(sg);
else
/* increment destination address */
- dst = sg_dma_address(sg);
+ dst = sg_phys(sg);
bytes_to_transfer = sg_dma_len(sg);
@@ -310,8 +306,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
}
spin_unlock(&pool->lock);
- /* There can be many IRQs per sg transfer */
- return nbr_of_irq;
+ return 0;
err:
spin_unlock(&pool->lock);
return -EINVAL;
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index e7a3230fb7d..87399cafce3 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -284,7 +284,7 @@ struct dma_chan_tbl_ent {
/**
* channel_table - percpu lookup table for memory-to-memory offload providers
*/
-static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];
+static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 948d563941c..6fa55fe3dd2 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -237,7 +237,7 @@ static int dmatest_func(void *data)
dma_cookie_t cookie;
enum dma_status status;
enum dma_ctrl_flags flags;
- u8 pq_coefs[pq_sources];
+ u8 pq_coefs[pq_sources + 1];
int ret;
int src_cnt;
int dst_cnt;
@@ -257,7 +257,7 @@ static int dmatest_func(void *data)
} else if (thread->type == DMA_PQ) {
src_cnt = pq_sources | 1; /* force odd to ensure dst = src */
dst_cnt = 2;
- for (i = 0; i < pq_sources; i++)
+ for (i = 0; i < src_cnt; i++)
pq_coefs[i] = 1;
} else
goto err_srcs;
@@ -347,7 +347,7 @@ static int dmatest_func(void *data)
else if (thread->type == DMA_XOR)
tx = dev->device_prep_dma_xor(chan,
dma_dsts[0] + dst_off,
- dma_srcs, xor_sources,
+ dma_srcs, src_cnt,
len, flags);
else if (thread->type == DMA_PQ) {
dma_addr_t dma_pq[dst_cnt];
@@ -355,7 +355,7 @@ static int dmatest_func(void *data)
for (i = 0; i < dst_cnt; i++)
dma_pq[i] = dma_dsts[i] + dst_off;
tx = dev->device_prep_dma_pq(chan, dma_pq, dma_srcs,
- pq_sources, pq_coefs,
+ src_cnt, pq_coefs,
len, flags);
}
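Review note: the dmatest fix above follows from src_cnt = pq_sources | 1. Forcing the source count odd means src_cnt can be pq_sources + 1 when pq_sources is even, so both the coefficient array and the loops must be sized by src_cnt rather than pq_sources. A tiny illustration of the arithmetic (values chosen for the example):

/* Why pq_coefs needs pq_sources + 1 entries: src_cnt may exceed pq_sources. */
static void pq_sizing_example(void)
{
        unsigned int pq_sources = 4;              /* module parameter, may be even   */
        unsigned int src_cnt    = pq_sources | 1; /* forced odd -> 5                 */
        unsigned char pq_coefs[4 + 1];            /* must cover indices 0..src_cnt-1 */

        (void)src_cnt;
        (void)pq_coefs;
}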
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 296f9e747fa..bbb4be5a3ff 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -37,19 +37,19 @@
#include <asm/fsldma.h>
#include "fsldma.h"
-static void dma_init(struct fsl_dma_chan *fsl_chan)
+static void dma_init(struct fsldma_chan *chan)
{
/* Reset the channel */
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);
+ DMA_OUT(chan, &chan->regs->mr, 0, 32);
- switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
+ switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
/* Set the channel to below modes:
* EIE - Error interrupt enable
* EOSIE - End of segments interrupt enable (basic mode)
* EOLNIE - End of links interrupt enable
*/
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EIE
| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
break;
case FSL_DMA_IP_83XX:
@@ -57,170 +57,146 @@ static void dma_init(struct fsl_dma_chan *fsl_chan)
* EOTIE - End-of-transfer interrupt enable
* PRC_RM - PCI read multiple
*/
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE
+ DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
| FSL_DMA_MR_PRC_RM, 32);
break;
}
-
}
-static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
+static void set_sr(struct fsldma_chan *chan, u32 val)
{
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
+ DMA_OUT(chan, &chan->regs->sr, val, 32);
}
-static u32 get_sr(struct fsl_dma_chan *fsl_chan)
+static u32 get_sr(struct fsldma_chan *chan)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
+ return DMA_IN(chan, &chan->regs->sr, 32);
}
-static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
+static void set_desc_cnt(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, u32 count)
{
- hw->count = CPU_TO_DMA(fsl_chan, count, 32);
+ hw->count = CPU_TO_DMA(chan, count, 32);
}
-static void set_desc_src(struct fsl_dma_chan *fsl_chan,
+static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
- hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
+ hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}
-static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
- struct fsl_dma_ld_hw *hw, dma_addr_t dest)
+static void set_desc_dst(struct fsldma_chan *chan,
+ struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
- hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
+ hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}
-static void set_desc_next(struct fsl_dma_chan *fsl_chan,
+static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
? FSL_DMA_SNEN : 0;
- hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
-}
-
-static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
-{
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
+ hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
}
-static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
+static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
+ DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
}
-static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
+static dma_addr_t get_cdar(struct fsldma_chan *chan)
{
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
+ return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
}
-static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
+static dma_addr_t get_ndar(struct fsldma_chan *chan)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
+ return DMA_IN(chan, &chan->regs->ndar, 64);
}
-static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
+static u32 get_bcr(struct fsldma_chan *chan)
{
- return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
+ return DMA_IN(chan, &chan->regs->bcr, 32);
}
-static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
+static int dma_is_idle(struct fsldma_chan *chan)
{
- u32 sr = get_sr(fsl_chan);
+ u32 sr = get_sr(chan);
return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}
-static void dma_start(struct fsl_dma_chan *fsl_chan)
+static void dma_start(struct fsldma_chan *chan)
{
- u32 mr_set = 0;
-
- if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
- mr_set |= FSL_DMA_MR_EMP_EN;
- } else if ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- & ~FSL_DMA_MR_EMP_EN, 32);
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+
+ if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
+ if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
+ DMA_OUT(chan, &chan->regs->bcr, 0, 32);
+ mode |= FSL_DMA_MR_EMP_EN;
+ } else {
+ mode &= ~FSL_DMA_MR_EMP_EN;
+ }
}
- if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
- mr_set |= FSL_DMA_MR_EMS_EN;
+ if (chan->feature & FSL_DMA_CHAN_START_EXT)
+ mode |= FSL_DMA_MR_EMS_EN;
else
- mr_set |= FSL_DMA_MR_CS;
+ mode |= FSL_DMA_MR_CS;
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- | mr_set, 32);
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
-static void dma_halt(struct fsl_dma_chan *fsl_chan)
+static void dma_halt(struct fsldma_chan *chan)
{
+ u32 mode;
int i;
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
- 32);
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
- | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode |= FSL_DMA_MR_CA;
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
+
+ mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA);
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
for (i = 0; i < 100; i++) {
- if (dma_is_idle(fsl_chan))
- break;
+ if (dma_is_idle(chan))
+ return;
+
udelay(10);
}
- if (i >= 100 && !dma_is_idle(fsl_chan))
- dev_err(fsl_chan->dev, "DMA halt timeout!\n");
+
+ if (!dma_is_idle(chan))
+ dev_err(chan->dev, "DMA halt timeout!\n");
}
-static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
+static void set_ld_eol(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
u64 snoop_bits;
- snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
+ snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
? FSL_DMA_SNEN : 0;
- desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
- DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
+ desc->hw.next_ln_addr = CPU_TO_DMA(chan,
+ DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
| snoop_bits, 64);
}
-static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
- struct fsl_desc_sw *new_desc)
-{
- struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);
-
- if (list_empty(&fsl_chan->ld_queue))
- return;
-
- /* Link to the new descriptor physical address and
- * Enable End-of-segment interrupt for
- * the last link descriptor.
- * (the previous node's next link descriptor)
- *
- * For FSL_DMA_IP_83xx, the snoop enable bit need be set.
- */
- queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
- new_desc->async_tx.phys | FSL_DMA_EOSIE |
- (((fsl_chan->feature & FSL_DMA_IP_MASK)
- == FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
-}
-
/**
* fsl_chan_set_src_loop_size - Set source address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @size : Address loop size, 0 for disable loop
*
* The set source address hold transfer size. The source
@@ -229,29 +205,30 @@ static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
* read data from SA, SA + 1, SA + 2, SA + 3, then loop back to SA,
* SA + 1 ... and so on.
*/
-static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
{
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+
switch (size) {
case 0:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
- (~FSL_DMA_MR_SAHE), 32);
+ mode &= ~FSL_DMA_MR_SAHE;
break;
case 1:
case 2:
case 4:
case 8:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
- FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
- 32);
+ mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
break;
}
+
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
- * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
- * @fsl_chan : Freescale DMA channel
+ * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
+ * @chan : Freescale DMA channel
* @size : Address loop size, 0 for disable loop
*
* The set destination address hold transfer size. The destination
@@ -260,29 +237,30 @@ static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
* write data to TA, TA + 1, TA + 2, TA + 3, then loop back to TA,
* TA + 1 ... and so on.
*/
-static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
{
+ u32 mode;
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+
switch (size) {
case 0:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
- (~FSL_DMA_MR_DAHE), 32);
+ mode &= ~FSL_DMA_MR_DAHE;
break;
case 1:
case 2:
case 4:
case 8:
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
- FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
- 32);
+ mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
break;
}
+
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
* fsl_chan_set_request_count - Set DMA Request Count for external control
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @size : Number of bytes to transfer in a single request
*
* The Freescale DMA channel can be controlled by the external signal DREQ#.
@@ -292,35 +270,38 @@ static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
*
* A size of 0 disables external pause control. The maximum size is 1024.
*/
-static void fsl_chan_set_request_count(struct fsl_dma_chan *fsl_chan, int size)
+static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
{
+ u32 mode;
+
BUG_ON(size > 1024);
- DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
- DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
- | ((__ilog2(size) << 24) & 0x0f000000),
- 32);
+
+ mode = DMA_IN(chan, &chan->regs->mr, 32);
+ mode |= (__ilog2(size) << 24) & 0x0f000000;
+
+ DMA_OUT(chan, &chan->regs->mr, mode, 32);
}
/**
* fsl_chan_toggle_ext_pause - Toggle channel external pause status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @enable : 0 is disabled, 1 is enabled.
*
* The Freescale DMA channel can be controlled by the external signal DREQ#.
* The DMA Request Count feature should be used in addition to this feature
* to set the number of bytes to transfer before pausing the channel.
*/
-static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
{
if (enable)
- fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
+ chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
else
- fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
+ chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
}
/**
* fsl_chan_toggle_ext_start - Toggle channel external start status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
* @enable : 0 is disabled, 1 is enabled.
*
* If enable the external start, the channel can be started by an
@@ -328,141 +309,196 @@ static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int enable)
* transfer immediately. The DMA channel will wait for the
* control pin asserted.
*/
-static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable)
+static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
{
if (enable)
- fsl_chan->feature |= FSL_DMA_CHAN_START_EXT;
+ chan->feature |= FSL_DMA_CHAN_START_EXT;
else
- fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+ chan->feature &= ~FSL_DMA_CHAN_START_EXT;
+}
+
+static void append_ld_queue(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
+
+ if (list_empty(&chan->ld_pending))
+ goto out_splice;
+
+ /*
+ * Add the hardware descriptor to the chain of hardware descriptors
+ * that already exists in memory.
+ *
+ * This will un-set the EOL bit of the existing transaction, and the
+ * last link in this transaction will become the EOL descriptor.
+ */
+ set_desc_next(chan, &tail->hw, desc->async_tx.phys);
+
+ /*
+ * Add the software descriptor and all children to the list
+ * of pending transactions
+ */
+out_splice:
+ list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
}
static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan);
+ struct fsldma_chan *chan = to_fsl_chan(tx->chan);
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
unsigned long flags;
dma_cookie_t cookie;
- /* cookie increment and adding to ld_queue must be atomic */
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- cookie = fsl_chan->common.cookie;
+ /*
+ * assign cookies to all of the software descriptors
+ * that make up this transaction
+ */
+ cookie = chan->common.cookie;
list_for_each_entry(child, &desc->tx_list, node) {
cookie++;
if (cookie < 0)
cookie = 1;
- desc->async_tx.cookie = cookie;
+ child->async_tx.cookie = cookie;
}
- fsl_chan->common.cookie = cookie;
- append_ld_queue(fsl_chan, desc);
- list_splice_init(&desc->tx_list, fsl_chan->ld_queue.prev);
+ chan->common.cookie = cookie;
+
+ /* put this transaction onto the tail of the pending queue */
+ append_ld_queue(chan, desc);
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
return cookie;
}
/**
* fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*
* Return - The descriptor allocated. NULL for failed.
*/
static struct fsl_desc_sw *fsl_dma_alloc_descriptor(
- struct fsl_dma_chan *fsl_chan)
+ struct fsldma_chan *chan)
{
+ struct fsl_desc_sw *desc;
dma_addr_t pdesc;
- struct fsl_desc_sw *desc_sw;
-
- desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc);
- if (desc_sw) {
- memset(desc_sw, 0, sizeof(struct fsl_desc_sw));
- INIT_LIST_HEAD(&desc_sw->tx_list);
- dma_async_tx_descriptor_init(&desc_sw->async_tx,
- &fsl_chan->common);
- desc_sw->async_tx.tx_submit = fsl_dma_tx_submit;
- desc_sw->async_tx.phys = pdesc;
+
+ desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
+ if (!desc) {
+ dev_dbg(chan->dev, "out of memory for link desc\n");
+ return NULL;
}
- return desc_sw;
+ memset(desc, 0, sizeof(*desc));
+ INIT_LIST_HEAD(&desc->tx_list);
+ dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+ desc->async_tx.tx_submit = fsl_dma_tx_submit;
+ desc->async_tx.phys = pdesc;
+
+ return desc;
}
/**
* fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*
* This function will create a dma pool for descriptor allocation.
*
* Return - The number of descriptors allocated.
*/
-static int fsl_dma_alloc_chan_resources(struct dma_chan *chan)
+static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
/* Has this channel already been allocated? */
- if (fsl_chan->desc_pool)
+ if (chan->desc_pool)
return 1;
- /* We need the descriptor to be aligned to 32bytes
+ /*
+ * We need the descriptor to be aligned to 32bytes
* for meeting FSL DMA specification requirement.
*/
- fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
- fsl_chan->dev, sizeof(struct fsl_desc_sw),
- 32, 0);
- if (!fsl_chan->desc_pool) {
- dev_err(fsl_chan->dev, "No memory for channel %d "
- "descriptor dma pool.\n", fsl_chan->id);
- return 0;
+ chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool",
+ chan->dev,
+ sizeof(struct fsl_desc_sw),
+ __alignof__(struct fsl_desc_sw), 0);
+ if (!chan->desc_pool) {
+ dev_err(chan->dev, "unable to allocate channel %d "
+ "descriptor pool\n", chan->id);
+ return -ENOMEM;
}
+ /* there is at least one descriptor free to be allocated */
return 1;
}
/**
- * fsl_dma_free_chan_resources - Free all resources of the channel.
- * @fsl_chan : Freescale DMA channel
+ * fsldma_free_desc_list - Free all descriptors in a queue
+ * @chan: Freescale DMA channel
+ * @list: the list to free
+ *
+ * LOCKING: must hold chan->desc_lock
*/
-static void fsl_dma_free_chan_resources(struct dma_chan *chan)
+static void fsldma_free_desc_list(struct fsldma_chan *chan,
+ struct list_head *list)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
struct fsl_desc_sw *desc, *_desc;
- unsigned long flags;
- dev_dbg(fsl_chan->dev, "Free all channel resources.\n");
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
- list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
-#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(fsl_chan->dev,
- "LD %p will be released.\n", desc);
-#endif
+ list_for_each_entry_safe(desc, _desc, list, node) {
+ list_del(&desc->node);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
+ }
+}
+
+static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
+ struct list_head *list)
+{
+ struct fsl_desc_sw *desc, *_desc;
+
+ list_for_each_entry_safe_reverse(desc, _desc, list, node) {
list_del(&desc->node);
- /* free link descriptor */
- dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
- dma_pool_destroy(fsl_chan->desc_pool);
+}
+
+/**
+ * fsl_dma_free_chan_resources - Free all resources of the channel.
+ * @chan : Freescale DMA channel
+ */
+static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
+{
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ unsigned long flags;
+
+ dev_dbg(chan->dev, "Free all channel resources.\n");
+ spin_lock_irqsave(&chan->desc_lock, flags);
+ fsldma_free_desc_list(chan, &chan->ld_pending);
+ fsldma_free_desc_list(chan, &chan->ld_running);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
- fsl_chan->desc_pool = NULL;
+ dma_pool_destroy(chan->desc_pool);
+ chan->desc_pool = NULL;
}
static struct dma_async_tx_descriptor *
-fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
+fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
{
- struct fsl_dma_chan *fsl_chan;
+ struct fsldma_chan *chan;
struct fsl_desc_sw *new;
- if (!chan)
+ if (!dchan)
return NULL;
- fsl_chan = to_fsl_chan(chan);
+ chan = to_fsl_chan(dchan);
- new = fsl_dma_alloc_descriptor(fsl_chan);
+ new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(fsl_chan->dev, "No free memory for link descriptor\n");
+ dev_err(chan->dev, "No free memory for link descriptor\n");
return NULL;
}
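Review note: the fsldma rewrite splits the old single ld_queue into ld_pending (descriptors submitted but not yet handed to the hardware) and ld_running (descriptors the hardware is working on). The hunks shown here populate ld_pending in tx_submit and free both lists on teardown; the start path, which is not visible in this excerpt, presumably splices pending onto running when the channel is kicked. A generic sketch of that two-list life cycle; everything except the field names taken from the hunks above is illustrative.

/* Illustrative two-list descriptor life cycle (not the driver's exact code). */
static void example_submit(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
{
        /* tx_submit: queue behind whatever is already pending */
        list_add_tail(&desc->node, &chan->ld_pending);
}

static void example_start(struct fsldma_chan *chan)
{
        /* when the hardware goes idle: hand the pending chain to the engine */
        list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
        /* ...program CDAR and start the DMA transfer here... */
}

static void example_cleanup(struct fsldma_chan *chan)
{
        struct fsl_desc_sw *desc, *tmp;

        /* completion: free finished descriptors (real code runs callbacks first) */
        list_for_each_entry_safe(desc, tmp, &chan->ld_running, node) {
                list_del(&desc->node);
                dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
        }
}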
@@ -473,51 +509,50 @@ fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags)
list_add_tail(&new->node, &new->tx_list);
/* Set End-of-link to the last link descriptor of new list*/
- set_ld_eol(fsl_chan, new);
+ set_ld_eol(chan, new);
return &new->async_tx;
}
static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
- struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
+ struct dma_chan *dchan, dma_addr_t dma_dst, dma_addr_t dma_src,
size_t len, unsigned long flags)
{
- struct fsl_dma_chan *fsl_chan;
+ struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
- struct list_head *list;
size_t copy;
- if (!chan)
+ if (!dchan)
return NULL;
if (!len)
return NULL;
- fsl_chan = to_fsl_chan(chan);
+ chan = to_fsl_chan(dchan);
do {
/* Allocate the link descriptor from DMA pool */
- new = fsl_dma_alloc_descriptor(fsl_chan);
+ new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(fsl_chan->dev,
+ dev_err(chan->dev,
"No free memory for link descriptor\n");
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
- set_desc_cnt(fsl_chan, &new->hw, copy);
- set_desc_src(fsl_chan, &new->hw, dma_src);
- set_desc_dest(fsl_chan, &new->hw, dma_dest);
+ set_desc_cnt(chan, &new->hw, copy);
+ set_desc_src(chan, &new->hw, dma_src);
+ set_desc_dst(chan, &new->hw, dma_dst);
if (!first)
first = new;
else
- set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);
+ set_desc_next(chan, &prev->hw, new->async_tx.phys);
new->async_tx.cookie = 0;
async_tx_ack(&new->async_tx);
@@ -525,7 +560,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
prev = new;
len -= copy;
dma_src += copy;
- dma_dest += copy;
+ dma_dst += copy;
/* Insert the link descriptor to the LD ring */
list_add_tail(&new->node, &first->tx_list);
@@ -535,7 +570,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list*/
- set_ld_eol(fsl_chan, new);
+ set_ld_eol(chan, new);
return &first->async_tx;
@@ -543,12 +578,7 @@ fail:
if (!first)
return NULL;
- list = &first->tx_list;
- list_for_each_entry_safe_reverse(new, prev, list, node) {
- list_del(&new->node);
- dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
- }
-
+ fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
@@ -565,13 +595,12 @@ fail:
* chan->private variable.
*/
static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
- struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
+ struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
enum dma_data_direction direction, unsigned long flags)
{
- struct fsl_dma_chan *fsl_chan;
+ struct fsldma_chan *chan;
struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
struct fsl_dma_slave *slave;
- struct list_head *tx_list;
size_t copy;
int i;
@@ -581,14 +610,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
struct fsl_dma_hw_addr *hw;
dma_addr_t dma_dst, dma_src;
- if (!chan)
+ if (!dchan)
return NULL;
- if (!chan->private)
+ if (!dchan->private)
return NULL;
- fsl_chan = to_fsl_chan(chan);
- slave = chan->private;
+ chan = to_fsl_chan(dchan);
+ slave = dchan->private;
if (list_empty(&slave->addresses))
return NULL;
@@ -637,14 +666,14 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
}
/* Allocate the link descriptor from DMA pool */
- new = fsl_dma_alloc_descriptor(fsl_chan);
+ new = fsl_dma_alloc_descriptor(chan);
if (!new) {
- dev_err(fsl_chan->dev, "No free memory for "
+ dev_err(chan->dev, "No free memory for "
"link descriptor\n");
goto fail;
}
#ifdef FSL_DMA_LD_DEBUG
- dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
+ dev_dbg(chan->dev, "new link desc alloc %p\n", new);
#endif
/*
@@ -671,9 +700,9 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
}
/* Fill in the descriptor */
- set_desc_cnt(fsl_chan, &new->hw, copy);
- set_desc_src(fsl_chan, &new->hw, dma_src);
- set_desc_dest(fsl_chan, &new->hw, dma_dst);
+ set_desc_cnt(chan, &new->hw, copy);
+ set_desc_src(chan, &new->hw, dma_src);
+ set_desc_dst(chan, &new->hw, dma_dst);
/*
* If this is not the first descriptor, chain the
@@ -682,7 +711,7 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
if (!first) {
first = new;
} else {
- set_desc_next(fsl_chan, &prev->hw,
+ set_desc_next(chan, &prev->hw,
new->async_tx.phys);
}
@@ -708,23 +737,23 @@ finished:
new->async_tx.cookie = -EBUSY;
/* Set End-of-link to the last link descriptor of new list */
- set_ld_eol(fsl_chan, new);
+ set_ld_eol(chan, new);
/* Enable extra controller features */
- if (fsl_chan->set_src_loop_size)
- fsl_chan->set_src_loop_size(fsl_chan, slave->src_loop_size);
+ if (chan->set_src_loop_size)
+ chan->set_src_loop_size(chan, slave->src_loop_size);
- if (fsl_chan->set_dest_loop_size)
- fsl_chan->set_dest_loop_size(fsl_chan, slave->dst_loop_size);
+ if (chan->set_dst_loop_size)
+ chan->set_dst_loop_size(chan, slave->dst_loop_size);
- if (fsl_chan->toggle_ext_start)
- fsl_chan->toggle_ext_start(fsl_chan, slave->external_start);
+ if (chan->toggle_ext_start)
+ chan->toggle_ext_start(chan, slave->external_start);
- if (fsl_chan->toggle_ext_pause)
- fsl_chan->toggle_ext_pause(fsl_chan, slave->external_pause);
+ if (chan->toggle_ext_pause)
+ chan->toggle_ext_pause(chan, slave->external_pause);
- if (fsl_chan->set_request_count)
- fsl_chan->set_request_count(fsl_chan, slave->request_count);
+ if (chan->set_request_count)
+ chan->set_request_count(chan, slave->request_count);
return &first->async_tx;
@@ -741,215 +770,216 @@ fail:
*
* We're re-using variables for the loop, oh well
*/
- tx_list = &first->tx_list;
- list_for_each_entry_safe_reverse(new, prev, tx_list, node) {
- list_del_init(&new->node);
- dma_pool_free(fsl_chan->desc_pool, new, new->async_tx.phys);
- }
-
+ fsldma_free_desc_list_reverse(chan, &first->tx_list);
return NULL;
}
-static void fsl_dma_device_terminate_all(struct dma_chan *chan)
+static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
{
- struct fsl_dma_chan *fsl_chan;
- struct fsl_desc_sw *desc, *tmp;
+ struct fsldma_chan *chan;
unsigned long flags;
- if (!chan)
+ if (!dchan)
return;
- fsl_chan = to_fsl_chan(chan);
+ chan = to_fsl_chan(dchan);
/* Halt the DMA engine */
- dma_halt(fsl_chan);
+ dma_halt(chan);
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
/* Remove and free all of the descriptors in the LD queue */
- list_for_each_entry_safe(desc, tmp, &fsl_chan->ld_queue, node) {
- list_del(&desc->node);
- dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
- }
+ fsldma_free_desc_list(chan, &chan->ld_pending);
+ fsldma_free_desc_list(chan, &chan->ld_running);
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
* fsl_dma_update_completed_cookie - Update the completed cookie.
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
+ *
+ * CONTEXT: hardirq
*/
-static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
+static void fsl_dma_update_completed_cookie(struct fsldma_chan *chan)
{
- struct fsl_desc_sw *cur_desc, *desc;
- dma_addr_t ld_phy;
+ struct fsl_desc_sw *desc;
+ unsigned long flags;
+ dma_cookie_t cookie;
- ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;
+ spin_lock_irqsave(&chan->desc_lock, flags);
- if (ld_phy) {
- cur_desc = NULL;
- list_for_each_entry(desc, &fsl_chan->ld_queue, node)
- if (desc->async_tx.phys == ld_phy) {
- cur_desc = desc;
- break;
- }
+ if (list_empty(&chan->ld_running)) {
+ dev_dbg(chan->dev, "no running descriptors\n");
+ goto out_unlock;
+ }
- if (cur_desc && cur_desc->async_tx.cookie) {
- if (dma_is_idle(fsl_chan))
- fsl_chan->completed_cookie =
- cur_desc->async_tx.cookie;
- else
- fsl_chan->completed_cookie =
- cur_desc->async_tx.cookie - 1;
- }
+ /* Get the last running descriptor and update the completed cookie from it */
+ desc = to_fsl_desc(chan->ld_running.prev);
+ if (dma_is_idle(chan))
+ cookie = desc->async_tx.cookie;
+ else {
+ cookie = desc->async_tx.cookie - 1;
+ if (unlikely(cookie < DMA_MIN_COOKIE))
+ cookie = DMA_MAX_COOKIE;
}
+
+ chan->completed_cookie = cookie;
+
+out_unlock:
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+}
+
+/**
+ * fsldma_desc_status - Check the status of a descriptor
+ * @chan: Freescale DMA channel
+ * @desc: DMA SW descriptor
+ *
+ * This function will return the status of the given descriptor
+ */
+static enum dma_status fsldma_desc_status(struct fsldma_chan *chan,
+ struct fsl_desc_sw *desc)
+{
+ return dma_async_is_complete(desc->async_tx.cookie,
+ chan->completed_cookie,
+ chan->common.cookie);
}
/**
* fsl_chan_ld_cleanup - Clean up link descriptors
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*
* This function cleans up the ld_queue of the DMA channel.
- * If 'in_intr' is set, the function will move the link descriptor to
- * the recycle list. Otherwise, free it directly.
*/
-static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_ld_cleanup(struct fsldma_chan *chan)
{
struct fsl_desc_sw *desc, *_desc;
unsigned long flags;
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n",
- fsl_chan->completed_cookie);
- list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) {
+ dev_dbg(chan->dev, "chan completed_cookie = %d\n", chan->completed_cookie);
+ list_for_each_entry_safe(desc, _desc, &chan->ld_running, node) {
dma_async_tx_callback callback;
void *callback_param;
- if (dma_async_is_complete(desc->async_tx.cookie,
- fsl_chan->completed_cookie, fsl_chan->common.cookie)
- == DMA_IN_PROGRESS)
+ if (fsldma_desc_status(chan, desc) == DMA_IN_PROGRESS)
break;
- callback = desc->async_tx.callback;
- callback_param = desc->async_tx.callback_param;
-
- /* Remove from ld_queue list */
+ /* Remove from the list of running transactions */
list_del(&desc->node);
- dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n",
- desc);
- dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys);
-
/* Run the link descriptor callback function */
+ callback = desc->async_tx.callback;
+ callback_param = desc->async_tx.callback_param;
if (callback) {
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
- dev_dbg(fsl_chan->dev, "link descriptor %p callback\n",
- desc);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
+ dev_dbg(chan->dev, "LD %p callback\n", desc);
callback(callback_param);
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
}
+
+ /* Run any dependencies, then free the descriptor */
+ dma_run_dependencies(&desc->async_tx);
+ dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
}
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
- * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue.
- * @fsl_chan : Freescale DMA channel
+ * fsl_chan_xfer_ld_queue - transfer any pending transactions
+ * @chan : Freescale DMA channel
+ *
+ * This will make sure that any pending transactions will be run.
+ * If the DMA controller is idle, it will be started. Otherwise,
+ * the DMA controller's interrupt handler will start any pending
+ * transactions when it becomes idle.
*/
-static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan)
+static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
{
- struct list_head *ld_node;
- dma_addr_t next_dest_addr;
+ struct fsl_desc_sw *desc;
unsigned long flags;
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
+ spin_lock_irqsave(&chan->desc_lock, flags);
- if (!dma_is_idle(fsl_chan))
+ /*
+ * If the list of pending descriptors is empty, then we
+ * don't need to do any work at all
+ */
+ if (list_empty(&chan->ld_pending)) {
+ dev_dbg(chan->dev, "no pending LDs\n");
goto out_unlock;
+ }
- dma_halt(fsl_chan);
+ /*
+ * The DMA controller is not idle, which means the interrupt
+ * handler will start any queued transactions when it runs
+ * at the end of the current transaction
+ */
+ if (!dma_is_idle(chan)) {
+ dev_dbg(chan->dev, "DMA controller still busy\n");
+ goto out_unlock;
+ }
- /* If there are some link descriptors
- * not transfered in queue. We need to start it.
+ /*
+ * TODO:
+ * make sure the dma_halt() function really un-wedges the
+ * controller as much as possible
*/
+ dma_halt(chan);
- /* Find the first un-transfer desciptor */
- for (ld_node = fsl_chan->ld_queue.next;
- (ld_node != &fsl_chan->ld_queue)
- && (dma_async_is_complete(
- to_fsl_desc(ld_node)->async_tx.cookie,
- fsl_chan->completed_cookie,
- fsl_chan->common.cookie) == DMA_SUCCESS);
- ld_node = ld_node->next);
-
- if (ld_node != &fsl_chan->ld_queue) {
- /* Get the ld start address from ld_queue */
- next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys;
- dev_dbg(fsl_chan->dev, "xfer LDs staring from 0x%llx\n",
- (unsigned long long)next_dest_addr);
- set_cdar(fsl_chan, next_dest_addr);
- dma_start(fsl_chan);
- } else {
- set_cdar(fsl_chan, 0);
- set_ndar(fsl_chan, 0);
- }
+ /*
+ * If there are some link descriptors which have not been
+ * transferred, we need to start the controller
+ */
+
+ /*
+ * Move all elements from the queue of pending transactions
+ * onto the list of running transactions
+ */
+ desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
+ list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
+
+ /*
+ * Program the descriptor's address into the DMA controller,
+ * then start the DMA transaction
+ */
+ set_cdar(chan, desc->async_tx.phys);
+ dma_start(chan);
out_unlock:
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
+ spin_unlock_irqrestore(&chan->desc_lock, flags);
}
/**
* fsl_dma_memcpy_issue_pending - Issue the DMA start command
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*/
-static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan)
+static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
-
-#ifdef FSL_DMA_LD_DEBUG
- struct fsl_desc_sw *ld;
- unsigned long flags;
-
- spin_lock_irqsave(&fsl_chan->desc_lock, flags);
- if (list_empty(&fsl_chan->ld_queue)) {
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
- return;
- }
-
- dev_dbg(fsl_chan->dev, "--memcpy issue--\n");
- list_for_each_entry(ld, &fsl_chan->ld_queue, node) {
- int i;
- dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n",
- fsl_chan->id, ld->async_tx.phys);
- for (i = 0; i < 8; i++)
- dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n",
- i, *(((u32 *)&ld->hw) + i));
- }
- dev_dbg(fsl_chan->dev, "----------------\n");
- spin_unlock_irqrestore(&fsl_chan->desc_lock, flags);
-#endif
-
- fsl_chan_xfer_ld_queue(fsl_chan);
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
+ fsl_chan_xfer_ld_queue(chan);
}
/**
* fsl_dma_is_complete - Determine the DMA status
- * @fsl_chan : Freescale DMA channel
+ * @chan : Freescale DMA channel
*/
-static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
+static enum dma_status fsl_dma_is_complete(struct dma_chan *dchan,
dma_cookie_t cookie,
dma_cookie_t *done,
dma_cookie_t *used)
{
- struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
+ struct fsldma_chan *chan = to_fsl_chan(dchan);
dma_cookie_t last_used;
dma_cookie_t last_complete;
- fsl_chan_ld_cleanup(fsl_chan);
+ fsl_chan_ld_cleanup(chan);
- last_used = chan->cookie;
- last_complete = fsl_chan->completed_cookie;
+ last_used = dchan->cookie;
+ last_complete = chan->completed_cookie;
if (done)
*done = last_complete;
@@ -960,32 +990,37 @@ static enum dma_status fsl_dma_is_complete(struct dma_chan *chan,
return dma_async_is_complete(cookie, last_complete, last_used);
}
-static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
+/*----------------------------------------------------------------------------*/
+/* Interrupt Handling */
+/*----------------------------------------------------------------------------*/
+
+static irqreturn_t fsldma_chan_irq(int irq, void *data)
{
- struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
- u32 stat;
+ struct fsldma_chan *chan = data;
int update_cookie = 0;
int xfer_ld_q = 0;
+ u32 stat;
- stat = get_sr(fsl_chan);
- dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
- fsl_chan->id, stat);
- set_sr(fsl_chan, stat); /* Clear the event register */
+ /* save and clear the status register */
+ stat = get_sr(chan);
+ set_sr(chan, stat);
+ dev_dbg(chan->dev, "irq: channel %d, stat = 0x%x\n", chan->id, stat);
stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
if (!stat)
return IRQ_NONE;
if (stat & FSL_DMA_SR_TE)
- dev_err(fsl_chan->dev, "Transfer Error!\n");
+ dev_err(chan->dev, "Transfer Error!\n");
- /* Programming Error
+ /*
+ * Programming Error
* The DMA_INTERRUPT async_tx is a NULL transfer, which will
* trigger a PE interrupt.
*/
if (stat & FSL_DMA_SR_PE) {
- dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
- if (get_bcr(fsl_chan) == 0) {
+ dev_dbg(chan->dev, "irq: Programming Error INT\n");
+ if (get_bcr(chan) == 0) {
/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
* Now, update the completed cookie, and continue the
* next uncompleted transfer.
@@ -996,208 +1031,296 @@ static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
stat &= ~FSL_DMA_SR_PE;
}
- /* If the link descriptor segment transfer finishes,
+ /*
+ * If the link descriptor segment transfer finishes,
* we will recycle the used descriptor.
*/
if (stat & FSL_DMA_SR_EOSI) {
- dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
- dev_dbg(fsl_chan->dev, "event: clndar 0x%llx, nlndar 0x%llx\n",
- (unsigned long long)get_cdar(fsl_chan),
- (unsigned long long)get_ndar(fsl_chan));
+ dev_dbg(chan->dev, "irq: End-of-segments INT\n");
+ dev_dbg(chan->dev, "irq: clndar 0x%llx, nlndar 0x%llx\n",
+ (unsigned long long)get_cdar(chan),
+ (unsigned long long)get_ndar(chan));
stat &= ~FSL_DMA_SR_EOSI;
update_cookie = 1;
}
- /* For MPC8349, EOCDI event need to update cookie
+ /*
+ * For MPC8349, the EOCDI event needs to update the cookie
 * and start the next transfer if it exists.
*/
if (stat & FSL_DMA_SR_EOCDI) {
- dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
+ dev_dbg(chan->dev, "irq: End-of-Chain link INT\n");
stat &= ~FSL_DMA_SR_EOCDI;
update_cookie = 1;
xfer_ld_q = 1;
}
- /* If it current transfer is the end-of-transfer,
+ /*
+ * If the current transfer is the end-of-transfer,
 * we should clear the Channel Start bit to
 * prepare for the next transfer.
*/
if (stat & FSL_DMA_SR_EOLNI) {
- dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
+ dev_dbg(chan->dev, "irq: End-of-link INT\n");
stat &= ~FSL_DMA_SR_EOLNI;
xfer_ld_q = 1;
}
if (update_cookie)
- fsl_dma_update_completed_cookie(fsl_chan);
+ fsl_dma_update_completed_cookie(chan);
if (xfer_ld_q)
- fsl_chan_xfer_ld_queue(fsl_chan);
+ fsl_chan_xfer_ld_queue(chan);
if (stat)
- dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
- stat);
+ dev_dbg(chan->dev, "irq: unhandled sr 0x%02x\n", stat);
- dev_dbg(fsl_chan->dev, "event: Exit\n");
- tasklet_schedule(&fsl_chan->tasklet);
+ dev_dbg(chan->dev, "irq: Exit\n");
+ tasklet_schedule(&chan->tasklet);
return IRQ_HANDLED;
}
-static irqreturn_t fsl_dma_do_interrupt(int irq, void *data)
+static void dma_do_tasklet(unsigned long data)
{
- struct fsl_dma_device *fdev = (struct fsl_dma_device *)data;
- u32 gsr;
- int ch_nr;
+ struct fsldma_chan *chan = (struct fsldma_chan *)data;
+ fsl_chan_ld_cleanup(chan);
+}
+
+static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
+{
+ struct fsldma_device *fdev = data;
+ struct fsldma_chan *chan;
+ unsigned int handled = 0;
+ u32 gsr, mask;
+ int i;
- gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base)
- : in_le32(fdev->reg_base);
- ch_nr = (32 - ffs(gsr)) / 8;
+ gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
+ : in_le32(fdev->regs);
+ mask = 0xff000000;
+ dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ if (gsr & mask) {
+ dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
+ fsldma_chan_irq(irq, chan);
+ handled++;
+ }
- return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq,
- fdev->chan[ch_nr]) : IRQ_NONE;
+ gsr &= ~mask;
+ mask >>= 8;
+ }
+
+ return IRQ_RETVAL(handled);
}
-static void dma_do_tasklet(unsigned long data)
+static void fsldma_free_irqs(struct fsldma_device *fdev)
{
- struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
- fsl_chan_ld_cleanup(fsl_chan);
+ struct fsldma_chan *chan;
+ int i;
+
+ if (fdev->irq != NO_IRQ) {
+ dev_dbg(fdev->dev, "free per-controller IRQ\n");
+ free_irq(fdev->irq, fdev);
+ return;
+ }
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (chan && chan->irq != NO_IRQ) {
+ dev_dbg(fdev->dev, "free channel %d IRQ\n", chan->id);
+ free_irq(chan->irq, chan);
+ }
+ }
}
-static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
+static int fsldma_request_irqs(struct fsldma_device *fdev)
+{
+ struct fsldma_chan *chan;
+ int ret;
+ int i;
+
+ /* if we have a per-controller IRQ, use that */
+ if (fdev->irq != NO_IRQ) {
+ dev_dbg(fdev->dev, "request per-controller IRQ\n");
+ ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
+ "fsldma-controller", fdev);
+ return ret;
+ }
+
+ /* no per-controller IRQ, use the per-channel IRQs */
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ if (chan->irq == NO_IRQ) {
+ dev_err(fdev->dev, "no interrupts property defined for "
+ "DMA channel %d. Please fix your "
+ "device tree\n", chan->id);
+ ret = -ENODEV;
+ goto out_unwind;
+ }
+
+ dev_dbg(fdev->dev, "request channel %d IRQ\n", chan->id);
+ ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
+ "fsldma-chan", chan);
+ if (ret) {
+ dev_err(fdev->dev, "unable to request IRQ for DMA "
+ "channel %d\n", chan->id);
+ goto out_unwind;
+ }
+ }
+
+ return 0;
+
+out_unwind:
+ for (/* none */; i >= 0; i--) {
+ chan = fdev->chan[i];
+ if (!chan)
+ continue;
+
+ if (chan->irq == NO_IRQ)
+ continue;
+
+ free_irq(chan->irq, chan);
+ }
+
+ return ret;
+}
+
+/*----------------------------------------------------------------------------*/
+/* OpenFirmware Subsystem */
+/*----------------------------------------------------------------------------*/
+
+static int __devinit fsl_dma_chan_probe(struct fsldma_device *fdev,
struct device_node *node, u32 feature, const char *compatible)
{
- struct fsl_dma_chan *new_fsl_chan;
+ struct fsldma_chan *chan;
+ struct resource res;
int err;
/* alloc channel */
- new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
- if (!new_fsl_chan) {
- dev_err(fdev->dev, "No free memory for allocating "
- "dma channels!\n");
- return -ENOMEM;
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan) {
+ dev_err(fdev->dev, "no free memory for DMA channels!\n");
+ err = -ENOMEM;
+ goto out_return;
}
- /* get dma channel register base */
- err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
- if (err) {
- dev_err(fdev->dev, "Can't get %s property 'reg'\n",
- node->full_name);
- goto err_no_reg;
+ /* ioremap registers for use */
+ chan->regs = of_iomap(node, 0);
+ if (!chan->regs) {
+ dev_err(fdev->dev, "unable to ioremap registers\n");
+ err = -ENOMEM;
+ goto out_free_chan;
}
- new_fsl_chan->feature = feature;
+ err = of_address_to_resource(node, 0, &res);
+ if (err) {
+ dev_err(fdev->dev, "unable to find 'reg' property\n");
+ goto out_iounmap_regs;
+ }
+ chan->feature = feature;
if (!fdev->feature)
- fdev->feature = new_fsl_chan->feature;
+ fdev->feature = chan->feature;
- /* If the DMA device's feature is different than its channels',
- * report the bug.
+ /*
+ * If the DMA device's feature is different than the feature
+ * of its channels, report the bug
*/
- WARN_ON(fdev->feature != new_fsl_chan->feature);
+ WARN_ON(fdev->feature != chan->feature);
- new_fsl_chan->dev = fdev->dev;
- new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
- new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);
-
- new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
- if (new_fsl_chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
- dev_err(fdev->dev, "There is no %d channel!\n",
- new_fsl_chan->id);
+ chan->dev = fdev->dev;
+ chan->id = ((res.start - 0x100) & 0xfff) >> 7;
+ if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
+ dev_err(fdev->dev, "too many channels for device\n");
err = -EINVAL;
- goto err_no_chan;
+ goto out_iounmap_regs;
}
- fdev->chan[new_fsl_chan->id] = new_fsl_chan;
- tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
- (unsigned long)new_fsl_chan);
- /* Init the channel */
- dma_init(new_fsl_chan);
+ fdev->chan[chan->id] = chan;
+ tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
+
+ /* Initialize the channel */
+ dma_init(chan);
/* Clear cdar registers */
- set_cdar(new_fsl_chan, 0);
+ set_cdar(chan, 0);
- switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
+ switch (chan->feature & FSL_DMA_IP_MASK) {
case FSL_DMA_IP_85XX:
- new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
+ chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
case FSL_DMA_IP_83XX:
- new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
- new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
- new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
- new_fsl_chan->set_request_count = fsl_chan_set_request_count;
+ chan->toggle_ext_start = fsl_chan_toggle_ext_start;
+ chan->set_src_loop_size = fsl_chan_set_src_loop_size;
+ chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
+ chan->set_request_count = fsl_chan_set_request_count;
}
- spin_lock_init(&new_fsl_chan->desc_lock);
- INIT_LIST_HEAD(&new_fsl_chan->ld_queue);
+ spin_lock_init(&chan->desc_lock);
+ INIT_LIST_HEAD(&chan->ld_pending);
+ INIT_LIST_HEAD(&chan->ld_running);
+
+ chan->common.device = &fdev->common;
- new_fsl_chan->common.device = &fdev->common;
+ /* find the IRQ line, if it exists in the device tree */
+ chan->irq = irq_of_parse_and_map(node, 0);
/* Add the channel to DMA device channel list */
- list_add_tail(&new_fsl_chan->common.device_node,
- &fdev->common.channels);
+ list_add_tail(&chan->common.device_node, &fdev->common.channels);
fdev->common.chancnt++;
- new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
- if (new_fsl_chan->irq != NO_IRQ) {
- err = request_irq(new_fsl_chan->irq,
- &fsl_dma_chan_do_interrupt, IRQF_SHARED,
- "fsldma-channel", new_fsl_chan);
- if (err) {
- dev_err(fdev->dev, "DMA channel %s request_irq error "
- "with return %d\n", node->full_name, err);
- goto err_no_irq;
- }
- }
-
- dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
- compatible,
- new_fsl_chan->irq != NO_IRQ ? new_fsl_chan->irq : fdev->irq);
+ dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
+ chan->irq != NO_IRQ ? chan->irq : fdev->irq);
return 0;
-err_no_irq:
- list_del(&new_fsl_chan->common.device_node);
-err_no_chan:
- iounmap(new_fsl_chan->reg_base);
-err_no_reg:
- kfree(new_fsl_chan);
+out_iounmap_regs:
+ iounmap(chan->regs);
+out_free_chan:
+ kfree(chan);
+out_return:
return err;
}
-static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan)
+static void fsl_dma_chan_remove(struct fsldma_chan *chan)
{
- if (fchan->irq != NO_IRQ)
- free_irq(fchan->irq, fchan);
- list_del(&fchan->common.device_node);
- iounmap(fchan->reg_base);
- kfree(fchan);
+ irq_dispose_mapping(chan->irq);
+ list_del(&chan->common.device_node);
+ iounmap(chan->regs);
+ kfree(chan);
}
-static int __devinit of_fsl_dma_probe(struct of_device *dev,
+static int __devinit fsldma_of_probe(struct of_device *op,
const struct of_device_id *match)
{
- int err;
- struct fsl_dma_device *fdev;
+ struct fsldma_device *fdev;
struct device_node *child;
+ int err;
- fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL);
+ fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
if (!fdev) {
- dev_err(&dev->dev, "No enough memory for 'priv'\n");
- return -ENOMEM;
+ dev_err(&op->dev, "No enough memory for 'priv'\n");
+ err = -ENOMEM;
+ goto out_return;
}
- fdev->dev = &dev->dev;
+
+ fdev->dev = &op->dev;
INIT_LIST_HEAD(&fdev->common.channels);
- /* get DMA controller register base */
- err = of_address_to_resource(dev->node, 0, &fdev->reg);
- if (err) {
- dev_err(&dev->dev, "Can't get %s property 'reg'\n",
- dev->node->full_name);
- goto err_no_reg;
+ /* ioremap the registers for use */
+ fdev->regs = of_iomap(op->node, 0);
+ if (!fdev->regs) {
+ dev_err(&op->dev, "unable to ioremap registers\n");
+ err = -ENOMEM;
+ goto out_free_fdev;
}
- dev_info(&dev->dev, "Probe the Freescale DMA driver for %s "
- "controller at 0x%llx...\n",
- match->compatible, (unsigned long long)fdev->reg.start);
- fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end
- - fdev->reg.start + 1);
+ /* map the channel IRQ if it exists, but don't hook up the handler yet */
+ fdev->irq = irq_of_parse_and_map(op->node, 0);
dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
@@ -1210,103 +1333,111 @@ static int __devinit of_fsl_dma_probe(struct of_device *dev,
fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
- fdev->common.dev = &dev->dev;
+ fdev->common.dev = &op->dev;
- fdev->irq = irq_of_parse_and_map(dev->node, 0);
- if (fdev->irq != NO_IRQ) {
- err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED,
- "fsldma-device", fdev);
- if (err) {
- dev_err(&dev->dev, "DMA device request_irq error "
- "with return %d\n", err);
- goto err;
- }
- }
-
- dev_set_drvdata(&(dev->dev), fdev);
+ dev_set_drvdata(&op->dev, fdev);
- /* We cannot use of_platform_bus_probe() because there is no
- * of_platform_bus_remove. Instead, we manually instantiate every DMA
+ /*
+ * We cannot use of_platform_bus_probe() because there is no
+ * of_platform_bus_remove(). Instead, we manually instantiate every DMA
* channel object.
*/
- for_each_child_of_node(dev->node, child) {
- if (of_device_is_compatible(child, "fsl,eloplus-dma-channel"))
+ for_each_child_of_node(op->node, child) {
+ if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
"fsl,eloplus-dma-channel");
- if (of_device_is_compatible(child, "fsl,elo-dma-channel"))
+ }
+
+ if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
fsl_dma_chan_probe(fdev, child,
FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
"fsl,elo-dma-channel");
+ }
+ }
+
+ /*
+ * Hook up the IRQ handler(s)
+ *
+ * If we have a per-controller interrupt, we prefer that to the
+ * per-channel interrupts to reduce the number of shared interrupt
+ * handlers on the same IRQ line
+ */
+ err = fsldma_request_irqs(fdev);
+ if (err) {
+ dev_err(fdev->dev, "unable to request IRQs\n");
+ goto out_free_fdev;
}
dma_async_device_register(&fdev->common);
return 0;
-err:
- iounmap(fdev->reg_base);
-err_no_reg:
+out_free_fdev:
+ irq_dispose_mapping(fdev->irq);
kfree(fdev);
+out_return:
return err;
}
-static int of_fsl_dma_remove(struct of_device *of_dev)
+static int fsldma_of_remove(struct of_device *op)
{
- struct fsl_dma_device *fdev;
+ struct fsldma_device *fdev;
unsigned int i;
- fdev = dev_get_drvdata(&of_dev->dev);
-
+ fdev = dev_get_drvdata(&op->dev);
dma_async_device_unregister(&fdev->common);
- for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++)
+ fsldma_free_irqs(fdev);
+
+ for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
if (fdev->chan[i])
fsl_dma_chan_remove(fdev->chan[i]);
+ }
- if (fdev->irq != NO_IRQ)
- free_irq(fdev->irq, fdev);
-
- iounmap(fdev->reg_base);
-
+ iounmap(fdev->regs);
+ dev_set_drvdata(&op->dev, NULL);
kfree(fdev);
- dev_set_drvdata(&of_dev->dev, NULL);
return 0;
}
-static struct of_device_id of_fsl_dma_ids[] = {
+static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,eloplus-dma", },
{ .compatible = "fsl,elo-dma", },
{}
};
-static struct of_platform_driver of_fsl_dma_driver = {
- .name = "fsl-elo-dma",
- .match_table = of_fsl_dma_ids,
- .probe = of_fsl_dma_probe,
- .remove = of_fsl_dma_remove,
+static struct of_platform_driver fsldma_of_driver = {
+ .name = "fsl-elo-dma",
+ .match_table = fsldma_of_ids,
+ .probe = fsldma_of_probe,
+ .remove = fsldma_of_remove,
};
-static __init int of_fsl_dma_init(void)
+/*----------------------------------------------------------------------------*/
+/* Module Init / Exit */
+/*----------------------------------------------------------------------------*/
+
+static __init int fsldma_init(void)
{
int ret;
pr_info("Freescale Elo / Elo Plus DMA driver\n");
- ret = of_register_platform_driver(&of_fsl_dma_driver);
+ ret = of_register_platform_driver(&fsldma_of_driver);
if (ret)
pr_err("fsldma: failed to register platform driver\n");
return ret;
}
-static void __exit of_fsl_dma_exit(void)
+static void __exit fsldma_exit(void)
{
- of_unregister_platform_driver(&of_fsl_dma_driver);
+ of_unregister_platform_driver(&fsldma_of_driver);
}
-subsys_initcall(of_fsl_dma_init);
-module_exit(of_fsl_dma_exit);
+subsys_initcall(fsldma_init);
+module_exit(fsldma_exit);
MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver");
MODULE_LICENSE("GPL");
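The fsldma.c rework above replaces the driver's single ld_queue with separate ld_pending and ld_running lists: descriptors accumulate on ld_pending at submit time and are moved onto ld_running in one operation when the controller is (re)started. A condensed sketch of that flow, with locking and the idle check omitted (illustrative only, not part of the patch):

static void demo_start_pending(struct fsldma_chan *chan)
{
	struct fsl_desc_sw *desc;

	/* nothing was submitted since the last start */
	if (list_empty(&chan->ld_pending))
		return;

	/* the head of the pending list is the first descriptor to execute */
	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);

	/* move every pending descriptor onto the running list in one splice */
	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);

	/* point the hardware at the first descriptor and start it */
	set_cdar(chan, desc->async_tx.phys);
	dma_start(chan);
}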
diff --git a/drivers/dma/fsldma.h b/drivers/dma/fsldma.h
index 0df14cbb8ca..cb4d6ff5159 100644
--- a/drivers/dma/fsldma.h
+++ b/drivers/dma/fsldma.h
@@ -92,11 +92,9 @@ struct fsl_desc_sw {
struct list_head node;
struct list_head tx_list;
struct dma_async_tx_descriptor async_tx;
- struct list_head *ld;
- void *priv;
} __attribute__((aligned(32)));
-struct fsl_dma_chan_regs {
+struct fsldma_chan_regs {
u32 mr; /* 0x00 - Mode Register */
u32 sr; /* 0x04 - Status Register */
u64 cdar; /* 0x08 - Current descriptor address register */
@@ -106,20 +104,19 @@ struct fsl_dma_chan_regs {
u64 ndar; /* 0x24 - Next Descriptor Address Register */
};
-struct fsl_dma_chan;
+struct fsldma_chan;
#define FSL_DMA_MAX_CHANS_PER_DEVICE 4
-struct fsl_dma_device {
- void __iomem *reg_base; /* DGSR register base */
- struct resource reg; /* Resource for register */
+struct fsldma_device {
+ void __iomem *regs; /* DGSR register base */
struct device *dev;
struct dma_device common;
- struct fsl_dma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
+ struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */
int irq; /* Channel IRQ */
};
-/* Define macros for fsl_dma_chan->feature property */
+/* Define macros for fsldma_chan->feature property */
#define FSL_DMA_LITTLE_ENDIAN 0x00000000
#define FSL_DMA_BIG_ENDIAN 0x00000001
@@ -130,28 +127,28 @@ struct fsl_dma_device {
#define FSL_DMA_CHAN_PAUSE_EXT 0x00001000
#define FSL_DMA_CHAN_START_EXT 0x00002000
-struct fsl_dma_chan {
- struct fsl_dma_chan_regs __iomem *reg_base;
+struct fsldma_chan {
+ struct fsldma_chan_regs __iomem *regs;
dma_cookie_t completed_cookie; /* The maximum cookie completed */
spinlock_t desc_lock; /* Descriptor operation lock */
- struct list_head ld_queue; /* Link descriptors queue */
+ struct list_head ld_pending; /* Link descriptors queue */
+ struct list_head ld_running; /* Link descriptors queue */
struct dma_chan common; /* DMA common channel */
struct dma_pool *desc_pool; /* Descriptors pool */
struct device *dev; /* Channel device */
- struct resource reg; /* Resource for register */
int irq; /* Channel IRQ */
int id; /* Raw id of this channel */
struct tasklet_struct tasklet;
u32 feature;
- void (*toggle_ext_pause)(struct fsl_dma_chan *fsl_chan, int enable);
- void (*toggle_ext_start)(struct fsl_dma_chan *fsl_chan, int enable);
- void (*set_src_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
- void (*set_dest_loop_size)(struct fsl_dma_chan *fsl_chan, int size);
- void (*set_request_count)(struct fsl_dma_chan *fsl_chan, int size);
+ void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
+ void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
+ void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
+ void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
+ void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
};
-#define to_fsl_chan(chan) container_of(chan, struct fsl_dma_chan, common)
+#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
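The fsldma.h changes keep the container_of() helpers, so code that is handed the generic dmaengine objects can recover the driver-private structures that embed them. A small usage sketch (the function name is illustrative, not from the driver):

static void demo_use_helpers(struct dma_chan *dchan,
			     struct dma_async_tx_descriptor *tx)
{
	struct fsldma_chan *chan = to_fsl_chan(dchan);
	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);

	dev_dbg(chan->dev, "descriptor %p carries cookie %d\n",
		desc, desc->async_tx.cookie);
}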
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index dcc4ab78b32..af14c9a5b8d 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -71,7 +71,7 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
}
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
- for_each_bit(bit, &attnstatus, BITS_PER_LONG) {
+ for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
tasklet_schedule(&chan->cleanup_task);
}
@@ -94,16 +94,12 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
return IRQ_HANDLED;
}
-static void ioat1_cleanup_tasklet(unsigned long data);
-
/* common channel initialization */
-void ioat_init_channel(struct ioatdma_device *device,
- struct ioat_chan_common *chan, int idx,
- void (*timer_fn)(unsigned long),
- void (*tasklet)(unsigned long),
- unsigned long ioat)
+void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx)
{
struct dma_device *dma = &device->common;
+ struct dma_chan *c = &chan->common;
+ unsigned long data = (unsigned long) c;
chan->device = device;
chan->reg_base = device->reg_base + (0x80 * (idx + 1));
@@ -112,14 +108,12 @@ void ioat_init_channel(struct ioatdma_device *device,
list_add_tail(&chan->common.device_node, &dma->channels);
device->idx[idx] = chan;
init_timer(&chan->timer);
- chan->timer.function = timer_fn;
- chan->timer.data = ioat;
- tasklet_init(&chan->cleanup_task, tasklet, ioat);
+ chan->timer.function = device->timer_fn;
+ chan->timer.data = data;
+ tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
tasklet_disable(&chan->cleanup_task);
}
-static void ioat1_timer_event(unsigned long data);
-
/**
* ioat1_dma_enumerate_channels - find and initialize the device's channels
* @device: the device to be enumerated
@@ -155,10 +149,7 @@ static int ioat1_enumerate_channels(struct ioatdma_device *device)
if (!ioat)
break;
- ioat_init_channel(device, &ioat->base, i,
- ioat1_timer_event,
- ioat1_cleanup_tasklet,
- (unsigned long) ioat);
+ ioat_init_channel(device, &ioat->base, i);
ioat->xfercap = xfercap;
spin_lock_init(&ioat->desc_lock);
INIT_LIST_HEAD(&ioat->free_desc);
@@ -532,12 +523,12 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
return &desc->txd;
}
-static void ioat1_cleanup_tasklet(unsigned long data)
+static void ioat1_cleanup_event(unsigned long data)
{
- struct ioat_dma_chan *chan = (void *)data;
+ struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
- ioat1_cleanup(chan);
- writew(IOAT_CHANCTRL_RUN, chan->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ ioat1_cleanup(ioat);
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
@@ -687,7 +678,7 @@ static void ioat1_cleanup(struct ioat_dma_chan *ioat)
static void ioat1_timer_event(unsigned long data)
{
- struct ioat_dma_chan *ioat = (void *) data;
+ struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
dev_dbg(to_dev(chan), "%s: state: %lx\n", __func__, chan->state);
@@ -734,16 +725,17 @@ static void ioat1_timer_event(unsigned long data)
spin_unlock_bh(&chan->cleanup_lock);
}
-static enum dma_status
-ioat1_dma_is_complete(struct dma_chan *c, dma_cookie_t cookie,
+enum dma_status
+ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
dma_cookie_t *done, dma_cookie_t *used)
{
- struct ioat_dma_chan *ioat = to_ioat_chan(c);
+ struct ioat_chan_common *chan = to_chan_common(c);
+ struct ioatdma_device *device = chan->device;
if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat1_cleanup(ioat);
+ device->cleanup_fn((unsigned long) c);
return ioat_is_complete(c, cookie, done, used);
}
@@ -1199,12 +1191,14 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca)
device->intr_quirk = ioat1_intr_quirk;
device->enumerate_channels = ioat1_enumerate_channels;
device->self_test = ioat_dma_self_test;
+ device->timer_fn = ioat1_timer_event;
+ device->cleanup_fn = ioat1_cleanup_event;
dma = &device->common;
dma->device_prep_dma_memcpy = ioat1_dma_prep_memcpy;
dma->device_issue_pending = ioat1_dma_memcpy_issue_pending;
dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources;
dma->device_free_chan_resources = ioat1_dma_free_chan_resources;
- dma->device_is_tx_complete = ioat1_dma_is_complete;
+ dma->device_is_tx_complete = ioat_is_dma_complete;
err = ioat_probe(device);
if (err)
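With the simplified ioat_init_channel() above, the timer and cleanup tasklet both receive the generic struct dma_chan pointer as their 'data' argument, and the version-specific handlers convert it back with the existing container_of() helpers. A sketch of that convention (the handler name is made up for illustration):

static void demo_cleanup_event(unsigned long data)
{
	struct dma_chan *c = (struct dma_chan *)data;
	struct ioat_chan_common *chan = to_chan_common(c);

	/* a real handler would run the version-specific cleanup here */
	dev_dbg(to_dev(chan), "cleanup event for channel %p\n", c);
}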
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index bbc3e78ef33..4f747a25407 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -61,7 +61,7 @@
* @intr_quirk: interrupt setup quirk (for ioat_v1 devices)
* @enumerate_channels: hw version specific channel enumeration
* @reset_hw: hw version specific channel (re)initialization
- * @cleanup_tasklet: select between the v2 and v3 cleanup routines
+ * @cleanup_fn: select between the v2 and v3 cleanup routines
* @timer_fn: select between the v2 and v3 timer watchdog routines
* @self_test: hardware version specific self test for each supported op type
*
@@ -80,7 +80,7 @@ struct ioatdma_device {
void (*intr_quirk)(struct ioatdma_device *device);
int (*enumerate_channels)(struct ioatdma_device *device);
int (*reset_hw)(struct ioat_chan_common *chan);
- void (*cleanup_tasklet)(unsigned long data);
+ void (*cleanup_fn)(unsigned long data);
void (*timer_fn)(unsigned long data);
int (*self_test)(struct ioatdma_device *device);
};
@@ -337,10 +337,9 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev,
void __iomem *iobase);
unsigned long ioat_get_current_completion(struct ioat_chan_common *chan);
void ioat_init_channel(struct ioatdma_device *device,
- struct ioat_chan_common *chan, int idx,
- void (*timer_fn)(unsigned long),
- void (*tasklet)(unsigned long),
- unsigned long ioat);
+ struct ioat_chan_common *chan, int idx);
+enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used);
void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags,
size_t len, struct ioat_dma_descriptor *hw);
bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5cc37afe2bc..1ed5d66d7dc 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -51,48 +51,40 @@ MODULE_PARM_DESC(ioat_ring_max_alloc_order,
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
- void * __iomem reg_base = ioat->base.reg_base;
+ struct ioat_chan_common *chan = &ioat->base;
- ioat->pending = 0;
ioat->dmacount += ioat2_ring_pending(ioat);
ioat->issued = ioat->head;
/* make descriptor updates globally visible before notifying channel */
wmb();
- writew(ioat->dmacount, reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
- dev_dbg(to_dev(&ioat->base),
+ writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
+ dev_dbg(to_dev(chan),
"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}
-void ioat2_issue_pending(struct dma_chan *chan)
+void ioat2_issue_pending(struct dma_chan *c)
{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(chan);
+ struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- spin_lock_bh(&ioat->ring_lock);
- if (ioat->pending == 1)
+ if (ioat2_ring_pending(ioat)) {
+ spin_lock_bh(&ioat->ring_lock);
__ioat2_issue_pending(ioat);
- spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&ioat->ring_lock);
+ }
}
/**
* ioat2_update_pending - log pending descriptors
* @ioat: ioat2+ channel
*
- * set pending to '1' unless pending is already set to '2', pending == 2
- * indicates that submission is temporarily blocked due to an in-flight
- * reset. If we are already above the ioat_pending_level threshold then
- * just issue pending.
- *
- * called with ring_lock held
+ * Check if the number of unsubmitted descriptors has exceeded the
+ * watermark. Called with ring_lock held
*/
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
- if (unlikely(ioat->pending == 2))
- return;
- else if (ioat2_ring_pending(ioat) > ioat_pending_level)
+ if (ioat2_ring_pending(ioat) > ioat_pending_level)
__ioat2_issue_pending(ioat);
- else
- ioat->pending = 1;
}
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
@@ -166,7 +158,7 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
seen_current = true;
}
ioat->tail += i;
- BUG_ON(!seen_current); /* no active descs have written a completion? */
+ BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
if (ioat->head == ioat->tail) {
@@ -207,9 +199,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&chan->cleanup_lock);
}
-void ioat2_cleanup_tasklet(unsigned long data)
+void ioat2_cleanup_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
ioat2_cleanup(ioat);
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
@@ -291,7 +283,7 @@ static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
void ioat2_timer_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
spin_lock_bh(&chan->cleanup_lock);
@@ -397,10 +389,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device)
if (!ioat)
break;
- ioat_init_channel(device, &ioat->base, i,
- device->timer_fn,
- device->cleanup_tasklet,
- (unsigned long) ioat);
+ ioat_init_channel(device, &ioat->base, i);
ioat->xfercap_log = xfercap_log;
spin_lock_init(&ioat->ring_lock);
if (device->reset_hw(&ioat->base)) {
@@ -546,7 +535,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->head = 0;
ioat->issued = 0;
ioat->tail = 0;
- ioat->pending = 0;
ioat->alloc_order = order;
spin_unlock_bh(&ioat->ring_lock);
@@ -701,7 +689,7 @@ int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
spin_unlock_bh(&chan->cleanup_lock);
- device->timer_fn((unsigned long) ioat);
+ device->timer_fn((unsigned long) &chan->common);
} else
spin_unlock_bh(&chan->cleanup_lock);
return -ENOMEM;
@@ -785,7 +773,7 @@ void ioat2_free_chan_resources(struct dma_chan *c)
tasklet_disable(&chan->cleanup_task);
del_timer_sync(&chan->timer);
- device->cleanup_tasklet((unsigned long) ioat);
+ device->cleanup_fn((unsigned long) c);
device->reset_hw(chan);
spin_lock_bh(&ioat->ring_lock);
@@ -815,25 +803,9 @@ void ioat2_free_chan_resources(struct dma_chan *c)
chan->last_completion = 0;
chan->completion_dma = 0;
- ioat->pending = 0;
ioat->dmacount = 0;
}
-enum dma_status
-ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used)
-{
- struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
- struct ioatdma_device *device = ioat->base.device;
-
- if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
- return DMA_SUCCESS;
-
- device->cleanup_tasklet((unsigned long) ioat);
-
- return ioat_is_complete(c, cookie, done, used);
-}
-
static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
@@ -874,7 +846,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
device->enumerate_channels = ioat2_enumerate_channels;
device->reset_hw = ioat2_reset_hw;
- device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
device->self_test = ioat_dma_self_test;
dma = &device->common;
@@ -882,7 +854,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
dma->device_issue_pending = ioat2_issue_pending;
dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
dma->device_free_chan_resources = ioat2_free_chan_resources;
- dma->device_is_tx_complete = ioat2_is_complete;
+ dma->device_is_tx_complete = ioat_is_dma_complete;
err = ioat_probe(device);
if (err)
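The dma_v2.c changes drop the lock-free 'pending' flag: submission paths simply compare the number of unsubmitted ring entries against the ioat_pending_level watermark, and ->device_issue_pending() flushes whatever is left. A minimal sketch of that check (assumes the caller holds ring_lock, as ioat2_update_pending() does):

static void demo_update_pending(struct ioat2_dma_chan *ioat)
{
	/* head has advanced past issued, so descriptors are staged for hw */
	if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);	/* write dmacount immediately */
	/* otherwise leave them for issue_pending() or a later submission */
}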
diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h
index 3afad8da43c..ef2871fd786 100644
--- a/drivers/dma/ioat/dma_v2.h
+++ b/drivers/dma/ioat/dma_v2.h
@@ -47,7 +47,6 @@ extern int ioat_ring_alloc_order;
* @head: allocated index
* @issued: hardware notification point
* @tail: cleanup index
- * @pending: lock free indicator for issued != head
* @dmacount: identical to 'head' except for occasionally resetting to zero
* @alloc_order: log2 of the number of allocated descriptors
* @ring: software ring buffer implementation of hardware ring
@@ -61,7 +60,6 @@ struct ioat2_dma_chan {
u16 tail;
u16 dmacount;
u16 alloc_order;
- int pending;
struct ioat_ring_ent **ring;
spinlock_t ring_lock;
};
@@ -178,12 +176,10 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
void ioat2_issue_pending(struct dma_chan *chan);
int ioat2_alloc_chan_resources(struct dma_chan *c);
void ioat2_free_chan_resources(struct dma_chan *c);
-enum dma_status ioat2_is_complete(struct dma_chan *c, dma_cookie_t cookie,
- dma_cookie_t *done, dma_cookie_t *used);
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat);
bool reshape_ring(struct ioat2_dma_chan *ioat, int order);
void __ioat2_issue_pending(struct ioat2_dma_chan *ioat);
-void ioat2_cleanup_tasklet(unsigned long data);
+void ioat2_cleanup_event(unsigned long data);
void ioat2_timer_event(unsigned long data);
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo);
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 9908c9e94b2..26febc56dab 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -293,17 +293,25 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
}
}
ioat->tail += i;
- BUG_ON(!seen_current); /* no active descs have written a completion? */
+ BUG_ON(active && !seen_current); /* no active descs have written a completion? */
chan->last_completion = phys_complete;
- if (ioat->head == ioat->tail) {
+
+ active = ioat2_ring_active(ioat);
+ if (active == 0) {
dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
__func__);
clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
}
+ /* 5 microsecond delay per pending descriptor */
+ writew(min((5 * active), IOAT_INTRDELAY_MASK),
+ chan->device->reg_base + IOAT_INTRDELAY_OFFSET);
}
-static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
+/* try to cleanup, but yield (via spin_trylock) to incoming submissions
+ * with the expectation that we will immediately poll again shortly
+ */
+static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
@@ -329,29 +337,41 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
spin_unlock_bh(&chan->cleanup_lock);
}
-static void ioat3_cleanup_tasklet(unsigned long data)
+/* run cleanup now because we already delayed the interrupt via INTRDELAY */
+static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat)
+{
+ struct ioat_chan_common *chan = &ioat->base;
+ unsigned long phys_complete;
+
+ prefetch(chan->completion);
+
+ spin_lock_bh(&chan->cleanup_lock);
+ if (!ioat_cleanup_preamble(chan, &phys_complete)) {
+ spin_unlock_bh(&chan->cleanup_lock);
+ return;
+ }
+ spin_lock_bh(&ioat->ring_lock);
+
+ __cleanup(ioat, phys_complete);
+
+ spin_unlock_bh(&ioat->ring_lock);
+ spin_unlock_bh(&chan->cleanup_lock);
+}
+
+static void ioat3_cleanup_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
- ioat3_cleanup(ioat);
- writew(IOAT_CHANCTRL_RUN | IOAT3_CHANCTRL_COMPL_DCA_EN,
- ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
+ ioat3_cleanup_sync(ioat);
+ writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
{
struct ioat_chan_common *chan = &ioat->base;
unsigned long phys_complete;
- u32 status;
-
- status = ioat_chansts(chan);
- if (is_ioat_active(status) || is_ioat_idle(status))
- ioat_suspend(chan);
- while (is_ioat_active(status) || is_ioat_idle(status)) {
- status = ioat_chansts(chan);
- cpu_relax();
- }
+ ioat2_quiesce(chan, 0);
if (ioat_cleanup_preamble(chan, &phys_complete))
__cleanup(ioat, phys_complete);
@@ -360,7 +380,7 @@ static void ioat3_restart_channel(struct ioat2_dma_chan *ioat)
static void ioat3_timer_event(unsigned long data)
{
- struct ioat2_dma_chan *ioat = (void *) data;
+ struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
struct ioat_chan_common *chan = &ioat->base;
spin_lock_bh(&chan->cleanup_lock);
@@ -426,7 +446,7 @@ ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie,
if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS)
return DMA_SUCCESS;
- ioat3_cleanup(ioat);
+ ioat3_cleanup_poll(ioat);
return ioat_is_complete(c, cookie, done, used);
}
@@ -1239,11 +1259,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
if (is_raid_device) {
dma->device_is_tx_complete = ioat3_is_complete;
- device->cleanup_tasklet = ioat3_cleanup_tasklet;
+ device->cleanup_fn = ioat3_cleanup_event;
device->timer_fn = ioat3_timer_event;
} else {
- dma->device_is_tx_complete = ioat2_is_complete;
- device->cleanup_tasklet = ioat2_cleanup_tasklet;
+ dma->device_is_tx_complete = ioat_is_dma_complete;
+ device->cleanup_fn = ioat2_cleanup_event;
device->timer_fn = ioat2_timer_event;
}
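The dma_v3.c cleanup path now programs the interrupt delay register in proportion to ring occupancy: five microseconds per still-active descriptor, saturated at the 14-bit delay field. A small illustration of that arithmetic (the helper name is hypothetical):

static u16 demo_intr_delay(u16 active)
{
	u32 delay = 5 * active;		/* e.g. 12 active descriptors -> 60us */

	/* IOAT_INTRDELAY_MASK (0x3FFF) bounds the programmable delay */
	return min_t(u32, delay, IOAT_INTRDELAY_MASK);
}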
diff --git a/drivers/dma/ioat/registers.h b/drivers/dma/ioat/registers.h
index e8ae63baf58..1391798542b 100644
--- a/drivers/dma/ioat/registers.h
+++ b/drivers/dma/ioat/registers.h
@@ -60,7 +60,7 @@
#define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */
#define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */
-#define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */
+#define IOAT_INTRDELAY_MASK 0x3FFF /* Interrupt Delay Time */
#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */
#define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index e80bae1673f..2a446397c88 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -348,6 +348,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
break;
case IPU_PIX_FMT_BGRA32:
case IPU_PIX_FMT_BGR32:
+ case IPU_PIX_FMT_ABGR32:
params->ip.bpp = 0;
params->ip.pfs = 4;
params->ip.npb = 7;
@@ -376,20 +377,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
params->ip.wid2 = 7; /* Blue bit width - 1 */
params->ip.wid3 = 7; /* Alpha bit width - 1 */
break;
- case IPU_PIX_FMT_ABGR32:
- params->ip.bpp = 0;
- params->ip.pfs = 4;
- params->ip.npb = 7;
- params->ip.sat = 2; /* SAT = 32-bit access */
- params->ip.ofs0 = 8; /* Red bit offset */
- params->ip.ofs1 = 16; /* Green bit offset */
- params->ip.ofs2 = 24; /* Blue bit offset */
- params->ip.ofs3 = 0; /* Alpha bit offset */
- params->ip.wid0 = 7; /* Red bit width - 1 */
- params->ip.wid1 = 7; /* Green bit width - 1 */
- params->ip.wid2 = 7; /* Blue bit width - 1 */
- params->ip.wid3 = 7; /* Alpha bit width - 1 */
- break;
case IPU_PIX_FMT_UYVY:
params->ip.bpp = 2;
params->ip.pfs = 6;
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
new file mode 100644
index 00000000000..3fdf1f46bd6
--- /dev/null
+++ b/drivers/dma/mpc512x_dma.c
@@ -0,0 +1,800 @@
+/*
+ * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
+ * Copyright (C) Semihalf 2009
+ *
+ * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
+ * (defines, structures and comments) was taken from MPC5121 DMA driver
+ * written by Hongjun Chen <hong-jun.chen@freescale.com>.
+ *
+ * Approved as OSADL project by a majority of OSADL members and funded
+ * by OSADL membership fees in 2009; for details see www.osadl.org.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+/*
+ * This is the initial version of the MPC5121 DMA driver. Only memory-to-memory
+ * transfers are supported (tested using the dmatest module).
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+
+#include <linux/random.h>
+
+/* Number of DMA Transfer descriptors allocated per channel */
+#define MPC_DMA_DESCRIPTORS 64
+
+/* Macro definitions */
+#define MPC_DMA_CHANNELS 64
+#define MPC_DMA_TCD_OFFSET 0x1000
+
+/* Arbitration mode of group and channel */
+#define MPC_DMA_DMACR_EDCG (1 << 31)
+#define MPC_DMA_DMACR_ERGA (1 << 3)
+#define MPC_DMA_DMACR_ERCA (1 << 2)
+
+/* Error codes */
+#define MPC_DMA_DMAES_VLD (1 << 31)
+#define MPC_DMA_DMAES_GPE (1 << 15)
+#define MPC_DMA_DMAES_CPE (1 << 14)
+#define MPC_DMA_DMAES_ERRCHN(err) \
+ (((err) >> 8) & 0x3f)
+#define MPC_DMA_DMAES_SAE (1 << 7)
+#define MPC_DMA_DMAES_SOE (1 << 6)
+#define MPC_DMA_DMAES_DAE (1 << 5)
+#define MPC_DMA_DMAES_DOE (1 << 4)
+#define MPC_DMA_DMAES_NCE (1 << 3)
+#define MPC_DMA_DMAES_SGE (1 << 2)
+#define MPC_DMA_DMAES_SBE (1 << 1)
+#define MPC_DMA_DMAES_DBE (1 << 0)
+
+#define MPC_DMA_TSIZE_1 0x00
+#define MPC_DMA_TSIZE_2 0x01
+#define MPC_DMA_TSIZE_4 0x02
+#define MPC_DMA_TSIZE_16 0x04
+#define MPC_DMA_TSIZE_32 0x05
+
+/* MPC5121 DMA engine registers */
+struct __attribute__ ((__packed__)) mpc_dma_regs {
+ /* 0x00 */
+ u32 dmacr; /* DMA control register */
+ u32 dmaes; /* DMA error status */
+ /* 0x08 */
+ u32 dmaerqh; /* DMA enable request high(channels 63~32) */
+ u32 dmaerql; /* DMA enable request low(channels 31~0) */
+ u32 dmaeeih; /* DMA enable error interrupt high(ch63~32) */
+ u32 dmaeeil; /* DMA enable error interrupt low(ch31~0) */
+ /* 0x18 */
+ u8 dmaserq; /* DMA set enable request */
+ u8 dmacerq; /* DMA clear enable request */
+ u8 dmaseei; /* DMA set enable error interrupt */
+ u8 dmaceei; /* DMA clear enable error interrupt */
+ /* 0x1c */
+ u8 dmacint; /* DMA clear interrupt request */
+ u8 dmacerr; /* DMA clear error */
+ u8 dmassrt; /* DMA set start bit */
+ u8 dmacdne; /* DMA clear DONE status bit */
+ /* 0x20 */
+ u32 dmainth; /* DMA interrupt request high(ch63~32) */
+ u32 dmaintl; /* DMA interrupt request low(ch31~0) */
+ u32 dmaerrh; /* DMA error high(ch63~32) */
+ u32 dmaerrl; /* DMA error low(ch31~0) */
+ /* 0x30 */
+ u32 dmahrsh; /* DMA hw request status high(ch63~32) */
+ u32 dmahrsl; /* DMA hardware request status low(ch31~0) */
+ u32 dmaihsa; /* DMA interrupt high select AXE(ch63~32) */
+ u32 dmailsa; /* DMA interrupt low select AXE(ch31~0) */
+ /* 0x40 ~ 0xff */
+ u32 reserve0[48]; /* Reserved */
+ /* 0x100 */
+ u8 dchpri[MPC_DMA_CHANNELS];
+ /* DMA channels(0~63) priority */
+};
+
+struct __attribute__ ((__packed__)) mpc_dma_tcd {
+ /* 0x00 */
+ u32 saddr; /* Source address */
+
+ u32 smod:5; /* Source address modulo */
+ u32 ssize:3; /* Source data transfer size */
+ u32 dmod:5; /* Destination address modulo */
+ u32 dsize:3; /* Destination data transfer size */
+ u32 soff:16; /* Signed source address offset */
+
+ /* 0x08 */
+ u32 nbytes; /* Inner "minor" byte count */
+ u32 slast; /* Last source address adjustment */
+ u32 daddr; /* Destination address */
+
+ /* 0x14 */
+ u32 citer_elink:1; /* Enable channel-to-channel linking on
+ * minor loop complete
+ */
+ u32 citer_linkch:6; /* Link channel for minor loop complete */
+ u32 citer:9; /* Current "major" iteration count */
+ u32 doff:16; /* Signed destination address offset */
+
+ /* 0x18 */
+ u32 dlast_sga; /* Last Destination address adjustment/scatter
+ * gather address
+ */
+
+ /* 0x1c */
+ u32 biter_elink:1; /* Enable channel-to-channel linking on major
+ * loop complete
+ */
+ u32 biter_linkch:6;
+ u32 biter:9; /* Beginning "major" iteration count */
+ u32 bwc:2; /* Bandwidth control */
+ u32 major_linkch:6; /* Link channel number */
+ u32 done:1; /* Channel done */
+ u32 active:1; /* Channel active */
+ u32 major_elink:1; /* Enable channel-to-channel linking on major
+ * loop complete
+ */
+ u32 e_sg:1; /* Enable scatter/gather processing */
+ u32 d_req:1; /* Disable request */
+ u32 int_half:1; /* Enable an interrupt when major counter is
+ * half complete
+ */
+ u32 int_maj:1; /* Enable an interrupt when major iteration
+ * count completes
+ */
+ u32 start:1; /* Channel start */
+};
+
+struct mpc_dma_desc {
+ struct dma_async_tx_descriptor desc;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ int error;
+ struct list_head node;
+};
+
+struct mpc_dma_chan {
+ struct dma_chan chan;
+ struct list_head free;
+ struct list_head prepared;
+ struct list_head queued;
+ struct list_head active;
+ struct list_head completed;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ dma_cookie_t completed_cookie;
+
+ /* Lock for this structure */
+ spinlock_t lock;
+};
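The five list heads above implement the per-channel descriptor lifecycle used throughout this driver. Traced from the functions further down in this file, the transitions are roughly:

	free      -> prepared   in mpc_dma_prep_memcpy()
	prepared  -> queued     in mpc_dma_tx_submit()
	queued    -> active     in mpc_dma_execute()
	active    -> completed  in mpc_dma_irq_process()
	completed -> free       in mpc_dma_tasklet(), after callbacks have run

Each of these list moves is made with mchan->lock held.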
+
+struct mpc_dma {
+ struct dma_device dma;
+ struct tasklet_struct tasklet;
+ struct mpc_dma_chan channels[MPC_DMA_CHANNELS];
+ struct mpc_dma_regs __iomem *regs;
+ struct mpc_dma_tcd __iomem *tcd;
+ int irq;
+ uint error_status;
+
+ /* Lock for error_status field in this structure */
+ spinlock_t error_status_lock;
+};
+
+#define DRV_NAME "mpc512x_dma"
+
+/* Convert struct dma_chan to struct mpc_dma_chan */
+static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
+{
+ return container_of(c, struct mpc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct mpc_dma */
+static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);
+ return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
+}
+
+/*
+ * Execute all queued DMA descriptors.
+ *
+ * Following requirements must be met while calling mpc_dma_execute():
+ * a) mchan->lock is acquired,
+ * b) mchan->active list is empty,
+ * c) mchan->queued list contains at least one entry.
+ */
+static void mpc_dma_execute(struct mpc_dma_chan *mchan)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
+ struct mpc_dma_desc *first = NULL;
+ struct mpc_dma_desc *prev = NULL;
+ struct mpc_dma_desc *mdesc;
+ int cid = mchan->chan.chan_id;
+
+ /* Move all queued descriptors to active list */
+ list_splice_tail_init(&mchan->queued, &mchan->active);
+
+ /* Chain descriptors into one transaction */
+ list_for_each_entry(mdesc, &mchan->active, node) {
+ if (!first)
+ first = mdesc;
+
+ if (!prev) {
+ prev = mdesc;
+ continue;
+ }
+
+ prev->tcd->dlast_sga = mdesc->tcd_paddr;
+ prev->tcd->e_sg = 1;
+ mdesc->tcd->start = 1;
+
+ prev = mdesc;
+ }
+
+ prev->tcd->start = 0;
+ prev->tcd->int_maj = 1;
+
+ /* Send first descriptor in chain into hardware */
+ memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));
+ out_8(&mdma->regs->dmassrt, cid);
+}
+
+/* Handle interrupt on one half of DMA controller (32 channels) */
+static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
+{
+ struct mpc_dma_chan *mchan;
+ struct mpc_dma_desc *mdesc;
+ u32 status = is | es;
+ int ch;
+
+ while ((ch = fls(status) - 1) >= 0) {
+ status &= ~(1 << ch);
+ mchan = &mdma->channels[ch + off];
+
+ spin_lock(&mchan->lock);
+
+ /* Check error status */
+ if (es & (1 << ch))
+ list_for_each_entry(mdesc, &mchan->active, node)
+ mdesc->error = -EIO;
+
+ /* Execute queued descriptors */
+ list_splice_tail_init(&mchan->active, &mchan->completed);
+ if (!list_empty(&mchan->queued))
+ mpc_dma_execute(mchan);
+
+ spin_unlock(&mchan->lock);
+ }
+}
+
+/* Interrupt handler */
+static irqreturn_t mpc_dma_irq(int irq, void *data)
+{
+ struct mpc_dma *mdma = data;
+ uint es;
+
+ /* Save error status register */
+ es = in_be32(&mdma->regs->dmaes);
+ spin_lock(&mdma->error_status_lock);
+ if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
+ mdma->error_status = es;
+ spin_unlock(&mdma->error_status_lock);
+
+ /* Handle interrupt on each channel */
+ mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
+ in_be32(&mdma->regs->dmaerrh), 32);
+ mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
+ in_be32(&mdma->regs->dmaerrl), 0);
+
+ /* Ack interrupt on all channels */
+ out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+ /* Schedule tasklet */
+ tasklet_schedule(&mdma->tasklet);
+
+ return IRQ_HANDLED;
+}
+
+/* DMA Tasklet */
+static void mpc_dma_tasklet(unsigned long data)
+{
+ struct mpc_dma *mdma = (void *)data;
+ dma_cookie_t last_cookie = 0;
+ struct mpc_dma_chan *mchan;
+ struct mpc_dma_desc *mdesc;
+ struct dma_async_tx_descriptor *desc;
+ unsigned long flags;
+ LIST_HEAD(list);
+ uint es;
+ int i;
+
+ spin_lock_irqsave(&mdma->error_status_lock, flags);
+ es = mdma->error_status;
+ mdma->error_status = 0;
+ spin_unlock_irqrestore(&mdma->error_status_lock, flags);
+
+ /* Print nice error report */
+ if (es) {
+ dev_err(mdma->dma.dev,
+ "Hardware reported following error(s) on channel %u:\n",
+ MPC_DMA_DMAES_ERRCHN(es));
+
+ if (es & MPC_DMA_DMAES_GPE)
+ dev_err(mdma->dma.dev, "- Group Priority Error\n");
+ if (es & MPC_DMA_DMAES_CPE)
+ dev_err(mdma->dma.dev, "- Channel Priority Error\n");
+ if (es & MPC_DMA_DMAES_SAE)
+ dev_err(mdma->dma.dev, "- Source Address Error\n");
+ if (es & MPC_DMA_DMAES_SOE)
+ dev_err(mdma->dma.dev, "- Source Offset"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_DAE)
+ dev_err(mdma->dma.dev, "- Destination Address"
+ " Error\n");
+ if (es & MPC_DMA_DMAES_DOE)
+ dev_err(mdma->dma.dev, "- Destination Offset"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_NCE)
+ dev_err(mdma->dma.dev, "- NBytes/Citter"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_SGE)
+ dev_err(mdma->dma.dev, "- Scatter/Gather"
+ " Configuration Error\n");
+ if (es & MPC_DMA_DMAES_SBE)
+ dev_err(mdma->dma.dev, "- Source Bus Error\n");
+ if (es & MPC_DMA_DMAES_DBE)
+ dev_err(mdma->dma.dev, "- Destination Bus Error\n");
+ }
+
+ for (i = 0; i < mdma->dma.chancnt; i++) {
+ mchan = &mdma->channels[i];
+
+ /* Get all completed descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ if (!list_empty(&mchan->completed))
+ list_splice_tail_init(&mchan->completed, &list);
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ if (list_empty(&list))
+ continue;
+
+ /* Execute callbacks and run dependencies */
+ list_for_each_entry(mdesc, &list, node) {
+ desc = &mdesc->desc;
+
+ if (desc->callback)
+ desc->callback(desc->callback_param);
+
+ last_cookie = desc->cookie;
+ dma_run_dependencies(desc);
+ }
+
+ /* Free descriptors */
+ spin_lock_irqsave(&mchan->lock, flags);
+ list_splice_tail_init(&list, &mchan->free);
+ mchan->completed_cookie = last_cookie;
+ spin_unlock_irqrestore(&mchan->lock, flags);
+ }
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
+ struct mpc_dma_desc *mdesc;
+ unsigned long flags;
+ dma_cookie_t cookie;
+
+ mdesc = container_of(txd, struct mpc_dma_desc, desc);
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ /* Move descriptor to queue */
+ list_move_tail(&mdesc->node, &mchan->queued);
+
+ /* If channel is idle, execute all queued descriptors */
+ if (list_empty(&mchan->active))
+ mpc_dma_execute(mchan);
+
+ /* Update cookie */
+ cookie = mchan->chan.cookie + 1;
+ if (cookie <= 0)
+ cookie = 1;
+
+ mchan->chan.cookie = cookie;
+ mdesc->desc.cookie = cookie;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ return cookie;
+}
+
+/* Alloc channel resources */
+static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ unsigned long flags;
+ LIST_HEAD(descs);
+ int i;
+
+ /* Alloc DMA memory for Transfer Control Descriptors */
+ tcd = dma_alloc_coherent(mdma->dma.dev,
+ MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
+ &tcd_paddr, GFP_KERNEL);
+ if (!tcd)
+ return -ENOMEM;
+
+ /* Alloc descriptors for this channel */
+ for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
+ mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
+ if (!mdesc) {
+ dev_notice(mdma->dma.dev, "Memory allocation error. "
+ "Allocated only %u descriptors\n", i);
+ break;
+ }
+
+ dma_async_tx_descriptor_init(&mdesc->desc, chan);
+ mdesc->desc.flags = DMA_CTRL_ACK;
+ mdesc->desc.tx_submit = mpc_dma_tx_submit;
+
+ mdesc->tcd = &tcd[i];
+ mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));
+
+ list_add_tail(&mdesc->node, &descs);
+ }
+
+ /* Return error only if no descriptors were allocated */
+ if (i == 0) {
+ dma_free_coherent(mdma->dma.dev,
+ MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
+ tcd, tcd_paddr);
+ return -ENOMEM;
+ }
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ mchan->tcd = tcd;
+ mchan->tcd_paddr = tcd_paddr;
+ list_splice_tail_init(&descs, &mchan->free);
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ /* Enable Error Interrupt */
+ out_8(&mdma->regs->dmaseei, chan->chan_id);
+
+ return 0;
+}
+
+/* Free channel resources */
+static void mpc_dma_free_chan_resources(struct dma_chan *chan)
+{
+ struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc, *tmp;
+ struct mpc_dma_tcd *tcd;
+ dma_addr_t tcd_paddr;
+ unsigned long flags;
+ LIST_HEAD(descs);
+
+ spin_lock_irqsave(&mchan->lock, flags);
+
+ /* Channel must be idle */
+ BUG_ON(!list_empty(&mchan->prepared));
+ BUG_ON(!list_empty(&mchan->queued));
+ BUG_ON(!list_empty(&mchan->active));
+ BUG_ON(!list_empty(&mchan->completed));
+
+ /* Move data */
+ list_splice_tail_init(&mchan->free, &descs);
+ tcd = mchan->tcd;
+ tcd_paddr = mchan->tcd_paddr;
+
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ /* Free DMA memory used by descriptors */
+ dma_free_coherent(mdma->dma.dev,
+ MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
+ tcd, tcd_paddr);
+
+ /* Free descriptors */
+ list_for_each_entry_safe(mdesc, tmp, &descs, node)
+ kfree(mdesc);
+
+ /* Disable Error Interrupt */
+ out_8(&mdma->regs->dmaceei, chan->chan_id);
+}
+
+/* Send all pending descriptor to hardware */
+static void mpc_dma_issue_pending(struct dma_chan *chan)
+{
+ /*
+ * We are posting descriptors to the hardware as soon as
+ * they are ready, so this function does nothing.
+ */
+}
+
+/* Check request completion status */
+static enum dma_status
+mpc_dma_is_tx_complete(struct dma_chan *chan, dma_cookie_t cookie,
+ dma_cookie_t *done, dma_cookie_t *used)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ unsigned long flags;
+ dma_cookie_t last_used;
+ dma_cookie_t last_complete;
+
+ spin_lock_irqsave(&mchan->lock, flags);
+ last_used = mchan->chan.cookie;
+ last_complete = mchan->completed_cookie;
+ spin_unlock_irqrestore(&mchan->lock, flags);
+
+ if (done)
+ *done = last_complete;
+
+ if (used)
+ *used = last_used;
+
+ return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+/* Prepare descriptor for memory to memory copy */
+static struct dma_async_tx_descriptor *
+mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
+ size_t len, unsigned long flags)
+{
+ struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+ struct mpc_dma_desc *mdesc = NULL;
+ struct mpc_dma_tcd *tcd;
+ unsigned long iflags;
+
+ /* Get free descriptor */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ if (!list_empty(&mchan->free)) {
+ mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
+ node);
+ list_del(&mdesc->node);
+ }
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ if (!mdesc)
+ return NULL;
+
+ mdesc->error = 0;
+ tcd = mdesc->tcd;
+
+ /* Prepare Transfer Control Descriptor for this transaction */
+ memset(tcd, 0, sizeof(struct mpc_dma_tcd));
+
+ if (IS_ALIGNED(src | dst | len, 32)) {
+ tcd->ssize = MPC_DMA_TSIZE_32;
+ tcd->dsize = MPC_DMA_TSIZE_32;
+ tcd->soff = 32;
+ tcd->doff = 32;
+ } else if (IS_ALIGNED(src | dst | len, 16)) {
+ tcd->ssize = MPC_DMA_TSIZE_16;
+ tcd->dsize = MPC_DMA_TSIZE_16;
+ tcd->soff = 16;
+ tcd->doff = 16;
+ } else if (IS_ALIGNED(src | dst | len, 4)) {
+ tcd->ssize = MPC_DMA_TSIZE_4;
+ tcd->dsize = MPC_DMA_TSIZE_4;
+ tcd->soff = 4;
+ tcd->doff = 4;
+ } else if (IS_ALIGNED(src | dst | len, 2)) {
+ tcd->ssize = MPC_DMA_TSIZE_2;
+ tcd->dsize = MPC_DMA_TSIZE_2;
+ tcd->soff = 2;
+ tcd->doff = 2;
+ } else {
+ tcd->ssize = MPC_DMA_TSIZE_1;
+ tcd->dsize = MPC_DMA_TSIZE_1;
+ tcd->soff = 1;
+ tcd->doff = 1;
+ }
+
+ tcd->saddr = src;
+ tcd->daddr = dst;
+ tcd->nbytes = len;
+ tcd->biter = 1;
+ tcd->citer = 1;
+
+ /* Place descriptor in prepared list */
+ spin_lock_irqsave(&mchan->lock, iflags);
+ list_add_tail(&mdesc->node, &mchan->prepared);
+ spin_unlock_irqrestore(&mchan->lock, iflags);
+
+ return &mdesc->desc;
+}
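To make the alignment-driven size selection above concrete, here are a few hypothetical inputs and the TCD fields they would produce; the buffer addresses are made up, the resulting values follow directly from the code above:

	/* src=0x1000, dst=0x2000, len=4096 -> 32-byte aligned:            */
	/*     ssize = dsize = MPC_DMA_TSIZE_32, soff = doff = 32          */
	/* src=0x1004, dst=0x2008, len=4096 -> only 4-byte aligned:        */
	/*     ssize = dsize = MPC_DMA_TSIZE_4,  soff = doff = 4           */
	/* src=0x1001, dst=0x2000, len=4095 -> byte aligned only:          */
	/*     ssize = dsize = MPC_DMA_TSIZE_1,  soff = doff = 1           */
	/* In every case nbytes = len and citer = biter = 1, i.e. the      */
	/* whole copy is one minor loop stepped in soff/doff-sized bursts. */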
+
+static int __devinit mpc_dma_probe(struct of_device *op,
+ const struct of_device_id *match)
+{
+ struct device_node *dn = op->node;
+ struct device *dev = &op->dev;
+ struct dma_device *dma;
+ struct mpc_dma *mdma;
+ struct mpc_dma_chan *mchan;
+ struct resource res;
+ ulong regs_start, regs_size;
+ int retval, i;
+
+ mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
+ if (!mdma) {
+ dev_err(dev, "Memory exhausted!\n");
+ return -ENOMEM;
+ }
+
+ mdma->irq = irq_of_parse_and_map(dn, 0);
+ if (mdma->irq == NO_IRQ) {
+ dev_err(dev, "Error mapping IRQ!\n");
+ return -EINVAL;
+ }
+
+ retval = of_address_to_resource(dn, 0, &res);
+ if (retval) {
+ dev_err(dev, "Error parsing memory region!\n");
+ return retval;
+ }
+
+ regs_start = res.start;
+ regs_size = res.end - res.start + 1;
+
+ if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
+ dev_err(dev, "Error requesting memory region!\n");
+ return -EBUSY;
+ }
+
+ mdma->regs = devm_ioremap(dev, regs_start, regs_size);
+ if (!mdma->regs) {
+ dev_err(dev, "Error mapping memory region!\n");
+ return -ENOMEM;
+ }
+
+ mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
+ + MPC_DMA_TCD_OFFSET);
+
+ retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
+ mdma);
+ if (retval) {
+ dev_err(dev, "Error requesting IRQ!\n");
+ return -EINVAL;
+ }
+
+ spin_lock_init(&mdma->error_status_lock);
+
+ dma = &mdma->dma;
+ dma->dev = dev;
+ dma->chancnt = MPC_DMA_CHANNELS;
+ dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
+ dma->device_free_chan_resources = mpc_dma_free_chan_resources;
+ dma->device_issue_pending = mpc_dma_issue_pending;
+ dma->device_is_tx_complete = mpc_dma_is_tx_complete;
+ dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
+
+ INIT_LIST_HEAD(&dma->channels);
+ dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+
+ for (i = 0; i < dma->chancnt; i++) {
+ mchan = &mdma->channels[i];
+
+ mchan->chan.device = dma;
+ mchan->chan.chan_id = i;
+ mchan->chan.cookie = 1;
+ mchan->completed_cookie = mchan->chan.cookie;
+
+ INIT_LIST_HEAD(&mchan->free);
+ INIT_LIST_HEAD(&mchan->prepared);
+ INIT_LIST_HEAD(&mchan->queued);
+ INIT_LIST_HEAD(&mchan->active);
+ INIT_LIST_HEAD(&mchan->completed);
+
+ spin_lock_init(&mchan->lock);
+ list_add_tail(&mchan->chan.device_node, &dma->channels);
+ }
+
+ tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);
+
+ /*
+ * Configure DMA Engine:
+ * - Dynamic clock,
+ * - Round-robin group arbitration,
+ * - Round-robin channel arbitration.
+ */
+ out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
+ MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);
+
+ /* Disable hardware DMA requests */
+ out_be32(&mdma->regs->dmaerqh, 0);
+ out_be32(&mdma->regs->dmaerql, 0);
+
+ /* Disable error interrupts */
+ out_be32(&mdma->regs->dmaeeih, 0);
+ out_be32(&mdma->regs->dmaeeil, 0);
+
+ /* Clear interrupts status */
+ out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
+ out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);
+
+ /* Route interrupts to IPIC */
+ out_be32(&mdma->regs->dmaihsa, 0);
+ out_be32(&mdma->regs->dmailsa, 0);
+
+ /* Register DMA engine */
+ dev_set_drvdata(dev, mdma);
+ retval = dma_async_device_register(dma);
+ if (retval) {
+ devm_free_irq(dev, mdma->irq, mdma);
+ irq_dispose_mapping(mdma->irq);
+ }
+
+ return retval;
+}
+
+static int __devexit mpc_dma_remove(struct of_device *op)
+{
+ struct device *dev = &op->dev;
+ struct mpc_dma *mdma = dev_get_drvdata(dev);
+
+ dma_async_device_unregister(&mdma->dma);
+ devm_free_irq(dev, mdma->irq, mdma);
+ irq_dispose_mapping(mdma->irq);
+
+ return 0;
+}
+
+static struct of_device_id mpc_dma_match[] = {
+ { .compatible = "fsl,mpc5121-dma", },
+ {},
+};
+
+static struct of_platform_driver mpc_dma_driver = {
+ .match_table = mpc_dma_match,
+ .probe = mpc_dma_probe,
+ .remove = __devexit_p(mpc_dma_remove),
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init mpc_dma_init(void)
+{
+ return of_register_platform_driver(&mpc_dma_driver);
+}
+module_init(mpc_dma_init);
+
+static void __exit mpc_dma_exit(void)
+{
+ of_unregister_platform_driver(&mpc_dma_driver);
+}
+module_exit(mpc_dma_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
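For illustration only (this is not part of the patch), a minimal sketch of how a dmaengine client would exercise the DMA_MEMCPY capability registered above, assuming a 2.6.33-era dmaengine API and leaving the DMA mapping of the buffers to the caller:

	#include <linux/dmaengine.h>
	#include <linux/errno.h>

	static int demo_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
	{
		struct dma_async_tx_descriptor *tx;
		struct dma_chan *chan;
		dma_cap_mask_t mask;
		dma_cookie_t cookie;

		dma_cap_zero(mask);
		dma_cap_set(DMA_MEMCPY, mask);

		/* any channel advertising MEMCPY will do */
		chan = dma_request_channel(mask, NULL, NULL);
		if (!chan)
			return -ENODEV;

		/* lands in mpc_dma_prep_memcpy() above */
		tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
							   DMA_CTRL_ACK);
		if (!tx) {
			dma_release_channel(chan);
			return -ENOMEM;
		}

		cookie = tx->tx_submit(tx);	/* mpc_dma_tx_submit() */
		dma_async_issue_pending(chan);	/* a no-op for this driver */

		/* busy-wait; answered by mpc_dma_is_tx_complete() */
		while (dma_async_is_tx_complete(chan, cookie, NULL, NULL)
							!= DMA_SUCCESS)
			cpu_relax();

		dma_release_channel(chan);
		return 0;
	}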
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 0a3478e910f..e69d87f24a2 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -4940,7 +4940,7 @@ out_free:
return ret;
}
-static struct of_device_id __devinitdata ppc440spe_adma_of_match[] = {
+static const struct of_device_id ppc440spe_adma_of_match[] __devinitconst = {
{ .compatible = "ibm,dma-440spe", },
{ .compatible = "amcc,xor-accelerator", },
{},
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 3391e6739d0..cf17dbb8014 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -13,7 +13,7 @@ module_param(report_gart_errors, int, 0644);
static int ecc_enable_override;
module_param(ecc_enable_override, int, 0644);
-static struct msr *msrs;
+static struct msr __percpu *msrs;
/* Lookup table for all possible MC control instances */
struct amd64_pvt;
@@ -2553,14 +2553,14 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
if (on) {
if (reg->l & K8_MSR_MCGCTL_NBE)
- pvt->flags.ecc_report = 1;
+ pvt->flags.nb_mce_enable = 1;
reg->l |= K8_MSR_MCGCTL_NBE;
} else {
/*
- * Turn off ECC reporting only when it was off before
+ * Turn off NB MCE reporting only when it was off before
*/
- if (!pvt->flags.ecc_report)
+ if (!pvt->flags.nb_mce_enable)
reg->l &= ~K8_MSR_MCGCTL_NBE;
}
}
@@ -2571,22 +2571,11 @@ static int amd64_toggle_ecc_err_reporting(struct amd64_pvt *pvt, bool on)
return 0;
}
-/*
- * Only if 'ecc_enable_override' is set AND BIOS had ECC disabled, do "we"
- * enable it.
- */
static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
{
struct amd64_pvt *pvt = mci->pvt_info;
u32 value, mask = K8_NBCTL_CECCEn | K8_NBCTL_UECCEn;
- if (!ecc_enable_override)
- return;
-
- amd64_printk(KERN_WARNING,
- "'ecc_enable_override' parameter is active, "
- "Enabling AMD ECC hardware now: CAUTION\n");
-
amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCTL, &value);
/* turn on UECCn and CECCEn bits */
@@ -2611,6 +2600,8 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
"This node reports that DRAM ECC is "
"currently Disabled; ENABLING now\n");
+ pvt->flags.nb_ecc_prev = 0;
+
/* Attempt to turn on DRAM ECC Enable */
value |= K8_NBCFG_ECC_ENABLE;
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
@@ -2625,7 +2616,10 @@ static void amd64_enable_ecc_error_reporting(struct mem_ctl_info *mci)
amd64_printk(KERN_DEBUG,
"Hardware accepted DRAM ECC Enable\n");
}
+ } else {
+ pvt->flags.nb_ecc_prev = 1;
}
+
debugf0("NBCFG(2)= 0x%x CHIPKILL= %s ECC_ENABLE= %s\n", value,
(value & K8_NBCFG_CHIPKILL) ? "Enabled" : "Disabled",
(value & K8_NBCFG_ECC_ENABLE) ? "Enabled" : "Disabled");
@@ -2644,12 +2638,18 @@ static void amd64_restore_ecc_error_reporting(struct amd64_pvt *pvt)
value &= ~mask;
value |= pvt->old_nbctl;
- /* restore the NB Enable MCGCTL bit */
pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCTL, value);
+ /* restore previous BIOS DRAM ECC "off" setting which we force-enabled */
+ if (!pvt->flags.nb_ecc_prev) {
+ amd64_read_pci_cfg(pvt->misc_f3_ctl, K8_NBCFG, &value);
+ value &= ~K8_NBCFG_ECC_ENABLE;
+ pci_write_config_dword(pvt->misc_f3_ctl, K8_NBCFG, value);
+ }
+
+ /* restore the NB Enable MCGCTL bit */
if (amd64_toggle_ecc_err_reporting(pvt, OFF))
- amd64_printk(KERN_WARNING, "Error restoring ECC reporting over "
- "MCGCTL!\n");
+ amd64_printk(KERN_WARNING, "Error restoring NB MCGCTL settings!\n");
}
/*
@@ -2690,8 +2690,9 @@ static int amd64_check_ecc_enabled(struct amd64_pvt *pvt)
if (!ecc_enable_override) {
amd64_printk(KERN_NOTICE, "%s", ecc_msg);
return -ENODEV;
+ } else {
+ amd64_printk(KERN_WARNING, "Forcing ECC checking on!\n");
}
- ecc_enable_override = 0;
}
return 0;
diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h
index 41bc561e598..0d4bf563824 100644
--- a/drivers/edac/amd64_edac.h
+++ b/drivers/edac/amd64_edac.h
@@ -487,7 +487,8 @@ struct amd64_pvt {
/* misc settings */
struct flags {
unsigned long cf8_extcfg:1;
- unsigned long ecc_report:1;
+ unsigned long nb_mce_enable:1;
+ unsigned long nb_ecc_prev:1;
} flags;
};
diff --git a/drivers/eisa/eisa-bus.c b/drivers/eisa/eisa-bus.c
index 66958b3f10b..806c77bfd43 100644
--- a/drivers/eisa/eisa-bus.c
+++ b/drivers/eisa/eisa-bus.c
@@ -39,10 +39,10 @@ static unsigned int enable_dev_count;
static int disable_dev[EISA_MAX_FORCED_DEV];
static unsigned int disable_dev_count;
-static int is_forced_dev (int *forced_tab,
- int forced_count,
- struct eisa_root_device *root,
- struct eisa_device *edev)
+static int is_forced_dev(int *forced_tab,
+ int forced_count,
+ struct eisa_root_device *root,
+ struct eisa_device *edev)
{
int i, x;
@@ -55,21 +55,21 @@ static int is_forced_dev (int *forced_tab,
return 0;
}
-static void __init eisa_name_device (struct eisa_device *edev)
+static void __init eisa_name_device(struct eisa_device *edev)
{
#ifdef CONFIG_EISA_NAMES
int i;
for (i = 0; i < EISA_INFOS; i++) {
- if (!strcmp (edev->id.sig, eisa_table[i].id.sig)) {
- strlcpy (edev->pretty_name,
- eisa_table[i].name,
- sizeof(edev->pretty_name));
+ if (!strcmp(edev->id.sig, eisa_table[i].id.sig)) {
+ strlcpy(edev->pretty_name,
+ eisa_table[i].name,
+ sizeof(edev->pretty_name));
return;
}
}
/* No name was found */
- sprintf (edev->pretty_name, "EISA device %.7s", edev->id.sig);
+ sprintf(edev->pretty_name, "EISA device %.7s", edev->id.sig);
#endif
}
@@ -91,7 +91,7 @@ static char __init *decode_eisa_sig(unsigned long addr)
*/
outb(0x80 + i, addr);
#endif
- sig[i] = inb (addr + i);
+ sig[i] = inb(addr + i);
if (!i && (sig[0] & 0x80))
return NULL;
@@ -106,17 +106,17 @@ static char __init *decode_eisa_sig(unsigned long addr)
return sig_str;
}
-static int eisa_bus_match (struct device *dev, struct device_driver *drv)
+static int eisa_bus_match(struct device *dev, struct device_driver *drv)
{
- struct eisa_device *edev = to_eisa_device (dev);
- struct eisa_driver *edrv = to_eisa_driver (drv);
+ struct eisa_device *edev = to_eisa_device(dev);
+ struct eisa_driver *edrv = to_eisa_driver(drv);
const struct eisa_device_id *eids = edrv->id_table;
if (!eids)
return 0;
- while (strlen (eids->sig)) {
- if (!strcmp (eids->sig, edev->id.sig) &&
+ while (strlen(eids->sig)) {
+ if (!strcmp(eids->sig, edev->id.sig) &&
edev->state & EISA_CONFIG_ENABLED) {
edev->id.driver_data = eids->driver_data;
return 1;
@@ -141,61 +141,71 @@ struct bus_type eisa_bus_type = {
.match = eisa_bus_match,
.uevent = eisa_bus_uevent,
};
+EXPORT_SYMBOL(eisa_bus_type);
-int eisa_driver_register (struct eisa_driver *edrv)
+int eisa_driver_register(struct eisa_driver *edrv)
{
edrv->driver.bus = &eisa_bus_type;
- return driver_register (&edrv->driver);
+ return driver_register(&edrv->driver);
}
+EXPORT_SYMBOL(eisa_driver_register);
-void eisa_driver_unregister (struct eisa_driver *edrv)
+void eisa_driver_unregister(struct eisa_driver *edrv)
{
- driver_unregister (&edrv->driver);
+ driver_unregister(&edrv->driver);
}
+EXPORT_SYMBOL(eisa_driver_unregister);
-static ssize_t eisa_show_sig (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t eisa_show_sig(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
- struct eisa_device *edev = to_eisa_device (dev);
- return sprintf (buf,"%s\n", edev->id.sig);
+ struct eisa_device *edev = to_eisa_device(dev);
+ return sprintf(buf, "%s\n", edev->id.sig);
}
static DEVICE_ATTR(signature, S_IRUGO, eisa_show_sig, NULL);
-static ssize_t eisa_show_state (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t eisa_show_state(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct eisa_device *edev = to_eisa_device (dev);
- return sprintf (buf,"%d\n", edev->state & EISA_CONFIG_ENABLED);
+ struct eisa_device *edev = to_eisa_device(dev);
+ return sprintf(buf, "%d\n", edev->state & EISA_CONFIG_ENABLED);
}
static DEVICE_ATTR(enabled, S_IRUGO, eisa_show_state, NULL);
-static ssize_t eisa_show_modalias (struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t eisa_show_modalias(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
- struct eisa_device *edev = to_eisa_device (dev);
- return sprintf (buf, EISA_DEVICE_MODALIAS_FMT "\n", edev->id.sig);
+ struct eisa_device *edev = to_eisa_device(dev);
+ return sprintf(buf, EISA_DEVICE_MODALIAS_FMT "\n", edev->id.sig);
}
static DEVICE_ATTR(modalias, S_IRUGO, eisa_show_modalias, NULL);
-static int __init eisa_init_device (struct eisa_root_device *root,
- struct eisa_device *edev,
- int slot)
+static int __init eisa_init_device(struct eisa_root_device *root,
+ struct eisa_device *edev,
+ int slot)
{
char *sig;
- unsigned long sig_addr;
+ unsigned long sig_addr;
int i;
- sig_addr = SLOT_ADDRESS (root, slot) + EISA_VENDOR_ID_OFFSET;
+ sig_addr = SLOT_ADDRESS(root, slot) + EISA_VENDOR_ID_OFFSET;
- if (!(sig = decode_eisa_sig (sig_addr)))
+ sig = decode_eisa_sig(sig_addr);
+ if (!sig)
return -1; /* No EISA device here */
- memcpy (edev->id.sig, sig, EISA_SIG_LEN);
+ memcpy(edev->id.sig, sig, EISA_SIG_LEN);
edev->slot = slot;
- edev->state = inb (SLOT_ADDRESS (root, slot) + EISA_CONFIG_OFFSET) & EISA_CONFIG_ENABLED;
- edev->base_addr = SLOT_ADDRESS (root, slot);
+ edev->state = inb(SLOT_ADDRESS(root, slot) + EISA_CONFIG_OFFSET)
+ & EISA_CONFIG_ENABLED;
+ edev->base_addr = SLOT_ADDRESS(root, slot);
edev->dma_mask = root->dma_mask; /* Default DMA mask */
- eisa_name_device (edev);
+ eisa_name_device(edev);
edev->dev.parent = root->dev;
edev->dev.bus = &eisa_bus_type;
edev->dev.dma_mask = &edev->dma_mask;
@@ -210,42 +220,45 @@ static int __init eisa_init_device (struct eisa_root_device *root,
#endif
}
- if (is_forced_dev (enable_dev, enable_dev_count, root, edev))
+ if (is_forced_dev(enable_dev, enable_dev_count, root, edev))
edev->state = EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED;
- if (is_forced_dev (disable_dev, disable_dev_count, root, edev))
+ if (is_forced_dev(disable_dev, disable_dev_count, root, edev))
edev->state = EISA_CONFIG_FORCED;
return 0;
}
-static int __init eisa_register_device (struct eisa_device *edev)
+static int __init eisa_register_device(struct eisa_device *edev)
{
- int rc = device_register (&edev->dev);
+ int rc = device_register(&edev->dev);
if (rc)
return rc;
- rc = device_create_file (&edev->dev, &dev_attr_signature);
- if (rc) goto err_devreg;
- rc = device_create_file (&edev->dev, &dev_attr_enabled);
- if (rc) goto err_sig;
- rc = device_create_file (&edev->dev, &dev_attr_modalias);
- if (rc) goto err_enab;
+ rc = device_create_file(&edev->dev, &dev_attr_signature);
+ if (rc)
+ goto err_devreg;
+ rc = device_create_file(&edev->dev, &dev_attr_enabled);
+ if (rc)
+ goto err_sig;
+ rc = device_create_file(&edev->dev, &dev_attr_modalias);
+ if (rc)
+ goto err_enab;
return 0;
err_enab:
- device_remove_file (&edev->dev, &dev_attr_enabled);
+ device_remove_file(&edev->dev, &dev_attr_enabled);
err_sig:
- device_remove_file (&edev->dev, &dev_attr_signature);
+ device_remove_file(&edev->dev, &dev_attr_signature);
err_devreg:
device_unregister(&edev->dev);
return rc;
}
-static int __init eisa_request_resources (struct eisa_root_device *root,
- struct eisa_device *edev,
- int slot)
+static int __init eisa_request_resources(struct eisa_root_device *root,
+ struct eisa_device *edev,
+ int slot)
{
int i;
@@ -263,17 +276,19 @@ static int __init eisa_request_resources (struct eisa_root_device *root,
if (slot) {
edev->res[i].name = NULL;
- edev->res[i].start = SLOT_ADDRESS (root, slot) + (i * 0x400);
+ edev->res[i].start = SLOT_ADDRESS(root, slot)
+ + (i * 0x400);
edev->res[i].end = edev->res[i].start + 0xff;
edev->res[i].flags = IORESOURCE_IO;
} else {
edev->res[i].name = NULL;
- edev->res[i].start = SLOT_ADDRESS (root, slot) + EISA_VENDOR_ID_OFFSET;
+ edev->res[i].start = SLOT_ADDRESS(root, slot)
+ + EISA_VENDOR_ID_OFFSET;
edev->res[i].end = edev->res[i].start + 3;
edev->res[i].flags = IORESOURCE_BUSY;
}
- if (request_resource (root->res, &edev->res[i]))
+ if (request_resource(root->res, &edev->res[i]))
goto failed;
}
@@ -281,99 +296,100 @@ static int __init eisa_request_resources (struct eisa_root_device *root,
failed:
while (--i >= 0)
- release_resource (&edev->res[i]);
+ release_resource(&edev->res[i]);
return -1;
}
-static void __init eisa_release_resources (struct eisa_device *edev)
+static void __init eisa_release_resources(struct eisa_device *edev)
{
int i;
for (i = 0; i < EISA_MAX_RESOURCES; i++)
if (edev->res[i].start || edev->res[i].end)
- release_resource (&edev->res[i]);
+ release_resource(&edev->res[i]);
}
-static int __init eisa_probe (struct eisa_root_device *root)
+static int __init eisa_probe(struct eisa_root_device *root)
{
int i, c;
struct eisa_device *edev;
- printk (KERN_INFO "EISA: Probing bus %d at %s\n",
- root->bus_nr, dev_name(root->dev));
+ printk(KERN_INFO "EISA: Probing bus %d at %s\n",
+ root->bus_nr, dev_name(root->dev));
/* First try to get hold of slot 0. If there is no device
* here, simply fail, unless root->force_probe is set. */
- if (!(edev = kzalloc (sizeof (*edev), GFP_KERNEL))) {
- printk (KERN_ERR "EISA: Couldn't allocate mainboard slot\n");
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev) {
+ printk(KERN_ERR "EISA: Couldn't allocate mainboard slot\n");
return -ENOMEM;
}
- if (eisa_request_resources (root, edev, 0)) {
- printk (KERN_WARNING \
- "EISA: Cannot allocate resource for mainboard\n");
- kfree (edev);
+ if (eisa_request_resources(root, edev, 0)) {
+ printk(KERN_WARNING \
+ "EISA: Cannot allocate resource for mainboard\n");
+ kfree(edev);
if (!root->force_probe)
return -EBUSY;
goto force_probe;
}
- if (eisa_init_device (root, edev, 0)) {
- eisa_release_resources (edev);
- kfree (edev);
+ if (eisa_init_device(root, edev, 0)) {
+ eisa_release_resources(edev);
+ kfree(edev);
if (!root->force_probe)
return -ENODEV;
goto force_probe;
}
- printk (KERN_INFO "EISA: Mainboard %s detected.\n", edev->id.sig);
+ printk(KERN_INFO "EISA: Mainboard %s detected.\n", edev->id.sig);
- if (eisa_register_device (edev)) {
- printk (KERN_ERR "EISA: Failed to register %s\n",
- edev->id.sig);
- eisa_release_resources (edev);
- kfree (edev);
+ if (eisa_register_device(edev)) {
+ printk(KERN_ERR "EISA: Failed to register %s\n",
+ edev->id.sig);
+ eisa_release_resources(edev);
+ kfree(edev);
}
force_probe:
for (c = 0, i = 1; i <= root->slots; i++) {
- if (!(edev = kzalloc (sizeof (*edev), GFP_KERNEL))) {
- printk (KERN_ERR "EISA: Out of memory for slot %d\n",
- i);
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev) {
+ printk(KERN_ERR "EISA: Out of memory for slot %d\n", i);
continue;
}
- if (eisa_request_resources (root, edev, i)) {
- printk (KERN_WARNING \
- "Cannot allocate resource for EISA slot %d\n",
- i);
- kfree (edev);
+ if (eisa_request_resources(root, edev, i)) {
+ printk(KERN_WARNING \
+ "Cannot allocate resource for EISA slot %d\n",
+ i);
+ kfree(edev);
continue;
}
- if (eisa_init_device (root, edev, i)) {
- eisa_release_resources (edev);
- kfree (edev);
+ if (eisa_init_device(root, edev, i)) {
+ eisa_release_resources(edev);
+ kfree(edev);
continue;
}
- printk (KERN_INFO "EISA: slot %d : %s detected",
- i, edev->id.sig);
+ printk(KERN_INFO "EISA: slot %d : %s detected",
+ i, edev->id.sig);
switch (edev->state) {
case EISA_CONFIG_ENABLED | EISA_CONFIG_FORCED:
- printk (" (forced enabled)");
+ printk(" (forced enabled)");
break;
case EISA_CONFIG_FORCED:
- printk (" (forced disabled)");
+ printk(" (forced disabled)");
break;
case 0:
- printk (" (disabled)");
+ printk(" (disabled)");
break;
}
@@ -381,15 +397,15 @@ static int __init eisa_probe (struct eisa_root_device *root)
c++;
- if (eisa_register_device (edev)) {
- printk (KERN_ERR "EISA: Failed to register %s\n",
- edev->id.sig);
- eisa_release_resources (edev);
- kfree (edev);
+ if (eisa_register_device(edev)) {
+ printk(KERN_ERR "EISA: Failed to register %s\n",
+ edev->id.sig);
+ eisa_release_resources(edev);
+ kfree(edev);
}
}
- printk (KERN_INFO "EISA: Detected %d card%s.\n", c, c == 1 ? "" : "s");
+ printk(KERN_INFO "EISA: Detected %d card%s.\n", c, c == 1 ? "" : "s");
return 0;
}
@@ -403,7 +419,7 @@ static struct resource eisa_root_res = {
static int eisa_bus_count;
-int __init eisa_root_register (struct eisa_root_device *root)
+int __init eisa_root_register(struct eisa_root_device *root)
{
int err;
@@ -417,35 +433,35 @@ int __init eisa_root_register (struct eisa_root_device *root)
root->eisa_root_res.end = root->res->end;
root->eisa_root_res.flags = IORESOURCE_BUSY;
- if ((err = request_resource (&eisa_root_res, &root->eisa_root_res)))
+ err = request_resource(&eisa_root_res, &root->eisa_root_res);
+ if (err)
return err;
root->bus_nr = eisa_bus_count++;
- if ((err = eisa_probe (root)))
- release_resource (&root->eisa_root_res);
+ err = eisa_probe(root);
+ if (err)
+ release_resource(&root->eisa_root_res);
return err;
}
-static int __init eisa_init (void)
+static int __init eisa_init(void)
{
int r;
- if ((r = bus_register (&eisa_bus_type)))
+ r = bus_register(&eisa_bus_type);
+ if (r)
return r;
- printk (KERN_INFO "EISA bus registered\n");
+ printk(KERN_INFO "EISA bus registered\n");
return 0;
}
module_param_array(enable_dev, int, &enable_dev_count, 0444);
module_param_array(disable_dev, int, &disable_dev_count, 0444);
-postcore_initcall (eisa_init);
+postcore_initcall(eisa_init);
int EISA_bus; /* for legacy drivers */
-EXPORT_SYMBOL (EISA_bus);
-EXPORT_SYMBOL (eisa_bus_type);
-EXPORT_SYMBOL (eisa_driver_register);
-EXPORT_SYMBOL (eisa_driver_unregister);
+EXPORT_SYMBOL(EISA_bus);
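For context on the eisa_driver_register()/eisa_driver_unregister() exports moved next to their definitions above, a hedged sketch of a minimal EISA driver; the driver name and signature table are made up:

	#include <linux/eisa.h>
	#include <linux/module.h>

	static const struct eisa_device_id demo_eisa_ids[] = {
		{ "ABC1234" },		/* hypothetical board signature */
		{ "" }
	};
	MODULE_DEVICE_TABLE(eisa, demo_eisa_ids);

	static int __devinit demo_eisa_probe(struct device *dev)
	{
		struct eisa_device *edev = to_eisa_device(dev);

		dev_info(dev, "EISA signature %s in slot %d\n",
			 edev->id.sig, edev->slot);
		return 0;
	}

	static struct eisa_driver demo_eisa_driver = {
		.id_table = demo_eisa_ids,
		.driver = {
			.name  = "demo_eisa",
			.probe = demo_eisa_probe,
		},
	};

	static int __init demo_init(void)
	{
		return eisa_driver_register(&demo_eisa_driver);
	}
	module_init(demo_init);

	static void __exit demo_exit(void)
	{
		eisa_driver_unregister(&demo_eisa_driver);
	}
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");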
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 4eeaed57e21..8be720b278b 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -25,6 +25,7 @@
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
+#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -32,7 +33,6 @@
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
-#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/string.h>
@@ -368,39 +368,56 @@ void fw_device_cdev_remove(struct fw_device *device)
for_each_client(device, wake_up_client);
}
-static int ioctl_get_info(struct client *client, void *buffer)
+union ioctl_arg {
+ struct fw_cdev_get_info get_info;
+ struct fw_cdev_send_request send_request;
+ struct fw_cdev_allocate allocate;
+ struct fw_cdev_deallocate deallocate;
+ struct fw_cdev_send_response send_response;
+ struct fw_cdev_initiate_bus_reset initiate_bus_reset;
+ struct fw_cdev_add_descriptor add_descriptor;
+ struct fw_cdev_remove_descriptor remove_descriptor;
+ struct fw_cdev_create_iso_context create_iso_context;
+ struct fw_cdev_queue_iso queue_iso;
+ struct fw_cdev_start_iso start_iso;
+ struct fw_cdev_stop_iso stop_iso;
+ struct fw_cdev_get_cycle_timer get_cycle_timer;
+ struct fw_cdev_allocate_iso_resource allocate_iso_resource;
+ struct fw_cdev_send_stream_packet send_stream_packet;
+ struct fw_cdev_get_cycle_timer2 get_cycle_timer2;
+};
+
+static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_get_info *get_info = buffer;
+ struct fw_cdev_get_info *a = &arg->get_info;
struct fw_cdev_event_bus_reset bus_reset;
unsigned long ret = 0;
- client->version = get_info->version;
- get_info->version = FW_CDEV_VERSION;
- get_info->card = client->device->card->index;
+ client->version = a->version;
+ a->version = FW_CDEV_VERSION;
+ a->card = client->device->card->index;
down_read(&fw_device_rwsem);
- if (get_info->rom != 0) {
- void __user *uptr = u64_to_uptr(get_info->rom);
- size_t want = get_info->rom_length;
+ if (a->rom != 0) {
+ size_t want = a->rom_length;
size_t have = client->device->config_rom_length * 4;
- ret = copy_to_user(uptr, client->device->config_rom,
- min(want, have));
+ ret = copy_to_user(u64_to_uptr(a->rom),
+ client->device->config_rom, min(want, have));
}
- get_info->rom_length = client->device->config_rom_length * 4;
+ a->rom_length = client->device->config_rom_length * 4;
up_read(&fw_device_rwsem);
if (ret != 0)
return -EFAULT;
- client->bus_reset_closure = get_info->bus_reset_closure;
- if (get_info->bus_reset != 0) {
- void __user *uptr = u64_to_uptr(get_info->bus_reset);
-
+ client->bus_reset_closure = a->bus_reset_closure;
+ if (a->bus_reset != 0) {
fill_bus_reset_event(&bus_reset, client);
- if (copy_to_user(uptr, &bus_reset, sizeof(bus_reset)))
+ if (copy_to_user(u64_to_uptr(a->bus_reset),
+ &bus_reset, sizeof(bus_reset)))
return -EFAULT;
}
@@ -571,11 +588,9 @@ static int init_request(struct client *client,
return ret;
}
-static int ioctl_send_request(struct client *client, void *buffer)
+static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_send_request *request = buffer;
-
- switch (request->tcode) {
+ switch (arg->send_request.tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
case TCODE_READ_QUADLET_REQUEST:
@@ -592,7 +607,7 @@ static int ioctl_send_request(struct client *client, void *buffer)
return -EINVAL;
}
- return init_request(client, request, client->device->node_id,
+ return init_request(client, &arg->send_request, client->device->node_id,
client->device->max_speed);
}
@@ -683,9 +698,9 @@ static void release_address_handler(struct client *client,
kfree(r);
}
-static int ioctl_allocate(struct client *client, void *buffer)
+static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_allocate *request = buffer;
+ struct fw_cdev_allocate *a = &arg->allocate;
struct address_handler_resource *r;
struct fw_address_region region;
int ret;
@@ -694,13 +709,13 @@ static int ioctl_allocate(struct client *client, void *buffer)
if (r == NULL)
return -ENOMEM;
- region.start = request->offset;
- region.end = request->offset + request->length;
- r->handler.length = request->length;
+ region.start = a->offset;
+ region.end = a->offset + a->length;
+ r->handler.length = a->length;
r->handler.address_callback = handle_request;
- r->handler.callback_data = r;
- r->closure = request->closure;
- r->client = client;
+ r->handler.callback_data = r;
+ r->closure = a->closure;
+ r->client = client;
ret = fw_core_add_address_handler(&r->handler, &region);
if (ret < 0) {
@@ -714,27 +729,25 @@ static int ioctl_allocate(struct client *client, void *buffer)
release_address_handler(client, &r->resource);
return ret;
}
- request->handle = r->resource.handle;
+ a->handle = r->resource.handle;
return 0;
}
-static int ioctl_deallocate(struct client *client, void *buffer)
+static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_deallocate *request = buffer;
-
- return release_client_resource(client, request->handle,
+ return release_client_resource(client, arg->deallocate.handle,
release_address_handler, NULL);
}
-static int ioctl_send_response(struct client *client, void *buffer)
+static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_send_response *request = buffer;
+ struct fw_cdev_send_response *a = &arg->send_response;
struct client_resource *resource;
struct inbound_transaction_resource *r;
int ret = 0;
- if (release_client_resource(client, request->handle,
+ if (release_client_resource(client, a->handle,
release_request, &resource) < 0)
return -EINVAL;
@@ -743,28 +756,24 @@ static int ioctl_send_response(struct client *client, void *buffer)
if (is_fcp_request(r->request))
goto out;
- if (request->length < r->length)
- r->length = request->length;
- if (copy_from_user(r->data, u64_to_uptr(request->data), r->length)) {
+ if (a->length < r->length)
+ r->length = a->length;
+ if (copy_from_user(r->data, u64_to_uptr(a->data), r->length)) {
ret = -EFAULT;
kfree(r->request);
goto out;
}
- fw_send_response(client->device->card, r->request, request->rcode);
+ fw_send_response(client->device->card, r->request, a->rcode);
out:
kfree(r);
return ret;
}
-static int ioctl_initiate_bus_reset(struct client *client, void *buffer)
+static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_initiate_bus_reset *request = buffer;
- int short_reset;
-
- short_reset = (request->type == FW_CDEV_SHORT_RESET);
-
- return fw_core_initiate_bus_reset(client->device->card, short_reset);
+ return fw_core_initiate_bus_reset(client->device->card,
+ arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
}
static void release_descriptor(struct client *client,
@@ -777,9 +786,9 @@ static void release_descriptor(struct client *client,
kfree(r);
}
-static int ioctl_add_descriptor(struct client *client, void *buffer)
+static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_add_descriptor *request = buffer;
+ struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
struct descriptor_resource *r;
int ret;
@@ -787,22 +796,21 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
if (!client->device->is_local)
return -ENOSYS;
- if (request->length > 256)
+ if (a->length > 256)
return -EINVAL;
- r = kmalloc(sizeof(*r) + request->length * 4, GFP_KERNEL);
+ r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
if (r == NULL)
return -ENOMEM;
- if (copy_from_user(r->data,
- u64_to_uptr(request->data), request->length * 4)) {
+ if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
ret = -EFAULT;
goto failed;
}
- r->descriptor.length = request->length;
- r->descriptor.immediate = request->immediate;
- r->descriptor.key = request->key;
+ r->descriptor.length = a->length;
+ r->descriptor.immediate = a->immediate;
+ r->descriptor.key = a->key;
r->descriptor.data = r->data;
ret = fw_core_add_descriptor(&r->descriptor);
@@ -815,7 +823,7 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
fw_core_remove_descriptor(&r->descriptor);
goto failed;
}
- request->handle = r->resource.handle;
+ a->handle = r->resource.handle;
return 0;
failed:
@@ -824,11 +832,9 @@ static int ioctl_add_descriptor(struct client *client, void *buffer)
return ret;
}
-static int ioctl_remove_descriptor(struct client *client, void *buffer)
+static int ioctl_remove_descriptor(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_remove_descriptor *request = buffer;
-
- return release_client_resource(client, request->handle,
+ return release_client_resource(client, arg->remove_descriptor.handle,
release_descriptor, NULL);
}
@@ -851,49 +857,44 @@ static void iso_callback(struct fw_iso_context *context, u32 cycle,
sizeof(e->interrupt) + header_length, NULL, 0);
}
-static int ioctl_create_iso_context(struct client *client, void *buffer)
+static int ioctl_create_iso_context(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_create_iso_context *request = buffer;
+ struct fw_cdev_create_iso_context *a = &arg->create_iso_context;
struct fw_iso_context *context;
/* We only support one context at this time. */
if (client->iso_context != NULL)
return -EBUSY;
- if (request->channel > 63)
+ if (a->channel > 63)
return -EINVAL;
- switch (request->type) {
+ switch (a->type) {
case FW_ISO_CONTEXT_RECEIVE:
- if (request->header_size < 4 || (request->header_size & 3))
+ if (a->header_size < 4 || (a->header_size & 3))
return -EINVAL;
-
break;
case FW_ISO_CONTEXT_TRANSMIT:
- if (request->speed > SCODE_3200)
+ if (a->speed > SCODE_3200)
return -EINVAL;
-
break;
default:
return -EINVAL;
}
- context = fw_iso_context_create(client->device->card,
- request->type,
- request->channel,
- request->speed,
- request->header_size,
- iso_callback, client);
+ context = fw_iso_context_create(client->device->card, a->type,
+ a->channel, a->speed, a->header_size,
+ iso_callback, client);
if (IS_ERR(context))
return PTR_ERR(context);
- client->iso_closure = request->closure;
+ client->iso_closure = a->closure;
client->iso_context = context;
/* We only support one context at this time. */
- request->handle = 0;
+ a->handle = 0;
return 0;
}
@@ -906,9 +907,9 @@ static int ioctl_create_iso_context(struct client *client, void *buffer)
#define GET_SY(v) (((v) >> 20) & 0x0f)
#define GET_HEADER_LENGTH(v) (((v) >> 24) & 0xff)
-static int ioctl_queue_iso(struct client *client, void *buffer)
+static int ioctl_queue_iso(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_queue_iso *request = buffer;
+ struct fw_cdev_queue_iso *a = &arg->queue_iso;
struct fw_cdev_iso_packet __user *p, *end, *next;
struct fw_iso_context *ctx = client->iso_context;
unsigned long payload, buffer_end, header_length;
@@ -919,7 +920,7 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
u8 header[256];
} u;
- if (ctx == NULL || request->handle != 0)
+ if (ctx == NULL || a->handle != 0)
return -EINVAL;
/*
@@ -929,23 +930,23 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
* set them both to 0, which will still let packets with
* payload_length == 0 through. In other words, if no packets
* use the indirect payload, the iso buffer need not be mapped
- * and the request->data pointer is ignored.
+ * and the a->data pointer is ignored.
*/
- payload = (unsigned long)request->data - client->vm_start;
+ payload = (unsigned long)a->data - client->vm_start;
buffer_end = client->buffer.page_count << PAGE_SHIFT;
- if (request->data == 0 || client->buffer.pages == NULL ||
+ if (a->data == 0 || client->buffer.pages == NULL ||
payload >= buffer_end) {
payload = 0;
buffer_end = 0;
}
- p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(request->packets);
+ p = (struct fw_cdev_iso_packet __user *)u64_to_uptr(a->packets);
- if (!access_ok(VERIFY_READ, p, request->size))
+ if (!access_ok(VERIFY_READ, p, a->size))
return -EFAULT;
- end = (void __user *)p + request->size;
+ end = (void __user *)p + a->size;
count = 0;
while (p < end) {
if (get_user(control, &p->control))
@@ -995,61 +996,78 @@ static int ioctl_queue_iso(struct client *client, void *buffer)
count++;
}
- request->size -= uptr_to_u64(p) - request->packets;
- request->packets = uptr_to_u64(p);
- request->data = client->vm_start + payload;
+ a->size -= uptr_to_u64(p) - a->packets;
+ a->packets = uptr_to_u64(p);
+ a->data = client->vm_start + payload;
return count;
}
-static int ioctl_start_iso(struct client *client, void *buffer)
+static int ioctl_start_iso(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_start_iso *request = buffer;
+ struct fw_cdev_start_iso *a = &arg->start_iso;
- if (client->iso_context == NULL || request->handle != 0)
+ if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
- if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE) {
- if (request->tags == 0 || request->tags > 15)
- return -EINVAL;
-
- if (request->sync > 15)
- return -EINVAL;
- }
+ if (client->iso_context->type == FW_ISO_CONTEXT_RECEIVE &&
+ (a->tags == 0 || a->tags > 15 || a->sync > 15))
+ return -EINVAL;
- return fw_iso_context_start(client->iso_context, request->cycle,
- request->sync, request->tags);
+ return fw_iso_context_start(client->iso_context,
+ a->cycle, a->sync, a->tags);
}
-static int ioctl_stop_iso(struct client *client, void *buffer)
+static int ioctl_stop_iso(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_stop_iso *request = buffer;
+ struct fw_cdev_stop_iso *a = &arg->stop_iso;
- if (client->iso_context == NULL || request->handle != 0)
+ if (client->iso_context == NULL || a->handle != 0)
return -EINVAL;
return fw_iso_context_stop(client->iso_context);
}
-static int ioctl_get_cycle_timer(struct client *client, void *buffer)
+static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_get_cycle_timer *request = buffer;
+ struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2;
struct fw_card *card = client->device->card;
- unsigned long long bus_time;
- struct timeval tv;
- unsigned long flags;
+ struct timespec ts = {0, 0};
+ u32 cycle_time;
+ int ret = 0;
+
+ local_irq_disable();
+
+ cycle_time = card->driver->get_cycle_time(card);
- preempt_disable();
- local_irq_save(flags);
+ switch (a->clk_id) {
+ case CLOCK_REALTIME: getnstimeofday(&ts); break;
+ case CLOCK_MONOTONIC: do_posix_clock_monotonic_gettime(&ts); break;
+ case CLOCK_MONOTONIC_RAW: getrawmonotonic(&ts); break;
+ default:
+ ret = -EINVAL;
+ }
- bus_time = card->driver->get_bus_time(card);
- do_gettimeofday(&tv);
+ local_irq_enable();
- local_irq_restore(flags);
- preempt_enable();
+ a->tv_sec = ts.tv_sec;
+ a->tv_nsec = ts.tv_nsec;
+ a->cycle_timer = cycle_time;
+
+ return ret;
+}
+
+static int ioctl_get_cycle_timer(struct client *client, union ioctl_arg *arg)
+{
+ struct fw_cdev_get_cycle_timer *a = &arg->get_cycle_timer;
+ struct fw_cdev_get_cycle_timer2 ct2;
+
+ ct2.clk_id = CLOCK_REALTIME;
+ ioctl_get_cycle_timer2(client, (union ioctl_arg *)&ct2);
+
+ a->local_time = ct2.tv_sec * USEC_PER_SEC + ct2.tv_nsec / NSEC_PER_USEC;
+ a->cycle_timer = ct2.cycle_timer;
- request->local_time = tv.tv_sec * 1000000ULL + tv.tv_usec;
- request->cycle_timer = bus_time & 0xffffffff;
return 0;
}
@@ -1220,33 +1238,32 @@ static int init_iso_resource(struct client *client,
return ret;
}
-static int ioctl_allocate_iso_resource(struct client *client, void *buffer)
+static int ioctl_allocate_iso_resource(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_allocate_iso_resource *request = buffer;
-
- return init_iso_resource(client, request, ISO_RES_ALLOC);
+ return init_iso_resource(client,
+ &arg->allocate_iso_resource, ISO_RES_ALLOC);
}
-static int ioctl_deallocate_iso_resource(struct client *client, void *buffer)
+static int ioctl_deallocate_iso_resource(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_deallocate *request = buffer;
-
- return release_client_resource(client, request->handle,
- release_iso_resource, NULL);
+ return release_client_resource(client,
+ arg->deallocate.handle, release_iso_resource, NULL);
}
-static int ioctl_allocate_iso_resource_once(struct client *client, void *buffer)
+static int ioctl_allocate_iso_resource_once(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_allocate_iso_resource *request = buffer;
-
- return init_iso_resource(client, request, ISO_RES_ALLOC_ONCE);
+ return init_iso_resource(client,
+ &arg->allocate_iso_resource, ISO_RES_ALLOC_ONCE);
}
-static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffer)
+static int ioctl_deallocate_iso_resource_once(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_allocate_iso_resource *request = buffer;
-
- return init_iso_resource(client, request, ISO_RES_DEALLOC_ONCE);
+ return init_iso_resource(client,
+ &arg->allocate_iso_resource, ISO_RES_DEALLOC_ONCE);
}
/*
@@ -1254,16 +1271,17 @@ static int ioctl_deallocate_iso_resource_once(struct client *client, void *buffe
* limited by the device's link speed, the local node's link speed,
* and all PHY port speeds between the two links.
*/
-static int ioctl_get_speed(struct client *client, void *buffer)
+static int ioctl_get_speed(struct client *client, union ioctl_arg *arg)
{
return client->device->max_speed;
}
-static int ioctl_send_broadcast_request(struct client *client, void *buffer)
+static int ioctl_send_broadcast_request(struct client *client,
+ union ioctl_arg *arg)
{
- struct fw_cdev_send_request *request = buffer;
+ struct fw_cdev_send_request *a = &arg->send_request;
- switch (request->tcode) {
+ switch (a->tcode) {
case TCODE_WRITE_QUADLET_REQUEST:
case TCODE_WRITE_BLOCK_REQUEST:
break;
@@ -1272,36 +1290,36 @@ static int ioctl_send_broadcast_request(struct client *client, void *buffer)
}
/* Security policy: Only allow accesses to Units Space. */
- if (request->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
+ if (a->offset < CSR_REGISTER_BASE + CSR_CONFIG_ROM_END)
return -EACCES;
- return init_request(client, request, LOCAL_BUS | 0x3f, SCODE_100);
+ return init_request(client, a, LOCAL_BUS | 0x3f, SCODE_100);
}
-static int ioctl_send_stream_packet(struct client *client, void *buffer)
+static int ioctl_send_stream_packet(struct client *client, union ioctl_arg *arg)
{
- struct fw_cdev_send_stream_packet *p = buffer;
+ struct fw_cdev_send_stream_packet *a = &arg->send_stream_packet;
struct fw_cdev_send_request request;
int dest;
- if (p->speed > client->device->card->link_speed ||
- p->length > 1024 << p->speed)
+ if (a->speed > client->device->card->link_speed ||
+ a->length > 1024 << a->speed)
return -EIO;
- if (p->tag > 3 || p->channel > 63 || p->sy > 15)
+ if (a->tag > 3 || a->channel > 63 || a->sy > 15)
return -EINVAL;
- dest = fw_stream_packet_destination_id(p->tag, p->channel, p->sy);
+ dest = fw_stream_packet_destination_id(a->tag, a->channel, a->sy);
request.tcode = TCODE_STREAM_DATA;
- request.length = p->length;
- request.closure = p->closure;
- request.data = p->data;
- request.generation = p->generation;
+ request.length = a->length;
+ request.closure = a->closure;
+ request.data = a->data;
+ request.generation = a->generation;
- return init_request(client, &request, dest, p->speed);
+ return init_request(client, &request, dest, a->speed);
}
-static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
+static int (* const ioctl_handlers[])(struct client *, union ioctl_arg *) = {
ioctl_get_info,
ioctl_send_request,
ioctl_allocate,
@@ -1322,47 +1340,35 @@ static int (* const ioctl_handlers[])(struct client *client, void *buffer) = {
ioctl_get_speed,
ioctl_send_broadcast_request,
ioctl_send_stream_packet,
+ ioctl_get_cycle_timer2,
};
static int dispatch_ioctl(struct client *client,
unsigned int cmd, void __user *arg)
{
- char buffer[sizeof(union {
- struct fw_cdev_get_info _00;
- struct fw_cdev_send_request _01;
- struct fw_cdev_allocate _02;
- struct fw_cdev_deallocate _03;
- struct fw_cdev_send_response _04;
- struct fw_cdev_initiate_bus_reset _05;
- struct fw_cdev_add_descriptor _06;
- struct fw_cdev_remove_descriptor _07;
- struct fw_cdev_create_iso_context _08;
- struct fw_cdev_queue_iso _09;
- struct fw_cdev_start_iso _0a;
- struct fw_cdev_stop_iso _0b;
- struct fw_cdev_get_cycle_timer _0c;
- struct fw_cdev_allocate_iso_resource _0d;
- struct fw_cdev_send_stream_packet _13;
- })];
+ union ioctl_arg buffer;
int ret;
+ if (fw_device_is_shutdown(client->device))
+ return -ENODEV;
+
if (_IOC_TYPE(cmd) != '#' ||
_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_handlers))
return -EINVAL;
if (_IOC_DIR(cmd) & _IOC_WRITE) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_from_user(buffer, arg, _IOC_SIZE(cmd)))
+ copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
return -EFAULT;
}
- ret = ioctl_handlers[_IOC_NR(cmd)](client, buffer);
+ ret = ioctl_handlers[_IOC_NR(cmd)](client, &buffer);
if (ret < 0)
return ret;
if (_IOC_DIR(cmd) & _IOC_READ) {
if (_IOC_SIZE(cmd) > sizeof(buffer) ||
- copy_to_user(arg, buffer, _IOC_SIZE(cmd)))
+ copy_to_user(arg, &buffer, _IOC_SIZE(cmd)))
return -EFAULT;
}
@@ -1372,24 +1378,14 @@ static int dispatch_ioctl(struct client *client,
static long fw_device_op_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct client *client = file->private_data;
-
- if (fw_device_is_shutdown(client->device))
- return -ENODEV;
-
- return dispatch_ioctl(client, cmd, (void __user *) arg);
+ return dispatch_ioctl(file->private_data, cmd, (void __user *)arg);
}
#ifdef CONFIG_COMPAT
static long fw_device_op_compat_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
- struct client *client = file->private_data;
-
- if (fw_device_is_shutdown(client->device))
- return -ENODEV;
-
- return dispatch_ioctl(client, cmd, compat_ptr(arg));
+ return dispatch_ioctl(file->private_data, cmd, compat_ptr(arg));
}
#endif
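For reference, a hedged userspace sketch of the path this dispatcher serves, using the existing FW_CDEV_IOC_GET_INFO ioctl; the device node name is illustrative and error handling is minimal:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/firewire-cdev.h>

	int main(void)
	{
		struct fw_cdev_get_info info = { .version = FW_CDEV_VERSION };
		int fd = open("/dev/fw0", O_RDWR);	/* node name may differ */

		if (fd < 0 || ioctl(fd, FW_CDEV_IOC_GET_INFO, &info) < 0)
			return 1;

		/* kernel side: dispatch_ioctl() copies the argument into a
		 * union ioctl_arg, calls ioctl_get_info(), then copies back */
		printf("ABI version %u, card %u\n", info.version, info.card);
		return 0;
	}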
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 9d0dfcbe2c1..014cabd3afd 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -18,6 +18,7 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
+#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -43,7 +44,7 @@
#include "core.h"
-void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
+void fw_csr_iterator_init(struct fw_csr_iterator *ci, const u32 *p)
{
ci->p = p + 1;
ci->end = ci->p + (p[0] >> 16);
@@ -59,9 +60,76 @@ int fw_csr_iterator_next(struct fw_csr_iterator *ci, int *key, int *value)
}
EXPORT_SYMBOL(fw_csr_iterator_next);
+static const u32 *search_leaf(const u32 *directory, int search_key)
+{
+ struct fw_csr_iterator ci;
+ int last_key = 0, key, value;
+
+ fw_csr_iterator_init(&ci, directory);
+ while (fw_csr_iterator_next(&ci, &key, &value)) {
+ if (last_key == search_key &&
+ key == (CSR_DESCRIPTOR | CSR_LEAF))
+ return ci.p - 1 + value;
+
+ last_key = key;
+ }
+
+ return NULL;
+}
+
+static int textual_leaf_to_string(const u32 *block, char *buf, size_t size)
+{
+ unsigned int quadlets, i;
+ char c;
+
+ if (!size || !buf)
+ return -EINVAL;
+
+ quadlets = min(block[0] >> 16, 256U);
+ if (quadlets < 2)
+ return -ENODATA;
+
+ if (block[1] != 0 || block[2] != 0)
+ /* unknown language/character set */
+ return -ENODATA;
+
+ block += 3;
+ quadlets -= 2;
+ for (i = 0; i < quadlets * 4 && i < size - 1; i++) {
+ c = block[i / 4] >> (24 - 8 * (i % 4));
+ if (c == '\0')
+ break;
+ buf[i] = c;
+ }
+ buf[i] = '\0';
+
+ return i;
+}
+
+/**
+ * fw_csr_string - reads a string from the configuration ROM
+ * @directory: e.g. root directory or unit directory
+ * @key: the key of the preceding directory entry
+ * @buf: where to put the string
+ * @size: size of @buf, in bytes
+ *
+ * The string is taken from a minimal ASCII text descriptor leaf after
+ * the immediate entry with @key. The string is zero-terminated.
+ * Returns strlen(buf) or a negative error code.
+ */
+int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
+{
+ const u32 *leaf = search_leaf(directory, key);
+ if (!leaf)
+ return -ENOENT;
+
+ return textual_leaf_to_string(leaf, buf, size);
+}
+EXPORT_SYMBOL(fw_csr_string);
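A minimal usage sketch of the new fw_csr_string() export (illustrative only, not part of this patch; the unit-directory pointer and the CSR_MODEL key are assumptions):

	static void example_show_model_name(const u32 *unit_directory)
	{
		char name[64];
		int len;

		/* Find the textual descriptor leaf following the CSR_MODEL entry. */
		len = fw_csr_string(unit_directory, CSR_MODEL, name, sizeof(name));
		if (len >= 0)
			pr_info("model name: %s\n", name);
	}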
+
static bool is_fw_unit(struct device *dev);
-static int match_unit_directory(u32 *directory, u32 match_flags,
+static int match_unit_directory(const u32 *directory, u32 match_flags,
const struct ieee1394_device_id *id)
{
struct fw_csr_iterator ci;
@@ -195,7 +263,7 @@ static ssize_t show_immediate(struct device *dev,
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
struct fw_csr_iterator ci;
- u32 *dir;
+ const u32 *dir;
int key, value, ret = -ENOENT;
down_read(&fw_device_rwsem);
@@ -226,10 +294,10 @@ static ssize_t show_text_leaf(struct device *dev,
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
- struct fw_csr_iterator ci;
- u32 *dir, *block = NULL, *p, *end;
- int length, key, value, last_key = 0, ret = -ENOENT;
- char *b;
+ const u32 *dir;
+ size_t bufsize;
+ char dummy_buf[2];
+ int ret;
down_read(&fw_device_rwsem);
@@ -238,40 +306,23 @@ static ssize_t show_text_leaf(struct device *dev,
else
dir = fw_device(dev)->config_rom + 5;
- fw_csr_iterator_init(&ci, dir);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (attr->key == last_key &&
- key == (CSR_DESCRIPTOR | CSR_LEAF))
- block = ci.p - 1 + value;
- last_key = key;
+ if (buf) {
+ bufsize = PAGE_SIZE - 1;
+ } else {
+ buf = dummy_buf;
+ bufsize = 1;
}
- if (block == NULL)
- goto out;
-
- length = min(block[0] >> 16, 256U);
- if (length < 3)
- goto out;
-
- if (block[1] != 0 || block[2] != 0)
- /* Unknown encoding. */
- goto out;
+ ret = fw_csr_string(dir, attr->key, buf, bufsize);
- if (buf == NULL) {
- ret = length * 4;
- goto out;
+ if (ret >= 0) {
+ /* Strip trailing whitespace and add newline. */
+ while (ret > 0 && isspace(buf[ret - 1]))
+ ret--;
+ strcpy(buf + ret, "\n");
+ ret++;
}
- b = buf;
- end = &block[length + 1];
- for (p = &block[3]; p < end; p++, b += 4)
- * (u32 *) b = (__force u32) __cpu_to_be32(*p);
-
- /* Strip trailing whitespace and add newline. */
- while (b--, (isspace(*b) || *b == '\0') && b > buf);
- strcpy(b + 1, "\n");
- ret = b + 2 - buf;
- out:
up_read(&fw_device_rwsem);
return ret;
@@ -371,7 +422,7 @@ static ssize_t guid_show(struct device *dev,
return ret;
}
-static int units_sprintf(char *buf, u32 *directory)
+static int units_sprintf(char *buf, const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@@ -441,28 +492,29 @@ static int read_rom(struct fw_device *device,
return rcode;
}
-#define READ_BIB_ROM_SIZE 256
-#define READ_BIB_STACK_SIZE 16
+#define MAX_CONFIG_ROM_SIZE 256
/*
* Read the bus info block, perform a speed probe, and read all of the rest of
* the config ROM. We do all this with a cached bus generation. If the bus
- * generation changes under us, read_bus_info_block will fail and get retried.
+ * generation changes under us, read_config_rom will fail and get retried.
* It's better to start all over in this case because the node from which we
* are reading the ROM may have changed the ROM during the reset.
*/
-static int read_bus_info_block(struct fw_device *device, int generation)
+static int read_config_rom(struct fw_device *device, int generation)
{
- u32 *rom, *stack, *old_rom, *new_rom;
+ const u32 *old_rom, *new_rom;
+ u32 *rom, *stack;
u32 sp, key;
int i, end, length, ret = -1;
- rom = kmalloc(sizeof(*rom) * READ_BIB_ROM_SIZE +
- sizeof(*stack) * READ_BIB_STACK_SIZE, GFP_KERNEL);
+ rom = kmalloc(sizeof(*rom) * MAX_CONFIG_ROM_SIZE +
+ sizeof(*stack) * MAX_CONFIG_ROM_SIZE, GFP_KERNEL);
if (rom == NULL)
return -ENOMEM;
- stack = &rom[READ_BIB_ROM_SIZE];
+ stack = &rom[MAX_CONFIG_ROM_SIZE];
+ memset(rom, 0, sizeof(*rom) * MAX_CONFIG_ROM_SIZE);
device->max_speed = SCODE_100;
@@ -529,40 +581,54 @@ static int read_bus_info_block(struct fw_device *device, int generation)
*/
key = stack[--sp];
i = key & 0xffffff;
- if (i >= READ_BIB_ROM_SIZE)
- /*
- * The reference points outside the standard
- * config rom area, something's fishy.
- */
+ if (WARN_ON(i >= MAX_CONFIG_ROM_SIZE))
goto out;
/* Read header quadlet for the block to get the length. */
if (read_rom(device, generation, i, &rom[i]) != RCODE_COMPLETE)
goto out;
end = i + (rom[i] >> 16) + 1;
- i++;
- if (end > READ_BIB_ROM_SIZE)
+ if (end > MAX_CONFIG_ROM_SIZE) {
/*
- * This block extends outside standard config
- * area (and the array we're reading it
- * into). That's broken, so ignore this
- * device.
+ * This block extends outside the config ROM which is
+ * a firmware bug. Ignore this whole block, i.e.
+ * simply set a fake block length of 0.
*/
- goto out;
+ fw_error("skipped invalid ROM block %x at %llx\n",
+ rom[i],
+ i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
+ rom[i] = 0;
+ end = i;
+ }
+ i++;
/*
* Now read in the block. If this is a directory
* block, check the entries as we read them to see if
* it references another block, and push it in that case.
*/
- while (i < end) {
+ for (; i < end; i++) {
if (read_rom(device, generation, i, &rom[i]) !=
RCODE_COMPLETE)
goto out;
- if ((key >> 30) == 3 && (rom[i] >> 30) > 1 &&
- sp < READ_BIB_STACK_SIZE)
- stack[sp++] = i + rom[i];
- i++;
+
+ if ((key >> 30) != 3 || (rom[i] >> 30) < 2)
+ continue;
+ /*
+ * Offset points outside the ROM. May be a firmware
+ * bug or an Extended ROM entry (IEEE 1212-2001 clause
+ * 7.7.18). Simply overwrite this pointer here by a
+ * fake immediate entry so that later iterators over
+ * the ROM don't have to check offsets all the time.
+ */
+ if (i + (rom[i] & 0xffffff) >= MAX_CONFIG_ROM_SIZE) {
+ fw_error("skipped unsupported ROM entry %x at %llx\n",
+ rom[i],
+ i * 4 | CSR_REGISTER_BASE | CSR_CONFIG_ROM);
+ rom[i] = 0;
+ continue;
+ }
+ stack[sp++] = i + rom[i];
}
if (length < i)
length = i;
@@ -905,7 +971,7 @@ static void fw_device_init(struct work_struct *work)
* device.
*/
- if (read_bus_info_block(device, device->generation) < 0) {
+ if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
@@ -1022,7 +1088,7 @@ enum {
};
/* Reread and compare bus info block and header of root directory */
-static int reread_bus_info_block(struct fw_device *device, int generation)
+static int reread_config_rom(struct fw_device *device, int generation)
{
u32 q;
int i;
@@ -1048,7 +1114,7 @@ static void fw_device_refresh(struct work_struct *work)
struct fw_card *card = device->card;
int node_id = device->node_id;
- switch (reread_bus_info_block(device, device->generation)) {
+ switch (reread_config_rom(device, device->generation)) {
case REREAD_BIB_ERROR:
if (device->config_rom_retries < MAX_RETRIES / 2 &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
@@ -1082,7 +1148,7 @@ static void fw_device_refresh(struct work_struct *work)
*/
device_for_each_child(&device->device, NULL, shutdown_unit);
- if (read_bus_info_block(device, device->generation) < 0) {
+ if (read_config_rom(device, device->generation) < 0) {
if (device->config_rom_retries < MAX_RETRIES &&
atomic_read(&device->state) == FW_DEVICE_INITIALIZING) {
device->config_rom_retries++;
diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
index 495849eb13c..673b03f8b4e 100644
--- a/drivers/firewire/core-transaction.c
+++ b/drivers/firewire/core-transaction.c
@@ -921,23 +921,15 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
void *payload, size_t length, void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
- unsigned long long bus_time;
__be32 *data = payload;
int rcode = RCODE_COMPLETE;
switch (reg) {
case CSR_CYCLE_TIME:
- case CSR_BUS_TIME:
- if (!TCODE_IS_READ_REQUEST(tcode) || length != 4) {
- rcode = RCODE_TYPE_ERROR;
- break;
- }
-
- bus_time = card->driver->get_bus_time(card);
- if (reg == CSR_CYCLE_TIME)
- *data = cpu_to_be32(bus_time);
+ if (TCODE_IS_READ_REQUEST(tcode) && length == 4)
+ *data = cpu_to_be32(card->driver->get_cycle_time(card));
else
- *data = cpu_to_be32(bus_time >> 25);
+ rcode = RCODE_TYPE_ERROR;
break;
case CSR_BROADCAST_CHANNEL:
@@ -968,6 +960,9 @@ static void handle_registers(struct fw_card *card, struct fw_request *request,
case CSR_BUSY_TIMEOUT:
/* FIXME: Implement this. */
+ case CSR_BUS_TIME:
+ /* Useless without initialization by the bus manager. */
+
default:
rcode = RCODE_ADDRESS_ERROR;
break;
diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
index ed3b1a765c0..fb0321300cc 100644
--- a/drivers/firewire/core.h
+++ b/drivers/firewire/core.h
@@ -70,7 +70,7 @@ struct fw_card_driver {
int (*enable_phys_dma)(struct fw_card *card,
int node_id, int generation);
- u64 (*get_bus_time)(struct fw_card *card);
+ u32 (*get_cycle_time)(struct fw_card *card);
struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card,
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 43ebf337b13..75dc6988cff 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -38,7 +38,6 @@
#include <linux/spinlock.h>
#include <linux/string.h>
-#include <asm/atomic.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <asm/system.h>
@@ -73,20 +72,6 @@ struct descriptor {
__le16 transfer_status;
} __attribute__((aligned(16)));
-struct db_descriptor {
- __le16 first_size;
- __le16 control;
- __le16 second_req_count;
- __le16 first_req_count;
- __le32 branch_address;
- __le16 second_res_count;
- __le16 first_res_count;
- __le32 reserved0;
- __le32 first_buffer;
- __le32 second_buffer;
- __le32 reserved1;
-} __attribute__((aligned(16)));
-
#define CONTROL_SET(regs) (regs)
#define CONTROL_CLEAR(regs) ((regs) + 4)
#define COMMAND_PTR(regs) ((regs) + 12)
@@ -181,31 +166,16 @@ struct fw_ohci {
struct fw_card card;
__iomem char *registers;
- dma_addr_t self_id_bus;
- __le32 *self_id_cpu;
- struct tasklet_struct bus_reset_tasklet;
int node_id;
int generation;
int request_generation; /* for timestamping incoming requests */
- atomic_t bus_seconds;
-
- bool use_dualbuffer;
- bool old_uninorth;
- bool bus_reset_packet_quirk;
+ unsigned quirks;
/*
* Spinlock for accessing fw_ohci data. Never call out of
* this driver with this lock held.
*/
spinlock_t lock;
- u32 self_id_buffer[512];
-
- /* Config rom buffers */
- __be32 *config_rom;
- dma_addr_t config_rom_bus;
- __be32 *next_config_rom;
- dma_addr_t next_config_rom_bus;
- __be32 next_header;
struct ar_context ar_request_ctx;
struct ar_context ar_response_ctx;
@@ -217,6 +187,18 @@ struct fw_ohci {
u64 ir_context_channels;
u32 ir_context_mask;
struct iso_context *ir_context_list;
+
+ __be32 *config_rom;
+ dma_addr_t config_rom_bus;
+ __be32 *next_config_rom;
+ dma_addr_t next_config_rom_bus;
+ __be32 next_header;
+
+ __le32 *self_id_cpu;
+ dma_addr_t self_id_bus;
+ struct tasklet_struct bus_reset_tasklet;
+
+ u32 self_id_buffer[512];
};
static inline struct fw_ohci *fw_ohci(struct fw_card *card)
@@ -249,6 +231,30 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
+#define QUIRK_CYCLE_TIMER 1
+#define QUIRK_RESET_PACKET 2
+#define QUIRK_BE_HEADERS 4
+
+/* In case of multiple matches in ohci_quirks[], only the first one is used. */
+static const struct {
+ unsigned short vendor, device, flags;
+} ohci_quirks[] = {
+ {PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
+ {PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_VIA, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
+ {PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, QUIRK_BE_HEADERS},
+};
+
+/* This overrides anything that was found in ohci_quirks[]. */
+static int param_quirks;
+module_param_named(quirks, param_quirks, int, 0644);
+MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
+ ", nonatomic cycle timer = " __stringify(QUIRK_CYCLE_TIMER)
+ ", reset packet generation = " __stringify(QUIRK_RESET_PACKET)
+ ", AR/selfID endianess = " __stringify(QUIRK_BE_HEADERS)
+ ")");
+
#ifdef CONFIG_FIREWIRE_OHCI_DEBUG
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -275,7 +281,7 @@ static void log_irqs(u32 evt)
!(evt & OHCI1394_busReset))
return;
- fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+ fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
evt & OHCI1394_selfIDComplete ? " selfID" : "",
evt & OHCI1394_RQPkt ? " AR_req" : "",
evt & OHCI1394_RSPkt ? " AR_resp" : "",
@@ -285,7 +291,6 @@ static void log_irqs(u32 evt)
evt & OHCI1394_isochTx ? " IT" : "",
evt & OHCI1394_postedWriteErr ? " postedWriteErr" : "",
evt & OHCI1394_cycleTooLong ? " cycleTooLong" : "",
- evt & OHCI1394_cycle64Seconds ? " cycle64Seconds" : "",
evt & OHCI1394_cycleInconsistent ? " cycleInconsistent" : "",
evt & OHCI1394_regAccessFail ? " regAccessFail" : "",
evt & OHCI1394_busReset ? " busReset" : "",
@@ -293,8 +298,7 @@ static void log_irqs(u32 evt)
OHCI1394_RSPkt | OHCI1394_reqTxComplete |
OHCI1394_respTxComplete | OHCI1394_isochRx |
OHCI1394_isochTx | OHCI1394_postedWriteErr |
- OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
- OHCI1394_cycleInconsistent |
+ OHCI1394_cycleTooLong | OHCI1394_cycleInconsistent |
OHCI1394_regAccessFail | OHCI1394_busReset)
? " ?" : "");
}
@@ -524,7 +528,7 @@ static void ar_context_release(struct ar_context *ctx)
#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
- (ohci->old_uninorth ? (__force __u32)(v) : le32_to_cpu(v))
+ (ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif
@@ -605,7 +609,7 @@ static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
* at a slightly incorrect time (in bus_reset_tasklet).
*/
if (evt == OHCI1394_evt_bus_reset) {
- if (!ohci->bus_reset_packet_quirk)
+ if (!(ohci->quirks & QUIRK_RESET_PACKET))
ohci->request_generation = (p.header[2] >> 16) & 0xff;
} else if (ctx == &ohci->ar_request_ctx) {
fw_core_handle_request(&ohci->card, &p);
@@ -1329,7 +1333,7 @@ static void bus_reset_tasklet(unsigned long data)
context_stop(&ohci->at_response_ctx);
reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);
- if (ohci->bus_reset_packet_quirk)
+ if (ohci->quirks & QUIRK_RESET_PACKET)
ohci->request_generation = generation;
/*
@@ -1384,7 +1388,7 @@ static void bus_reset_tasklet(unsigned long data)
static irqreturn_t irq_handler(int irq, void *data)
{
struct fw_ohci *ohci = data;
- u32 event, iso_event, cycle_time;
+ u32 event, iso_event;
int i;
event = reg_read(ohci, OHCI1394_IntEventClear);
@@ -1454,12 +1458,6 @@ static irqreturn_t irq_handler(int irq, void *data)
fw_notify("isochronous cycle inconsistent\n");
}
- if (event & OHCI1394_cycle64Seconds) {
- cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- if ((cycle_time & 0x80000000) == 0)
- atomic_inc(&ohci->bus_seconds);
- }
-
return IRQ_HANDLED;
}
@@ -1553,8 +1551,7 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
OHCI1394_isochRx | OHCI1394_isochTx |
OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
- OHCI1394_cycleInconsistent |
- OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
+ OHCI1394_cycleInconsistent | OHCI1394_regAccessFail |
OHCI1394_masterIntEnable);
if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_busReset);
@@ -1794,16 +1791,61 @@ static int ohci_enable_phys_dma(struct fw_card *card,
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
-static u64 ohci_get_bus_time(struct fw_card *card)
+static u32 cycle_timer_ticks(u32 cycle_timer)
{
- struct fw_ohci *ohci = fw_ohci(card);
- u32 cycle_time;
- u64 bus_time;
+ u32 ticks;
- cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
- bus_time = ((u64)atomic_read(&ohci->bus_seconds) << 32) | cycle_time;
+ ticks = cycle_timer & 0xfff;
+ ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
+ ticks += (3072 * 8000) * (cycle_timer >> 25);
+
+ return ticks;
+}
+
+/*
+ * Some controllers exhibit one or more of the following bugs when updating the
+ * iso cycle timer register:
+ * - When the lowest six bits are wrapping around to zero, a read that happens
+ * at the same time will return garbage in the lowest ten bits.
+ * - When the cycleOffset field wraps around to zero, the cycleCount field is
+ * not incremented for about 60 ns.
+ * - Occasionally, the entire register reads zero.
+ *
+ * To catch these, we read the register three times and ensure that the
+ * differences between consecutive reads are approximately equal, i.e. each is
+ * less than twice the other. Furthermore, any negative difference indicates an
+ * error. (A PCI read should take at least 20 ticks of the 24.576 MHz timer to
+ * execute, so we have enough precision to compute the ratio of the differences.)
+ */
+static u32 ohci_get_cycle_time(struct fw_card *card)
+{
+ struct fw_ohci *ohci = fw_ohci(card);
+ u32 c0, c1, c2;
+ u32 t0, t1, t2;
+ s32 diff01, diff12;
+ int i;
- return bus_time;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+
+ if (ohci->quirks & QUIRK_CYCLE_TIMER) {
+ i = 0;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ do {
+ c0 = c1;
+ c1 = c2;
+ c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
+ t0 = cycle_timer_ticks(c0);
+ t1 = cycle_timer_ticks(c1);
+ t2 = cycle_timer_ticks(c2);
+ diff01 = t1 - t0;
+ diff12 = t2 - t1;
+ } while ((diff01 <= 0 || diff12 <= 0 ||
+ diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
+ && i++ < 20);
+ }
+
+ return c2;
}
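A hedged sanity check of the tick conversion above (illustrative only, not part of this patch; the sample register value is made up):

	static void example_cycle_timer_decode(void)
	{
		/* cycleSeconds = 2, cycleCount = 3, cycleOffset = 1 */
		u32 sample = (2 << 25) | (3 << 12) | 1;

		/* 2 * 8000 * 3072 + 3 * 3072 + 1 = 49161217 ticks of the 24.576 MHz clock */
		WARN_ON(cycle_timer_ticks(sample) != 49161217);
	}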
static void copy_iso_headers(struct iso_context *ctx, void *p)
@@ -1828,52 +1870,6 @@ static void copy_iso_headers(struct iso_context *ctx, void *p)
ctx->header_length += ctx->base.header_size;
}
-static int handle_ir_dualbuffer_packet(struct context *context,
- struct descriptor *d,
- struct descriptor *last)
-{
- struct iso_context *ctx =
- container_of(context, struct iso_context, context);
- struct db_descriptor *db = (struct db_descriptor *) d;
- __le32 *ir_header;
- size_t header_length;
- void *p, *end;
-
- if (db->first_res_count != 0 && db->second_res_count != 0) {
- if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
- /* This descriptor isn't done yet, stop iteration. */
- return 0;
- }
- ctx->excess_bytes -= le16_to_cpu(db->second_req_count);
- }
-
- header_length = le16_to_cpu(db->first_req_count) -
- le16_to_cpu(db->first_res_count);
-
- p = db + 1;
- end = p + header_length;
- while (p < end) {
- copy_iso_headers(ctx, p);
- ctx->excess_bytes +=
- (le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
- p += max(ctx->base.header_size, (size_t)8);
- }
-
- ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
- le16_to_cpu(db->second_res_count);
-
- if (le16_to_cpu(db->control) & DESCRIPTOR_IRQ_ALWAYS) {
- ir_header = (__le32 *) (db + 1);
- ctx->base.callback(&ctx->base,
- le32_to_cpu(ir_header[0]) & 0xffff,
- ctx->header_length, ctx->header,
- ctx->base.callback_data);
- ctx->header_length = 0;
- }
-
- return 1;
-}
-
static int handle_ir_packet_per_buffer(struct context *context,
struct descriptor *d,
struct descriptor *last)
@@ -1960,10 +1956,7 @@ static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
channels = &ohci->ir_context_channels;
mask = &ohci->ir_context_mask;
list = ohci->ir_context_list;
- if (ohci->use_dualbuffer)
- callback = handle_ir_dualbuffer_packet;
- else
- callback = handle_ir_packet_per_buffer;
+ callback = handle_ir_packet_per_buffer;
}
spin_lock_irqsave(&ohci->lock, flags);
@@ -2026,8 +2019,6 @@ static int ohci_start_iso(struct fw_iso_context *base,
} else {
index = ctx - ohci->ir_context_list;
control = IR_CONTEXT_ISOCH_HEADER;
- if (ohci->use_dualbuffer)
- control |= IR_CONTEXT_DUAL_BUFFER_MODE;
match = (tags << 28) | (sync << 8) | ctx->base.channel;
if (cycle >= 0) {
match |= (cycle & 0x07fff) << 12;
@@ -2188,92 +2179,6 @@ static int ohci_queue_iso_transmit(struct fw_iso_context *base,
return 0;
}
-static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
- struct fw_iso_packet *packet,
- struct fw_iso_buffer *buffer,
- unsigned long payload)
-{
- struct iso_context *ctx = container_of(base, struct iso_context, base);
- struct db_descriptor *db = NULL;
- struct descriptor *d;
- struct fw_iso_packet *p;
- dma_addr_t d_bus, page_bus;
- u32 z, header_z, length, rest;
- int page, offset, packet_count, header_size;
-
- /*
- * FIXME: Cycle lost behavior should be configurable: lose
- * packet, retransmit or terminate..
- */
-
- p = packet;
- z = 2;
-
- /*
- * The OHCI controller puts the isochronous header and trailer in the
- * buffer, so we need at least 8 bytes.
- */
- packet_count = p->header_length / ctx->base.header_size;
- header_size = packet_count * max(ctx->base.header_size, (size_t)8);
-
- /* Get header size in number of descriptors. */
- header_z = DIV_ROUND_UP(header_size, sizeof(*d));
- page = payload >> PAGE_SHIFT;
- offset = payload & ~PAGE_MASK;
- rest = p->payload_length;
- /*
- * The controllers I've tested have not worked correctly when
- * second_req_count is zero. Rather than do something we know won't
- * work, return an error
- */
- if (rest == 0)
- return -EINVAL;
-
- while (rest > 0) {
- d = context_get_descriptors(&ctx->context,
- z + header_z, &d_bus);
- if (d == NULL)
- return -ENOMEM;
-
- db = (struct db_descriptor *) d;
- db->control = cpu_to_le16(DESCRIPTOR_STATUS |
- DESCRIPTOR_BRANCH_ALWAYS);
- db->first_size =
- cpu_to_le16(max(ctx->base.header_size, (size_t)8));
- if (p->skip && rest == p->payload_length) {
- db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
- db->first_req_count = db->first_size;
- } else {
- db->first_req_count = cpu_to_le16(header_size);
- }
- db->first_res_count = db->first_req_count;
- db->first_buffer = cpu_to_le32(d_bus + sizeof(*db));
-
- if (p->skip && rest == p->payload_length)
- length = 4;
- else if (offset + rest < PAGE_SIZE)
- length = rest;
- else
- length = PAGE_SIZE - offset;
-
- db->second_req_count = cpu_to_le16(length);
- db->second_res_count = db->second_req_count;
- page_bus = page_private(buffer->pages[page]);
- db->second_buffer = cpu_to_le32(page_bus + offset);
-
- if (p->interrupt && length == rest)
- db->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);
-
- context_append(&ctx->context, d, z, header_z);
- offset = (offset + length) & ~PAGE_MASK;
- rest -= length;
- if (offset == 0)
- page++;
- }
-
- return 0;
-}
-
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
@@ -2364,9 +2269,6 @@ static int ohci_queue_iso(struct fw_iso_context *base,
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT)
ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
- else if (ctx->context.ohci->use_dualbuffer)
- ret = ohci_queue_iso_receive_dualbuffer(base, packet,
- buffer, payload);
else
ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
buffer, payload);
@@ -2383,7 +2285,7 @@ static const struct fw_card_driver ohci_driver = {
.send_response = ohci_send_response,
.cancel_packet = ohci_cancel_packet,
.enable_phys_dma = ohci_enable_phys_dma,
- .get_bus_time = ohci_get_bus_time,
+ .get_cycle_time = ohci_get_cycle_time,
.allocate_iso_context = ohci_allocate_iso_context,
.free_iso_context = ohci_free_iso_context,
@@ -2421,17 +2323,13 @@ static void ohci_pmac_off(struct pci_dev *dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
-#define PCI_VENDOR_ID_AGERE PCI_VENDOR_ID_ATT
-#define PCI_DEVICE_ID_AGERE_FW643 0x5901
-#define PCI_DEVICE_ID_TI_TSB43AB23 0x8024
-
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
u64 guid;
- int err;
+ int i, err, n_ir, n_it;
size_t size;
ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
@@ -2472,36 +2370,15 @@ static int __devinit pci_probe(struct pci_dev *dev,
goto fail_iomem;
}
- version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
-#if 0
- /* FIXME: make it a context option or remove dual-buffer mode */
- ohci->use_dualbuffer = version >= OHCI_VERSION_1_1;
-#endif
-
- /* dual-buffer mode is broken if more than one IR context is active */
- if (dev->vendor == PCI_VENDOR_ID_AGERE &&
- dev->device == PCI_DEVICE_ID_AGERE_FW643)
- ohci->use_dualbuffer = false;
-
- /* dual-buffer mode is broken */
- if (dev->vendor == PCI_VENDOR_ID_RICOH &&
- dev->device == PCI_DEVICE_ID_RICOH_R5C832)
- ohci->use_dualbuffer = false;
-
-/* x86-32 currently doesn't use highmem for dma_alloc_coherent */
-#if !defined(CONFIG_X86_32)
- /* dual-buffer mode is broken with descriptor addresses above 2G */
- if (dev->vendor == PCI_VENDOR_ID_TI &&
- (dev->device == PCI_DEVICE_ID_TI_TSB43AB22 ||
- dev->device == PCI_DEVICE_ID_TI_TSB43AB23))
- ohci->use_dualbuffer = false;
-#endif
-
-#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
- ohci->old_uninorth = dev->vendor == PCI_VENDOR_ID_APPLE &&
- dev->device == PCI_DEVICE_ID_APPLE_UNI_N_FW;
-#endif
- ohci->bus_reset_packet_quirk = dev->vendor == PCI_VENDOR_ID_TI;
+ for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
+ if (ohci_quirks[i].vendor == dev->vendor &&
+ (ohci_quirks[i].device == dev->device ||
+ ohci_quirks[i].device == (unsigned short)PCI_ANY_ID)) {
+ ohci->quirks = ohci_quirks[i].flags;
+ break;
+ }
+ if (param_quirks)
+ ohci->quirks = param_quirks;
ar_context_init(&ohci->ar_request_ctx, ohci,
OHCI1394_AsReqRcvContextControlSet);
@@ -2516,17 +2393,19 @@ static int __devinit pci_probe(struct pci_dev *dev,
OHCI1394_AsRspTrContextControlSet, handle_at_packet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
- ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
+ ohci->ir_context_channels = ~0ULL;
+ ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
- size = sizeof(struct iso_context) * hweight32(ohci->it_context_mask);
- ohci->it_context_list = kzalloc(size, GFP_KERNEL);
+ n_ir = hweight32(ohci->ir_context_mask);
+ size = sizeof(struct iso_context) * n_ir;
+ ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
- ohci->ir_context_channels = ~0ULL;
- ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
+ ohci->it_context_mask = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
- size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
- ohci->ir_context_list = kzalloc(size, GFP_KERNEL);
+ n_it = hweight32(ohci->it_context_mask);
+ size = sizeof(struct iso_context) * n_it;
+ ohci->it_context_list = kzalloc(size, GFP_KERNEL);
if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
err = -ENOMEM;
@@ -2553,8 +2432,11 @@ static int __devinit pci_probe(struct pci_dev *dev,
if (err)
goto fail_self_id;
- fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
- dev_name(&dev->dev), version >> 16, version & 0xff);
+ version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
+ fw_notify("Added fw-ohci device %s, OHCI v%x.%x, "
+ "%d IR + %d IT contexts, quirks 0x%x\n",
+ dev_name(&dev->dev), version >> 16, version & 0xff,
+ n_ir, n_it, ohci->quirks);
return 0;
@@ -2662,7 +2544,7 @@ static int pci_resume(struct pci_dev *dev)
}
#endif
-static struct pci_device_id pci_table[] = {
+static const struct pci_device_id pci_table[] = {
{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
{ }
};
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 70fef40cd22..ca264f2fdf0 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1014,7 +1014,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
return 0;
}
-static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
+static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt,
+ const u32 *directory)
{
struct fw_csr_iterator ci;
int key, value;
@@ -1027,7 +1028,7 @@ static int sbp2_scan_logical_unit_dir(struct sbp2_target *tgt, u32 *directory)
return 0;
}
-static int sbp2_scan_unit_dir(struct sbp2_target *tgt, u32 *directory,
+static int sbp2_scan_unit_dir(struct sbp2_target *tgt, const u32 *directory,
u32 *model, u32 *firmware_revision)
{
struct fw_csr_iterator ci;
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index f82bcdae130..a3600e3ed0f 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -380,7 +380,6 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
struct ibft_nic *nic = entry->nic;
void *ibft_loc = entry->header;
char *str = buf;
- char *mac;
__be32 val;
if (!nic)
@@ -419,10 +418,7 @@ static ssize_t ibft_attr_show_nic(struct ibft_kobject *entry,
str += sprintf(str, "%d\n", nic->vlan);
break;
case ibft_eth_mac:
- mac = nic->mac;
- str += sprintf(str, "%02x:%02x:%02x:%02x:%02x:%02x\n",
- (u8)mac[0], (u8)mac[1], (u8)mac[2],
- (u8)mac[3], (u8)mac[4], (u8)mac[5]);
+ str += sprintf(str, "%pM\n", nic->mac);
break;
case ibft_eth_hostname:
str += sprintf_string(str, nic->hostname_len,
diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
index 56f9234781f..20f645743ea 100644
--- a/drivers/firmware/memmap.c
+++ b/drivers/firmware/memmap.c
@@ -122,29 +122,53 @@ static int firmware_map_add_entry(u64 start, u64 end,
return 0;
}
+/*
+ * Add a memmap entry to sysfs
+ */
+static int add_sysfs_fw_map_entry(struct firmware_map_entry *entry)
+{
+ static int map_entries_nr;
+ static struct kset *mmap_kset;
+
+ if (!mmap_kset) {
+ mmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
+ if (!mmap_kset)
+ return -ENOMEM;
+ }
+
+ entry->kobj.kset = mmap_kset;
+ if (kobject_add(&entry->kobj, NULL, "%d", map_entries_nr++))
+ kobject_put(&entry->kobj);
+
+ return 0;
+}
+
/**
- * firmware_map_add() - Adds a firmware mapping entry.
+ * firmware_map_add_hotplug() - Adds a firmware mapping entry when we do
+ * memory hotplug.
* @start: Start of the memory range.
* @end: End of the memory range (inclusive).
* @type: Type of the memory range.
*
- * This function uses kmalloc() for memory
- * allocation. Use firmware_map_add_early() if you want to use the bootmem
- * allocator.
- *
- * That function must be called before late_initcall.
+ * Adds a firmware mapping entry. This function is for memory hotplug; it is
+ * similar to firmware_map_add_early(), except that it creates the sysfs
+ * entry dynamically.
*
* Returns 0 on success, or -ENOMEM if no memory could be allocated.
**/
-int firmware_map_add(u64 start, u64 end, const char *type)
+int __meminit firmware_map_add_hotplug(u64 start, u64 end, const char *type)
{
struct firmware_map_entry *entry;
- entry = kmalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
+ entry = kzalloc(sizeof(struct firmware_map_entry), GFP_ATOMIC);
if (!entry)
return -ENOMEM;
- return firmware_map_add_entry(start, end, type, entry);
+ firmware_map_add_entry(start, end, type, entry);
+ /* create the sysfs entry */
+ add_sysfs_fw_map_entry(entry);
+
+ return 0;
}
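A hypothetical caller sketch (not part of this patch): a memory hot-add path could register the new range so it appears under /sys/firmware/memmap/. The helper name and the inclusive-end arithmetic are assumptions based on the kernel-doc above:

	static int __meminit example_register_hotplugged_ram(u64 start, u64 size)
	{
		/* @end is documented as inclusive, hence the "- 1" */
		return firmware_map_add_hotplug(start, start + size - 1, "System RAM");
	}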
/**
@@ -154,7 +178,7 @@ int firmware_map_add(u64 start, u64 end, const char *type)
* @type: Type of the memory range.
*
* Adds a firmware mapping entry. This function uses the bootmem allocator
- * for memory allocation. Use firmware_map_add() if you want to use kmalloc().
+ * for memory allocation.
*
* That function must be called before late_initcall.
*
@@ -214,19 +238,10 @@ static ssize_t memmap_attr_show(struct kobject *kobj,
*/
static int __init memmap_init(void)
{
- int i = 0;
struct firmware_map_entry *entry;
- struct kset *memmap_kset;
-
- memmap_kset = kset_create_and_add("memmap", NULL, firmware_kobj);
- if (WARN_ON(!memmap_kset))
- return -ENOMEM;
- list_for_each_entry(entry, &map_entries, list) {
- entry->kobj.kset = memmap_kset;
- if (kobject_add(&entry->kobj, NULL, "%d", i++))
- kobject_put(&entry->kobj);
- }
+ list_for_each_entry(entry, &map_entries, list)
+ add_sysfs_fw_map_entry(entry);
return 0;
}
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 1f1d88ae68d..a4cdbd51b1c 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -65,8 +65,17 @@ config GPIO_SYSFS
# put expanders in the right section, in alphabetical order
+config GPIO_MAX730X
+ tristate
+
comment "Memory mapped GPIO expanders:"
+config GPIO_IT8761E
+ tristate "IT8761E GPIO support"
+ depends on GPIOLIB
+ help
+ Say yes here to support the GPIO functionality of the IT8761E super I/O chip.
+
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
@@ -87,6 +96,13 @@ config GPIO_VR41XX
comment "I2C GPIO expanders:"
+config GPIO_MAX7300
+ tristate "Maxim MAX7300 GPIO expander"
+ depends on I2C
+ select GPIO_MAX730X
+ help
+ GPIO driver for Maxim MAX7300 I2C-based GPIO expander.
+
config GPIO_MAX732X
tristate "MAX7319, MAX7320-7327 I2C Port Expanders"
depends on I2C
@@ -124,6 +140,13 @@ config GPIO_PCA953X
This driver can also be built as a module. If so, the module
will be called pca953x.
+config GPIO_PCA953X_IRQ
+ bool "Interrupt controller support for PCA953x"
+ depends on GPIO_PCA953X=y
+ help
+ Say yes here to enable the pca953x to be used as an interrupt
+ controller. It requires the driver to be built into the kernel.
+
config GPIO_PCF857X
tristate "PCF857x, PCA{85,96}7x, and MAX732[89] I2C GPIO expanders"
depends on I2C
@@ -226,8 +249,9 @@ comment "SPI GPIO expanders:"
config GPIO_MAX7301
tristate "Maxim MAX7301 GPIO expander"
depends on SPI_MASTER
+ select GPIO_MAX730X
help
- gpio driver for Maxim MAX7301 SPI GPIO expander.
+ GPIO driver for Maxim MAX7301 SPI-based GPIO expander.
config GPIO_MCP23S08
tristate "Microchip MCP23S08 I/O expander"
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 48687238edb..128abf8a98d 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_GPIOLIB) += gpiolib.o
obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o
obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o
obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o
+obj-$(CONFIG_GPIO_MAX730X) += max730x.o
+obj-$(CONFIG_GPIO_MAX7300) += max7300.o
obj-$(CONFIG_GPIO_MAX7301) += max7301.o
obj-$(CONFIG_GPIO_MAX732X) += max732x.o
obj-$(CONFIG_GPIO_MC33880) += mc33880.o
@@ -20,5 +22,6 @@ obj-$(CONFIG_GPIO_UCB1400) += ucb1400_gpio.o
obj-$(CONFIG_GPIO_XILINX) += xilinx_gpio.o
obj-$(CONFIG_GPIO_CS5535) += cs5535-gpio.o
obj-$(CONFIG_GPIO_BT8XX) += bt8xxgpio.o
+obj-$(CONFIG_GPIO_IT8761E) += it8761e_gpio.o
obj-$(CONFIG_GPIO_VR41XX) += vr41xx_giu.o
obj-$(CONFIG_GPIO_WM831X) += wm831x-gpio.o
diff --git a/drivers/gpio/cs5535-gpio.c b/drivers/gpio/cs5535-gpio.c
index 0fdbe94f24a..0c3c498f226 100644
--- a/drivers/gpio/cs5535-gpio.c
+++ b/drivers/gpio/cs5535-gpio.c
@@ -154,7 +154,7 @@ static int chip_gpio_request(struct gpio_chip *c, unsigned offset)
static int chip_gpio_get(struct gpio_chip *chip, unsigned offset)
{
- return cs5535_gpio_isset(offset, GPIO_OUTPUT_VAL);
+ return cs5535_gpio_isset(offset, GPIO_READ_BACK);
}
static void chip_gpio_set(struct gpio_chip *chip, unsigned offset, int val)
@@ -172,6 +172,7 @@ static int chip_direction_input(struct gpio_chip *c, unsigned offset)
spin_lock_irqsave(&chip->lock, flags);
__cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE);
+ __cs5535_gpio_clear(chip, offset, GPIO_OUTPUT_ENABLE);
spin_unlock_irqrestore(&chip->lock, flags);
return 0;
@@ -184,6 +185,7 @@ static int chip_direction_output(struct gpio_chip *c, unsigned offset, int val)
spin_lock_irqsave(&chip->lock, flags);
+ __cs5535_gpio_set(chip, offset, GPIO_INPUT_ENABLE);
__cs5535_gpio_set(chip, offset, GPIO_OUTPUT_ENABLE);
if (val)
__cs5535_gpio_set(chip, offset, GPIO_OUTPUT_VAL);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 350842ad363..9006fdb26fe 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1237,6 +1237,64 @@ void gpio_free(unsigned gpio)
}
EXPORT_SYMBOL_GPL(gpio_free);
+/**
+ * gpio_request_one - request a single GPIO with initial configuration
+ * @gpio: the GPIO number
+ * @flags: GPIO configuration as specified by GPIOF_*
+ * @label: a literal description string of this GPIO
+ */
+int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
+{
+ int err;
+
+ err = gpio_request(gpio, label);
+ if (err)
+ return err;
+
+ if (flags & GPIOF_DIR_IN)
+ err = gpio_direction_input(gpio);
+ else
+ err = gpio_direction_output(gpio,
+ (flags & GPIOF_INIT_HIGH) ? 1 : 0);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(gpio_request_one);
+
+/**
+ * gpio_request_array - request multiple GPIOs in a single call
+ * @array: array of the 'struct gpio'
+ * @num: how many GPIOs in the array
+ */
+int gpio_request_array(struct gpio *array, size_t num)
+{
+ int i, err;
+
+ for (i = 0; i < num; i++, array++) {
+ err = gpio_request_one(array->gpio, array->flags, array->label);
+ if (err)
+ goto err_free;
+ }
+ return 0;
+
+err_free:
+ while (i--)
+ gpio_free((--array)->gpio);
+ return err;
+}
+EXPORT_SYMBOL_GPL(gpio_request_array);
+
+/**
+ * gpio_free_array - release multiple GPIOs in a single call
+ * @array: array of the 'struct gpio'
+ * @num: how many GPIOs in the array
+ */
+void gpio_free_array(struct gpio *array, size_t num)
+{
+ while (num--)
+ gpio_free((array++)->gpio);
+}
+EXPORT_SYMBOL_GPL(gpio_free_array);
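An illustrative consumer of the new helpers (GPIO numbers and labels are made up; the GPIOF_* flags and struct gpio come from the accompanying header change, which is not shown in this diff):

	static struct gpio example_gpios[] = {
		{ 42, GPIOF_OUT_INIT_HIGH, "example-reset"  },	/* output, driven high */
		{ 43, GPIOF_IN,            "example-ready"  },	/* input */
		{ 44, GPIOF_IN,            "example-detect" },	/* input */
	};

	static int example_claim_gpios(void)
	{
		/* Requests and configures all lines, rolling back on failure. */
		return gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
	}

	static void example_release_gpios(void)
	{
		gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));
	}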
/**
* gpiochip_is_requested - return string iff signal was requested
diff --git a/drivers/gpio/it8761e_gpio.c b/drivers/gpio/it8761e_gpio.c
new file mode 100644
index 00000000000..753219cf993
--- /dev/null
+++ b/drivers/gpio/it8761e_gpio.c
@@ -0,0 +1,231 @@
+/*
+ * it8761e_gpio.c - GPIO interface for IT8761E Super I/O chip
+ *
+ * Author: Denis Turischev <denis@compulab.co.il>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License 2 as published
+ * by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING. If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+
+#include <linux/gpio.h>
+
+#define SIO_CHIP_ID 0x8761
+#define CHIP_ID_HIGH_BYTE 0x20
+#define CHIP_ID_LOW_BYTE 0x21
+
+static u8 ports[2] = { 0x2e, 0x4e };
+static u8 port;
+
+static DEFINE_SPINLOCK(sio_lock);
+
+#define GPIO_NAME "it8761-gpio"
+#define GPIO_BA_HIGH_BYTE 0x60
+#define GPIO_BA_LOW_BYTE 0x61
+#define GPIO_IOSIZE 4
+#define GPIO1X_IO 0xf0
+#define GPIO2X_IO 0xf1
+
+static u16 gpio_ba;
+
+static u8 read_reg(u8 addr, u8 port)
+{
+ outb(addr, port);
+ return inb(port + 1);
+}
+
+static void write_reg(u8 data, u8 addr, u8 port)
+{
+ outb(addr, port);
+ outb(data, port + 1);
+}
+
+static void enter_conf_mode(u8 port)
+{
+ outb(0x87, port);
+ outb(0x61, port);
+ outb(0x55, port);
+ outb((port == 0x2e) ? 0x55 : 0xaa, port);
+}
+
+static void exit_conf_mode(u8 port)
+{
+ outb(0x2, port);
+ outb(0x2, port + 1);
+}
+
+static void enter_gpio_mode(u8 port)
+{
+ write_reg(0x2, 0x7, port);
+}
+
+static int it8761e_gpio_get(struct gpio_chip *gc, unsigned gpio_num)
+{
+ u16 reg;
+ u8 bit;
+
+ bit = gpio_num % 7;
+ reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba;
+
+ return !!(inb(reg) & (1 << bit));
+}
+
+static int it8761e_gpio_direction_in(struct gpio_chip *gc, unsigned gpio_num)
+{
+ u8 curr_dirs;
+ u8 io_reg, bit;
+
+ bit = gpio_num % 7;
+ io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO;
+
+ spin_lock(&sio_lock);
+
+ enter_conf_mode(port);
+ enter_gpio_mode(port);
+
+ curr_dirs = read_reg(io_reg, port);
+
+ if (curr_dirs & (1 << bit))
+ write_reg(curr_dirs & ~(1 << bit), io_reg, port);
+
+ exit_conf_mode(port);
+
+ spin_unlock(&sio_lock);
+ return 0;
+}
+
+static void it8761e_gpio_set(struct gpio_chip *gc,
+ unsigned gpio_num, int val)
+{
+ u8 curr_vals, bit;
+ u16 reg;
+
+ bit = gpio_num % 7;
+ reg = (gpio_num >= 7) ? gpio_ba + 1 : gpio_ba;
+
+ spin_lock(&sio_lock);
+
+ curr_vals = inb(reg);
+ if (val)
+ outb(curr_vals | (1 << bit), reg);
+ else
+ outb(curr_vals & ~(1 << bit), reg);
+
+ spin_unlock(&sio_lock);
+}
+
+static int it8761e_gpio_direction_out(struct gpio_chip *gc,
+ unsigned gpio_num, int val)
+{
+ u8 curr_dirs, io_reg, bit;
+
+ bit = gpio_num % 7;
+ io_reg = (gpio_num >= 7) ? GPIO2X_IO : GPIO1X_IO;
+
+ it8761e_gpio_set(gc, gpio_num, val);
+
+ spin_lock(&sio_lock);
+
+ enter_conf_mode(port);
+ enter_gpio_mode(port);
+
+ curr_dirs = read_reg(io_reg, port);
+
+ if (!(curr_dirs & (1 << bit)))
+ write_reg(curr_dirs | (1 << bit), io_reg, port);
+
+ exit_conf_mode(port);
+
+ spin_unlock(&sio_lock);
+ return 0;
+}
+
+static struct gpio_chip it8761e_gpio_chip = {
+ .label = GPIO_NAME,
+ .owner = THIS_MODULE,
+ .get = it8761e_gpio_get,
+ .direction_input = it8761e_gpio_direction_in,
+ .set = it8761e_gpio_set,
+ .direction_output = it8761e_gpio_direction_out,
+};
+
+static int __init it8761e_gpio_init(void)
+{
+ int i, id, err;
+
+ /* chip and port detection */
+ for (i = 0; i < ARRAY_SIZE(ports); i++) {
+ spin_lock(&sio_lock);
+ enter_conf_mode(ports[i]);
+
+ id = (read_reg(CHIP_ID_HIGH_BYTE, ports[i]) << 8) +
+ read_reg(CHIP_ID_LOW_BYTE, ports[i]);
+
+ exit_conf_mode(ports[i]);
+ spin_unlock(&sio_lock);
+
+ if (id == SIO_CHIP_ID) {
+ port = ports[i];
+ break;
+ }
+ }
+
+ if (!port)
+ return -ENODEV;
+
+ /* fetch GPIO base address */
+ enter_conf_mode(port);
+ enter_gpio_mode(port);
+ gpio_ba = (read_reg(GPIO_BA_HIGH_BYTE, port) << 8) +
+ read_reg(GPIO_BA_LOW_BYTE, port);
+ exit_conf_mode(port);
+
+ if (!request_region(gpio_ba, GPIO_IOSIZE, GPIO_NAME))
+ return -EBUSY;
+
+ it8761e_gpio_chip.base = -1;
+ it8761e_gpio_chip.ngpio = 14;
+
+ err = gpiochip_add(&it8761e_gpio_chip);
+ if (err < 0)
+ goto gpiochip_add_err;
+
+ return 0;
+
+gpiochip_add_err:
+ release_region(gpio_ba, GPIO_IOSIZE);
+ gpio_ba = 0;
+ return err;
+}
+
+static void __exit it8761e_gpio_exit(void)
+{
+ if (gpio_ba) {
+ gpiochip_remove(&it8761e_gpio_chip);
+
+ release_region(gpio_ba, GPIO_IOSIZE);
+ gpio_ba = 0;
+ }
+}
+module_init(it8761e_gpio_init);
+module_exit(it8761e_gpio_exit);
+
+MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
+MODULE_DESCRIPTION("GPIO interface for IT8761E Super I/O chip");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpio/max7300.c b/drivers/gpio/max7300.c
new file mode 100644
index 00000000000..9d74eef1157
--- /dev/null
+++ b/drivers/gpio/max7300.c
@@ -0,0 +1,94 @@
+/*
+ * drivers/gpio/max7300.c
+ *
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Check max730x.c for further details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/i2c.h>
+#include <linux/spi/max7301.h>
+
+static int max7300_i2c_write(struct device *dev, unsigned int reg,
+ unsigned int val)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return i2c_smbus_write_byte_data(client, reg, val);
+}
+
+static int max7300_i2c_read(struct device *dev, unsigned int reg)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+
+ return i2c_smbus_read_byte_data(client, reg);
+}
+
+static int __devinit max7300_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max7301 *ts;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ ts = kzalloc(sizeof(struct max7301), GFP_KERNEL);
+ if (!ts)
+ return -ENOMEM;
+
+ ts->read = max7300_i2c_read;
+ ts->write = max7300_i2c_write;
+ ts->dev = &client->dev;
+
+ ret = __max730x_probe(ts);
+ if (ret)
+ kfree(ts);
+ return ret;
+}
+
+static int __devexit max7300_remove(struct i2c_client *client)
+{
+ return __max730x_remove(&client->dev);
+}
+
+static const struct i2c_device_id max7300_id[] = {
+ { "max7300", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max7300_id);
+
+static struct i2c_driver max7300_driver = {
+ .driver = {
+ .name = "max7300",
+ .owner = THIS_MODULE,
+ },
+ .probe = max7300_probe,
+ .remove = __devexit_p(max7300_remove),
+ .id_table = max7300_id,
+};
+
+static int __init max7300_init(void)
+{
+ return i2c_add_driver(&max7300_driver);
+}
+subsys_initcall(max7300_init);
+
+static void __exit max7300_exit(void)
+{
+ i2c_del_driver(&max7300_driver);
+}
+module_exit(max7300_exit);
+
+MODULE_AUTHOR("Wolfram Sang");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MAX7300 GPIO-Expander");
diff --git a/drivers/gpio/max7301.c b/drivers/gpio/max7301.c
index 480956f1ca5..965d9b1ea13 100644
--- a/drivers/gpio/max7301.c
+++ b/drivers/gpio/max7301.c
@@ -1,98 +1,41 @@
-/**
+/*
* drivers/gpio/max7301.c
*
* Copyright (C) 2006 Juergen Beisert, Pengutronix
* Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
- * The Maxim's MAX7301 device is an SPI driven GPIO expander. There are
- * 28 GPIOs. 8 of them can trigger an interrupt. See datasheet for more
- * details
- * Note:
- * - DIN must be stable at the rising edge of clock.
- * - when writing:
- * - always clock in 16 clocks at once
- * - at DIN: D15 first, D0 last
- * - D0..D7 = databyte, D8..D14 = commandbyte
- * - D15 = low -> write command
- * - when reading
- * - always clock in 16 clocks at once
- * - at DIN: D15 first, D0 last
- * - D0..D7 = dummy, D8..D14 = register address
- * - D15 = high -> read command
- * - raise CS and assert it again
- * - always clock in 16 clocks at once
- * - at DOUT: D15 first, D0 last
- * - D0..D7 contains the data from the first cycle
- *
- * The driver exports a standard gpiochip interface
+ * Check max730x.c for further details.
*/
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/spi/spi.h>
#include <linux/spi/max7301.h>
-#include <linux/gpio.h>
-
-#define DRIVER_NAME "max7301"
-
-/*
- * Pin configurations, see MAX7301 datasheet page 6
- */
-#define PIN_CONFIG_MASK 0x03
-#define PIN_CONFIG_IN_PULLUP 0x03
-#define PIN_CONFIG_IN_WO_PULLUP 0x02
-#define PIN_CONFIG_OUT 0x01
-
-#define PIN_NUMBER 28
-
-
-/*
- * Some registers must be read back to modify.
- * To save time we cache them here in memory
- */
-struct max7301 {
- struct mutex lock;
- u8 port_config[8]; /* field 0 is unused */
- u32 out_level; /* cached output levels */
- struct gpio_chip chip;
- struct spi_device *spi;
-};
-/**
- * max7301_write - Write a new register content
- * @spi: The SPI device
- * @reg: Register offset
- * @val: Value to write
- *
- * A write to the MAX7301 means one message with one transfer
- *
- * Returns 0 if successful or a negative value on error
- */
-static int max7301_write(struct spi_device *spi, unsigned int reg, unsigned int val)
+/* A write to the MAX7301 means one message with one transfer */
+static int max7301_spi_write(struct device *dev, unsigned int reg,
+ unsigned int val)
{
+ struct spi_device *spi = to_spi_device(dev);
u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);
+
return spi_write(spi, (const u8 *)&word, sizeof(word));
}
-/**
- * max7301_read - Read back register content
- * @spi: The SPI device
- * @reg: Register offset
- *
- * A read from the MAX7301 means two transfers; here, one message each
- *
- * Returns positive 8 bit value from device if successful or a
- * negative value on error
- */
-static int max7301_read(struct spi_device *spi, unsigned int reg)
+/* A read from the MAX7301 means two transfers; here, one message each */
+
+static int max7301_spi_read(struct device *dev, unsigned int reg)
{
int ret;
u16 word;
+ struct spi_device *spi = to_spi_device(dev);
word = 0x8000 | (reg << 8);
ret = spi_write(spi, (const u8 *)&word, sizeof(word));
@@ -108,125 +51,13 @@ static int max7301_read(struct spi_device *spi, unsigned int reg)
return word & 0xff;
}
-static int max7301_direction_input(struct gpio_chip *chip, unsigned offset)
-{
- struct max7301 *ts = container_of(chip, struct max7301, chip);
- u8 *config;
- int ret;
-
- /* First 4 pins are unused in the controller */
- offset += 4;
-
- config = &ts->port_config[offset >> 2];
-
- mutex_lock(&ts->lock);
-
- /* Standard GPIO API doesn't support pull-ups, has to be extended.
- * Hard-coding no pollup for now. */
- *config = (*config & ~(3 << (offset & 3))) | (1 << (offset & 3));
-
- ret = max7301_write(ts->spi, 0x08 + (offset >> 2), *config);
-
- mutex_unlock(&ts->lock);
-
- return ret;
-}
-
-static int __max7301_set(struct max7301 *ts, unsigned offset, int value)
-{
- if (value) {
- ts->out_level |= 1 << offset;
- return max7301_write(ts->spi, 0x20 + offset, 0x01);
- } else {
- ts->out_level &= ~(1 << offset);
- return max7301_write(ts->spi, 0x20 + offset, 0x00);
- }
-}
-
-static int max7301_direction_output(struct gpio_chip *chip, unsigned offset,
- int value)
-{
- struct max7301 *ts = container_of(chip, struct max7301, chip);
- u8 *config;
- int ret;
-
- /* First 4 pins are unused in the controller */
- offset += 4;
-
- config = &ts->port_config[offset >> 2];
-
- mutex_lock(&ts->lock);
-
- *config = (*config & ~(3 << (offset & 3))) | (1 << (offset & 3));
-
- ret = __max7301_set(ts, offset, value);
-
- if (!ret)
- ret = max7301_write(ts->spi, 0x08 + (offset >> 2), *config);
-
- mutex_unlock(&ts->lock);
-
- return ret;
-}
-
-static int max7301_get(struct gpio_chip *chip, unsigned offset)
-{
- struct max7301 *ts = container_of(chip, struct max7301, chip);
- int config, level = -EINVAL;
-
- /* First 4 pins are unused in the controller */
- offset += 4;
-
- mutex_lock(&ts->lock);
-
- config = (ts->port_config[offset >> 2] >> ((offset & 3) * 2)) & 3;
-
- switch (config) {
- case 1:
- /* Output: return cached level */
- level = !!(ts->out_level & (1 << offset));
- break;
- case 2:
- case 3:
- /* Input: read out */
- level = max7301_read(ts->spi, 0x20 + offset) & 0x01;
- }
- mutex_unlock(&ts->lock);
-
- return level;
-}
-
-static void max7301_set(struct gpio_chip *chip, unsigned offset, int value)
-{
- struct max7301 *ts = container_of(chip, struct max7301, chip);
-
- /* First 4 pins are unused in the controller */
- offset += 4;
-
- mutex_lock(&ts->lock);
-
- __max7301_set(ts, offset, value);
-
- mutex_unlock(&ts->lock);
-}
-
static int __devinit max7301_probe(struct spi_device *spi)
{
struct max7301 *ts;
- struct max7301_platform_data *pdata;
- int i, ret;
-
- pdata = spi->dev.platform_data;
- if (!pdata || !pdata->base) {
- dev_dbg(&spi->dev, "incorrect or missing platform data\n");
- return -EINVAL;
- }
+ int ret;
- /*
- * bits_per_word cannot be configured in platform data
- */
+ /* bits_per_word cannot be configured in platform data */
spi->bits_per_word = 16;
-
ret = spi_setup(spi);
if (ret < 0)
return ret;
@@ -235,90 +66,35 @@ static int __devinit max7301_probe(struct spi_device *spi)
if (!ts)
return -ENOMEM;
- mutex_init(&ts->lock);
-
- dev_set_drvdata(&spi->dev, ts);
+ ts->read = max7301_spi_read;
+ ts->write = max7301_spi_write;
+ ts->dev = &spi->dev;
- /* Power up the chip and disable IRQ output */
- max7301_write(spi, 0x04, 0x01);
-
- ts->spi = spi;
-
- ts->chip.label = DRIVER_NAME,
-
- ts->chip.direction_input = max7301_direction_input;
- ts->chip.get = max7301_get;
- ts->chip.direction_output = max7301_direction_output;
- ts->chip.set = max7301_set;
-
- ts->chip.base = pdata->base;
- ts->chip.ngpio = PIN_NUMBER;
- ts->chip.can_sleep = 1;
- ts->chip.dev = &spi->dev;
- ts->chip.owner = THIS_MODULE;
-
- /*
- * tristate all pins in hardware and cache the
- * register values for later use.
- */
- for (i = 1; i < 8; i++) {
- int j;
- /* 0xAA means input with internal pullup disabled */
- max7301_write(spi, 0x08 + i, 0xAA);
- ts->port_config[i] = 0xAA;
- for (j = 0; j < 4; j++) {
- int offset = (i - 1) * 4 + j;
- ret = max7301_direction_input(&ts->chip, offset);
- if (ret)
- goto exit_destroy;
- }
- }
-
- ret = gpiochip_add(&ts->chip);
+ ret = __max730x_probe(ts);
if (ret)
- goto exit_destroy;
-
- return ret;
-
-exit_destroy:
- dev_set_drvdata(&spi->dev, NULL);
- mutex_destroy(&ts->lock);
- kfree(ts);
+ kfree(ts);
return ret;
}
static int __devexit max7301_remove(struct spi_device *spi)
{
- struct max7301 *ts;
- int ret;
-
- ts = dev_get_drvdata(&spi->dev);
- if (ts == NULL)
- return -ENODEV;
-
- dev_set_drvdata(&spi->dev, NULL);
-
- /* Power down the chip and disable IRQ output */
- max7301_write(spi, 0x04, 0x00);
-
- ret = gpiochip_remove(&ts->chip);
- if (!ret) {
- mutex_destroy(&ts->lock);
- kfree(ts);
- } else
- dev_err(&spi->dev, "Failed to remove the GPIO controller: %d\n",
- ret);
-
- return ret;
+ return __max730x_remove(&spi->dev);
}
+static const struct spi_device_id max7301_id[] = {
+ { "max7301", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, max7301_id);
+
static struct spi_driver max7301_driver = {
.driver = {
- .name = DRIVER_NAME,
- .owner = THIS_MODULE,
+ .name = "max7301",
+ .owner = THIS_MODULE,
},
- .probe = max7301_probe,
- .remove = __devexit_p(max7301_remove),
+ .probe = max7301_probe,
+ .remove = __devexit_p(max7301_remove),
+ .id_table = max7301_id,
};
static int __init max7301_init(void)
@@ -336,7 +112,6 @@ static void __exit max7301_exit(void)
}
module_exit(max7301_exit);
-MODULE_AUTHOR("Juergen Beisert");
+MODULE_AUTHOR("Juergen Beisert, Wolfram Sang");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("MAX7301 SPI based GPIO-Expander");
-MODULE_ALIAS("spi:" DRIVER_NAME);
+MODULE_DESCRIPTION("MAX7301 GPIO-Expander");
diff --git a/drivers/gpio/max730x.c b/drivers/gpio/max730x.c
new file mode 100644
index 00000000000..c9bced55f82
--- /dev/null
+++ b/drivers/gpio/max730x.c
@@ -0,0 +1,244 @@
+/**
+ * drivers/gpio/max730x.c
+ *
+ * Copyright (C) 2006 Juergen Beisert, Pengutronix
+ * Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * The Maxim MAX7300/1 device is an I2C/SPI driven GPIO expander. There are
+ * 28 GPIOs. 8 of them can trigger an interrupt. See datasheet for more
+ * details.
+ * Note:
+ * - DIN must be stable at the rising edge of clock.
+ * - when writing:
+ * - always clock in 16 clocks at once
+ * - at DIN: D15 first, D0 last
+ * - D0..D7 = databyte, D8..D14 = commandbyte
+ * - D15 = low -> write command
+ * - when reading
+ * - always clock in 16 clocks at once
+ * - at DIN: D15 first, D0 last
+ * - D0..D7 = dummy, D8..D14 = register address
+ * - D15 = high -> read command
+ * - raise CS and assert it again
+ * - always clock in 16 clocks at once
+ * - at DOUT: D15 first, D0 last
+ * - D0..D7 contains the data from the first cycle
+ *
+ * The driver exports a standard gpiochip interface
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/spi/max7301.h>
+#include <linux/gpio.h>
+
+/*
+ * Pin configurations, see MAX7301 datasheet page 6
+ */
+#define PIN_CONFIG_MASK 0x03
+#define PIN_CONFIG_IN_PULLUP 0x03
+#define PIN_CONFIG_IN_WO_PULLUP 0x02
+#define PIN_CONFIG_OUT 0x01
+
+#define PIN_NUMBER 28
+
+static int max7301_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
+ u8 *config;
+ u8 offset_bits;
+ int ret;
+
+ /* First 4 pins are unused in the controller */
+ offset += 4;
+ offset_bits = (offset & 3) << 1;
+
+ config = &ts->port_config[offset >> 2];
+
+ mutex_lock(&ts->lock);
+
+ /* Standard GPIO API doesn't support pull-ups, has to be extended.
+ * Hard-coding no pull-up for now. */
+ *config = (*config & ~(PIN_CONFIG_MASK << offset_bits))
+ | (PIN_CONFIG_IN_WO_PULLUP << offset_bits);
+
+ ret = ts->write(ts->dev, 0x08 + (offset >> 2), *config);
+
+ mutex_unlock(&ts->lock);
+
+ return ret;
+}
+
+static int __max7301_set(struct max7301 *ts, unsigned offset, int value)
+{
+ if (value) {
+ ts->out_level |= 1 << offset;
+ return ts->write(ts->dev, 0x20 + offset, 0x01);
+ } else {
+ ts->out_level &= ~(1 << offset);
+ return ts->write(ts->dev, 0x20 + offset, 0x00);
+ }
+}
+
+static int max7301_direction_output(struct gpio_chip *chip, unsigned offset,
+ int value)
+{
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
+ u8 *config;
+ u8 offset_bits;
+ int ret;
+
+ /* First 4 pins are unused in the controller */
+ offset += 4;
+ offset_bits = (offset & 3) << 1;
+
+ config = &ts->port_config[offset >> 2];
+
+ mutex_lock(&ts->lock);
+
+ *config = (*config & ~(PIN_CONFIG_MASK << offset_bits))
+ | (PIN_CONFIG_OUT << offset_bits);
+
+ ret = __max7301_set(ts, offset, value);
+
+ if (!ret)
+ ret = ts->write(ts->dev, 0x08 + (offset >> 2), *config);
+
+ mutex_unlock(&ts->lock);
+
+ return ret;
+}
+
+static int max7301_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
+ int config, level = -EINVAL;
+
+ /* First 4 pins are unused in the controller */
+ offset += 4;
+
+ mutex_lock(&ts->lock);
+
+ config = (ts->port_config[offset >> 2] >> ((offset & 3) << 1))
+ & PIN_CONFIG_MASK;
+
+ switch (config) {
+ case PIN_CONFIG_OUT:
+ /* Output: return cached level */
+ level = !!(ts->out_level & (1 << offset));
+ break;
+ case PIN_CONFIG_IN_WO_PULLUP:
+ case PIN_CONFIG_IN_PULLUP:
+ /* Input: read out */
+ level = ts->read(ts->dev, 0x20 + offset) & 0x01;
+ }
+ mutex_unlock(&ts->lock);
+
+ return level;
+}
+
+static void max7301_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct max7301 *ts = container_of(chip, struct max7301, chip);
+
+ /* First 4 pins are unused in the controller */
+ offset += 4;
+
+ mutex_lock(&ts->lock);
+
+ __max7301_set(ts, offset, value);
+
+ mutex_unlock(&ts->lock);
+}
+
+int __devinit __max730x_probe(struct max7301 *ts)
+{
+ struct device *dev = ts->dev;
+ struct max7301_platform_data *pdata;
+ int i, ret;
+
+ pdata = dev->platform_data;
+ if (!pdata || !pdata->base) {
+ dev_err(dev, "incorrect or missing platform data\n");
+ return -EINVAL;
+ }
+
+ mutex_init(&ts->lock);
+ dev_set_drvdata(dev, ts);
+
+ /* Power up the chip and disable IRQ output */
+ ts->write(dev, 0x04, 0x01);
+
+ ts->chip.label = dev->driver->name;
+
+ ts->chip.direction_input = max7301_direction_input;
+ ts->chip.get = max7301_get;
+ ts->chip.direction_output = max7301_direction_output;
+ ts->chip.set = max7301_set;
+
+ ts->chip.base = pdata->base;
+ ts->chip.ngpio = PIN_NUMBER;
+ ts->chip.can_sleep = 1;
+ ts->chip.dev = dev;
+ ts->chip.owner = THIS_MODULE;
+
+ /*
+ * tristate all pins in hardware and cache the
+ * register values for later use.
+ */
+ for (i = 1; i < 8; i++) {
+ int j;
+ /* 0xAA means input with internal pullup disabled */
+ ts->write(dev, 0x08 + i, 0xAA);
+ ts->port_config[i] = 0xAA;
+ for (j = 0; j < 4; j++) {
+ int offset = (i - 1) * 4 + j;
+ ret = max7301_direction_input(&ts->chip, offset);
+ if (ret)
+ goto exit_destroy;
+ }
+ }
+
+ ret = gpiochip_add(&ts->chip);
+ if (ret)
+ goto exit_destroy;
+
+ return ret;
+
+exit_destroy:
+ dev_set_drvdata(dev, NULL);
+ mutex_destroy(&ts->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__max730x_probe);
+
+int __devexit __max730x_remove(struct device *dev)
+{
+ struct max7301 *ts = dev_get_drvdata(dev);
+ int ret;
+
+ if (ts == NULL)
+ return -ENODEV;
+
+ dev_set_drvdata(dev, NULL);
+
+ /* Power down the chip and disable IRQ output */
+ ts->write(dev, 0x04, 0x00);
+
+ ret = gpiochip_remove(&ts->chip);
+ if (!ret) {
+ mutex_destroy(&ts->lock);
+ kfree(ts);
+ } else
+ dev_err(dev, "Failed to remove GPIO controller: %d\n", ret);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__max730x_remove);
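
The header comment of the new max730x.c describes the 16-bit transfer format (D15 selects read or write, D8..D14 carry the register, D0..D7 the data), and the driver locates GPIO offset N in port-configuration register 0x08 + ((N + 4) >> 2) with a 2-bit field at bit ((N + 4) & 3) * 2. A minimal userspace sketch of that arithmetic, not part of the patch:

/*
 * Illustration only -- shows how a MAX7301 frame is laid out per the header
 * comment above and how a pin's 2-bit port-configuration field is found.
 */
#include <stdint.h>
#include <stdio.h>

/* D15 = 0 -> write command, D8..D14 = register, D0..D7 = data */
static uint16_t max7301_write_frame(uint8_t reg, uint8_t data)
{
	return ((uint16_t)(reg & 0x7f) << 8) | data;
}

/* D15 = 1 -> read command for the given register */
static uint16_t max7301_read_frame(uint8_t reg)
{
	return 0x8000 | ((uint16_t)(reg & 0x7f) << 8);
}

int main(void)
{
	unsigned offset = 5;			/* GPIO 5 as seen by gpiolib   */
	unsigned pin = offset + 4;		/* first 4 pins are unused     */
	unsigned reg = 0x08 + (pin >> 2);	/* port configuration register */
	unsigned shift = (pin & 3) << 1;	/* 2-bit field inside it       */

	printf("pin %u: config reg 0x%02x, bits %u..%u\n",
	       pin, reg, shift, shift + 1);
	printf("power-up frame:      0x%04x\n", max7301_write_frame(0x04, 0x01));
	printf("read pin level frame: 0x%04x\n", max7301_read_frame(0x20 + pin));
	return 0;
}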
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c
index 6a2fb3fbb3d..ab5daab14bc 100644
--- a/drivers/gpio/pca953x.c
+++ b/drivers/gpio/pca953x.c
@@ -14,6 +14,8 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/i2c.h>
#include <linux/i2c/pca953x.h>
#ifdef CONFIG_OF_GPIO
@@ -26,23 +28,28 @@
#define PCA953X_INVERT 2
#define PCA953X_DIRECTION 3
+#define PCA953X_GPIOS 0x00FF
+#define PCA953X_INT 0x0100
+
static const struct i2c_device_id pca953x_id[] = {
- { "pca9534", 8, },
- { "pca9535", 16, },
+ { "pca9534", 8 | PCA953X_INT, },
+ { "pca9535", 16 | PCA953X_INT, },
{ "pca9536", 4, },
- { "pca9537", 4, },
- { "pca9538", 8, },
- { "pca9539", 16, },
- { "pca9554", 8, },
- { "pca9555", 16, },
+ { "pca9537", 4 | PCA953X_INT, },
+ { "pca9538", 8 | PCA953X_INT, },
+ { "pca9539", 16 | PCA953X_INT, },
+ { "pca9554", 8 | PCA953X_INT, },
+ { "pca9555", 16 | PCA953X_INT, },
{ "pca9556", 8, },
{ "pca9557", 8, },
{ "max7310", 8, },
- { "max7315", 8, },
- { "pca6107", 8, },
- { "tca6408", 8, },
- { "tca6416", 16, },
+ { "max7312", 16 | PCA953X_INT, },
+ { "max7313", 16 | PCA953X_INT, },
+ { "max7315", 8 | PCA953X_INT, },
+ { "pca6107", 8 | PCA953X_INT, },
+ { "tca6408", 8 | PCA953X_INT, },
+ { "tca6416", 16 | PCA953X_INT, },
/* NYET: { "tca6424", 24, }, */
{ }
};
@@ -53,6 +60,15 @@ struct pca953x_chip {
uint16_t reg_output;
uint16_t reg_direction;
+#ifdef CONFIG_GPIO_PCA953X_IRQ
+ struct mutex irq_lock;
+ uint16_t irq_mask;
+ uint16_t irq_stat;
+ uint16_t irq_trig_raise;
+ uint16_t irq_trig_fall;
+ int irq_base;
+#endif
+
struct i2c_client *client;
struct pca953x_platform_data *dyn_pdata;
struct gpio_chip gpio_chip;
@@ -202,6 +218,210 @@ static void pca953x_setup_gpio(struct pca953x_chip *chip, int gpios)
gc->names = chip->names;
}
+#ifdef CONFIG_GPIO_PCA953X_IRQ
+static int pca953x_gpio_to_irq(struct gpio_chip *gc, unsigned off)
+{
+ struct pca953x_chip *chip;
+
+ chip = container_of(gc, struct pca953x_chip, gpio_chip);
+ return chip->irq_base + off;
+}
+
+static void pca953x_irq_mask(unsigned int irq)
+{
+ struct pca953x_chip *chip = get_irq_chip_data(irq);
+
+ chip->irq_mask &= ~(1 << (irq - chip->irq_base));
+}
+
+static void pca953x_irq_unmask(unsigned int irq)
+{
+ struct pca953x_chip *chip = get_irq_chip_data(irq);
+
+ chip->irq_mask |= 1 << (irq - chip->irq_base);
+}
+
+static void pca953x_irq_bus_lock(unsigned int irq)
+{
+ struct pca953x_chip *chip = get_irq_chip_data(irq);
+
+ mutex_lock(&chip->irq_lock);
+}
+
+static void pca953x_irq_bus_sync_unlock(unsigned int irq)
+{
+ struct pca953x_chip *chip = get_irq_chip_data(irq);
+
+ mutex_unlock(&chip->irq_lock);
+}
+
+static int pca953x_irq_set_type(unsigned int irq, unsigned int type)
+{
+ struct pca953x_chip *chip = get_irq_chip_data(irq);
+ uint16_t level = irq - chip->irq_base;
+ uint16_t mask = 1 << level;
+
+ if (!(type & IRQ_TYPE_EDGE_BOTH)) {
+ dev_err(&chip->client->dev, "irq %d: unsupported type %d\n",
+ irq, type);
+ return -EINVAL;
+ }
+
+ if (type & IRQ_TYPE_EDGE_FALLING)
+ chip->irq_trig_fall |= mask;
+ else
+ chip->irq_trig_fall &= ~mask;
+
+ if (type & IRQ_TYPE_EDGE_RISING)
+ chip->irq_trig_raise |= mask;
+ else
+ chip->irq_trig_raise &= ~mask;
+
+ return pca953x_gpio_direction_input(&chip->gpio_chip, level);
+}
+
+static struct irq_chip pca953x_irq_chip = {
+ .name = "pca953x",
+ .mask = pca953x_irq_mask,
+ .unmask = pca953x_irq_unmask,
+ .bus_lock = pca953x_irq_bus_lock,
+ .bus_sync_unlock = pca953x_irq_bus_sync_unlock,
+ .set_type = pca953x_irq_set_type,
+};
+
+static uint16_t pca953x_irq_pending(struct pca953x_chip *chip)
+{
+ uint16_t cur_stat;
+ uint16_t old_stat;
+ uint16_t pending;
+ uint16_t trigger;
+ int ret;
+
+ ret = pca953x_read_reg(chip, PCA953X_INPUT, &cur_stat);
+ if (ret)
+ return 0;
+
+ /* Remove output pins from the equation */
+ cur_stat &= chip->reg_direction;
+
+ old_stat = chip->irq_stat;
+ trigger = (cur_stat ^ old_stat) & chip->irq_mask;
+
+ if (!trigger)
+ return 0;
+
+ chip->irq_stat = cur_stat;
+
+ pending = (old_stat & chip->irq_trig_fall) |
+ (cur_stat & chip->irq_trig_raise);
+ pending &= trigger;
+
+ return pending;
+}
+
+static irqreturn_t pca953x_irq_handler(int irq, void *devid)
+{
+ struct pca953x_chip *chip = devid;
+ uint16_t pending;
+ uint16_t level;
+
+ pending = pca953x_irq_pending(chip);
+
+ if (!pending)
+ return IRQ_HANDLED;
+
+ do {
+ level = __ffs(pending);
+ handle_nested_irq(level + chip->irq_base);
+
+ pending &= ~(1 << level);
+ } while (pending);
+
+ return IRQ_HANDLED;
+}
+
+static int pca953x_irq_setup(struct pca953x_chip *chip,
+ const struct i2c_device_id *id)
+{
+ struct i2c_client *client = chip->client;
+ struct pca953x_platform_data *pdata = client->dev.platform_data;
+ int ret;
+
+ if (pdata->irq_base && (id->driver_data & PCA953X_INT)) {
+ int lvl;
+
+ ret = pca953x_read_reg(chip, PCA953X_INPUT,
+ &chip->irq_stat);
+ if (ret)
+ goto out_failed;
+
+ /*
+ * There is no way to know which GPIO line generated the
+ * interrupt. We have to rely on the previous read for
+ * this purpose.
+ */
+ chip->irq_stat &= chip->reg_direction;
+ chip->irq_base = pdata->irq_base;
+ mutex_init(&chip->irq_lock);
+
+ for (lvl = 0; lvl < chip->gpio_chip.ngpio; lvl++) {
+ int irq = lvl + chip->irq_base;
+
+ set_irq_chip_data(irq, chip);
+ set_irq_chip_and_handler(irq, &pca953x_irq_chip,
+ handle_edge_irq);
+ set_irq_nested_thread(irq, 1);
+#ifdef CONFIG_ARM
+ set_irq_flags(irq, IRQF_VALID);
+#else
+ set_irq_noprobe(irq);
+#endif
+ }
+
+ ret = request_threaded_irq(client->irq,
+ NULL,
+ pca953x_irq_handler,
+ IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
+ dev_name(&client->dev), chip);
+ if (ret) {
+ dev_err(&client->dev, "failed to request irq %d\n",
+ client->irq);
+ goto out_failed;
+ }
+
+ chip->gpio_chip.to_irq = pca953x_gpio_to_irq;
+ }
+
+ return 0;
+
+out_failed:
+ chip->irq_base = 0;
+ return ret;
+}
+
+static void pca953x_irq_teardown(struct pca953x_chip *chip)
+{
+ if (chip->irq_base)
+ free_irq(chip->client->irq, chip);
+}
+#else /* CONFIG_GPIO_PCA953X_IRQ */
+static int pca953x_irq_setup(struct pca953x_chip *chip,
+ const struct i2c_device_id *id)
+{
+ struct i2c_client *client = chip->client;
+ struct pca953x_platform_data *pdata = client->dev.platform_data;
+
+ if (pdata->irq_base && (id->driver_data & PCA953X_INT))
+ dev_warn(&client->dev, "interrupt support not compiled in\n");
+
+ return 0;
+}
+
+static void pca953x_irq_teardown(struct pca953x_chip *chip)
+{
+}
+#endif
+
/*
* Handlers for alternative sources of platform_data
*/
@@ -286,7 +506,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
/* initialize cached registers from their original values.
* we can't share this chip with another i2c master.
*/
- pca953x_setup_gpio(chip, id->driver_data);
+ pca953x_setup_gpio(chip, id->driver_data & PCA953X_GPIOS);
ret = pca953x_read_reg(chip, PCA953X_OUTPUT, &chip->reg_output);
if (ret)
@@ -301,6 +521,9 @@ static int __devinit pca953x_probe(struct i2c_client *client,
if (ret)
goto out_failed;
+ ret = pca953x_irq_setup(chip, id);
+ if (ret)
+ goto out_failed;
ret = gpiochip_add(&chip->gpio_chip);
if (ret)
@@ -317,6 +540,7 @@ static int __devinit pca953x_probe(struct i2c_client *client,
return 0;
out_failed:
+ pca953x_irq_teardown(chip);
kfree(chip->dyn_pdata);
kfree(chip);
return ret;
@@ -345,6 +569,7 @@ static int pca953x_remove(struct i2c_client *client)
return ret;
}
+ pca953x_irq_teardown(chip);
kfree(chip->dyn_pdata);
kfree(chip);
return 0;
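
The extended pca953x id table packs the pin count into the low byte of driver_data and flags interrupt-capable parts with bit 8 (PCA953X_GPIOS / PCA953X_INT), and pca953x_irq_pending() derives pending edges by XOR-ing the cached input state against a fresh read and filtering through the per-direction trigger masks. A small sketch replaying that arithmetic with made-up values, not part of the patch:

/*
 * Illustration only -- the driver_data encoding and the edge-detection math
 * from pca953x_irq_pending(), with sample register values.
 */
#include <stdint.h>
#include <stdio.h>

#define PCA953X_GPIOS	0x00FF
#define PCA953X_INT	0x0100

int main(void)
{
	unsigned long driver_data = 16 | PCA953X_INT;	/* e.g. "pca9535"      */
	uint16_t old_stat   = 0x0010;	/* cached input state               */
	uint16_t cur_stat   = 0x0031;	/* fresh read of PCA953X_INPUT      */
	uint16_t direction  = 0x00ff;	/* 1 = pin configured as input      */
	uint16_t irq_mask   = 0x00ff;	/* unmasked interrupt lines         */
	uint16_t trig_raise = 0x0021;	/* rising-edge trigger enabled      */
	uint16_t trig_fall  = 0x0010;	/* falling-edge trigger enabled     */
	uint16_t trigger, pending;

	printf("ngpios=%lu, has INT line: %s\n",
	       driver_data & PCA953X_GPIOS,
	       (driver_data & PCA953X_INT) ? "yes" : "no");

	cur_stat &= direction;				/* drop output pins   */
	trigger = (cur_stat ^ old_stat) & irq_mask;	/* lines that changed */
	pending = (old_stat & trig_fall) | (cur_stat & trig_raise);
	pending &= trigger;

	printf("trigger=0x%04x pending=0x%04x\n", trigger, pending);
	return 0;
}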
diff --git a/drivers/gpio/pl061.c b/drivers/gpio/pl061.c
index 4ee4c8367a3..3ad1eeb4960 100644
--- a/drivers/gpio/pl061.c
+++ b/drivers/gpio/pl061.c
@@ -219,7 +219,7 @@ static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
if (pending == 0)
continue;
- for_each_bit(offset, &pending, PL061_GPIO_NR)
+ for_each_set_bit(offset, &pending, PL061_GPIO_NR)
generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
desc->chip->unmask(irq);
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c
index a4d344ba8e5..d4295fa5369 100644
--- a/drivers/gpio/timbgpio.c
+++ b/drivers/gpio/timbgpio.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
+#include <linux/irq.h>
#include <linux/io.h>
#include <linux/timb_gpio.h>
#include <linux/interrupt.h>
@@ -37,6 +38,8 @@
#define TGPIO_ICR 0x14
#define TGPIO_FLR 0x18
#define TGPIO_LVR 0x1c
+#define TGPIO_VER 0x20
+#define TGPIO_BFLR 0x24
struct timbgpio {
void __iomem *membase;
@@ -125,17 +128,23 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger)
struct timbgpio *tgpio = get_irq_chip_data(irq);
int offset = irq - tgpio->irq_base;
unsigned long flags;
- u32 lvr, flr;
+ u32 lvr, flr, bflr = 0;
+ u32 ver;
if (offset < 0 || offset > tgpio->gpio.ngpio)
return -EINVAL;
+ ver = ioread32(tgpio->membase + TGPIO_VER);
+
spin_lock_irqsave(&tgpio->lock, flags);
lvr = ioread32(tgpio->membase + TGPIO_LVR);
flr = ioread32(tgpio->membase + TGPIO_FLR);
+ if (ver > 2)
+ bflr = ioread32(tgpio->membase + TGPIO_BFLR);
if (trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {
+ bflr &= ~(1 << offset);
flr &= ~(1 << offset);
if (trigger & IRQ_TYPE_LEVEL_HIGH)
lvr |= 1 << offset;
@@ -143,21 +152,27 @@ static int timbgpio_irq_type(unsigned irq, unsigned trigger)
lvr &= ~(1 << offset);
}
- if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
- return -EINVAL;
- else {
+ if ((trigger & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) {
+ if (ver < 3)
+ return -EINVAL;
+ else {
+ flr |= 1 << offset;
+ bflr |= 1 << offset;
+ }
+ } else {
+ bflr &= ~(1 << offset);
flr |= 1 << offset;
- /* opposite compared to the datasheet, but it mirrors the
- * reality
- */
if (trigger & IRQ_TYPE_EDGE_FALLING)
- lvr |= 1 << offset;
- else
lvr &= ~(1 << offset);
+ else
+ lvr |= 1 << offset;
}
iowrite32(lvr, tgpio->membase + TGPIO_LVR);
iowrite32(flr, tgpio->membase + TGPIO_FLR);
+ if (ver > 2)
+ iowrite32(bflr, tgpio->membase + TGPIO_BFLR);
+
iowrite32(1 << offset, tgpio->membase + TGPIO_ICR);
spin_unlock_irqrestore(&tgpio->lock, flags);
@@ -174,7 +189,7 @@ static void timbgpio_irq(unsigned int irq, struct irq_desc *desc)
ipr = ioread32(tgpio->membase + TGPIO_IPR);
iowrite32(ipr, tgpio->membase + TGPIO_ICR);
- for_each_bit(offset, &ipr, tgpio->gpio.ngpio)
+ for_each_set_bit(offset, &ipr, tgpio->gpio.ngpio)
generic_handle_irq(timbgpio_to_irq(&tgpio->gpio, offset));
}
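
timbgpio_irq_type() now reads an IP version register and, on version 3 or later, programs the new TGPIO_BFLR register so a pin can trigger on both edges; level and single-edge triggers keep using TGPIO_LVR and TGPIO_FLR. A compact sketch of the resulting per-trigger register settings, not part of the patch, with the real function's locking and error handling omitted:

/*
 * Illustration only -- a summary of how timbgpio_irq_type() programs the
 * per-pin bits after this patch (lvr/flr/bflr stand in for the TGPIO_LVR,
 * TGPIO_FLR and TGPIO_BFLR registers).
 */
#include <stdio.h>

enum trig { LEVEL_HIGH, LEVEL_LOW, EDGE_RISING, EDGE_FALLING, EDGE_BOTH };

static void program_pin(enum trig t, unsigned ver, unsigned offset,
			unsigned *lvr, unsigned *flr, unsigned *bflr)
{
	unsigned bit = 1u << offset;

	switch (t) {
	case LEVEL_HIGH:
	case LEVEL_LOW:
		*flr &= ~bit;			/* level sensitive          */
		*bflr &= ~bit;
		if (t == LEVEL_HIGH)
			*lvr |= bit;		/* active high              */
		else
			*lvr &= ~bit;		/* active low               */
		break;
	case EDGE_BOTH:
		if (ver < 3) {			/* BFLR only on ver >= 3    */
			printf("both-edge trigger needs IP version >= 3\n");
			return;
		}
		*flr |= bit;
		*bflr |= bit;
		break;
	case EDGE_RISING:
	case EDGE_FALLING:
		*flr |= bit;			/* edge sensitive           */
		*bflr &= ~bit;
		if (t == EDGE_FALLING)
			*lvr &= ~bit;
		else
			*lvr |= bit;
		break;
	}
}

int main(void)
{
	unsigned lvr = 0, flr = 0, bflr = 0;

	program_pin(EDGE_BOTH, 3, 4, &lvr, &flr, &bflr);
	printf("lvr=%#x flr=%#x bflr=%#x\n", lvr, flr, bflr);
	return 0;
}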
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 39c5aa75b8f..abe3f446ca4 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -4,7 +4,7 @@
ccflags-y := -Iinclude/drm
-drm-y := drm_auth.o drm_bufs.o drm_cache.o \
+drm-y := drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
drm_context.o drm_dma.o drm_drawable.o \
drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
diff --git a/drivers/gpu/drm/drm_buffer.c b/drivers/gpu/drm/drm_buffer.c
new file mode 100644
index 00000000000..55d03ed0500
--- /dev/null
+++ b/drivers/gpu/drm/drm_buffer.c
@@ -0,0 +1,184 @@
+/**************************************************************************
+ *
+ * Copyright 2010 Pauli Nieminen.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Multipart buffer for copying data that is larger than the page size.
+ *
+ * Authors:
+ * Pauli Nieminen <suokkos-at-gmail-dot-com>
+ */
+
+#include "drm_buffer.h"
+
+/**
+ * Allocate the drm buffer object.
+ *
+ * buf: Pointer to a pointer where the object is stored.
+ * size: The number of bytes to allocate.
+ */
+int drm_buffer_alloc(struct drm_buffer **buf, int size)
+{
+ int nr_pages = size / PAGE_SIZE + 1;
+ int idx;
+
+ /* Allocating the pointer table at the end of the structure makes
+ * drm_buffer variable sized. */
+ *buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
+ GFP_KERNEL);
+
+ if (*buf == NULL) {
+ DRM_ERROR("Failed to allocate drm buffer object to hold"
+ " %d bytes in %d pages.\n",
+ size, nr_pages);
+ return -ENOMEM;
+ }
+
+ (*buf)->size = size;
+
+ for (idx = 0; idx < nr_pages; ++idx) {
+
+ (*buf)->data[idx] =
+ kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
+ GFP_KERNEL);
+
+
+ if ((*buf)->data[idx] == NULL) {
+ DRM_ERROR("Failed to allocate %dth page for drm"
+ " buffer with %d bytes and %d pages.\n",
+ idx + 1, size, nr_pages);
+ goto error_out;
+ }
+
+ }
+
+ return 0;
+
+error_out:
+
+ /* Only the last element can be a NULL pointer, so check it first. */
+ if ((*buf)->data[idx])
+ kfree((*buf)->data[idx]);
+
+ for (--idx; idx >= 0; --idx)
+ kfree((*buf)->data[idx]);
+
+ kfree(*buf);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(drm_buffer_alloc);
+
+/**
+ * Copy the user data to the beginning of the buffer and reset the
+ * processing iterator.
+ *
+ * user_data: A pointer to the data that is copied to the buffer.
+ * size: The number of bytes to copy.
+ */
+int drm_buffer_copy_from_user(struct drm_buffer *buf,
+ void __user *user_data, int size)
+{
+ int nr_pages = size / PAGE_SIZE + 1;
+ int idx;
+
+ if (size > buf->size) {
+ DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
+ " %d bytes space\n",
+ size, buf->size);
+ return -EFAULT;
+ }
+
+ for (idx = 0; idx < nr_pages; ++idx) {
+
+ if (DRM_COPY_FROM_USER(buf->data[idx],
+ user_data + idx * PAGE_SIZE,
+ min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
+ DRM_ERROR("Failed to copy user data (%p) to drm buffer"
+ " (%p) %dth page.\n",
+ user_data, buf, idx);
+ return -EFAULT;
+
+ }
+ }
+ buf->iterator = 0;
+ return 0;
+}
+EXPORT_SYMBOL(drm_buffer_copy_from_user);
+
+/**
+ * Free the drm buffer object
+ */
+void drm_buffer_free(struct drm_buffer *buf)
+{
+
+ if (buf != NULL) {
+
+ int nr_pages = buf->size / PAGE_SIZE + 1;
+ int idx;
+ for (idx = 0; idx < nr_pages; ++idx)
+ kfree(buf->data[idx]);
+
+ kfree(buf);
+ }
+}
+EXPORT_SYMBOL(drm_buffer_free);
+
+/**
+ * Read an object from a buffer that may be split into multiple parts. If the
+ * object is not split, the function just returns a pointer to it inside the
+ * buffer. If the object is split, its data is copied to the stack object
+ * supplied by the caller.
+ *
+ * The processing location of the buffer is also advanced to the next byte
+ * after the object.
+ *
+ * objsize: The size of the object in bytes.
+ * stack_obj: A pointer to a memory location where object can be copied.
+ */
+void *drm_buffer_read_object(struct drm_buffer *buf,
+ int objsize, void *stack_obj)
+{
+ int idx = drm_buffer_index(buf);
+ int page = drm_buffer_page(buf);
+ void *obj = NULL;
+
+ if (idx + objsize <= PAGE_SIZE) {
+ obj = &buf->data[page][idx];
+ } else {
+ /* The object is split which forces copy to temporary object.*/
+ int beginsz = PAGE_SIZE - idx;
+ memcpy(stack_obj, &buf->data[page][idx], beginsz);
+
+ memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
+ objsize - beginsz);
+
+ obj = stack_obj;
+ }
+
+ drm_buffer_advance(buf, objsize);
+ return obj;
+}
+EXPORT_SYMBOL(drm_buffer_read_object);
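
drm_buffer_read_object() returns a pointer straight into the page that holds the object unless the object straddles a page boundary, in which case it is reassembled into the caller-supplied stack object. A standalone mock of that behaviour with a tiny page size, not part of the patch; drm_buffer_page() and drm_buffer_index() live in drm_buffer.h (not in this hunk) and are assumed here to be iterator / PAGE_SIZE and iterator % PAGE_SIZE:

/*
 * Illustration only -- userspace mock of reading an object that spans two
 * pages of a multipart buffer.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 8	/* tiny page so the split is easy to see */

struct mock_buffer {
	int iterator;
	char data[2][PAGE_SIZE];
};

static void *read_object(struct mock_buffer *buf, int objsize, void *stack_obj)
{
	int page = buf->iterator / PAGE_SIZE;
	int idx = buf->iterator % PAGE_SIZE;
	void *obj;

	if (idx + objsize <= PAGE_SIZE) {
		obj = &buf->data[page][idx];		/* contiguous case  */
	} else {
		int beginsz = PAGE_SIZE - idx;		/* split over pages */
		memcpy(stack_obj, &buf->data[page][idx], beginsz);
		memcpy((char *)stack_obj + beginsz, &buf->data[page + 1][0],
		       objsize - beginsz);
		obj = stack_obj;
	}
	buf->iterator += objsize;			/* advance iterator */
	return obj;
}

int main(void)
{
	struct mock_buffer buf = { .iterator = 6 };
	char tmp[4];
	char *obj;

	memcpy(&buf.data[0][6], "AB", 2);
	memcpy(&buf.data[1][0], "CD", 2);

	obj = read_object(&buf, 4, tmp);	/* spans both pages */
	printf("%.4s (copied to stack: %s)\n", obj, obj == tmp ? "yes" : "no");
	return 0;
}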
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 7d0f00a935f..f2aaf39be39 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -836,11 +836,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
mode_changed = true;
} else if (set->fb == NULL) {
mode_changed = true;
- } else if ((set->fb->bits_per_pixel !=
- set->crtc->fb->bits_per_pixel) ||
- set->fb->depth != set->crtc->fb->depth)
- fb_changed = true;
- else
+ } else
fb_changed = true;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 766c46875a2..f3c58e2bd75 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,28 +125,28 @@ static struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
-
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH|DRM_UNLOCKED),
+
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETRESOURCES, drm_mode_getresources, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCRTC, drm_mode_getcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETCRTC, drm_mode_setcrtc, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR, drm_mode_cursor_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETGAMMA, drm_mode_gamma_get_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETGAMMA, drm_mode_gamma_set_ioctl, DRM_MASTER|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETENCODER, drm_mode_getencoder, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETCONNECTOR, drm_mode_getconnector, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ATTACHMODE, drm_mode_attachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DETACHMODE, drm_mode_detachmode_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPERTY, drm_mode_getproperty_ioctl, DRM_MASTER | DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_mode_connector_property_set_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW|DRM_UNLOCKED)
};
#define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ab6c9733041..f97e7c42ac8 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -60,8 +60,7 @@
#define EDID_QUIRK_FIRST_DETAILED_PREFERRED (1 << 5)
/* use +hsync +vsync for detailed mode */
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
-/* define the number of Extension EDID block */
-#define MAX_EDID_EXT_NUM 4
+
#define LEVEL_DMT 0
#define LEVEL_GTF 1
@@ -114,14 +113,14 @@ static const u8 edid_header[] = {
};
/**
- * edid_is_valid - sanity check EDID data
+ * drm_edid_is_valid - sanity check EDID data
* @edid: EDID data
*
* Sanity check the EDID block by looking at the header, the version number
* and the checksum. Return 0 if the EDID doesn't check out, or 1 if it's
* valid.
*/
-static bool edid_is_valid(struct edid *edid)
+bool drm_edid_is_valid(struct edid *edid)
{
int i, score = 0;
u8 csum = 0;
@@ -163,6 +162,7 @@ bad:
}
return 0;
}
+EXPORT_SYMBOL(drm_edid_is_valid);
/**
* edid_vendor - match a string against EDID's obfuscated vendor field
@@ -1112,8 +1112,8 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
}
/* Chose real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
+ edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
+ DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
for (i = 0; i < edid_ext_num; i++) {
@@ -1195,7 +1195,7 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
for (i = 0; i < 4; i++) {
if (drm_do_probe_ddc_edid(adapter, buf, len))
return -1;
- if (edid_is_valid((struct edid *)buf))
+ if (drm_edid_is_valid((struct edid *)buf))
return 0;
}
@@ -1220,7 +1220,7 @@ struct edid *drm_get_edid(struct drm_connector *connector,
int ret;
struct edid *edid;
- edid = kmalloc(EDID_LENGTH * (MAX_EDID_EXT_NUM + 1),
+ edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
GFP_KERNEL);
if (edid == NULL) {
dev_warn(&connector->dev->pdev->dev,
@@ -1238,14 +1238,14 @@ struct edid *drm_get_edid(struct drm_connector *connector,
if (edid->extensions != 0) {
int edid_ext_num = edid->extensions;
- if (edid_ext_num > MAX_EDID_EXT_NUM) {
+ if (edid_ext_num > DRM_MAX_EDID_EXT_NUM) {
dev_warn(&connector->dev->pdev->dev,
"The number of extension(%d) is "
"over max (%d), actually read number (%d)\n",
- edid_ext_num, MAX_EDID_EXT_NUM,
- MAX_EDID_EXT_NUM);
+ edid_ext_num, DRM_MAX_EDID_EXT_NUM,
+ DRM_MAX_EDID_EXT_NUM);
/* Reset EDID extension number to be read */
- edid_ext_num = MAX_EDID_EXT_NUM;
+ edid_ext_num = DRM_MAX_EDID_EXT_NUM;
}
/* Read EDID including extensions too */
ret = drm_ddc_read_edid(connector, adapter, (char *)edid,
@@ -1288,8 +1288,8 @@ bool drm_detect_hdmi_monitor(struct edid *edid)
goto end;
/* Chose real EDID extension number */
- edid_ext_num = edid->extensions > MAX_EDID_EXT_NUM ?
- MAX_EDID_EXT_NUM : edid->extensions;
+ edid_ext_num = edid->extensions > DRM_MAX_EDID_EXT_NUM ?
+ DRM_MAX_EDID_EXT_NUM : edid->extensions;
/* Find CEA extension */
for (i = 0; i < edid_ext_num; i++) {
@@ -1346,7 +1346,7 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
if (edid == NULL) {
return 0;
}
- if (!edid_is_valid(edid)) {
+ if (!drm_edid_is_valid(edid)) {
dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
drm_get_connector_name(connector));
return 0;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 0f9e90552dc..50549703584 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -27,6 +27,7 @@
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
+#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/fb.h>
#include "drmP.h"
@@ -50,21 +51,6 @@ int drm_fb_helper_add_connector(struct drm_connector *connector)
}
EXPORT_SYMBOL(drm_fb_helper_add_connector);
-static int my_atoi(const char *name)
-{
- int val = 0;
-
- for (;; name++) {
- switch (*name) {
- case '0' ... '9':
- val = 10*val+(*name-'0');
- break;
- default:
- return val;
- }
- }
-}
-
/**
* drm_fb_helper_connector_parse_command_line - parse command line for connector
* @connector - connector to parse line for
@@ -111,7 +97,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
namelen = i;
if (!refresh_specified && !bpp_specified &&
!yres_specified) {
- refresh = my_atoi(&name[i+1]);
+ refresh = simple_strtol(&name[i+1], NULL, 10);
refresh_specified = 1;
if (cvt || rb)
cvt = 0;
@@ -121,7 +107,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
case '-':
namelen = i;
if (!bpp_specified && !yres_specified) {
- bpp = my_atoi(&name[i+1]);
+ bpp = simple_strtol(&name[i+1], NULL, 10);
bpp_specified = 1;
if (cvt || rb)
cvt = 0;
@@ -130,7 +116,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
break;
case 'x':
if (!yres_specified) {
- yres = my_atoi(&name[i+1]);
+ yres = simple_strtol(&name[i+1], NULL, 10);
yres_specified = 1;
} else
goto done;
@@ -170,7 +156,7 @@ static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *con
}
}
if (i < 0 && yres_specified) {
- xres = my_atoi(name);
+ xres = simple_strtol(name, NULL, 10);
res_specified = 1;
}
done:
@@ -694,7 +680,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
int i;
if (var->pixclock != 0) {
- DRM_ERROR("PIXEL CLCOK SET\n");
+ DRM_ERROR("PIXEL CLOCK SET\n");
return -EINVAL;
}
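
drm_fb_helper now parses the connector mode string with simple_strtol() instead of the removed my_atoi(). The real parser walks the string from the end, but the arithmetic is the same as in this simplified front-to-back userspace sketch (strtol stands in for simple_strtol), not part of the patch:

/*
 * Illustration only -- parsing a "<xres>x<yres>-<bpp>@<refresh>" mode string
 * with strtol(), which has the same (string, end pointer, base) shape as the
 * kernel's simple_strtol().
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *mode = "1024x768-16@60";
	char *end;
	long xres, yres, bpp, refresh;

	xres = strtol(mode, &end, 10);		/* stops at 'x' */
	yres = strtol(end + 1, &end, 10);	/* stops at '-' */
	bpp = strtol(end + 1, &end, 10);	/* stops at '@' */
	refresh = strtol(end + 1, &end, 10);

	printf("%ldx%ld, %ld bpp, %ld Hz\n", xres, yres, bpp, refresh);
	return 0;
}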
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 8bf3770f294..aa89d4b0b4c 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -192,9 +192,7 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
idr_remove(&filp->object_idr, handle);
spin_unlock(&filp->table_lock);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
@@ -325,9 +323,7 @@ again:
}
err:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -358,9 +354,7 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
if (ret)
return ret;
@@ -390,7 +384,7 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
- drm_gem_object_handle_unreference(obj);
+ drm_gem_object_handle_unreference_unlocked(obj);
return 0;
}
@@ -403,16 +397,25 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
- mutex_lock(&dev->struct_mutex);
idr_for_each(&file_private->object_idr,
&drm_gem_object_release_handle, NULL);
idr_destroy(&file_private->object_idr);
- mutex_unlock(&dev->struct_mutex);
+}
+
+static void
+drm_gem_object_free_common(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ fput(obj->filp);
+ atomic_dec(&dev->object_count);
+ atomic_sub(obj->size, &dev->object_memory);
+ kfree(obj);
}
/**
* Called after the last reference to the object has been lost.
+ * Must be called holding struct_mutex
*
* Frees the object
*/
@@ -427,14 +430,40 @@ drm_gem_object_free(struct kref *kref)
if (dev->driver->gem_free_object != NULL)
dev->driver->gem_free_object(obj);
- fput(obj->filp);
- atomic_dec(&dev->object_count);
- atomic_sub(obj->size, &dev->object_memory);
- kfree(obj);
+ drm_gem_object_free_common(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);
/**
+ * Called after the last reference to the object has been lost.
+ * Must be called without holding struct_mutex
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free_unlocked(struct kref *kref)
+{
+ struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+ struct drm_device *dev = obj->dev;
+
+ if (dev->driver->gem_free_object_unlocked != NULL)
+ dev->driver->gem_free_object_unlocked(obj);
+ else if (dev->driver->gem_free_object != NULL) {
+ mutex_lock(&dev->struct_mutex);
+ dev->driver->gem_free_object(obj);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
+ drm_gem_object_free_common(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free_unlocked);
+
+static void drm_gem_object_ref_bug(struct kref *list_kref)
+{
+ BUG();
+}
+
+/**
* Called after the last handle to the object has been closed
*
* Removes any name for the object. Note that this must be
@@ -458,8 +487,10 @@ drm_gem_object_handle_free(struct kref *kref)
/*
* The object name held a reference to this object, drop
* that now.
+ *
+ * This cannot be the last reference, since the handle holds one too.
*/
- drm_gem_object_unreference(obj);
+ kref_put(&obj->refcount, drm_gem_object_ref_bug);
} else
spin_unlock(&dev->object_name_lock);
@@ -477,11 +508,8 @@ EXPORT_SYMBOL(drm_gem_vm_open);
void drm_gem_vm_close(struct vm_area_struct *vma)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);
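
drm_gem_object_handle_free() now drops the object-name reference with kref_put() and a callback that simply BUGs, asserting that this put can never be the last one because the open handle still holds a reference. The same "cannot be the last reference" assertion pattern with a plain counter, not part of the patch:

/*
 * Illustration only -- mirrors the drm_gem_object_ref_bug() trick: the
 * release callback fires only if the refcount wrongly hits zero.
 */
#include <assert.h>
#include <stdio.h>

struct object {
	int refcount;
};

static void release_bug(struct object *obj)
{
	/* Reaching zero on this path is a bug, like BUG() in the kernel. */
	assert(0 && "dropped the last reference on a path that must not");
}

static void put_object(struct object *obj, void (*release)(struct object *))
{
	if (--obj->refcount == 0)
		release(obj);
}

int main(void)
{
	/* One reference held by the name, one by the still-open handle. */
	struct object obj = { .refcount = 2 };

	put_object(&obj, release_bug);	/* dropping the name cannot free it */
	printf("refcount after dropping the name: %d\n", obj.refcount);
	return 0;
}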
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a894ade0309..1376dfe44c9 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -162,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
@@ -350,6 +350,36 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
return 0;
}
+static const char *pin_flag(int pinned)
+{
+ if (pinned > 0)
+ return " P";
+ else if (pinned < 0)
+ return " p";
+ else
+ return "";
+}
+
+static const char *tiling_flag(int tiling)
+{
+ switch (tiling) {
+ default:
+ case I915_TILING_NONE: return "";
+ case I915_TILING_X: return " X";
+ case I915_TILING_Y: return " Y";
+ }
+}
+
+static const char *dirty_flag(int dirty)
+{
+ return dirty ? " dirty" : "";
+}
+
+static const char *purgeable_flag(int purgeable)
+{
+ return purgeable ? " purgeable" : "";
+}
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -357,6 +387,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_state *error;
unsigned long flags;
+ int i, page, offset, elt;
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (!dev_priv->first_error) {
@@ -368,6 +399,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, "Time: %ld s %ld us\n", error->time.tv_sec,
error->time.tv_usec);
+ seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
@@ -379,6 +411,59 @@ static int i915_error_state(struct seq_file *m, void *unused)
seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
}
+ seq_printf(m, "seqno: 0x%08x\n", error->seqno);
+
+ if (error->active_bo_count) {
+ seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
+
+ for (i = 0; i < error->active_bo_count; i++) {
+ seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
+ error->active_bo[i].gtt_offset,
+ error->active_bo[i].size,
+ error->active_bo[i].read_domains,
+ error->active_bo[i].write_domain,
+ error->active_bo[i].seqno,
+ pin_flag(error->active_bo[i].pinned),
+ tiling_flag(error->active_bo[i].tiling),
+ dirty_flag(error->active_bo[i].dirty),
+ purgeable_flag(error->active_bo[i].purgeable));
+
+ if (error->active_bo[i].name)
+ seq_printf(m, " (name: %d)", error->active_bo[i].name);
+ if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
+
+ seq_printf(m, "\n");
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
+ if (error->batchbuffer[i]) {
+ struct drm_i915_error_object *obj = error->batchbuffer[i];
+
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
+ }
+
+ if (error->ringbuffer) {
+ struct drm_i915_error_object *obj = error->ringbuffer;
+
+ seq_printf(m, "--- ringbuffer = 0x%08x\n", obj->gtt_offset);
+ offset = 0;
+ for (page = 0; page < obj->page_count; page++) {
+ for (elt = 0; elt < PAGE_SIZE/4; elt++) {
+ seq_printf(m, "%08x : %08x\n", offset, obj->pages[page][elt]);
+ offset += 4;
+ }
+ }
+ }
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -386,6 +471,165 @@ out:
return 0;
}
+static int i915_rstdby_delays(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 crstanddelay = I915_READ16(CRSTANDVID);
+
+ seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
+
+ return 0;
+}
+
+static int i915_cur_delayinfo(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+
+ seq_printf(m, "Last command: 0x%01x\n", (rgvswctl >> 13) & 0x3);
+ seq_printf(m, "Command status: %d\n", (rgvswctl >> 12) & 1);
+ seq_printf(m, "P%d DELAY 0x%02x\n", (rgvswctl >> 8) & 0xf,
+ rgvswctl & 0x3f);
+
+ return 0;
+}
+
+static int i915_delayfreq_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 delayfreq;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
+ seq_printf(m, "P%02dVIDFREQ: 0x%08x\n", i, delayfreq);
+ }
+
+ return 0;
+}
+
+static inline int MAP_TO_MV(int map)
+{
+ return 1250 - (map * 25);
+}
+
+static int i915_inttoext_table(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 inttoext;
+ int i;
+
+ for (i = 1; i <= 32; i++) {
+ inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
+ seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
+ }
+
+ return 0;
+}
+
+static int i915_drpc_info(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL);
+
+ seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
+ "yes" : "no");
+ seq_printf(m, "Boost freq: %d\n",
+ (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
+ MEMMODE_BOOST_FREQ_SHIFT);
+ seq_printf(m, "HW control enabled: %s\n",
+ rgvmodectl & MEMMODE_HWIDLE_EN ? "yes" : "no");
+ seq_printf(m, "SW control enabled: %s\n",
+ rgvmodectl & MEMMODE_SWMODE_EN ? "yes" : "no");
+ seq_printf(m, "Gated voltage change: %s\n",
+ rgvmodectl & MEMMODE_RCLK_GATE ? "yes" : "no");
+ seq_printf(m, "Starting frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
+ seq_printf(m, "Max frequency: P%d\n",
+ (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
+ seq_printf(m, "Min frequency: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
+
+ return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ struct drm_crtc *crtc;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool fbc_enabled = false;
+
+ if (!dev_priv->display.fbc_enabled) {
+ seq_printf(m, "FBC unsupported on this chipset\n");
+ return 0;
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (!crtc->enabled)
+ continue;
+ if (dev_priv->display.fbc_enabled(crtc))
+ fbc_enabled = true;
+ }
+
+ if (fbc_enabled) {
+ seq_printf(m, "FBC enabled\n");
+ } else {
+ seq_printf(m, "FBC disabled: ");
+ switch (dev_priv->no_fbc_reason) {
+ case FBC_STOLEN_TOO_SMALL:
+ seq_printf(m, "not enough stolen memory");
+ break;
+ case FBC_UNSUPPORTED_MODE:
+ seq_printf(m, "mode not supported");
+ break;
+ case FBC_MODE_TOO_LARGE:
+ seq_printf(m, "mode too large");
+ break;
+ case FBC_BAD_PLANE:
+ seq_printf(m, "FBC unsupported on plane");
+ break;
+ case FBC_NOT_TILED:
+ seq_printf(m, "scanout buffer not tiled");
+ break;
+ default:
+ seq_printf(m, "unknown reason");
+ }
+ seq_printf(m, "\n");
+ }
+ return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool sr_enabled = false;
+
+ if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev))
+ sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev))
+ sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev))
+ sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+
+ seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
+ "disabled");
+
+ return 0;
+}
+
static int
i915_wedged_open(struct inode *inode,
struct file *filp)
@@ -503,6 +747,13 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_ringbuffer_info", i915_ringbuffer_info, 0},
{"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
+ {"i915_rstdby_delays", i915_rstdby_delays, 0},
+ {"i915_cur_delayinfo", i915_cur_delayinfo, 0},
+ {"i915_delayfreq_table", i915_delayfreq_table, 0},
+ {"i915_inttoext_table", i915_inttoext_table, 0},
+ {"i915_drpc_info", i915_drpc_info, 0},
+ {"i915_fbc_status", i915_fbc_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 2307f98349f..8bfc0bbf13e 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -35,6 +35,9 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include <linux/vgaarb.h>
+#include <linux/acpi.h>
+#include <linux/pnp.h>
+#include <linux/vga_switcheroo.h>
/* Really want an OS-independent resettable timer. Would like to have
* this loop run for (eg) 3 sec, but have the timer reset every time
@@ -933,6 +936,120 @@ static int i915_get_bridge_dev(struct drm_device *dev)
return 0;
}
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4*4096)
+
+#define DEVEN_REG 0x54
+#define DEVEN_MCHBAR_EN (1 << 28)
+
+/* Allocate space for the MCH regs if needed, return nonzero on error */
+static int
+intel_alloc_mchbar_resource(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp_lo, temp_hi = 0;
+ u64 mchbar_addr;
+ int ret = 0;
+
+ if (IS_I965G(dev))
+ pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
+ pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
+ mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
+
+ /* If ACPI doesn't have it, assume we need to allocate it ourselves */
+#ifdef CONFIG_PNP
+ if (mchbar_addr &&
+ pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
+ ret = 0;
+ goto out;
+ }
+#endif
+
+ /* Get some space for it */
+ ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
+ MCHBAR_SIZE, MCHBAR_SIZE,
+ PCIBIOS_MIN_MEM,
+ 0, pcibios_align_resource,
+ dev_priv->bridge_dev);
+ if (ret) {
+ DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ dev_priv->mch_res.start = 0;
+ goto out;
+ }
+
+ if (IS_I965G(dev))
+ pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
+ upper_32_bits(dev_priv->mch_res.start));
+
+ pci_write_config_dword(dev_priv->bridge_dev, reg,
+ lower_32_bits(dev_priv->mch_res.start));
+out:
+ return ret;
+}
+
+/* Set up MCHBAR if possible; record in mchbar_need_disable whether teardown must disable it again */
+static void
+intel_setup_mchbar(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+ bool enabled;
+
+ dev_priv->mchbar_need_disable = false;
+
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ enabled = !!(temp & DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ enabled = temp & 1;
+ }
+
+ /* If it's already enabled, don't have to do anything */
+ if (enabled)
+ return;
+
+ if (intel_alloc_mchbar_resource(dev))
+ return;
+
+ dev_priv->mchbar_need_disable = true;
+
+ /* Space is allocated or reserved, so enable it. */
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
+ temp | DEVEN_MCHBAR_EN);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
+ }
+}
+
+static void
+intel_teardown_mchbar(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+ u32 temp;
+
+ if (dev_priv->mchbar_need_disable) {
+ if (IS_I915G(dev) || IS_I915GM(dev)) {
+ pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
+ temp &= ~DEVEN_MCHBAR_EN;
+ pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
+ } else {
+ pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
+ temp &= ~1;
+ pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
+ }
+ }
+
+ if (dev_priv->mch_res.start)
+ release_resource(&dev_priv->mch_res);
+}
+
/**
* i915_probe_agp - get AGP bootup configuration
* @pdev: PCI device
@@ -978,59 +1095,123 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
* Some of the preallocated space is taken by the GTT
* and popup. GTT is 1K per MB of aperture size, and popup is 4K.
*/
- if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
+ if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
overhead = 4096;
else
overhead = (*aperture_size / 1024) + 4096;
- switch (tmp & INTEL_GMCH_GMS_MASK) {
- case INTEL_855_GMCH_GMS_DISABLED:
- DRM_ERROR("video memory is disabled\n");
- return -1;
- case INTEL_855_GMCH_GMS_STOLEN_1M:
- stolen = 1 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_4M:
- stolen = 4 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_8M:
- stolen = 8 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_16M:
- stolen = 16 * 1024 * 1024;
- break;
- case INTEL_855_GMCH_GMS_STOLEN_32M:
- stolen = 32 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_48M:
- stolen = 48 * 1024 * 1024;
- break;
- case INTEL_915G_GMCH_GMS_STOLEN_64M:
- stolen = 64 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_128M:
- stolen = 128 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_256M:
- stolen = 256 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_96M:
- stolen = 96 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_160M:
- stolen = 160 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_224M:
- stolen = 224 * 1024 * 1024;
- break;
- case INTEL_GMCH_GMS_STOLEN_352M:
- stolen = 352 * 1024 * 1024;
- break;
- default:
- DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
- tmp & INTEL_GMCH_GMS_MASK);
- return -1;
+ if (IS_GEN6(dev)) {
+ /* SNB has memory control reg at 0x50.w */
+ pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
+
+ switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
+ case INTEL_855_GMCH_GMS_DISABLED:
+ DRM_ERROR("video memory is disabled\n");
+ return -1;
+ case SNB_GMCH_GMS_STOLEN_32M:
+ stolen = 32 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_64M:
+ stolen = 64 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_96M:
+ stolen = 96 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_128M:
+ stolen = 128 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_160M:
+ stolen = 160 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_192M:
+ stolen = 192 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_224M:
+ stolen = 224 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_256M:
+ stolen = 256 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_288M:
+ stolen = 288 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_320M:
+ stolen = 320 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_352M:
+ stolen = 352 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_384M:
+ stolen = 384 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_416M:
+ stolen = 416 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_448M:
+ stolen = 448 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_480M:
+ stolen = 480 * 1024 * 1024;
+ break;
+ case SNB_GMCH_GMS_STOLEN_512M:
+ stolen = 512 * 1024 * 1024;
+ break;
+ default:
+ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+ tmp & SNB_GMCH_GMS_STOLEN_MASK);
+ return -1;
+ }
+ } else {
+ switch (tmp & INTEL_GMCH_GMS_MASK) {
+ case INTEL_855_GMCH_GMS_DISABLED:
+ DRM_ERROR("video memory is disabled\n");
+ return -1;
+ case INTEL_855_GMCH_GMS_STOLEN_1M:
+ stolen = 1 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_4M:
+ stolen = 4 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_8M:
+ stolen = 8 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_16M:
+ stolen = 16 * 1024 * 1024;
+ break;
+ case INTEL_855_GMCH_GMS_STOLEN_32M:
+ stolen = 32 * 1024 * 1024;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_48M:
+ stolen = 48 * 1024 * 1024;
+ break;
+ case INTEL_915G_GMCH_GMS_STOLEN_64M:
+ stolen = 64 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_128M:
+ stolen = 128 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_256M:
+ stolen = 256 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_96M:
+ stolen = 96 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_160M:
+ stolen = 160 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_224M:
+ stolen = 224 * 1024 * 1024;
+ break;
+ case INTEL_GMCH_GMS_STOLEN_352M:
+ stolen = 352 * 1024 * 1024;
+ break;
+ default:
+ DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
+ tmp & INTEL_GMCH_GMS_MASK);
+ return -1;
+ }
}
+
*preallocated_size = stolen - overhead;
*start = overhead;
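
The new Sandy Bridge branch reads the GMCH control word at 0x50 and maps the stolen-memory field to 32 MB..512 MB in 32 MB steps, so the long switch above is equivalent to multiplying the field value by 32 MB. A one-line check of that mapping, not part of the patch; the field's bit position comes from SNB_GMCH_GMS_STOLEN_MASK, which is defined outside this hunk, so only the value-to-size relation is shown:

/*
 * Illustration only -- the SNB stolen-memory sizes handled by the switch,
 * assuming consecutive field values 1..16.
 */
#include <stdio.h>

int main(void)
{
	unsigned int field;

	for (field = 1; field <= 16; field++)
		printf("GMS field %2u -> %3u MB stolen\n", field, field * 32);
	return 0;
}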
@@ -1064,7 +1245,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
int gtt_offset, gtt_size;
if (IS_I965G(dev)) {
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
gtt_offset = 2*1024*1024;
gtt_size = 2*1024*1024;
} else {
@@ -1133,6 +1314,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
/* Leave 1M for line length buffer & misc. */
compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
if (!compressed_fb) {
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
i915_warn_stolen(dev);
return;
}
@@ -1140,6 +1322,7 @@ static void i915_setup_compression(struct drm_device *dev, int size)
compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
if (!compressed_fb) {
i915_warn_stolen(dev);
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
return;
}
@@ -1199,6 +1382,32 @@ static unsigned int i915_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
+static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ printk(KERN_INFO "i915: switched on\n");
+ /* i915 resume handler doesn't set to D0 */
+ pci_set_power_state(dev->pdev, PCI_D0);
+ i915_resume(dev);
+ } else {
+ printk(KERN_ERR "i915: switched off\n");
+ i915_suspend(dev, pmm);
+ }
+}
+
+static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
static int i915_load_modeset_init(struct drm_device *dev,
unsigned long prealloc_start,
unsigned long prealloc_size,
@@ -1260,6 +1469,12 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (ret)
goto destroy_ringbuffer;
+ ret = vga_switcheroo_register_client(dev->pdev,
+ i915_switcheroo_set_state,
+ i915_switcheroo_can_switch);
+ if (ret)
+ goto destroy_ringbuffer;
+
intel_modeset_init(dev);
ret = drm_irq_install(dev);
@@ -1281,7 +1496,9 @@ static int i915_load_modeset_init(struct drm_device *dev,
return 0;
destroy_ringbuffer:
+ mutex_lock(&dev->struct_mutex);
i915_gem_cleanup_ringbuffer(dev);
+ mutex_unlock(&dev->struct_mutex);
out:
return ret;
}
@@ -1445,11 +1662,14 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev->driver->get_vblank_counter = i915_get_vblank_counter;
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+ if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
dev->driver->get_vblank_counter = gm45_get_vblank_counter;
}
+ /* Try to make sure MCHBAR is enabled before poking at it */
+ intel_setup_mchbar(dev);
+
i915_gem_load(dev);
/* Init HWS */
@@ -1523,6 +1743,8 @@ int i915_driver_unload(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ i915_destroy_error_state(dev);
+
destroy_workqueue(dev_priv->wq);
del_timer_sync(&dev_priv->hangcheck_timer);
@@ -1544,6 +1766,7 @@ int i915_driver_unload(struct drm_device *dev)
dev_priv->child_dev_num = 0;
}
drm_irq_uninstall(dev);
+ vga_switcheroo_unregister_client(dev->pdev);
vga_client_register(dev->pdev, NULL, NULL, NULL);
}
@@ -1569,6 +1792,8 @@ int i915_driver_unload(struct drm_device *dev)
intel_cleanup_overlay(dev);
}
+ intel_teardown_mchbar(dev);
+
pci_dev_put(dev_priv->bridge_dev);
kfree(dev->dev_private);
@@ -1611,6 +1836,7 @@ void i915_driver_lastclose(struct drm_device * dev)
if (!dev_priv || drm_core_check_feature(dev, DRIVER_MODESET)) {
drm_fb_helper_restore();
+ vga_switcheroo_process_delayed_switch();
return;
}
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cf4cb3e9a0c..1b2e95455c0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -49,6 +49,7 @@ unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
static struct drm_driver driver;
+extern int intel_agp_enabled;
#define INTEL_VGA_DEVICE(id, info) { \
.class = PCI_CLASS_DISPLAY_VGA << 8, \
@@ -136,6 +137,16 @@ const static struct intel_device_info intel_ironlake_m_info = {
.has_hotplug = 1,
};
+const static struct intel_device_info intel_sandybridge_d_info = {
+ .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
+const static struct intel_device_info intel_sandybridge_m_info = {
+ .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+ .has_hotplug = 1,
+};
+
const static struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
@@ -167,6 +178,8 @@ const static struct pci_device_id pciidlist[] = {
INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+ INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info),
+ INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info),
{0, 0, 0}
};
@@ -201,7 +214,7 @@ static int i915_drm_freeze(struct drm_device *dev)
return 0;
}
-static int i915_suspend(struct drm_device *dev, pm_message_t state)
+int i915_suspend(struct drm_device *dev, pm_message_t state)
{
int error;
@@ -255,7 +268,7 @@ static int i915_drm_thaw(struct drm_device *dev)
return error;
}
-static int i915_resume(struct drm_device *dev)
+int i915_resume(struct drm_device *dev)
{
if (pci_enable_device(dev->pdev))
return -EIO;
@@ -546,6 +559,11 @@ static struct drm_driver driver = {
static int __init i915_init(void)
{
+ if (!intel_agp_enabled) {
+ DRM_ERROR("drm/i915 can't work without intel_agp module!\n");
+ return -ENODEV;
+ }
+
driver.num_ioctls = i915_max_ioctl;
i915_gem_shrinker_init();
@@ -571,6 +589,11 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
+ if (!(driver.driver_features & DRIVER_MODESET)) {
+ driver.suspend = i915_suspend;
+ driver.resume = i915_resume;
+ }
+
return drm_init(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index b99b6a841d9..979439cfb82 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -150,7 +150,27 @@ struct drm_i915_error_state {
u32 instps;
u32 instdone1;
u32 seqno;
+ u64 bbaddr;
struct timeval time;
+ struct drm_i915_error_object {
+ int page_count;
+ u32 gtt_offset;
+ u32 *pages[0];
+ } *ringbuffer, *batchbuffer[2];
+ struct drm_i915_error_buffer {
+ size_t size;
+ u32 name;
+ u32 seqno;
+ u32 gtt_offset;
+ u32 read_domains;
+ u32 write_domain;
+ u32 fence_reg;
+ s32 pinned:2;
+ u32 tiling:2;
+ u32 dirty:1;
+ u32 purgeable:1;
+ } *active_bo;
+ u32 active_bo_count;
};
struct drm_i915_display_funcs {
@@ -192,6 +212,14 @@ struct intel_device_info {
u8 cursor_needs_physical : 1;
};
+enum no_fbc_reason {
+ FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
+ FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
+ FBC_MODE_TOO_LARGE, /* mode too large for compression */
+ FBC_BAD_PLANE, /* fbc not supported on plane */
+ FBC_NOT_TILED, /* buffer not tiled */
+};
+
typedef struct drm_i915_private {
struct drm_device *dev;
@@ -452,6 +480,7 @@ typedef struct drm_i915_private {
u32 savePIPEB_DATA_N1;
u32 savePIPEB_LINK_M1;
u32 savePIPEB_LINK_N1;
+ u32 saveMCHBAR_RENDER_STANDBY;
struct {
struct drm_mm gtt_space;
@@ -590,6 +619,14 @@ typedef struct drm_i915_private {
int child_dev_num;
struct child_device_config *child_dev;
struct drm_connector *int_lvds_connector;
+
+ bool mchbar_need_disable;
+
+ u8 cur_delay;
+ u8 min_delay;
+ u8 max_delay;
+
+ enum no_fbc_reason no_fbc_reason;
} drm_i915_private_t;
/** driver private structure attached to each drm_gem_object */
@@ -736,6 +773,8 @@ extern unsigned int i915_fbpercrtc;
extern unsigned int i915_powersave;
extern unsigned int i915_lvds_downclock;
+extern int i915_suspend(struct drm_device *dev, pm_message_t state);
+extern int i915_resume(struct drm_device *dev);
extern void i915_save_display(struct drm_device *dev);
extern void i915_restore_display(struct drm_device *dev);
extern int i915_master_create(struct drm_device *dev, struct drm_master *master);
@@ -761,6 +800,7 @@ extern int i965_reset(struct drm_device *dev, u8 flags);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
+void i915_destroy_error_state(struct drm_device *dev);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -897,7 +937,8 @@ void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
int tiling_mode);
-bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);
+bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
+ int tiling_mode);
/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
@@ -1026,7 +1067,7 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_845G(dev) ((dev)->pci_device == 0x2562)
#define IS_I85X(dev) ((dev)->pci_device == 0x3582)
#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+#define IS_GEN2(dev) (INTEL_INFO(dev)->is_i8xx)
#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
@@ -1045,8 +1086,29 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+#define IS_GEN3(dev) (IS_I915G(dev) || \
+ IS_I915GM(dev) || \
+ IS_I945G(dev) || \
+ IS_I945GM(dev) || \
+ IS_G33(dev) || \
+ IS_PINEVIEW(dev))
+#define IS_GEN4(dev) ((dev)->pci_device == 0x2972 || \
+ (dev)->pci_device == 0x2982 || \
+ (dev)->pci_device == 0x2992 || \
+ (dev)->pci_device == 0x29A2 || \
+ (dev)->pci_device == 0x2A02 || \
+ (dev)->pci_device == 0x2A12 || \
+ (dev)->pci_device == 0x2E02 || \
+ (dev)->pci_device == 0x2E12 || \
+ (dev)->pci_device == 0x2E22 || \
+ (dev)->pci_device == 0x2E32 || \
+ (dev)->pci_device == 0x2A42 || \
+ (dev)->pci_device == 0x2E42)
+
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+#define IS_GEN6(dev) ((dev)->pci_device == 0x0102)
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
*/
@@ -1067,6 +1129,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
+#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) || \
+ IS_GEN6(dev))
+
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ec8a0d7ffa3..fba37e9f775 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -128,9 +128,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
return -ENOMEM;
ret = drm_gem_handle_create(file_priv, obj, &handle);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(obj);
if (ret)
return ret;
@@ -488,7 +486,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -501,7 +499,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
file_priv);
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -961,7 +959,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
*/
if (args->offset > obj->size || args->size > obj->size ||
args->offset + args->size > obj->size) {
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -995,7 +993,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
DRM_INFO("pwrite failed %d\n", ret);
#endif
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ret;
}
@@ -1138,9 +1136,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
PROT_READ | PROT_WRITE, MAP_SHARED,
args->offset);
up_write(&current->mm->mmap_sem);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
if (IS_ERR((void *)addr))
return addr;
@@ -1562,6 +1558,38 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
i915_verify_inactive(dev, __FILE__, __LINE__);
}
+static void
+i915_gem_process_flushing_list(struct drm_device *dev,
+ uint32_t flush_domains, uint32_t seqno)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv, *next;
+
+ list_for_each_entry_safe(obj_priv, next,
+ &dev_priv->mm.gpu_write_list,
+ gpu_write_list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if ((obj->write_domain & flush_domains) ==
+ obj->write_domain) {
+ uint32_t old_write_domain = obj->write_domain;
+
+ obj->write_domain = 0;
+ list_del_init(&obj_priv->gpu_write_list);
+ i915_gem_object_move_to_active(obj, seqno);
+
+ /* update the fence lru list */
+ if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+ list_move_tail(&obj_priv->fence_list,
+ &dev_priv->mm.fence_list);
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->read_domains,
+ old_write_domain);
+ }
+ }
+}
+
/**
* Creates a new sequence number, emitting a write of it to the status page
* plus an interrupt, which will trigger i915_user_interrupt_handler.
@@ -1620,29 +1648,8 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
/* Associate any objects on the flushing list matching the write
* domain we're flushing with our flush.
*/
- if (flush_domains != 0) {
- struct drm_i915_gem_object *obj_priv, *next;
-
- list_for_each_entry_safe(obj_priv, next,
- &dev_priv->mm.gpu_write_list,
- gpu_write_list) {
- struct drm_gem_object *obj = obj_priv->obj;
-
- if ((obj->write_domain & flush_domains) ==
- obj->write_domain) {
- uint32_t old_write_domain = obj->write_domain;
-
- obj->write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, seqno);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
- }
-
- }
+ if (flush_domains != 0)
+ i915_gem_process_flushing_list(dev, flush_domains, seqno);
if (!dev_priv->mm.suspended) {
mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
@@ -1822,7 +1829,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
return -EIO;
if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
ier = I915_READ(IER);
@@ -1991,6 +1998,7 @@ int
i915_gem_object_unbind(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
int ret = 0;
@@ -2046,8 +2054,10 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
}
/* Remove ourselves from the LRU list if present. */
+ spin_lock(&dev_priv->mm.active_list_lock);
if (!list_empty(&obj_priv->list))
list_del_init(&obj_priv->list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
if (i915_gem_object_is_purgeable(obj_priv))
i915_gem_object_truncate(obj);
@@ -2085,11 +2095,34 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
}
static int
+i915_gpu_idle(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ bool lists_empty;
+ uint32_t seqno;
+
+ spin_lock(&dev_priv->mm.active_list_lock);
+ lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+ list_empty(&dev_priv->mm.active_list);
+ spin_unlock(&dev_priv->mm.active_list_lock);
+
+ if (lists_empty)
+ return 0;
+
+ /* Flush everything onto the inactive list. */
+ i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
+ if (seqno == 0)
+ return -ENOMEM;
+
+ return i915_wait_request(dev, seqno);
+}
+
+static int
i915_gem_evict_everything(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- uint32_t seqno;
bool lists_empty;
spin_lock(&dev_priv->mm.active_list_lock);
@@ -2102,12 +2135,7 @@ i915_gem_evict_everything(struct drm_device *dev)
return -ENOSPC;
/* Flush everything (on to the inactive lists) and evict */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
- if (seqno == 0)
- return -ENOMEM;
-
- ret = i915_wait_request(dev, seqno);
+ ret = i915_gpu_idle(dev);
if (ret)
return ret;
@@ -2265,6 +2293,28 @@ i915_gem_object_get_pages(struct drm_gem_object *obj,
return 0;
}
+static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+{
+ struct drm_gem_object *obj = reg->obj;
+ struct drm_device *dev = obj->dev;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv = obj->driver_private;
+ int regnum = obj_priv->fence_reg;
+ uint64_t val;
+
+ val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj_priv->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+ SANDYBRIDGE_FENCE_PITCH_SHIFT;
+
+ if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= 1 << I965_FENCE_TILING_Y_SHIFT;
+ val |= I965_FENCE_REG_VALID;
+
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
+}
+
static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
{
struct drm_gem_object *obj = reg->obj;
@@ -2361,6 +2411,58 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
}
+static int i915_find_fence_reg(struct drm_device *dev)
+{
+ struct drm_i915_fence_reg *reg = NULL;
+ struct drm_i915_gem_object *obj_priv = NULL;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_gem_object *obj = NULL;
+ int i, avail, ret;
+
+ /* First try to find a free reg */
+ avail = 0;
+ for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
+ reg = &dev_priv->fence_regs[i];
+ if (!reg->obj)
+ return i;
+
+ obj_priv = reg->obj->driver_private;
+ if (!obj_priv->pin_count)
+ avail++;
+ }
+
+ if (avail == 0)
+ return -ENOSPC;
+
+ /* None available, try to steal one or wait for a user to finish */
+ i = I915_FENCE_REG_NONE;
+ list_for_each_entry(obj_priv, &dev_priv->mm.fence_list,
+ fence_list) {
+ obj = obj_priv->obj;
+
+ if (obj_priv->pin_count)
+ continue;
+
+ /* found one! */
+ i = obj_priv->fence_reg;
+ break;
+ }
+
+ BUG_ON(i == I915_FENCE_REG_NONE);
+
+ /* We only have a reference on obj from the active list. put_fence_reg
+ * might drop that one, causing a use-after-free in it. So hold a
+ * private reference to obj like the other callers of put_fence_reg
+ * (set_tiling ioctl) do. */
+ drm_gem_object_reference(obj);
+ ret = i915_gem_object_put_fence_reg(obj);
+ drm_gem_object_unreference(obj);
+ if (ret != 0)
+ return ret;
+
+ return i;
+}
+
/**
* i915_gem_object_get_fence_reg - set up a fence reg for an object
* @obj: object to map through a fence reg
@@ -2381,8 +2483,7 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
struct drm_i915_fence_reg *reg = NULL;
- struct drm_i915_gem_object *old_obj_priv = NULL;
- int i, ret, avail;
+ int ret;
/* Just update our place in the LRU if our fence is getting used. */
if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
@@ -2410,86 +2511,27 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
break;
}
- /* First try to find a free reg */
- avail = 0;
- for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- break;
-
- old_obj_priv = reg->obj->driver_private;
- if (!old_obj_priv->pin_count)
- avail++;
- }
-
- /* None available, try to steal one or wait for a user to finish */
- if (i == dev_priv->num_fence_regs) {
- struct drm_gem_object *old_obj = NULL;
-
- if (avail == 0)
- return -ENOSPC;
-
- list_for_each_entry(old_obj_priv, &dev_priv->mm.fence_list,
- fence_list) {
- old_obj = old_obj_priv->obj;
-
- if (old_obj_priv->pin_count)
- continue;
-
- /* Take a reference, as otherwise the wait_rendering
- * below may cause the object to get freed out from
- * under us.
- */
- drm_gem_object_reference(old_obj);
-
- /* i915 uses fences for GPU access to tiled buffers */
- if (IS_I965G(dev) || !old_obj_priv->active)
- break;
-
- /* This brings the object to the head of the LRU if it
- * had been written to. The only way this should
- * result in us waiting longer than the expected
- * optimal amount of time is if there was a
- * fence-using buffer later that was read-only.
- */
- i915_gem_object_flush_gpu_write_domain(old_obj);
- ret = i915_gem_object_wait_rendering(old_obj);
- if (ret != 0) {
- drm_gem_object_unreference(old_obj);
- return ret;
- }
-
- break;
- }
-
- /*
- * Zap this virtual mapping so we can set up a fence again
- * for this object next time we need it.
- */
- i915_gem_release_mmap(old_obj);
-
- i = old_obj_priv->fence_reg;
- reg = &dev_priv->fence_regs[i];
-
- old_obj_priv->fence_reg = I915_FENCE_REG_NONE;
- list_del_init(&old_obj_priv->fence_list);
-
- drm_gem_object_unreference(old_obj);
- }
+ ret = i915_find_fence_reg(dev);
+ if (ret < 0)
+ return ret;
- obj_priv->fence_reg = i;
+ obj_priv->fence_reg = ret;
+ reg = &dev_priv->fence_regs[obj_priv->fence_reg];
list_add_tail(&obj_priv->fence_list, &dev_priv->mm.fence_list);
reg->obj = obj;
- if (IS_I965G(dev))
+ if (IS_GEN6(dev))
+ sandybridge_write_fence_reg(reg);
+ else if (IS_I965G(dev))
i965_write_fence_reg(reg);
else if (IS_I9XX(dev))
i915_write_fence_reg(reg);
else
i830_write_fence_reg(reg);
- trace_i915_gem_object_get_fence(obj, i, obj_priv->tiling_mode);
+ trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
+ obj_priv->tiling_mode);
return 0;
}
@@ -2508,9 +2550,12 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv = obj->driver_private;
- if (IS_I965G(dev))
+ if (IS_GEN6(dev)) {
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
+ (obj_priv->fence_reg * 8), 0);
+ } else if (IS_I965G(dev)) {
I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
- else {
+ } else {
uint32_t fence_reg;
if (obj_priv->fence_reg < 8)
@@ -2544,6 +2589,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
return 0;
+ /* If we've changed tiling, GTT-mappings of the object
+ * need to re-fault to ensure that the correct fence register
+ * setup is in place.
+ */
+ i915_gem_release_mmap(obj);
+
/* On the i915, GPU access to tiled buffers is via a fence,
* therefore we must wait for any outstanding access to complete
* before clearing the fence.
@@ -2552,12 +2603,12 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj)
int ret;
i915_gem_object_flush_gpu_write_domain(obj);
- i915_gem_object_flush_gtt_write_domain(obj);
ret = i915_gem_object_wait_rendering(obj);
if (ret != 0)
return ret;
}
+ i915_gem_object_flush_gtt_write_domain(obj);
i915_gem_clear_fence_reg (obj);
return 0;
@@ -2697,7 +2748,6 @@ static void
i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- uint32_t seqno;
uint32_t old_write_domain;
if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
@@ -2706,9 +2756,8 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
/* Queue the GPU write cache flushing we need. */
old_write_domain = obj->write_domain;
i915_gem_flush(dev, 0, obj->write_domain);
- seqno = i915_add_request(dev, NULL, obj->write_domain);
+ (void) i915_add_request(dev, NULL, obj->write_domain);
BUG_ON(obj->write_domain);
- i915_gem_object_move_to_active(obj, seqno);
trace_i915_gem_object_change_domain(obj,
obj->read_domains,
@@ -3247,7 +3296,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
obj_priv->tiling_mode != I915_TILING_NONE;
/* Check fence reg constraints and rebind if necessary */
- if (need_fence && !i915_obj_fenceable(dev, obj))
+ if (need_fence && !i915_gem_object_fence_offset_ok(obj,
+ obj_priv->tiling_mode))
i915_gem_object_unbind(obj);
/* Choose the GTT offset for our buffer and put it there. */
@@ -3317,6 +3367,16 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
}
/* Validate that the target is in a valid r/w GPU domain */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return -EINVAL;
+ }
if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
reloc->read_domains & I915_GEM_DOMAIN_CPU) {
DRM_ERROR("reloc with read/write CPU domains: "
@@ -4445,8 +4505,7 @@ int
i915_gem_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- uint32_t seqno, cur_seqno, last_seqno;
- int stuck, ret;
+ int ret;
mutex_lock(&dev->struct_mutex);
@@ -4455,115 +4514,36 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
- /* Hack! Don't let anybody do execbuf while we don't control the chip.
- * We need to replace this with a semaphore, or something.
- */
- dev_priv->mm.suspended = 1;
- del_timer(&dev_priv->hangcheck_timer);
-
- /* Cancel the retire work handler, wait for it to finish if running
- */
- mutex_unlock(&dev->struct_mutex);
- cancel_delayed_work_sync(&dev_priv->mm.retire_work);
- mutex_lock(&dev->struct_mutex);
-
- i915_kernel_lost_context(dev);
-
- /* Flush the GPU along with all non-CPU write domains
- */
- i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
- seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
-
- if (seqno == 0) {
+ ret = i915_gpu_idle(dev);
+ if (ret) {
mutex_unlock(&dev->struct_mutex);
- return -ENOMEM;
+ return ret;
}
- dev_priv->mm.waiting_gem_seqno = seqno;
- last_seqno = 0;
- stuck = 0;
- for (;;) {
- cur_seqno = i915_get_gem_seqno(dev);
- if (i915_seqno_passed(cur_seqno, seqno))
- break;
- if (last_seqno == cur_seqno) {
- if (stuck++ > 100) {
- DRM_ERROR("hardware wedged\n");
- atomic_set(&dev_priv->mm.wedged, 1);
- DRM_WAKEUP(&dev_priv->irq_queue);
- break;
- }
+ /* Under UMS, be paranoid and evict. */
+ if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
+ ret = i915_gem_evict_from_inactive_list(dev);
+ if (ret) {
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
}
- msleep(10);
- last_seqno = cur_seqno;
- }
- dev_priv->mm.waiting_gem_seqno = 0;
-
- i915_gem_retire_requests(dev);
-
- spin_lock(&dev_priv->mm.active_list_lock);
- if (!atomic_read(&dev_priv->mm.wedged)) {
- /* Active and flushing should now be empty as we've
- * waited for a sequence higher than any pending execbuffer
- */
- WARN_ON(!list_empty(&dev_priv->mm.active_list));
- WARN_ON(!list_empty(&dev_priv->mm.flushing_list));
- /* Request should now be empty as we've also waited
- * for the last request in the list
- */
- WARN_ON(!list_empty(&dev_priv->mm.request_list));
}
- /* Empty the active and flushing lists to inactive. If there's
- * anything left at this point, it means that we're wedged and
- * nothing good's going to happen by leaving them there. So strip
- * the GPU domains and just stuff them onto inactive.
+ /* Hack! Don't let anybody do execbuf while we don't control the chip.
+ * We need to replace this with a semaphore, or something.
+ * And not confound mm.suspended!
*/
- while (!list_empty(&dev_priv->mm.active_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
-
- obj = list_first_entry(&dev_priv->mm.active_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
- spin_unlock(&dev_priv->mm.active_list_lock);
-
- while (!list_empty(&dev_priv->mm.flushing_list)) {
- struct drm_gem_object *obj;
- uint32_t old_write_domain;
-
- obj = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- list)->obj;
- old_write_domain = obj->write_domain;
- obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
- i915_gem_object_move_to_inactive(obj);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
-
-
- /* Move all inactive buffers out of the GTT. */
- ret = i915_gem_evict_from_inactive_list(dev);
- WARN_ON(!list_empty(&dev_priv->mm.inactive_list));
- if (ret) {
- mutex_unlock(&dev->struct_mutex);
- return ret;
- }
+ dev_priv->mm.suspended = 1;
+ del_timer(&dev_priv->hangcheck_timer);
+ i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
+
mutex_unlock(&dev->struct_mutex);
+ /* Cancel the retire work handler, which should be idle now. */
+ cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+
return 0;
}
@@ -4607,8 +4587,13 @@ i915_gem_init_hws(struct drm_device *dev)
}
dev_priv->hws_obj = obj;
memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
- I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
- I915_READ(HWS_PGA); /* posting read */
+ if (IS_GEN6(dev)) {
+ I915_WRITE(HWS_PGA_GEN6, dev_priv->status_gfx_addr);
+ I915_READ(HWS_PGA_GEN6); /* posting read */
+ } else {
+ I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+ I915_READ(HWS_PGA); /* posting read */
+ }
DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
return 0;
@@ -4850,7 +4835,8 @@ i915_gem_load(struct drm_device *dev)
spin_unlock(&shrink_list_lock);
/* Old X drivers will take 0-2 for front, back, depth buffers */
- dev_priv->fence_reg_start = 3;
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ dev_priv->fence_reg_start = 3;
if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dev_priv->num_fence_regs = 16;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index df278b2685b..b5c55d88ff7 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,8 +25,6 @@
*
*/
-#include <linux/acpi.h>
-#include <linux/pnp.h>
#include "linux/string.h"
#include "linux/bitops.h"
#include "drmP.h"
@@ -83,120 +81,6 @@
* to match what the GPU expects.
*/
-#define MCHBAR_I915 0x44
-#define MCHBAR_I965 0x48
-#define MCHBAR_SIZE (4*4096)
-
-#define DEVEN_REG 0x54
-#define DEVEN_MCHBAR_EN (1 << 28)
-
-/* Allocate space for the MCH regs if needed, return nonzero on error */
-static int
-intel_alloc_mchbar_resource(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp_lo, temp_hi = 0;
- u64 mchbar_addr;
- int ret = 0;
-
- if (IS_I965G(dev))
- pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
- pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
- mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
-
- /* If ACPI doesn't have it, assume we need to allocate it ourselves */
-#ifdef CONFIG_PNP
- if (mchbar_addr &&
- pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
- ret = 0;
- goto out;
- }
-#endif
-
- /* Get some space for it */
- ret = pci_bus_alloc_resource(dev_priv->bridge_dev->bus, &dev_priv->mch_res,
- MCHBAR_SIZE, MCHBAR_SIZE,
- PCIBIOS_MIN_MEM,
- 0, pcibios_align_resource,
- dev_priv->bridge_dev);
- if (ret) {
- DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
- dev_priv->mch_res.start = 0;
- goto out;
- }
-
- if (IS_I965G(dev))
- pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
- upper_32_bits(dev_priv->mch_res.start));
-
- pci_write_config_dword(dev_priv->bridge_dev, reg,
- lower_32_bits(dev_priv->mch_res.start));
-out:
- return ret;
-}
-
-/* Setup MCHBAR if possible, return true if we should disable it again */
-static bool
-intel_setup_mchbar(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
- bool need_disable = false, enabled;
-
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- enabled = !!(temp & DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- enabled = temp & 1;
- }
-
- /* If it's already enabled, don't have to do anything */
- if (enabled)
- goto out;
-
- if (intel_alloc_mchbar_resource(dev))
- goto out;
-
- need_disable = true;
-
- /* Space is allocated or reserved, so enable it. */
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
- temp | DEVEN_MCHBAR_EN);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp | 1);
- }
-out:
- return need_disable;
-}
-
-static void
-intel_teardown_mchbar(struct drm_device *dev, bool disable)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
- u32 temp;
-
- if (disable) {
- if (IS_I915G(dev) || IS_I915GM(dev)) {
- pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
- temp &= ~DEVEN_MCHBAR_EN;
- pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
- } else {
- pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
- temp &= ~1;
- pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
- }
- }
-
- if (dev_priv->mch_res.start)
- release_resource(&dev_priv->mch_res);
-}
-
/**
* Detects bit 6 swizzling of address lookup between IGD access and CPU
* access through main memory.
@@ -207,9 +91,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
- bool need_disable;
- if (IS_IRONLAKE(dev)) {
+ if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
/* On Ironlake, whatever the DRAM config, the GPU always does
* the same swizzling setup.
*/
@@ -224,9 +107,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
} else if (IS_MOBILE(dev)) {
uint32_t dcc;
- /* Try to make sure MCHBAR is enabled before poking at it */
- need_disable = intel_setup_mchbar(dev);
-
/* On mobile 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
@@ -266,8 +146,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
-
- intel_teardown_mchbar(dev, need_disable);
} else {
/* The 965, G33, and newer, have a very flexible memory
* configuration. It will enable dual-channel mode
@@ -302,39 +180,6 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}
-
-/**
- * Returns whether an object is currently fenceable. If not, it may need
- * to be unbound and have its pitch adjusted.
- */
-bool
-i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
-{
- struct drm_i915_gem_object *obj_priv = obj->driver_private;
-
- if (IS_I965G(dev)) {
- /* The 965 can have fences at any page boundary. */
- if (obj->size & 4095)
- return false;
- return true;
- } else if (IS_I9XX(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
- return false;
- } else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
- return false;
- }
-
- /* Power of two sized... */
- if (obj->size & (obj->size - 1))
- return false;
-
- /* Objects must be size aligned as well */
- if (obj_priv->gtt_offset & (obj->size - 1))
- return false;
- return true;
-}
-
/* Check pitch constraints for all chips & tiling formats */
bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
@@ -391,7 +236,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return true;
}
-static bool
+bool
i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
{
struct drm_device *dev = obj->dev;
@@ -438,9 +283,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj_priv = obj->driver_private;
if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return -EINVAL;
}
@@ -493,12 +336,6 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
goto err;
}
- /* If we've changed tiling, GTT-mappings of the object
- * need to re-fault to ensure that the correct fence register
- * setup is in place.
- */
- i915_gem_release_mmap(obj);
-
obj_priv->tiling_mode = args->tiling_mode;
obj_priv->stride = args->stride;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index a17d6bdfe63..5388354da0d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -166,7 +166,7 @@ void intel_enable_asle (struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
else
i915_enable_pipestat(dev_priv, 1,
@@ -269,6 +269,57 @@ static void i915_hotplug_work_func(struct work_struct *work)
drm_sysfs_hotplug_event(dev);
}
+static void i915_handle_rps_change(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 busy_up, busy_down, max_avg, min_avg;
+ u16 rgvswctl;
+ u8 new_delay = dev_priv->cur_delay;
+
+ I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS) & ~MEMINT_EVAL_CHG);
+ busy_up = I915_READ(RCPREVBSYTUPAVG);
+ busy_down = I915_READ(RCPREVBSYTDNAVG);
+ max_avg = I915_READ(RCBMAXAVG);
+ min_avg = I915_READ(RCBMINAVG);
+
+ /* Handle RCS change request from hw */
+ if (busy_up > max_avg) {
+ if (dev_priv->cur_delay != dev_priv->max_delay)
+ new_delay = dev_priv->cur_delay - 1;
+ if (new_delay < dev_priv->max_delay)
+ new_delay = dev_priv->max_delay;
+ } else if (busy_down < min_avg) {
+ if (dev_priv->cur_delay != dev_priv->min_delay)
+ new_delay = dev_priv->cur_delay + 1;
+ if (new_delay > dev_priv->min_delay)
+ new_delay = dev_priv->min_delay;
+ }
+
+ DRM_DEBUG("rps change requested: %d -> %d\n",
+ dev_priv->cur_delay, new_delay);
+
+ rgvswctl = I915_READ(MEMSWCTL);
+ if (rgvswctl & MEMCTL_CMD_STS) {
+ DRM_ERROR("gpu busy, RCS change rejected\n");
+ return; /* still busy with another command */
+ }
+
+ /* Program the new state */
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (new_delay << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ POSTING_READ(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+
+ dev_priv->cur_delay = new_delay;
+
+ DRM_DEBUG("rps changed\n");
+
+ return;
+}
+
irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -331,6 +382,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
queue_work(dev_priv->wq, &dev_priv->hotplug_work);
}
+ if (de_iir & DE_PCU_EVENT) {
+ I915_WRITE(MEMINTRSTS, I915_READ(MEMINTRSTS));
+ i915_handle_rps_change(dev);
+ }
+
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
@@ -376,6 +432,121 @@ static void i915_error_work_func(struct work_struct *work)
}
}
+static struct drm_i915_error_object *
+i915_error_object_create(struct drm_device *dev,
+ struct drm_gem_object *src)
+{
+ struct drm_i915_error_object *dst;
+ struct drm_i915_gem_object *src_priv;
+ int page, page_count;
+
+ if (src == NULL)
+ return NULL;
+
+ src_priv = src->driver_private;
+ if (src_priv->pages == NULL)
+ return NULL;
+
+ page_count = src->size / PAGE_SIZE;
+
+ dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
+ if (dst == NULL)
+ return NULL;
+
+ for (page = 0; page < page_count; page++) {
+ void *s, *d = kmalloc(PAGE_SIZE, GFP_ATOMIC);
+ if (d == NULL)
+ goto unwind;
+ s = kmap_atomic(src_priv->pages[page], KM_USER0);
+ memcpy(d, s, PAGE_SIZE);
+ kunmap_atomic(s, KM_USER0);
+ dst->pages[page] = d;
+ }
+ dst->page_count = page_count;
+ dst->gtt_offset = src_priv->gtt_offset;
+
+ return dst;
+
+unwind:
+ while (page--)
+ kfree(dst->pages[page]);
+ kfree(dst);
+ return NULL;
+}
+
+static void
+i915_error_object_free(struct drm_i915_error_object *obj)
+{
+ int page;
+
+ if (obj == NULL)
+ return;
+
+ for (page = 0; page < obj->page_count; page++)
+ kfree(obj->pages[page]);
+
+ kfree(obj);
+}
+
+static void
+i915_error_state_free(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ i915_error_object_free(error->batchbuffer[0]);
+ i915_error_object_free(error->batchbuffer[1]);
+ i915_error_object_free(error->ringbuffer);
+ kfree(error->active_bo);
+ kfree(error);
+}
+
+static u32
+i915_get_bbaddr(struct drm_device *dev, u32 *ring)
+{
+ u32 cmd;
+
+ if (IS_I830(dev) || IS_845G(dev))
+ cmd = MI_BATCH_BUFFER;
+ else if (IS_I965G(dev))
+ cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ else
+ cmd = (MI_BATCH_BUFFER_START | (2 << 6));
+
+ return ring[0] == cmd ? ring[1] : 0;
+}
+
+static u32
+i915_ringbuffer_last_batch(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 head, bbaddr;
+ u32 *ring;
+
+ /* Locate the current position in the ringbuffer and walk back
+ * to find the most recently dispatched batch buffer.
+ */
+ bbaddr = 0;
+ head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+ ring = (u32 *)(dev_priv->ring.virtual_start + head);
+
+ while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, ring);
+ if (bbaddr)
+ break;
+ }
+
+ if (bbaddr == 0) {
+ ring = (u32 *)(dev_priv->ring.virtual_start + dev_priv->ring.Size);
+ while (--ring >= (u32 *)dev_priv->ring.virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, ring);
+ if (bbaddr)
+ break;
+ }
+ }
+
+ return bbaddr;
+}
+
/**
* i915_capture_error_state - capture an error record for later analysis
* @dev: drm device
@@ -388,19 +559,26 @@ static void i915_error_work_func(struct work_struct *work)
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj_priv;
struct drm_i915_error_state *error;
+ struct drm_gem_object *batchbuffer[2];
unsigned long flags;
+ u32 bbaddr;
+ int count;
spin_lock_irqsave(&dev_priv->error_lock, flags);
- if (dev_priv->first_error)
- goto out;
+ error = dev_priv->first_error;
+ spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+ if (error)
+ return;
error = kmalloc(sizeof(*error), GFP_ATOMIC);
if (!error) {
- DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
- goto out;
+ DRM_DEBUG_DRIVER("out of memory, not capturing error state\n");
+ return;
}
+ error->seqno = i915_get_gem_seqno(dev);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
@@ -411,6 +589,7 @@ static void i915_capture_error_state(struct drm_device *dev)
error->ipehr = I915_READ(IPEHR);
error->instdone = I915_READ(INSTDONE);
error->acthd = I915_READ(ACTHD);
+ error->bbaddr = 0;
} else {
error->ipeir = I915_READ(IPEIR_I965);
error->ipehr = I915_READ(IPEHR_I965);
@@ -418,14 +597,101 @@ static void i915_capture_error_state(struct drm_device *dev)
error->instps = I915_READ(INSTPS);
error->instdone1 = I915_READ(INSTDONE1);
error->acthd = I915_READ(ACTHD_I965);
+ error->bbaddr = I915_READ64(BB_ADDR);
}
- do_gettimeofday(&error->time);
+ bbaddr = i915_ringbuffer_last_batch(dev);
+
+ /* Grab the current batchbuffer, most likely to have crashed. */
+ batchbuffer[0] = NULL;
+ batchbuffer[1] = NULL;
+ count = 0;
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ if (batchbuffer[0] == NULL &&
+ bbaddr >= obj_priv->gtt_offset &&
+ bbaddr < obj_priv->gtt_offset + obj->size)
+ batchbuffer[0] = obj;
+
+ if (batchbuffer[1] == NULL &&
+ error->acthd >= obj_priv->gtt_offset &&
+ error->acthd < obj_priv->gtt_offset + obj->size &&
+ batchbuffer[0] != obj)
+ batchbuffer[1] = obj;
+
+ count++;
+ }
- dev_priv->first_error = error;
+ /* We need to copy these to an anonymous buffer as the simplest
+ * method to avoid being overwritten by userspace.
+ */
+ error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
+ error->batchbuffer[1] = i915_error_object_create(dev, batchbuffer[1]);
+
+ /* Record the ringbuffer */
+ error->ringbuffer = i915_error_object_create(dev, dev_priv->ring.ring_obj);
+
+ /* Record buffers on the active list. */
+ error->active_bo = NULL;
+ error->active_bo_count = 0;
+
+ if (count)
+ error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
+ GFP_ATOMIC);
+
+ if (error->active_bo) {
+ int i = 0;
+ list_for_each_entry(obj_priv, &dev_priv->mm.active_list, list) {
+ struct drm_gem_object *obj = obj_priv->obj;
+
+ error->active_bo[i].size = obj->size;
+ error->active_bo[i].name = obj->name;
+ error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
+ error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
+ error->active_bo[i].read_domains = obj->read_domains;
+ error->active_bo[i].write_domain = obj->write_domain;
+ error->active_bo[i].fence_reg = obj_priv->fence_reg;
+ error->active_bo[i].pinned = 0;
+ if (obj_priv->pin_count > 0)
+ error->active_bo[i].pinned = 1;
+ if (obj_priv->user_pin_count > 0)
+ error->active_bo[i].pinned = -1;
+ error->active_bo[i].tiling = obj_priv->tiling_mode;
+ error->active_bo[i].dirty = obj_priv->dirty;
+ error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
+
+ if (++i == count)
+ break;
+ }
+ error->active_bo_count = i;
+ }
+
+ do_gettimeofday(&error->time);
-out:
+ spin_lock_irqsave(&dev_priv->error_lock, flags);
+ if (dev_priv->first_error == NULL) {
+ dev_priv->first_error = error;
+ error = NULL;
+ }
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
+
+ if (error)
+ i915_error_state_free(dev, error);
+}
+
+void i915_destroy_error_state(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_error_state *error;
+
+ spin_lock(&dev_priv->error_lock);
+ error = dev_priv->first_error;
+ dev_priv->first_error = NULL;
+ spin_unlock(&dev_priv->error_lock);
+
+ if (error)
+ i915_error_state_free(dev, error);
}
/**
@@ -576,7 +842,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
atomic_inc(&dev_priv->irq_received);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return ironlake_irq_handler(dev);
iir = I915_READ(IIR);
@@ -737,7 +1003,7 @@ void i915_user_irq_get(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -753,7 +1019,7 @@ void i915_user_irq_put(struct drm_device *dev)
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
@@ -861,7 +1127,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
return -EINVAL;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else if (IS_I965G(dev))
@@ -883,7 +1149,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
unsigned long irqflags;
spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else
@@ -897,7 +1163,7 @@ void i915_enable_interrupt (struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
opregion_enable_asle(dev);
dev_priv->irq_enabled = 1;
}
@@ -973,7 +1239,11 @@ void i915_hangcheck_elapsed(unsigned long data)
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd;
-
+
+ /* No reset support on this chip yet. */
+ if (IS_GEN6(dev))
+ return;
+
if (!IS_I965G(dev))
acthd = I915_READ(ACTHD);
else
@@ -1064,6 +1334,13 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
(void) I915_READ(SDEIER);
+ if (IS_IRONLAKE_M(dev)) {
+ /* Clear & enable PCU event interrupts */
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIER, I915_READ(DEIER) | DE_PCU_EVENT);
+ ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT);
+ }
+
return 0;
}
@@ -1076,7 +1353,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_irq_preinstall(dev);
return;
}
@@ -1108,7 +1385,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
@@ -1196,7 +1473,7 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
dev_priv->vblank_pipe = 0;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_irq_uninstall(dev);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index ab1bd2d3d3b..3d59862c7cc 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -53,6 +53,25 @@
#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+#define SNB_GMCH_CTRL 0x50
+#define SNB_GMCH_GMS_STOLEN_MASK 0xF8
+#define SNB_GMCH_GMS_STOLEN_32M (1 << 3)
+#define SNB_GMCH_GMS_STOLEN_64M (2 << 3)
+#define SNB_GMCH_GMS_STOLEN_96M (3 << 3)
+#define SNB_GMCH_GMS_STOLEN_128M (4 << 3)
+#define SNB_GMCH_GMS_STOLEN_160M (5 << 3)
+#define SNB_GMCH_GMS_STOLEN_192M (6 << 3)
+#define SNB_GMCH_GMS_STOLEN_224M (7 << 3)
+#define SNB_GMCH_GMS_STOLEN_256M (8 << 3)
+#define SNB_GMCH_GMS_STOLEN_288M (9 << 3)
+#define SNB_GMCH_GMS_STOLEN_320M (0xa << 3)
+#define SNB_GMCH_GMS_STOLEN_352M (0xb << 3)
+#define SNB_GMCH_GMS_STOLEN_384M (0xc << 3)
+#define SNB_GMCH_GMS_STOLEN_416M (0xd << 3)
+#define SNB_GMCH_GMS_STOLEN_448M (0xe << 3)
+#define SNB_GMCH_GMS_STOLEN_480M (0xf << 3)
+#define SNB_GMCH_GMS_STOLEN_512M (0x10 << 3)
+
/* PCI config space */
#define HPLLCC 0xc0 /* 855 only */
@@ -61,6 +80,7 @@
#define GC_CLOCK_100_200 (1 << 0)
#define GC_CLOCK_100_133 (2 << 0)
#define GC_CLOCK_166_250 (3 << 0)
+#define GCFGC2 0xda
#define GCFGC 0xf0 /* 915+ only */
#define GC_LOW_FREQUENCY_ENABLE (1 << 7)
#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4)
@@ -234,6 +254,9 @@
#define I965_FENCE_REG_VALID (1<<0)
#define I965_FENCE_MAX_PITCH_VAL 0x0400
+#define FENCE_REG_SANDYBRIDGE_0 0x100000
+#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32
+
/*
* Instruction and interrupt control regs
*/
@@ -265,6 +288,7 @@
#define INSTDONE1 0x0207c /* 965+ only */
#define ACTHD_I965 0x02074
#define HWS_PGA 0x02080
+#define HWS_PGA_GEN6 0x04080
#define HWS_ADDRESS_MASK 0xfffff000
#define HWS_START_ADDRESS_SHIFT 4
#define PWRCTXA 0x2088 /* 965GM+ only */
@@ -282,7 +306,7 @@
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) /* p-state */
#define I915_HWB_OOM_INTERRUPT (1<<13)
#define I915_SYNC_STATUS_INTERRUPT (1<<12)
#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11)
@@ -306,11 +330,14 @@
#define I915_ERROR_MEMORY_REFRESH (1<<1)
#define I915_ERROR_INSTRUCTION (1<<0)
#define INSTPM 0x020c0
+#define INSTPM_SELF_EN (1<<12) /* 915GM only */
#define ACTHD 0x020c8
#define FW_BLC 0x020d8
#define FW_BLC2 0x020dc
#define FW_BLC_SELF 0x020e0 /* 915+ only */
-#define FW_BLC_SELF_EN (1<<15)
+#define FW_BLC_SELF_EN_MASK (1<<31)
+#define FW_BLC_SELF_FIFO_MASK (1<<16) /* 945 only */
+#define FW_BLC_SELF_EN (1<<15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -324,6 +351,7 @@
#define CM0_COLOR_EVICT_DISABLE (1<<3)
#define CM0_DEPTH_WRITE_DISABLE (1<<1)
#define CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define BB_ADDR 0x02140 /* 8 bytes */
#define GFX_FLSH_CNTL 0x02170 /* 915+ only */
@@ -784,10 +812,144 @@
#define CLKCFG_MEM_800 (3 << 4)
#define CLKCFG_MEM_MASK (7 << 4)
-/** GM965 GM45 render standby register */
-#define MCHBAR_RENDER_STANDBY 0x111B8
+#define CRSTANDVID 0x11100
+#define PXVFREQ_BASE 0x11110 /* P[0-15]VIDFREQ (0x1114c) (Ironlake) */
+#define PXVFREQ_PX_MASK 0x7f000000
+#define PXVFREQ_PX_SHIFT 24
+#define VIDFREQ_BASE 0x11110
+#define VIDFREQ1 0x11110 /* VIDFREQ1-4 (0x1111c) (Cantiga) */
+#define VIDFREQ2 0x11114
+#define VIDFREQ3 0x11118
+#define VIDFREQ4 0x1111c
+#define VIDFREQ_P0_MASK 0x1f000000
+#define VIDFREQ_P0_SHIFT 24
+#define VIDFREQ_P0_CSCLK_MASK 0x00f00000
+#define VIDFREQ_P0_CSCLK_SHIFT 20
+#define VIDFREQ_P0_CRCLK_MASK 0x000f0000
+#define VIDFREQ_P0_CRCLK_SHIFT 16
+#define VIDFREQ_P1_MASK 0x00001f00
+#define VIDFREQ_P1_SHIFT 8
+#define VIDFREQ_P1_CSCLK_MASK 0x000000f0
+#define VIDFREQ_P1_CSCLK_SHIFT 4
+#define VIDFREQ_P1_CRCLK_MASK 0x0000000f
+#define INTTOEXT_BASE_ILK 0x11300
+#define INTTOEXT_BASE 0x11120 /* INTTOEXT1-8 (0x1113c) */
+#define INTTOEXT_MAP3_SHIFT 24
+#define INTTOEXT_MAP3_MASK (0x1f << INTTOEXT_MAP3_SHIFT)
+#define INTTOEXT_MAP2_SHIFT 16
+#define INTTOEXT_MAP2_MASK (0x1f << INTTOEXT_MAP2_SHIFT)
+#define INTTOEXT_MAP1_SHIFT 8
+#define INTTOEXT_MAP1_MASK (0x1f << INTTOEXT_MAP1_SHIFT)
+#define INTTOEXT_MAP0_SHIFT 0
+#define INTTOEXT_MAP0_MASK (0x1f << INTTOEXT_MAP0_SHIFT)
+#define MEMSWCTL 0x11170 /* Ironlake only */
+#define MEMCTL_CMD_MASK 0xe000
+#define MEMCTL_CMD_SHIFT 13
+#define MEMCTL_CMD_RCLK_OFF 0
+#define MEMCTL_CMD_RCLK_ON 1
+#define MEMCTL_CMD_CHFREQ 2
+#define MEMCTL_CMD_CHVID 3
+#define MEMCTL_CMD_VMMOFF 4
+#define MEMCTL_CMD_VMMON 5
+#define MEMCTL_CMD_STS (1<<12) /* write 1 triggers command, clears
+ when command complete */
+#define MEMCTL_FREQ_MASK 0x0f00 /* jitter, from 0-15 */
+#define MEMCTL_FREQ_SHIFT 8
+#define MEMCTL_SFCAVM (1<<7)
+#define MEMCTL_TGT_VID_MASK 0x007f
+#define MEMIHYST 0x1117c
+#define MEMINTREN 0x11180 /* 16 bits */
+#define MEMINT_RSEXIT_EN (1<<8)
+#define MEMINT_CX_SUPR_EN (1<<7)
+#define MEMINT_CONT_BUSY_EN (1<<6)
+#define MEMINT_AVG_BUSY_EN (1<<5)
+#define MEMINT_EVAL_CHG_EN (1<<4)
+#define MEMINT_MON_IDLE_EN (1<<3)
+#define MEMINT_UP_EVAL_EN (1<<2)
+#define MEMINT_DOWN_EVAL_EN (1<<1)
+#define MEMINT_SW_CMD_EN (1<<0)
+#define MEMINTRSTR 0x11182 /* 16 bits */
+#define MEM_RSEXIT_MASK 0xc000
+#define MEM_RSEXIT_SHIFT 14
+#define MEM_CONT_BUSY_MASK 0x3000
+#define MEM_CONT_BUSY_SHIFT 12
+#define MEM_AVG_BUSY_MASK 0x0c00
+#define MEM_AVG_BUSY_SHIFT 10
+#define MEM_EVAL_CHG_MASK 0x0300
+#define MEM_EVAL_BUSY_SHIFT 8
+#define MEM_MON_IDLE_MASK 0x00c0
+#define MEM_MON_IDLE_SHIFT 6
+#define MEM_UP_EVAL_MASK 0x0030
+#define MEM_UP_EVAL_SHIFT 4
+#define MEM_DOWN_EVAL_MASK 0x000c
+#define MEM_DOWN_EVAL_SHIFT 2
+#define MEM_SW_CMD_MASK 0x0003
+#define MEM_INT_STEER_GFX 0
+#define MEM_INT_STEER_CMR 1
+#define MEM_INT_STEER_SMI 2
+#define MEM_INT_STEER_SCI 3
+#define MEMINTRSTS 0x11184
+#define MEMINT_RSEXIT (1<<7)
+#define MEMINT_CONT_BUSY (1<<6)
+#define MEMINT_AVG_BUSY (1<<5)
+#define MEMINT_EVAL_CHG (1<<4)
+#define MEMINT_MON_IDLE (1<<3)
+#define MEMINT_UP_EVAL (1<<2)
+#define MEMINT_DOWN_EVAL (1<<1)
+#define MEMINT_SW_CMD (1<<0)
+#define MEMMODECTL 0x11190
+#define MEMMODE_BOOST_EN (1<<31)
+#define MEMMODE_BOOST_FREQ_MASK 0x0f000000 /* jitter for boost, 0-15 */
+#define MEMMODE_BOOST_FREQ_SHIFT 24
+#define MEMMODE_IDLE_MODE_MASK 0x00030000
+#define MEMMODE_IDLE_MODE_SHIFT 16
+#define MEMMODE_IDLE_MODE_EVAL 0
+#define MEMMODE_IDLE_MODE_CONT 1
+#define MEMMODE_HWIDLE_EN (1<<15)
+#define MEMMODE_SWMODE_EN (1<<14)
+#define MEMMODE_RCLK_GATE (1<<13)
+#define MEMMODE_HW_UPDATE (1<<12)
+#define MEMMODE_FSTART_MASK 0x00000f00 /* starting jitter, 0-15 */
+#define MEMMODE_FSTART_SHIFT 8
+#define MEMMODE_FMAX_MASK 0x000000f0 /* max jitter, 0-15 */
+#define MEMMODE_FMAX_SHIFT 4
+#define MEMMODE_FMIN_MASK 0x0000000f /* min jitter, 0-15 */
+#define RCBMAXAVG 0x1119c
+#define MEMSWCTL2 0x1119e /* Cantiga only */
+#define SWMEMCMD_RENDER_OFF (0 << 13)
+#define SWMEMCMD_RENDER_ON (1 << 13)
+#define SWMEMCMD_SWFREQ (2 << 13)
+#define SWMEMCMD_TARVID (3 << 13)
+#define SWMEMCMD_VRM_OFF (4 << 13)
+#define SWMEMCMD_VRM_ON (5 << 13)
+#define CMDSTS (1<<12)
+#define SFCAVM (1<<11)
+#define SWFREQ_MASK 0x0380 /* P0-7 */
+#define SWFREQ_SHIFT 7
+#define TARVID_MASK 0x001f
+#define MEMSTAT_CTG 0x111a0
+#define RCBMINAVG 0x111a0
+#define RCUPEI 0x111b0
+#define RCDNEI 0x111b4
+#define MCHBAR_RENDER_STANDBY 0x111b8
#define RCX_SW_EXIT (1<<23)
#define RSX_STATUS_MASK 0x00700000
+#define VIDCTL 0x111c0
+#define VIDSTS 0x111c8
+#define VIDSTART 0x111cc /* 8 bits */
+#define MEMSTAT_ILK 0x111f8
+#define MEMSTAT_VID_MASK 0x7f00
+#define MEMSTAT_VID_SHIFT 8
+#define MEMSTAT_PSTATE_MASK 0x00f8
+#define MEMSTAT_PSTATE_SHIFT 3
+#define MEMSTAT_MON_ACTV (1<<2)
+#define MEMSTAT_SRC_CTL_MASK 0x0003
+#define MEMSTAT_SRC_CTL_CORE 0
+#define MEMSTAT_SRC_CTL_TRB 1
+#define MEMSTAT_SRC_CTL_THM 2
+#define MEMSTAT_SRC_CTL_STDBY 3
+#define RCPREVBSYTUPAVG 0x113b8
+#define RCPREVBSYTDNAVG 0x113bc
#define PEG_BAND_GAP_DATA 0x14d68
/*
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a3b90c9561d..ac0d1a73ac2 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -682,6 +682,8 @@ void i915_restore_display(struct drm_device *dev)
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->savePP_CONTROL);
+ I915_WRITE(MCHBAR_RENDER_STANDBY,
+ dev_priv->saveMCHBAR_RENDER_STANDBY);
} else {
I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
@@ -745,11 +747,16 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveGTIMR = I915_READ(GTIMR);
dev_priv->saveFDI_RXA_IMR = I915_READ(FDI_RXA_IMR);
dev_priv->saveFDI_RXB_IMR = I915_READ(FDI_RXB_IMR);
+ dev_priv->saveMCHBAR_RENDER_STANDBY =
+ I915_READ(MCHBAR_RENDER_STANDBY);
} else {
dev_priv->saveIER = I915_READ(IER);
dev_priv->saveIMR = I915_READ(IMR);
}
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_drps(dev);
+
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -820,6 +827,9 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_drps(dev);
+
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 15fbc1b5a83..70c9d4ba704 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -247,6 +247,7 @@ static void
parse_general_features(struct drm_i915_private *dev_priv,
struct bdb_header *bdb)
{
+ struct drm_device *dev = dev_priv->dev;
struct bdb_general_features *general;
/* Set sensible defaults in case we can't find the general block */
@@ -263,7 +264,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
if (IS_I85X(dev_priv->dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 66 : 48;
- else if (IS_IRONLAKE(dev_priv->dev))
+ else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
dev_priv->lvds_ssc_freq =
general->ssc_freq ? 100 : 120;
else
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 79dd4026586..fccf07470c8 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp, reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = PCH_ADPA;
else
reg = ADPA;
@@ -113,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
else
dpll_md_reg = DPLL_B_MD;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
adpa_reg = PCH_ADPA;
else
adpa_reg = ADPA;
@@ -122,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
* Disable separate mode multiplier used when cloning SDVO to CRT
* XXX this needs to be adjusted when we really are cloning
*/
- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
dpll_md = I915_READ(dpll_md_reg);
I915_WRITE(dpll_md_reg,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -136,11 +136,11 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_A, 0);
} else {
adpa |= ADPA_PIPE_B_SELECT;
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
I915_WRITE(BCLRPAT_B, 0);
}
@@ -202,7 +202,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
u32 hotplug_en;
int i, tries = 0;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return intel_ironlake_crt_detect_hotplug(connector);
/*
@@ -524,7 +524,7 @@ void intel_crt_init(struct drm_device *dev)
&intel_output->enc);
/* Set up the DDC bus. */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
i2c_reg = PCH_GPIOA;
else {
i2c_reg = GPIOA;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index b27202d23eb..9cd6de5f990 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -232,7 +232,7 @@ struct intel_limit {
#define G4X_P2_DISPLAY_PORT_FAST 10
#define G4X_P2_DISPLAY_PORT_LIMIT 0
-/* Ironlake */
+/* Ironlake / Sandybridge */
/* as we calculate clock using (register_value + 2) for
N/M1/M2, so here the range value for them is (actual_value-2).
*/
@@ -690,7 +690,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
limit = intel_ironlake_limit(crtc);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
@@ -886,7 +886,7 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
int lvds_reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
else
lvds_reg = LVDS;
@@ -1188,25 +1188,30 @@ static void intel_update_fbc(struct drm_crtc *crtc,
if (intel_fb->obj->size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
goto out_disable;
}
if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
(mode->flags & DRM_MODE_FLAG_DBLSCAN)) {
DRM_DEBUG_KMS("mode incompatible with compression, "
"disabling\n");
+ dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE;
goto out_disable;
}
if ((mode->hdisplay > 2048) ||
(mode->vdisplay > 1536)) {
DRM_DEBUG_KMS("mode too large for compression, disabling\n");
+ dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE;
goto out_disable;
}
if ((IS_I915GM(dev) || IS_I945GM(dev)) && plane != 0) {
DRM_DEBUG_KMS("plane not 0, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
if (obj_priv->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
+ dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
}
@@ -1366,7 +1371,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
dspcntr &= ~DISPPLANE_TILED;
}
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
/* must disable */
dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
@@ -1427,7 +1432,7 @@ static void i915_disable_vga (struct drm_device *dev)
u8 sr1;
u32 vga_reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
@@ -2111,7 +2116,7 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = crtc->dev;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
/* FDI link clock is fixed at 2.7G */
if (mode->clock * 3 > 27000 * 4)
return MODE_CLOCK_HIGH;
@@ -2757,11 +2762,22 @@ static void i9xx_update_wm(struct drm_device *dev, int planea_clock,
srwm = total_size - sr_entries;
if (srwm < 0)
srwm = 1;
- I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN | (srwm & 0x3f));
+
+ if (IS_I945G(dev) || IS_I945GM(dev))
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_FIFO_MASK | (srwm & 0xff));
+ else if (IS_I915GM(dev)) {
+ /* 915M has a smaller SRWM field */
+ I915_WRITE(FW_BLC_SELF, srwm & 0x3f);
+ I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN);
+ }
} else {
/* Turn off self refresh if both pipes are enabled */
- I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
- & ~FW_BLC_SELF_EN);
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF)
+ & ~FW_BLC_SELF_EN);
+ } else if (IS_I915GM(dev)) {
+ I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN);
+ }
}
DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n",
@@ -2967,7 +2983,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
refclk / 1000);
} else if (IS_I9XX(dev)) {
refclk = 96000;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
refclk = 120000; /* 120Mhz refclk */
} else {
refclk = 48000;
@@ -3025,7 +3041,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* FDI link */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
int lane, link_bw, bpp;
/* eDP doesn't require FDI link, so just set DP M/N
according to current link config */
@@ -3102,7 +3118,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
temp = I915_READ(PCH_DREF_CONTROL);
/* Always enable nonspread source */
temp &= ~DREF_NONSPREAD_SOURCE_MASK;
@@ -3149,7 +3165,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
- if (!IS_IRONLAKE(dev))
+ if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
if (IS_I9XX(dev)) {
@@ -3162,7 +3178,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
- else if (IS_IRONLAKE(dev))
+ else if (HAS_PCH_SPLIT(dev))
dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
}
if (is_dp)
@@ -3174,7 +3190,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
else {
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
/* also FPA1 */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
if (IS_G4X(dev) && has_reduced_clock)
dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
@@ -3193,7 +3209,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
break;
}
- if (IS_I965G(dev) && !IS_IRONLAKE(dev))
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
} else {
if (is_lvds) {
@@ -3227,7 +3243,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Ironlake's plane is forced to pipe, bit 24 is to
enable color space conversion */
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
if (pipe == 0)
dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
else
@@ -3254,14 +3270,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Disable the panel fitter if it was on our pipe */
- if (!IS_IRONLAKE(dev) && intel_panel_fitter_pipe(dev) == pipe)
+ if (!HAS_PCH_SPLIT(dev) && intel_panel_fitter_pipe(dev) == pipe)
I915_WRITE(PFIT_CONTROL, 0);
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
drm_mode_debug_printmodeline(mode);
/* assign to Ironlake registers */
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
fp_reg = pch_fp_reg;
dpll_reg = pch_dpll_reg;
}
@@ -3282,7 +3298,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
if (is_lvds) {
u32 lvds;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
lvds_reg = PCH_LVDS;
lvds = I915_READ(lvds_reg);
@@ -3304,12 +3320,12 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* set the dithering flag */
if (IS_I965G(dev)) {
if (dev_priv->lvds_dither) {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
pipeconf |= PIPE_ENABLE_DITHER;
else
lvds |= LVDS_ENABLE_DITHER;
} else {
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
pipeconf &= ~PIPE_ENABLE_DITHER;
else
lvds &= ~LVDS_ENABLE_DITHER;
@@ -3328,7 +3344,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* Wait for the clocks to stabilize. */
udelay(150);
- if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
+ if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
if (is_sdvo) {
sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
@@ -3375,14 +3391,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* pipesrc and dspsize control the size that is scaled from, which should
* always be the user's requested size.
*/
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
(mode->hdisplay - 1));
I915_WRITE(dsppos_reg, 0);
}
I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
I915_WRITE(link_m1_reg, m_n.link_m);
@@ -3438,7 +3454,7 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
return;
/* use legacy palette for Ironlake */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
LGC_PALETTE_B;
@@ -3553,11 +3569,10 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
intel_crtc->cursor_bo = bo;
return 0;
-fail:
- mutex_lock(&dev->struct_mutex);
fail_locked:
- drm_gem_object_unreference(bo);
mutex_unlock(&dev->struct_mutex);
+fail:
+ drm_gem_object_unreference_unlocked(bo);
return ret;
}
@@ -3922,7 +3937,7 @@ static void intel_increase_pllclock(struct drm_crtc *crtc, bool schedule)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -3961,7 +3976,7 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
int dpll_reg = (pipe == 0) ? DPLL_A : DPLL_B;
int dpll = I915_READ(dpll_reg);
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
if (!dev_priv->lvds_downclock_avail)
@@ -4011,6 +4026,11 @@ static void intel_idle_update(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ DRM_DEBUG_DRIVER("enable memory self refresh on 945\n");
+ I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN);
+ }
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
/* Skip inactive CRTCs */
if (!crtc->fb)
@@ -4044,9 +4064,17 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return;
- if (!dev_priv->busy)
+ if (!dev_priv->busy) {
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ u32 fw_blc_self;
+
+ DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
+ fw_blc_self = I915_READ(FW_BLC_SELF);
+ fw_blc_self &= ~FW_BLC_SELF_EN;
+ I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+ }
dev_priv->busy = true;
- else
+ } else
mod_timer(&dev_priv->idle_timer, jiffies +
msecs_to_jiffies(GPU_IDLE_TIMEOUT));
@@ -4058,6 +4086,14 @@ void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
intel_fb = to_intel_framebuffer(crtc->fb);
if (intel_fb->obj == obj) {
if (!intel_crtc->busy) {
+ if (IS_I945G(dev) || IS_I945GM(dev)) {
+ u32 fw_blc_self;
+
+ DRM_DEBUG_DRIVER("disable memory self refresh on 945\n");
+ fw_blc_self = I915_READ(FW_BLC_SELF);
+ fw_blc_self &= ~FW_BLC_SELF_EN;
+ I915_WRITE(FW_BLC_SELF, fw_blc_self | FW_BLC_SELF_EN_MASK);
+ }
/* Non-busy -> busy, upclock */
intel_increase_pllclock(crtc, true);
intel_crtc->busy = true;
@@ -4382,7 +4418,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_MOBILE(dev) && !IS_I830(dev))
intel_lvds_init(dev);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
int found;
if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
@@ -4451,7 +4487,7 @@ static void intel_setup_outputs(struct drm_device *dev)
DRM_DEBUG_KMS("probing DP_D\n");
intel_dp_init(dev, DP_D);
}
- } else if (IS_I8XX(dev))
+ } else if (IS_GEN2(dev))
intel_dvo_init(dev);
if (SUPPORTS_TV(dev))
@@ -4476,9 +4512,7 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
intelfb_remove(dev, fb);
drm_framebuffer_cleanup(fb);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(intel_fb->obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(intel_fb->obj);
kfree(intel_fb);
}
@@ -4541,9 +4575,7 @@ intel_user_framebuffer_create(struct drm_device *dev,
ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
if (ret) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return NULL;
}
@@ -4591,6 +4623,91 @@ err_unref:
return NULL;
}
+void ironlake_enable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvmodectl = I915_READ(MEMMODECTL), rgvswctl;
+ u8 fmax, fmin, fstart, vstart;
+ int i = 0;
+
+ /* 100ms RC evaluation intervals */
+ I915_WRITE(RCUPEI, 100000);
+ I915_WRITE(RCDNEI, 100000);
+
+ /* Set max/min thresholds to 90ms and 80ms respectively */
+ I915_WRITE(RCBMAXAVG, 90000);
+ I915_WRITE(RCBMINAVG, 80000);
+
+ I915_WRITE(MEMIHYST, 1);
+
+ /* Set up min, max, and cur for interrupt handling */
+ fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT;
+ fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
+ fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+ vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
+ PXVFREQ_PX_SHIFT;
+
+ dev_priv->max_delay = fstart; /* can't go to fmax w/o IPS */
+ dev_priv->min_delay = fmin;
+ dev_priv->cur_delay = fstart;
+
+ I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
+
+ /*
+ * Interrupts will be enabled in ironlake_irq_postinstall
+ */
+
+ I915_WRITE(VIDSTART, vstart);
+ POSTING_READ(VIDSTART);
+
+ rgvmodectl |= MEMMODE_SWMODE_EN;
+ I915_WRITE(MEMMODECTL, rgvmodectl);
+
+ while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
+ if (i++ > 100) {
+ DRM_ERROR("stuck trying to change perf mode\n");
+ break;
+ }
+ msleep(1);
+ }
+ msleep(1);
+
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ POSTING_READ(MEMSWCTL);
+
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+}
+
+void ironlake_disable_drps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 rgvswctl;
+ u8 fstart;
+
+ /* Ack interrupts, disable EFC interrupt */
+ I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN);
+ I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG);
+ I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT);
+ I915_WRITE(DEIIR, DE_PCU_EVENT);
+ I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT);
+
+ /* Go back to the starting frequency */
+ fstart = (I915_READ(MEMMODECTL) & MEMMODE_FSTART_MASK) >>
+ MEMMODE_FSTART_SHIFT;
+ rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
+ (fstart << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
+ rgvswctl |= MEMCTL_CMD_STS;
+ I915_WRITE(MEMSWCTL, rgvswctl);
+ msleep(1);
+
+}
+
void intel_init_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4599,7 +4716,7 @@ void intel_init_clock_gating(struct drm_device *dev)
* Disable clock gating reported to work incorrectly according to the
* specs, but enable as much else as we can.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
return;
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
@@ -4672,7 +4789,7 @@ static void intel_init_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* We always want a DPMS function */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dev_priv->display.dpms = ironlake_crtc_dpms;
else
dev_priv->display.dpms = i9xx_crtc_dpms;
@@ -4715,7 +4832,7 @@ static void intel_init_display(struct drm_device *dev)
i830_get_display_clock_speed;
/* For FIFO watermark updates */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
dev_priv->display.update_wm = NULL;
else if (IS_G4X(dev))
dev_priv->display.update_wm = g4x_update_wm;
@@ -4774,11 +4891,6 @@ void intel_modeset_init(struct drm_device *dev)
DRM_DEBUG_KMS("%d display pipe%s available.\n",
num_pipe, num_pipe > 1 ? "s" : "");
- if (IS_I85X(dev))
- pci_read_config_word(dev->pdev, HPLLCC, &dev_priv->orig_clock);
- else if (IS_I9XX(dev) || IS_G4X(dev))
- pci_read_config_word(dev->pdev, GCFGC, &dev_priv->orig_clock);
-
for (i = 0; i < num_pipe; i++) {
intel_crtc_init(dev, i);
}
@@ -4787,6 +4899,9 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_clock_gating(dev);
+ if (IS_IRONLAKE_M(dev))
+ ironlake_enable_drps(dev);
+
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -4834,6 +4949,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
drm_gem_object_unreference(dev_priv->pwrctx);
}
+ if (IS_IRONLAKE_M(dev))
+ ironlake_disable_drps(dev);
+
mutex_unlock(&dev->struct_mutex);
drm_mode_config_cleanup(dev);
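
Editor's note on the DRPS additions in intel_display.c above: both ironlake_enable_drps() and ironlake_disable_drps() drive a frequency change through MEMSWCTL with the same handshake — request MEMCTL_CMD_CHFREQ with the target fstart value, wait for the hardware to clear MEMCTL_CMD_STS, then write the status bit back as an acknowledge. A minimal sketch of that pattern follows, assuming the register names from this patch; ironlake_set_drps() itself is a hypothetical helper, not something the patch adds.

/* Sketch only: the request/poll/ack sequence used by the DRPS code above.
 * ironlake_set_drps() is a hypothetical helper name, not part of this patch. */
static bool ironlake_set_drps(struct drm_device *dev, u8 val)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 rgvswctl;
	int i = 0;

	/* hardware must be idle before a new command can be issued */
	while (I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) {
		if (i++ > 100) {
			DRM_ERROR("stuck trying to change perf mode\n");
			return false;
		}
		msleep(1);
	}

	/* request the new frequency point */
	rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) |
		   (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM;
	I915_WRITE(MEMSWCTL, rgvswctl);
	POSTING_READ(MEMSWCTL);

	/* acknowledge, so the next command can be accepted */
	rgvswctl |= MEMCTL_CMD_STS;
	I915_WRITE(MEMSWCTL, rgvswctl);
	return true;
}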
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 439506cefc1..3ef3a0d0edd 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -231,7 +231,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
*/
if (IS_eDP(intel_output))
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
- else if (IS_IRONLAKE(dev))
+ else if (HAS_PCH_SPLIT(dev))
aux_clock_divider = 62; /* IRL input clock fixed at 125Mhz */
else
aux_clock_divider = intel_hrawclk(dev) / 2;
@@ -584,7 +584,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
intel_dp_compute_m_n(3, lane_count,
mode->clock, adjusted_mode->clock, &m_n);
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
if (intel_crtc->pipe == 0) {
I915_WRITE(TRANSA_DATA_M1,
((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -1176,7 +1176,7 @@ intel_dp_detect(struct drm_connector *connector)
dp_priv->has_audio = false;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return ironlake_dp_detect(connector);
temp = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a51573da1ff..3a467ca5785 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -209,6 +209,8 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
extern void intel_init_clock_gating(struct drm_device *dev);
+extern void ironlake_enable_drps(struct drm_device *dev);
+extern void ironlake_disable_drps(struct drm_device *dev);
extern int intel_framebuffer_create(struct drm_device *dev,
struct drm_mode_fb_cmd *mode_cmd,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index aaabbcbe590..8cd791dc5b2 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -35,6 +35,7 @@
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
+#include <linux/vga_switcheroo.h>
#include "drmP.h"
#include "drm.h"
@@ -235,6 +236,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
obj_priv->gtt_offset, fbo);
mutex_unlock(&dev->struct_mutex);
+ vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unpin:
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 0e268deed76..a30f8bfc198 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround, need to toggle enable bit off and on for 12bpc, but
* we do this anyway which shows more stable in testing.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
POSTING_READ(hdmi_priv->sdvox_reg);
}
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
/* HW workaround, need to write this twice for issue that may result
* in first write getting masked.
*/
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(hdmi_priv->sdvox_reg, temp);
POSTING_READ(hdmi_priv->sdvox_reg);
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 8673c735b8a..fcc753ca5d9 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -128,7 +128,7 @@ intel_i2c_reset_gmbus(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_GMBUS0, 0);
} else {
I915_WRITE(GMBUS0, 0);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index c2e8a45780d..14e516fdc2d 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 blc_pwm_ctl, reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = BLC_PWM_PCH_CTL2;
else
reg = BLC_PWM_CTL;
@@ -89,17 +89,22 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
static void intel_lvds_set_power(struct drm_device *dev, bool on)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 pp_status, ctl_reg, status_reg;
+ u32 pp_status, ctl_reg, status_reg, lvds_reg;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
ctl_reg = PCH_PP_CONTROL;
status_reg = PCH_PP_STATUS;
+ lvds_reg = PCH_LVDS;
} else {
ctl_reg = PP_CONTROL;
status_reg = PP_STATUS;
+ lvds_reg = LVDS;
}
if (on) {
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
+ POSTING_READ(lvds_reg);
+
I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
POWER_TARGET_ON);
do {
@@ -115,6 +120,9 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
do {
pp_status = I915_READ(status_reg);
} while (pp_status & PP_ON);
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ POSTING_READ(lvds_reg);
}
}
@@ -137,7 +145,7 @@ static void intel_lvds_save(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +182,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
u32 pwm_ctl_reg;
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
pp_on_reg = PCH_PP_ON_DELAYS;
pp_off_reg = PCH_PP_OFF_DELAYS;
pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +305,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
}
/* full screen scale for now */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
goto out;
/* 965+ wants fuzzy fitting */
@@ -327,7 +335,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* to register description and PRM.
* Change the value here to see the borders for debugging
*/
- if (!IS_IRONLAKE(dev)) {
+ if (!HAS_PCH_SPLIT(dev)) {
I915_WRITE(BCLRPAT_A, 0);
I915_WRITE(BCLRPAT_B, 0);
}
@@ -548,7 +556,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
struct drm_i915_private *dev_priv = dev->dev_private;
u32 reg;
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
reg = BLC_PWM_CPU_CTL;
else
reg = BLC_PWM_CTL;
@@ -587,7 +595,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
* settings.
*/
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
return;
/*
@@ -655,8 +663,15 @@ static const struct dmi_system_id bad_lid_status[] = {
*/
static enum drm_connector_status intel_lvds_detect(struct drm_connector *connector)
{
+ struct drm_device *dev = connector->dev;
enum drm_connector_status status = connector_status_connected;
+ /* ACPI lid methods were generally unreliable in this generation, so
+ * don't even bother.
+ */
+ if (IS_GEN2(dev))
+ return connector_status_connected;
+
if (!dmi_check_system(bad_lid_status) && !acpi_lid_open())
status = connector_status_disconnected;
@@ -1020,7 +1035,7 @@ void intel_lvds_init(struct drm_device *dev)
return;
}
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
return;
if (dev_priv->edp_support) {
@@ -1123,7 +1138,7 @@ void intel_lvds_init(struct drm_device *dev)
*/
/* Ironlake: FIXME if still fail, not try pipe mode now */
- if (IS_IRONLAKE(dev))
+ if (HAS_PCH_SPLIT(dev))
goto failed;
lvds = I915_READ(LVDS);
@@ -1144,7 +1159,7 @@ void intel_lvds_init(struct drm_device *dev)
goto failed;
out:
- if (IS_IRONLAKE(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
u32 pwm;
/* make sure PWM is enabled */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
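
Editor's note on the intel_lvds.c hunks above: the panel power sequence now enables the LVDS port before asserting POWER_TARGET_ON, and drops the port only after PP_STATUS reports the panel off. A condensed sketch of the resulting on/off order, assuming the non-PCH register names (LVDS, PP_CONTROL, PP_STATUS) shown in the patch; the function name is illustrative.

/* Sketch of the power sequencing introduced above; not a literal copy. */
static void lvds_power_sketch(struct drm_device *dev, bool on)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_status;

	if (on) {
		/* port first, then panel power, then wait for PP_ON */
		I915_WRITE(LVDS, I915_READ(LVDS) | LVDS_PORT_EN);
		POSTING_READ(LVDS);
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | POWER_TARGET_ON);
		do {
			pp_status = I915_READ(PP_STATUS);
		} while ((pp_status & PP_ON) == 0);
	} else {
		/* panel power off, wait for PP_ON to clear, then drop the port */
		I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & ~POWER_TARGET_ON);
		do {
			pp_status = I915_READ(PP_STATUS);
		} while (pp_status & PP_ON);
		I915_WRITE(LVDS, I915_READ(LVDS) & ~LVDS_PORT_EN);
		POSTING_READ(LVDS);
	}
}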
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 2639591c72e..d355d1d527e 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -172,7 +172,7 @@ struct overlay_registers {
#define OFC_UPDATE 0x1
#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
-#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
+#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev) && !IS_GEN6(dev))
static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
@@ -199,16 +199,11 @@ static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_over
static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
{
- struct drm_device *dev = overlay->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
-
if (OVERLAY_NONPHYSICAL(overlay->dev))
io_mapping_unmap_atomic(overlay->virt_addr);
overlay->virt_addr = NULL;
- I915_READ(OVADD); /* flush wc cashes */
-
return;
}
@@ -225,9 +220,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
overlay->active = 1;
overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
OUT_RING(overlay->flip_addr | OFC_UPDATE);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -267,9 +260,7 @@ static void intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- BEGIN_LP_RING(4);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(2);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
@@ -338,9 +329,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
/* wait for overlay to go idle */
overlay->hw_wedged = SWITCH_OFF_STAGE_1;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -358,9 +347,7 @@ static int intel_overlay_off(struct intel_overlay *overlay)
/* turn overlay off */
overlay->hw_wedged = SWITCH_OFF_STAGE_2;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -435,9 +422,7 @@ int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
overlay->hw_wedged = SWITCH_OFF_STAGE_2;
- BEGIN_LP_RING(6);
- OUT_RING(MI_FLUSH);
- OUT_RING(MI_NOOP);
+ BEGIN_LP_RING(4);
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
OUT_RING(flip_addr);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -1179,7 +1164,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
- drm_gem_object_unreference(new_bo);
+ drm_gem_object_unreference_unlocked(new_bo);
kfree(params);
return ret;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 82678d30ab0..48daee5c9c6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -35,6 +35,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "intel_sdvo_regs.h"
+#include <linux/dmi.h>
static char *tv_format_names[] = {
"NTSC_M" , "NTSC_J" , "NTSC_443",
@@ -2283,6 +2284,25 @@ intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
return 0x72;
}
+static int intel_sdvo_bad_tv_callback(const struct dmi_system_id *id)
+{
+ DRM_DEBUG_KMS("Ignoring bad SDVO TV connector for %s\n", id->ident);
+ return 1;
+}
+
+static struct dmi_system_id intel_sdvo_bad_tv[] = {
+ {
+ .callback = intel_sdvo_bad_tv_callback,
+ .ident = "IntelG45/ICH10R/DME1737",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "IBM CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "4800784"),
+ },
+ },
+
+ { } /* terminating entry */
+};
+
static bool
intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
{
@@ -2323,7 +2343,8 @@ intel_sdvo_output_setup(struct intel_output *intel_output, uint16_t flags)
(1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT);
}
- } else if (flags & SDVO_OUTPUT_SVID0) {
+ } else if ((flags & SDVO_OUTPUT_SVID0) &&
+ !dmi_check_system(intel_sdvo_bad_tv)) {
sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
encoder->encoder_type = DRM_MODE_ENCODER_TVDAC;
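
Editor's note on the intel_sdvo.c quirk above: it follows the standard DMI quirk pattern — a dmi_system_id table with a callback, consulted via dmi_check_system() before the SVID0 TV output is registered. A minimal, self-contained example of the same pattern; the table name, identifiers, and log text here are illustrative, not taken from the patch.

#include <linux/dmi.h>
#include <linux/printk.h>

/* Illustrative quirk table following the pattern used above. */
static int example_quirk_callback(const struct dmi_system_id *id)
{
	pr_info("applying quirk for %s\n", id->ident);
	return 1;	/* non-zero: count this entry as a match */
}

static const struct dmi_system_id example_quirks[] = {
	{
		.callback = example_quirk_callback,
		.ident = "Example vendor/board",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
			DMI_MATCH(DMI_PRODUCT_NAME, "EXAMPLE PRODUCT"),
		},
	},
	{ }	/* terminating entry */
};

/* dmi_check_system() runs each matching entry's callback and returns the
 * number of matches, so it can gate a code path directly:
 *
 *	if (!dmi_check_system(example_quirks))
 *		register_the_output();
 */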
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 48c290b5da8..32db806f3b5 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -16,7 +16,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o \
- nv40_grctx.o \
+ nv40_grctx.o nv50_grctx.o \
nv04_instmem.o nv50_instmem.o \
nv50_crtc.o nv50_dac.o nv50_sor.o \
nv50_cursor.o nv50_display.o nv50_fbcon.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 48227e74475..0e0730a5313 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -11,6 +11,8 @@
#include "nouveau_drm.h"
#include "nv50_display.h"
+#include <linux/vga_switcheroo.h>
+
#define NOUVEAU_DSM_SUPPORTED 0x00
#define NOUVEAU_DSM_SUPPORTED_FUNCTIONS 0x00
@@ -28,31 +30,30 @@
#define NOUVEAU_DSM_POWER_SPEED 0x01
#define NOUVEAU_DSM_POWER_STAMINA 0x02
-static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
-{
- static char muid[] = {
- 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
- 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
- };
+static struct nouveau_dsm_priv {
+ bool dsm_detected;
+ acpi_handle dhandle;
+ acpi_handle dsm_handle;
+} nouveau_dsm_priv;
+
+static const char nouveau_dsm_muid[] = {
+ 0xA0, 0xA0, 0x95, 0x9D, 0x60, 0x00, 0x48, 0x4D,
+ 0xB3, 0x4D, 0x7E, 0x5F, 0xEA, 0x12, 0x9F, 0xD4,
+};
- struct pci_dev *pdev = dev->pdev;
- struct acpi_handle *handle;
+static int nouveau_dsm(acpi_handle handle, int func, int arg, int *result)
+{
struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
struct acpi_object_list input;
union acpi_object params[4];
union acpi_object *obj;
int err;
- handle = DEVICE_ACPI_HANDLE(&pdev->dev);
-
- if (!handle)
- return -ENODEV;
-
input.count = 4;
input.pointer = params;
params[0].type = ACPI_TYPE_BUFFER;
- params[0].buffer.length = sizeof(muid);
- params[0].buffer.pointer = (char *)muid;
+ params[0].buffer.length = sizeof(nouveau_dsm_muid);
+ params[0].buffer.pointer = (char *)nouveau_dsm_muid;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = 0x00000102;
params[2].type = ACPI_TYPE_INTEGER;
@@ -62,7 +63,7 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
err = acpi_evaluate_object(handle, "_DSM", &input, &output);
if (err) {
- NV_INFO(dev, "failed to evaluate _DSM: %d\n", err);
+ printk(KERN_INFO "failed to evaluate _DSM: %d\n", err);
return err;
}
@@ -86,40 +87,119 @@ static int nouveau_dsm(struct drm_device *dev, int func, int arg, int *result)
return 0;
}
-int nouveau_hybrid_setup(struct drm_device *dev)
+static int nouveau_dsm_switch_mux(acpi_handle handle, int mux_id)
{
- int result;
-
- if (nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STATE,
- &result))
- return -ENODEV;
-
- NV_INFO(dev, "_DSM hardware status gave 0x%x\n", result);
-
- if (result) { /* Ensure that the external GPU is enabled */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_SPEED, NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_SPEED,
- NULL);
- } else { /* Stamina mode - disable the external GPU */
- nouveau_dsm(dev, NOUVEAU_DSM_LED, NOUVEAU_DSM_LED_STAMINA,
- NULL);
- nouveau_dsm(dev, NOUVEAU_DSM_POWER, NOUVEAU_DSM_POWER_STAMINA,
- NULL);
- }
+ return nouveau_dsm(handle, NOUVEAU_DSM_LED, mux_id, NULL);
+}
+
+static int nouveau_dsm_set_discrete_state(acpi_handle handle, enum vga_switcheroo_state state)
+{
+ int arg;
+ if (state == VGA_SWITCHEROO_ON)
+ arg = NOUVEAU_DSM_POWER_SPEED;
+ else
+ arg = NOUVEAU_DSM_POWER_STAMINA;
+ nouveau_dsm(handle, NOUVEAU_DSM_POWER, arg, NULL);
+ return 0;
+}
+
+static int nouveau_dsm_switchto(enum vga_switcheroo_client_id id)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_STAMINA);
+ else
+ return nouveau_dsm_switch_mux(nouveau_dsm_priv.dsm_handle, NOUVEAU_DSM_LED_SPEED);
+}
+static int nouveau_dsm_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ return 0;
+
+ return nouveau_dsm_set_discrete_state(nouveau_dsm_priv.dsm_handle, state);
+}
+
+static int nouveau_dsm_init(void)
+{
return 0;
}
-bool nouveau_dsm_probe(struct drm_device *dev)
+static int nouveau_dsm_get_client_id(struct pci_dev *pdev)
{
- int support = 0;
+ if (nouveau_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler nouveau_dsm_handler = {
+ .switchto = nouveau_dsm_switchto,
+ .power_state = nouveau_dsm_power_state,
+ .init = nouveau_dsm_init,
+ .get_client_id = nouveau_dsm_get_client_id,
+};
- if (nouveau_dsm(dev, NOUVEAU_DSM_SUPPORTED,
- NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &support))
+static bool nouveau_dsm_pci_probe(struct pci_dev *pdev)
+{
+ acpi_handle dhandle, nvidia_handle;
+ acpi_status status;
+ int ret;
+ uint32_t result;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+ status = acpi_get_handle(dhandle, "_DSM", &nvidia_handle);
+ if (ACPI_FAILURE(status)) {
return false;
+ }
- if (!support)
+ ret = nouveau_dsm(nvidia_handle, NOUVEAU_DSM_SUPPORTED,
+ NOUVEAU_DSM_SUPPORTED_FUNCTIONS, &result);
+ if (ret < 0)
return false;
+ nouveau_dsm_priv.dhandle = dhandle;
+ nouveau_dsm_priv.dsm_handle = nvidia_handle;
return true;
}
+
+static bool nouveau_dsm_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ int has_dsm = 0;
+ int vga_count = 0;
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_dsm |= (nouveau_dsm_pci_probe(pdev) == true);
+ }
+
+ if (vga_count == 2 && has_dsm) {
+ acpi_get_name(nouveau_dsm_priv.dsm_handle, ACPI_FULL_PATHNAME, &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
+ acpi_method_name);
+ nouveau_dsm_priv.dsm_detected = true;
+ return true;
+ }
+ return false;
+}
+
+void nouveau_register_dsm_handler(void)
+{
+ bool r;
+
+ r = nouveau_dsm_detect();
+ if (!r)
+ return;
+
+ vga_switcheroo_register_handler(&nouveau_dsm_handler);
+}
+
+void nouveau_unregister_dsm_handler(void)
+{
+ vga_switcheroo_unregister_handler();
+}
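
Editor's note on the nouveau_acpi.c rework above: _DSM handling is split into detection (nouveau_dsm_detect), a vga_switcheroo handler, and register/unregister entry points. A small sketch of how a driver would typically call those entry points from its load/unload paths; the placement and the example_* function names are assumptions for illustration only — the patch itself only adds the handler side.

#include <linux/vga_switcheroo.h>

/* Illustrative only: where the new register/unregister calls would sit
 * in a driver's load/unload path (not shown in this patch). */
static int example_driver_load(void)
{
	/* probes the VGA devices; if two are present and one answers _DSM,
	 * this registers nouveau_dsm_handler with vga_switcheroo */
	nouveau_register_dsm_handler();
	return 0;
}

static void example_driver_unload(void)
{
	nouveau_unregister_dsm_handler();
}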
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 0e9cd1d4913..71247da17da 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -311,11 +311,11 @@ valid_reg(struct nvbios *bios, uint32_t reg)
/* C51 has misaligned regs on purpose. Marvellous */
if (reg & 0x2 ||
- (reg & 0x1 && dev_priv->VBIOS.pub.chip_version != 0x51))
+ (reg & 0x1 && dev_priv->vbios.chip_version != 0x51))
NV_ERROR(dev, "======= misaligned reg 0x%08X =======\n", reg);
/* warn on C51 regs that haven't been verified accessible in tracing */
- if (reg & 0x1 && dev_priv->VBIOS.pub.chip_version == 0x51 &&
+ if (reg & 0x1 && dev_priv->vbios.chip_version == 0x51 &&
reg != 0x130d && reg != 0x1311 && reg != 0x60081d)
NV_WARN(dev, "=== C51 misaligned reg 0x%08X not verified ===\n",
reg);
@@ -420,7 +420,7 @@ bios_wr32(struct nvbios *bios, uint32_t reg, uint32_t data)
LOG_OLD_VALUE(bios_rd32(bios, reg));
BIOSLOG(bios, " Write: Reg: 0x%08X, Data: 0x%08X\n", reg, data);
- if (dev_priv->VBIOS.execute) {
+ if (dev_priv->vbios.execute) {
still_alive();
nv_wr32(bios->dev, reg, data);
}
@@ -647,7 +647,7 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
- if (dev_priv->VBIOS.execute) {
+ if (dev_priv->vbios.execute) {
still_alive();
nv_wr32(dev, reg + 4, reg1);
nv_wr32(dev, reg + 0, reg0);
@@ -689,7 +689,7 @@ setPLL(struct nvbios *bios, uint32_t reg, uint32_t clk)
static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
/*
* For the results of this function to be correct, CR44 must have been
@@ -700,7 +700,7 @@ static int dcb_entry_idx_from_crtchead(struct drm_device *dev)
uint8_t dcb_entry = NVReadVgaCrtc5758(dev, bios->state.crtchead, 0);
- if (dcb_entry > bios->bdcb.dcb.entries) {
+ if (dcb_entry > bios->dcb.entries) {
NV_ERROR(dev, "CR58 doesn't have a valid DCB entry currently "
"(%02X)\n", dcb_entry);
dcb_entry = 0x7f; /* unused / invalid marker */
@@ -713,25 +713,26 @@ static struct nouveau_i2c_chan *
init_i2c_device_find(struct drm_device *dev, int i2c_index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct bios_parsed_dcb *bdcb = &dev_priv->VBIOS.bdcb;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
if (i2c_index == 0xff) {
/* note: dcb_entry_idx_from_crtchead needs pre-script set-up */
int idx = dcb_entry_idx_from_crtchead(dev), shift = 0;
- int default_indices = bdcb->i2c_default_indices;
+ int default_indices = dcb->i2c_default_indices;
- if (idx != 0x7f && bdcb->dcb.entry[idx].i2c_upper_default)
+ if (idx != 0x7f && dcb->entry[idx].i2c_upper_default)
shift = 4;
i2c_index = (default_indices >> shift) & 0xf;
}
if (i2c_index == 0x80) /* g80+ */
- i2c_index = bdcb->i2c_default_indices & 0xf;
+ i2c_index = dcb->i2c_default_indices & 0xf;
return nouveau_i2c_find(dev, i2c_index);
}
-static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
+static uint32_t
+get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
{
/*
* For mlv < 0x80, it is an index into a table of TMDS base addresses.
@@ -744,6 +745,7 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvbios *bios = &dev_priv->vbios;
const int pramdac_offset[13] = {
0, 0, 0x8, 0, 0x2000, 0, 0, 0, 0x2008, 0, 0, 0, 0x2000 };
const uint32_t pramdac_table[4] = {
@@ -756,13 +758,12 @@ static uint32_t get_tmds_index_reg(struct drm_device *dev, uint8_t mlv)
dcb_entry = dcb_entry_idx_from_crtchead(dev);
if (dcb_entry == 0x7f)
return 0;
- dacoffset = pramdac_offset[
- dev_priv->VBIOS.bdcb.dcb.entry[dcb_entry].or];
+ dacoffset = pramdac_offset[bios->dcb.entry[dcb_entry].or];
if (mlv == 0x81)
dacoffset ^= 8;
return 0x6808b0 + dacoffset;
} else {
- if (mlv > ARRAY_SIZE(pramdac_table)) {
+ if (mlv >= ARRAY_SIZE(pramdac_table)) {
NV_ERROR(dev, "Magic Lookup Value too big (%02X)\n",
mlv);
return 0;
@@ -2574,19 +2575,19 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
const uint32_t nv50_gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
- const uint8_t *gpio_table = &bios->data[bios->bdcb.gpio_table_ptr];
+ const uint8_t *gpio_table = &bios->data[bios->dcb.gpio_table_ptr];
const uint8_t *gpio_entry;
int i;
if (!iexec->execute)
return 1;
- if (bios->bdcb.version != 0x40) {
+ if (bios->dcb.version != 0x40) {
NV_ERROR(bios->dev, "DCB table not version 4.0\n");
return 0;
}
- if (!bios->bdcb.gpio_table_ptr) {
+ if (!bios->dcb.gpio_table_ptr) {
NV_WARN(bios->dev, "Invalid pointer to INIT_8E table\n");
return 0;
}
@@ -3123,7 +3124,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
struct dcb_entry *dcbent, int head, bool dl)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = {true, false};
NV_TRACE(dev, "0x%04X: Parsing digital output script table\n",
@@ -3140,7 +3141,7 @@ run_digital_op_script(struct drm_device *dev, uint16_t scriptptr,
static int call_lvds_manufacturer_script(struct drm_device *dev, struct dcb_entry *dcbent, int head, enum LVDS_script script)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t sub = bios->data[bios->fp.xlated_entry + script] + (bios->fp.link_c_increment && dcbent->or & OUTPUT_C ? 1 : 0);
uint16_t scriptofs = ROM16(bios->data[bios->init_script_tbls_ptr + sub * 2]);
@@ -3194,7 +3195,7 @@ static int run_lvds_table(struct drm_device *dev, struct dcb_entry *dcbent, int
* of a list of pxclks and script pointers.
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
unsigned int outputset = (dcbent->or == 4) ? 1 : 0;
uint16_t scriptptr = 0, clktable;
uint8_t clktableptr = 0;
@@ -3261,7 +3262,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_entry *dcbent, int head,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
uint32_t sel_clk_binding, sel_clk;
int ret;
@@ -3395,7 +3396,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
#ifndef __powerpc__
NV_ERROR(dev, "Pointer to flat panel table invalid\n");
#endif
- bios->pub.digital_min_front_porch = 0x4b;
+ bios->digital_min_front_porch = 0x4b;
return 0;
}
@@ -3428,7 +3429,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
* fptable[4] is the minimum
* RAMDAC_FP_HCRTC -> RAMDAC_FP_HSYNC_START gap
*/
- bios->pub.digital_min_front_porch = fptable[4];
+ bios->digital_min_front_porch = fptable[4];
ofs = -7;
break;
default:
@@ -3467,7 +3468,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
/* nv4x cards need both a strap value and fpindex of 0xf to use DDC */
if (lth.lvds_ver > 0x10)
- bios->pub.fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
+ bios->fp_no_ddc = fpstrapping != 0xf || fpindex != 0xf;
/*
* If either the strap or xlated fpindex value are 0xf there is no
@@ -3491,7 +3492,7 @@ static int parse_fp_mode_table(struct drm_device *dev, struct nvbios *bios)
bool nouveau_bios_fp_mode(struct drm_device *dev, struct drm_display_mode *mode)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t *mode_entry = &bios->data[bios->fp.mode_ptr];
if (!mode) /* just checking whether we can produce a mode */
@@ -3562,11 +3563,11 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
* until later, when this function should be called with non-zero pxclk
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
int fpstrapping = get_fp_strap(dev, bios), lvdsmanufacturerindex = 0;
struct lvdstableheader lth;
uint16_t lvdsofs;
- int ret, chip_version = bios->pub.chip_version;
+ int ret, chip_version = bios->chip_version;
ret = parse_lvds_manufacturer_table_header(dev, bios, &lth);
if (ret)
@@ -3682,7 +3683,7 @@ bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
uint16_t record, int record_len, int record_nr)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint32_t entry;
uint16_t table;
int i, v;
@@ -3716,7 +3717,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
int *length)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t *table;
if (!bios->display.dp_table_ptr) {
@@ -3725,7 +3726,7 @@ nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
}
table = &bios->data[bios->display.dp_table_ptr];
- if (table[0] != 0x21) {
+ if (table[0] != 0x20 && table[0] != 0x21) {
NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
table[0]);
return NULL;
@@ -3765,7 +3766,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint8_t *table = &bios->data[bios->display.script_table_ptr];
uint8_t *otable = NULL;
uint16_t script;
@@ -3918,8 +3919,8 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
- int cv = bios->pub.chip_version;
+ struct nvbios *bios = &dev_priv->vbios;
+ int cv = bios->chip_version;
uint16_t clktable = 0, scriptptr;
uint32_t sel_clk_binding, sel_clk;
@@ -3978,8 +3979,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
- int cv = bios->pub.chip_version, pllindex = 0;
+ struct nvbios *bios = &dev_priv->vbios;
+ int cv = bios->chip_version, pllindex = 0;
uint8_t pll_lim_ver = 0, headerlen = 0, recordlen = 0, entries = 0;
uint32_t crystal_strap_mask, crystal_straps;
@@ -4332,7 +4333,7 @@ static void parse_bios_version(struct drm_device *dev, struct nvbios *bios, uint
*/
bios->major_version = bios->data[offset + 3];
- bios->pub.chip_version = bios->data[offset + 2];
+ bios->chip_version = bios->data[offset + 2];
NV_TRACE(dev, "Bios version %02x.%02x.%02x.%02x\n",
bios->data[offset + 3], bios->data[offset + 2],
bios->data[offset + 1], bios->data[offset]);
@@ -4402,7 +4403,7 @@ static int parse_bit_A_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
}
/* First entry is normal dac, 2nd tv-out perhaps? */
- bios->pub.dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
+ bios->dactestval = ROM32(bios->data[load_table_ptr + headerlen]) & 0x3ff;
return 0;
}
@@ -4526,8 +4527,8 @@ static int parse_bit_i_tbl_entry(struct drm_device *dev, struct nvbios *bios, st
return -ENOSYS;
}
- bios->pub.dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
- bios->pub.tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
+ bios->dactestval = ROM32(bios->data[daccmpoffset + dacheaderlen]);
+ bios->tvdactestval = ROM32(bios->data[daccmpoffset + dacheaderlen + 4]);
return 0;
}
@@ -4796,11 +4797,11 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
uint16_t legacy_scripts_offset, legacy_i2c_offset;
/* load needed defaults in case we can't parse this info */
- bios->bdcb.dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
- bios->bdcb.dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
- bios->bdcb.dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
- bios->bdcb.dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
- bios->pub.digital_min_front_porch = 0x4b;
+ bios->dcb.i2c[0].write = NV_CIO_CRE_DDC_WR__INDEX;
+ bios->dcb.i2c[0].read = NV_CIO_CRE_DDC_STATUS__INDEX;
+ bios->dcb.i2c[1].write = NV_CIO_CRE_DDC0_WR__INDEX;
+ bios->dcb.i2c[1].read = NV_CIO_CRE_DDC0_STATUS__INDEX;
+ bios->digital_min_front_porch = 0x4b;
bios->fmaxvco = 256000;
bios->fminvco = 128000;
bios->fp.duallink_transition_clk = 90000;
@@ -4907,10 +4908,10 @@ static int parse_bmp_structure(struct drm_device *dev, struct nvbios *bios, unsi
bios->legacy.i2c_indices.crt = bios->data[legacy_i2c_offset];
bios->legacy.i2c_indices.tv = bios->data[legacy_i2c_offset + 1];
bios->legacy.i2c_indices.panel = bios->data[legacy_i2c_offset + 2];
- bios->bdcb.dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
- bios->bdcb.dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
- bios->bdcb.dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
- bios->bdcb.dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
+ bios->dcb.i2c[0].write = bios->data[legacy_i2c_offset + 4];
+ bios->dcb.i2c[0].read = bios->data[legacy_i2c_offset + 5];
+ bios->dcb.i2c[1].write = bios->data[legacy_i2c_offset + 6];
+ bios->dcb.i2c[1].read = bios->data[legacy_i2c_offset + 7];
if (bmplength > 74) {
bios->fmaxvco = ROM32(bmp[67]);
@@ -4984,7 +4985,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
else
NV_WARN(dev,
"DCB I2C table has more entries than indexable "
- "(%d entries, max index 15)\n", i2ctable[2]);
+ "(%d entries, max %d)\n", i2ctable[2],
+ DCB_MAX_NUM_I2C_ENTRIES);
entry_len = i2ctable[3];
/* [4] is i2c_default_indices, read in parse_dcb_table() */
}
@@ -5000,8 +5002,8 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
if (index == 0xf)
return 0;
- if (index > i2c_entries) {
- NV_ERROR(dev, "DCB I2C index too big (%d > %d)\n",
+ if (index >= i2c_entries) {
+ NV_ERROR(dev, "DCB I2C index too big (%d >= %d)\n",
index, i2ctable[2]);
return -ENOENT;
}
@@ -5036,7 +5038,7 @@ read_dcb_i2c_entry(struct drm_device *dev, int dcb_version, uint8_t *i2ctable, i
static struct dcb_gpio_entry *
new_gpio_entry(struct nvbios *bios)
{
- struct parsed_dcb_gpio *gpio = &bios->bdcb.gpio;
+ struct dcb_gpio_table *gpio = &bios->dcb.gpio;
return &gpio->entry[gpio->entries++];
}
@@ -5045,14 +5047,14 @@ struct dcb_gpio_entry *
nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
int i;
- for (i = 0; i < bios->bdcb.gpio.entries; i++) {
- if (bios->bdcb.gpio.entry[i].tag != tag)
+ for (i = 0; i < bios->dcb.gpio.entries; i++) {
+ if (bios->dcb.gpio.entry[i].tag != tag)
continue;
- return &bios->bdcb.gpio.entry[i];
+ return &bios->dcb.gpio.entry[i];
}
return NULL;
@@ -5100,7 +5102,7 @@ static void
parse_dcb_gpio_table(struct nvbios *bios)
{
struct drm_device *dev = bios->dev;
- uint16_t gpio_table_ptr = bios->bdcb.gpio_table_ptr;
+ uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
uint8_t *gpio_table = &bios->data[gpio_table_ptr];
int header_len = gpio_table[1],
entries = gpio_table[2],
@@ -5108,7 +5110,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
int i;
- if (bios->bdcb.version >= 0x40) {
+ if (bios->dcb.version >= 0x40) {
if (gpio_table_ptr && entry_len != 4) {
NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
return;
@@ -5116,7 +5118,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
parse_entry = parse_dcb40_gpio_entry;
- } else if (bios->bdcb.version >= 0x30) {
+ } else if (bios->dcb.version >= 0x30) {
if (gpio_table_ptr && entry_len != 2) {
NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
return;
@@ -5124,7 +5126,7 @@ parse_dcb_gpio_table(struct nvbios *bios)
parse_entry = parse_dcb30_gpio_entry;
- } else if (bios->bdcb.version >= 0x22) {
+ } else if (bios->dcb.version >= 0x22) {
/*
* DCBs older than v3.0 don't really have a GPIO
* table, instead they keep some GPIO info at fixed
@@ -5158,30 +5160,67 @@ struct dcb_connector_table_entry *
nouveau_bios_connector_entry(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct dcb_connector_table_entry *cte;
- if (index >= bios->bdcb.connector.entries)
+ if (index >= bios->dcb.connector.entries)
return NULL;
- cte = &bios->bdcb.connector.entry[index];
+ cte = &bios->dcb.connector.entry[index];
if (cte->type == 0xff)
return NULL;
return cte;
}
+static enum dcb_connector_type
+divine_connector_type(struct nvbios *bios, int index)
+{
+ struct dcb_table *dcb = &bios->dcb;
+ unsigned encoders = 0, type = DCB_CONNECTOR_NONE;
+ int i;
+
+ for (i = 0; i < dcb->entries; i++) {
+ if (dcb->entry[i].connector == index)
+ encoders |= (1 << dcb->entry[i].type);
+ }
+
+ if (encoders & (1 << OUTPUT_DP)) {
+ if (encoders & (1 << OUTPUT_TMDS))
+ type = DCB_CONNECTOR_DP;
+ else
+ type = DCB_CONNECTOR_eDP;
+ } else
+ if (encoders & (1 << OUTPUT_TMDS)) {
+ if (encoders & (1 << OUTPUT_ANALOG))
+ type = DCB_CONNECTOR_DVI_I;
+ else
+ type = DCB_CONNECTOR_DVI_D;
+ } else
+ if (encoders & (1 << OUTPUT_ANALOG)) {
+ type = DCB_CONNECTOR_VGA;
+ } else
+ if (encoders & (1 << OUTPUT_LVDS)) {
+ type = DCB_CONNECTOR_LVDS;
+ } else
+ if (encoders & (1 << OUTPUT_TV)) {
+ type = DCB_CONNECTOR_TV_0;
+ }
+
+ return type;
+}
+
static void
parse_dcb_connector_table(struct nvbios *bios)
{
struct drm_device *dev = bios->dev;
- struct dcb_connector_table *ct = &bios->bdcb.connector;
+ struct dcb_connector_table *ct = &bios->dcb.connector;
struct dcb_connector_table_entry *cte;
- uint8_t *conntab = &bios->data[bios->bdcb.connector_table_ptr];
+ uint8_t *conntab = &bios->data[bios->dcb.connector_table_ptr];
uint8_t *entry;
int i;
- if (!bios->bdcb.connector_table_ptr) {
+ if (!bios->dcb.connector_table_ptr) {
NV_DEBUG_KMS(dev, "No DCB connector table present\n");
return;
}
@@ -5203,6 +5242,7 @@ parse_dcb_connector_table(struct nvbios *bios)
cte->entry = ROM16(entry[0]);
else
cte->entry = ROM32(entry[0]);
+
cte->type = (cte->entry & 0x000000ff) >> 0;
cte->index = (cte->entry & 0x00000f00) >> 8;
switch (cte->entry & 0x00033000) {
@@ -5228,10 +5268,33 @@ parse_dcb_connector_table(struct nvbios *bios)
NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
i, cte->entry, cte->type, cte->index, cte->gpio_tag);
+
+ /* check for known types, fall back to guessing the type
+ * from attached encoders if we hit an unknown.
+ */
+ switch (cte->type) {
+ case DCB_CONNECTOR_VGA:
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ case DCB_CONNECTOR_DVI_I:
+ case DCB_CONNECTOR_DVI_D:
+ case DCB_CONNECTOR_LVDS:
+ case DCB_CONNECTOR_DP:
+ case DCB_CONNECTOR_eDP:
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ break;
+ default:
+ cte->type = divine_connector_type(bios, cte->index);
+ NV_WARN(dev, "unknown type, using 0x%02x", cte->type);
+ break;
+ }
+
}
}
-static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
+static struct dcb_entry *new_dcb_entry(struct dcb_table *dcb)
{
struct dcb_entry *entry = &dcb->entry[dcb->entries];
@@ -5241,7 +5304,7 @@ static struct dcb_entry *new_dcb_entry(struct parsed_dcb *dcb)
return entry;
}
-static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
+static void fabricate_vga_output(struct dcb_table *dcb, int i2c, int heads)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
@@ -5252,7 +5315,7 @@ static void fabricate_vga_output(struct parsed_dcb *dcb, int i2c, int heads)
/* "or" mostly unused in early gen crt modesetting, 0 is fine */
}
-static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
+static void fabricate_dvi_i_output(struct dcb_table *dcb, bool twoHeads)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
@@ -5279,7 +5342,7 @@ static void fabricate_dvi_i_output(struct parsed_dcb *dcb, bool twoHeads)
#endif
}
-static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
+static void fabricate_tv_output(struct dcb_table *dcb, bool twoHeads)
{
struct dcb_entry *entry = new_dcb_entry(dcb);
@@ -5290,13 +5353,13 @@ static void fabricate_tv_output(struct parsed_dcb *dcb, bool twoHeads)
}
static bool
-parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
entry->type = conn & 0xf;
entry->i2c_index = (conn >> 4) & 0xf;
entry->heads = (conn >> 8) & 0xf;
- if (bdcb->version >= 0x40)
+ if (dcb->version >= 0x40)
entry->connector = (conn >> 12) & 0xf;
entry->bus = (conn >> 16) & 0xf;
entry->location = (conn >> 20) & 0x3;
@@ -5314,7 +5377,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
* Although the rest of a CRT conf dword is usually
* zeros, mac biosen have stuff there so we must mask
*/
- entry->crtconf.maxfreq = (bdcb->version < 0x30) ?
+ entry->crtconf.maxfreq = (dcb->version < 0x30) ?
(conf & 0xffff) * 10 :
(conf & 0xff) * 10000;
break;
@@ -5323,7 +5386,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
uint32_t mask;
if (conf & 0x1)
entry->lvdsconf.use_straps_for_mode = true;
- if (bdcb->version < 0x22) {
+ if (dcb->version < 0x22) {
mask = ~0xd;
/*
* The laptop in bug 14567 lies and claims to not use
@@ -5347,7 +5410,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
* Until we even try to use these on G8x, it's
* useless reporting unknown bits. They all are.
*/
- if (bdcb->version >= 0x40)
+ if (dcb->version >= 0x40)
break;
NV_ERROR(dev, "Unknown LVDS configuration bits, "
@@ -5357,7 +5420,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
}
case OUTPUT_TV:
{
- if (bdcb->version >= 0x30)
+ if (dcb->version >= 0x30)
entry->tvconf.has_component_output = conf & (0x8 << 4);
else
entry->tvconf.has_component_output = false;
@@ -5384,8 +5447,10 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
break;
case 0xe:
/* weird g80 mobile type that "nv" treats as a terminator */
- bdcb->dcb.entries--;
+ dcb->entries--;
return false;
+ default:
+ break;
}
/* unsure what DCB version introduces this, 3.0? */
@@ -5396,7 +5461,7 @@ parse_dcb20_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
}
static bool
-parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
+parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf, struct dcb_entry *entry)
{
switch (conn & 0x0000000f) {
@@ -5462,27 +5527,27 @@ parse_dcb15_entry(struct drm_device *dev, struct parsed_dcb *dcb,
return true;
}
-static bool parse_dcb_entry(struct drm_device *dev, struct bios_parsed_dcb *bdcb,
+static bool parse_dcb_entry(struct drm_device *dev, struct dcb_table *dcb,
uint32_t conn, uint32_t conf)
{
- struct dcb_entry *entry = new_dcb_entry(&bdcb->dcb);
+ struct dcb_entry *entry = new_dcb_entry(dcb);
bool ret;
- if (bdcb->version >= 0x20)
- ret = parse_dcb20_entry(dev, bdcb, conn, conf, entry);
+ if (dcb->version >= 0x20)
+ ret = parse_dcb20_entry(dev, dcb, conn, conf, entry);
else
- ret = parse_dcb15_entry(dev, &bdcb->dcb, conn, conf, entry);
+ ret = parse_dcb15_entry(dev, dcb, conn, conf, entry);
if (!ret)
return ret;
- read_dcb_i2c_entry(dev, bdcb->version, bdcb->i2c_table,
- entry->i2c_index, &bdcb->dcb.i2c[entry->i2c_index]);
+ read_dcb_i2c_entry(dev, dcb->version, dcb->i2c_table,
+ entry->i2c_index, &dcb->i2c[entry->i2c_index]);
return true;
}
static
-void merge_like_dcb_entries(struct drm_device *dev, struct parsed_dcb *dcb)
+void merge_like_dcb_entries(struct drm_device *dev, struct dcb_table *dcb)
{
/*
* DCB v2.0 lists each output combination separately.
@@ -5534,8 +5599,7 @@ static int
parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct bios_parsed_dcb *bdcb = &bios->bdcb;
- struct parsed_dcb *dcb;
+ struct dcb_table *dcb = &bios->dcb;
uint16_t dcbptr = 0, i2ctabptr = 0;
uint8_t *dcbtable;
uint8_t headerlen = 0x4, entries = DCB_MAX_NUM_ENTRIES;
@@ -5543,9 +5607,6 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
int recordlength = 8, confofs = 4;
int i;
- dcb = bios->pub.dcb = &bdcb->dcb;
- dcb->entries = 0;
-
/* get the offset from 0x36 */
if (dev_priv->card_type > NV_04) {
dcbptr = ROM16(bios->data[0x36]);
@@ -5567,21 +5628,21 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
dcbtable = &bios->data[dcbptr];
/* get DCB version */
- bdcb->version = dcbtable[0];
+ dcb->version = dcbtable[0];
NV_TRACE(dev, "Found Display Configuration Block version %d.%d\n",
- bdcb->version >> 4, bdcb->version & 0xf);
+ dcb->version >> 4, dcb->version & 0xf);
- if (bdcb->version >= 0x20) { /* NV17+ */
+ if (dcb->version >= 0x20) { /* NV17+ */
uint32_t sig;
- if (bdcb->version >= 0x30) { /* NV40+ */
+ if (dcb->version >= 0x30) { /* NV40+ */
headerlen = dcbtable[1];
entries = dcbtable[2];
recordlength = dcbtable[3];
i2ctabptr = ROM16(dcbtable[4]);
sig = ROM32(dcbtable[6]);
- bdcb->gpio_table_ptr = ROM16(dcbtable[10]);
- bdcb->connector_table_ptr = ROM16(dcbtable[20]);
+ dcb->gpio_table_ptr = ROM16(dcbtable[10]);
+ dcb->connector_table_ptr = ROM16(dcbtable[20]);
} else {
i2ctabptr = ROM16(dcbtable[2]);
sig = ROM32(dcbtable[4]);
@@ -5593,7 +5654,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
"signature (%08X)\n", sig);
return -EINVAL;
}
- } else if (bdcb->version >= 0x15) { /* some NV11 and NV20 */
+ } else if (dcb->version >= 0x15) { /* some NV11 and NV20 */
char sig[8] = { 0 };
strncpy(sig, (char *)&dcbtable[-7], 7);
@@ -5641,14 +5702,11 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
if (!i2ctabptr)
NV_WARN(dev, "No pointer to DCB I2C port table\n");
else {
- bdcb->i2c_table = &bios->data[i2ctabptr];
- if (bdcb->version >= 0x30)
- bdcb->i2c_default_indices = bdcb->i2c_table[4];
+ dcb->i2c_table = &bios->data[i2ctabptr];
+ if (dcb->version >= 0x30)
+ dcb->i2c_default_indices = dcb->i2c_table[4];
}
- parse_dcb_gpio_table(bios);
- parse_dcb_connector_table(bios);
-
if (entries > DCB_MAX_NUM_ENTRIES)
entries = DCB_MAX_NUM_ENTRIES;
@@ -5673,7 +5731,7 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
NV_TRACEWARN(dev, "Raw DCB entry %d: %08x %08x\n",
dcb->entries, connection, config);
- if (!parse_dcb_entry(dev, bdcb, connection, config))
+ if (!parse_dcb_entry(dev, dcb, connection, config))
break;
}
@@ -5681,18 +5739,22 @@ parse_dcb_table(struct drm_device *dev, struct nvbios *bios, bool twoHeads)
* apart from v2.1+ not being known to require merging, this
* guarantees dcbent->index is the index of the entry in the rom image
*/
- if (bdcb->version < 0x21)
+ if (dcb->version < 0x21)
merge_like_dcb_entries(dev, dcb);
- return dcb->entries ? 0 : -ENXIO;
+ if (!dcb->entries)
+ return -ENXIO;
+
+ parse_dcb_gpio_table(bios);
+ parse_dcb_connector_table(bios);
+ return 0;
}
static void
fixup_legacy_connector(struct nvbios *bios)
{
- struct bios_parsed_dcb *bdcb = &bios->bdcb;
- struct parsed_dcb *dcb = &bdcb->dcb;
- int high = 0, i;
+ struct dcb_table *dcb = &bios->dcb;
+ int i, i2c, i2c_conn[DCB_MAX_NUM_I2C_ENTRIES] = { };
/*
* DCB 3.0 also has the table in most cases, but there are some cards
@@ -5700,9 +5762,11 @@ fixup_legacy_connector(struct nvbios *bios)
* indices are all 0. We don't need the connector indices on pre-G80
* chips (yet?) so limit the use to DCB 4.0 and above.
*/
- if (bdcb->version >= 0x40)
+ if (dcb->version >= 0x40)
return;
+ dcb->connector.entries = 0;
+
/*
* No known connector info before v3.0, so make it up. the rule here
* is: anything on the same i2c bus is considered to be on the same
@@ -5710,37 +5774,38 @@ fixup_legacy_connector(struct nvbios *bios)
* its own unique connector index.
*/
for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].i2c_index == 0xf)
- continue;
-
/*
* Ignore the I2C index for on-chip TV-out, as there
* are cards with bogus values (nv31m in bug 23212),
* and it's otherwise useless.
*/
if (dcb->entry[i].type == OUTPUT_TV &&
- dcb->entry[i].location == DCB_LOC_ON_CHIP) {
+ dcb->entry[i].location == DCB_LOC_ON_CHIP)
dcb->entry[i].i2c_index = 0xf;
+ i2c = dcb->entry[i].i2c_index;
+
+ if (i2c_conn[i2c]) {
+ dcb->entry[i].connector = i2c_conn[i2c] - 1;
continue;
}
- dcb->entry[i].connector = dcb->entry[i].i2c_index;
- if (dcb->entry[i].connector > high)
- high = dcb->entry[i].connector;
+ dcb->entry[i].connector = dcb->connector.entries++;
+ if (i2c != 0xf)
+ i2c_conn[i2c] = dcb->connector.entries;
}
- for (i = 0; i < dcb->entries; i++) {
- if (dcb->entry[i].i2c_index != 0xf)
- continue;
-
- dcb->entry[i].connector = ++high;
+ /* Fake the connector table entries as well, not just the connector indices */
+ for (i = 0; i < dcb->connector.entries; i++) {
+ dcb->connector.entry[i].index = i;
+ dcb->connector.entry[i].type = divine_connector_type(bios, i);
+ dcb->connector.entry[i].gpio_tag = 0xff;
}
}
static void
fixup_legacy_i2c(struct nvbios *bios)
{
- struct parsed_dcb *dcb = &bios->bdcb.dcb;
+ struct dcb_table *dcb = &bios->dcb;
int i;
for (i = 0; i < dcb->entries; i++) {
@@ -5826,7 +5891,7 @@ static int load_nv17_hw_sequencer_ucode(struct drm_device *dev,
uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
const uint8_t edid_sig[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
uint16_t offset = 0;
@@ -5859,7 +5924,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
struct dcb_entry *dcbent)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct init_exec iexec = { true, false };
mutex_lock(&bios->lock);
@@ -5872,7 +5937,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
static bool NVInitVBIOS(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
memset(bios, 0, sizeof(struct nvbios));
mutex_init(&bios->lock);
@@ -5888,7 +5953,7 @@ static bool NVInitVBIOS(struct drm_device *dev)
static int nouveau_parse_vbios_struct(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
const uint8_t bit_signature[] = { 0xff, 0xb8, 'B', 'I', 'T' };
const uint8_t bmp_signature[] = { 0xff, 0x7f, 'N', 'V', 0x0 };
int offset;
@@ -5915,7 +5980,7 @@ int
nouveau_run_vbios_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
int i, ret = 0;
NVLockVgaCrtcs(dev, false);
@@ -5946,9 +6011,9 @@ nouveau_run_vbios_init(struct drm_device *dev)
}
if (dev_priv->card_type >= NV_50) {
- for (i = 0; i < bios->bdcb.dcb.entries; i++) {
+ for (i = 0; i < bios->dcb.entries; i++) {
nouveau_bios_run_display_table(dev,
- &bios->bdcb.dcb.entry[i],
+ &bios->dcb.entry[i],
0, 0);
}
}
@@ -5962,11 +6027,11 @@ static void
nouveau_bios_i2c_devices_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
struct dcb_i2c_entry *entry;
int i;
- entry = &bios->bdcb.dcb.i2c[0];
+ entry = &bios->dcb.i2c[0];
for (i = 0; i < DCB_MAX_NUM_I2C_ENTRIES; i++, entry++)
nouveau_i2c_fini(dev, entry);
}
@@ -5975,13 +6040,11 @@ int
nouveau_bios_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint32_t saved_nv_pextdev_boot_0;
bool was_locked;
int ret;
- dev_priv->vbios = &bios->pub;
-
if (!NVInitVBIOS(dev))
return -ENODEV;
@@ -6023,10 +6086,8 @@ nouveau_bios_init(struct drm_device *dev)
bios_wr32(bios, NV_PEXTDEV_BOOT_0, saved_nv_pextdev_boot_0);
ret = nouveau_run_vbios_init(dev);
- if (ret) {
- dev_priv->vbios = NULL;
+ if (ret)
return ret;
- }
/* feature_byte on BMP is poor, but init always sets CR4B */
was_locked = NVLockVgaCrtcs(dev, false);
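The nouveau_bios.c hunks above collapse the old bdcb/parsed_dcb split into a single dcb_table, defer parse_dcb_gpio_table()/parse_dcb_connector_table() until the DCB entries themselves have parsed successfully, and teach fixup_legacy_connector() to fabricate a connector table for pre-3.0 DCBs by grouping outputs that share an i2c bus. A standalone sketch of that grouping rule follows; the types and names are simplified stand-ins, not the driver's structures.

#include <stdio.h>

#define MAX_I2C 16

struct fake_output {
	int i2c_index;   /* 0xf means "no i2c bus" */
	int connector;   /* filled in by assign_connectors() */
};

static int assign_connectors(struct fake_output *out, int n)
{
	int i2c_conn[MAX_I2C] = { 0 };  /* connector index + 1 per bus, 0 = unused */
	int connectors = 0;
	int i;

	for (i = 0; i < n; i++) {
		int i2c = out[i].i2c_index;

		/* outputs sharing an i2c bus share a connector */
		if (i2c != 0xf && i2c_conn[i2c]) {
			out[i].connector = i2c_conn[i2c] - 1;
			continue;
		}

		/* otherwise allocate a fresh connector index */
		out[i].connector = connectors++;
		if (i2c != 0xf)
			i2c_conn[i2c] = connectors;
	}
	return connectors;
}

int main(void)
{
	struct fake_output out[] = { { 0x0 }, { 0x1 }, { 0x0 }, { 0xf }, { 0xf } };
	int n = sizeof(out) / sizeof(out[0]);
	int i, total = assign_connectors(out, n);

	for (i = 0; i < n; i++)
		printf("output %d: i2c 0x%x -> connector %d\n",
		       i, out[i].i2c_index, out[i].connector);
	printf("%d connectors total\n", total);
	return 0;
}

Outputs 0 and 2 end up on the same connector, while the two bus-less outputs each get one of their own, matching the rule described in the comment block above.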
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index fd94bd6dc26..9f688aa9a65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -34,9 +34,67 @@
#define DCB_LOC_ON_CHIP 0
+struct dcb_i2c_entry {
+ uint8_t port_type;
+ uint8_t read, write;
+ struct nouveau_i2c_chan *chan;
+};
+
+enum dcb_gpio_tag {
+ DCB_GPIO_TVDAC0 = 0xc,
+ DCB_GPIO_TVDAC1 = 0x2d,
+};
+
+struct dcb_gpio_entry {
+ enum dcb_gpio_tag tag;
+ int line;
+ bool invert;
+};
+
+struct dcb_gpio_table {
+ int entries;
+ struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
+};
+
+enum dcb_connector_type {
+ DCB_CONNECTOR_VGA = 0x00,
+ DCB_CONNECTOR_TV_0 = 0x10,
+ DCB_CONNECTOR_TV_1 = 0x11,
+ DCB_CONNECTOR_TV_3 = 0x13,
+ DCB_CONNECTOR_DVI_I = 0x30,
+ DCB_CONNECTOR_DVI_D = 0x31,
+ DCB_CONNECTOR_LVDS = 0x40,
+ DCB_CONNECTOR_DP = 0x46,
+ DCB_CONNECTOR_eDP = 0x47,
+ DCB_CONNECTOR_HDMI_0 = 0x60,
+ DCB_CONNECTOR_HDMI_1 = 0x61,
+ DCB_CONNECTOR_NONE = 0xff
+};
+
+struct dcb_connector_table_entry {
+ uint32_t entry;
+ enum dcb_connector_type type;
+ uint8_t index;
+ uint8_t gpio_tag;
+};
+
+struct dcb_connector_table {
+ int entries;
+ struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
+};
+
+enum dcb_type {
+ OUTPUT_ANALOG = 0,
+ OUTPUT_TV = 1,
+ OUTPUT_TMDS = 2,
+ OUTPUT_LVDS = 3,
+ OUTPUT_DP = 6,
+ OUTPUT_ANY = -1
+};
+
struct dcb_entry {
int index; /* may not be raw dcb index if merging has happened */
- uint8_t type;
+ enum dcb_type type;
uint8_t i2c_index;
uint8_t heads;
uint8_t connector;
@@ -71,69 +129,22 @@ struct dcb_entry {
bool i2c_upper_default;
};
-struct dcb_i2c_entry {
- uint8_t port_type;
- uint8_t read, write;
- struct nouveau_i2c_chan *chan;
-};
+struct dcb_table {
+ uint8_t version;
-struct parsed_dcb {
int entries;
struct dcb_entry entry[DCB_MAX_NUM_ENTRIES];
- struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
-};
-
-enum dcb_gpio_tag {
- DCB_GPIO_TVDAC0 = 0xc,
- DCB_GPIO_TVDAC1 = 0x2d,
-};
-
-struct dcb_gpio_entry {
- enum dcb_gpio_tag tag;
- int line;
- bool invert;
-};
-
-struct parsed_dcb_gpio {
- int entries;
- struct dcb_gpio_entry entry[DCB_MAX_NUM_GPIO_ENTRIES];
-};
-
-struct dcb_connector_table_entry {
- uint32_t entry;
- uint8_t type;
- uint8_t index;
- uint8_t gpio_tag;
-};
-
-struct dcb_connector_table {
- int entries;
- struct dcb_connector_table_entry entry[DCB_MAX_NUM_CONNECTOR_ENTRIES];
-};
-
-struct bios_parsed_dcb {
- uint8_t version;
-
- struct parsed_dcb dcb;
uint8_t *i2c_table;
uint8_t i2c_default_indices;
+ struct dcb_i2c_entry i2c[DCB_MAX_NUM_I2C_ENTRIES];
uint16_t gpio_table_ptr;
- struct parsed_dcb_gpio gpio;
+ struct dcb_gpio_table gpio;
uint16_t connector_table_ptr;
struct dcb_connector_table connector;
};
-enum nouveau_encoder_type {
- OUTPUT_ANALOG = 0,
- OUTPUT_TV = 1,
- OUTPUT_TMDS = 2,
- OUTPUT_LVDS = 3,
- OUTPUT_DP = 6,
- OUTPUT_ANY = -1
-};
-
enum nouveau_or {
OUTPUT_A = (1 << 0),
OUTPUT_B = (1 << 1),
@@ -190,8 +201,8 @@ struct pll_lims {
int refclk;
};
-struct nouveau_bios_info {
- struct parsed_dcb *dcb;
+struct nvbios {
+ struct drm_device *dev;
uint8_t chip_version;
@@ -199,11 +210,6 @@ struct nouveau_bios_info {
uint32_t tvdactestval;
uint8_t digital_min_front_porch;
bool fp_no_ddc;
-};
-
-struct nvbios {
- struct drm_device *dev;
- struct nouveau_bios_info pub;
struct mutex lock;
@@ -234,7 +240,7 @@ struct nvbios {
uint16_t some_script_ptr; /* BIT I + 14 */
uint16_t init96_tbl_ptr; /* BIT I + 16 */
- struct bios_parsed_dcb bdcb;
+ struct dcb_table dcb;
struct {
int crtchead;
diff --git a/drivers/gpu/drm/nouveau/nouveau_calc.c b/drivers/gpu/drm/nouveau/nouveau_calc.c
index ee2b84504d0..88f9bc0941e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_calc.c
+++ b/drivers/gpu/drm/nouveau/nouveau_calc.c
@@ -274,7 +274,7 @@ getMNP_single(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
* returns calculated clock
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int cv = dev_priv->vbios->chip_version;
+ int cv = dev_priv->vbios.chip_version;
int minvco = pll_lim->vco1.minfreq, maxvco = pll_lim->vco1.maxfreq;
int minM = pll_lim->vco1.min_m, maxM = pll_lim->vco1.max_m;
int minN = pll_lim->vco1.min_n, maxN = pll_lim->vco1.max_n;
@@ -373,7 +373,7 @@ getMNP_double(struct drm_device *dev, struct pll_lims *pll_lim, int clk,
* returns calculated clock
*/
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chip_version = dev_priv->vbios->chip_version;
+ int chip_version = dev_priv->vbios.chip_version;
int minvco1 = pll_lim->vco1.minfreq, maxvco1 = pll_lim->vco1.maxfreq;
int minvco2 = pll_lim->vco2.minfreq, maxvco2 = pll_lim->vco2.maxfreq;
int minU1 = pll_lim->vco1.min_inputfreq, minU2 = pll_lim->vco2.min_inputfreq;
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 2281f99da7f..6dfb425cbae 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -35,22 +35,27 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_bo *pb = chan->pushbuf_bo;
struct nouveau_gpuobj *pushbuf = NULL;
- uint32_t start = pb->bo.mem.mm_node->start << PAGE_SHIFT;
int ret;
+ if (dev_priv->card_type >= NV_50) {
+ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+ dev_priv->vm_end, NV_DMA_ACCESS_RO,
+ NV_DMA_TARGET_AGP, &pushbuf);
+ chan->pushbuf_base = pb->bo.offset;
+ } else
if (pb->bo.mem.mem_type == TTM_PL_TT) {
ret = nouveau_gpuobj_gart_dma_new(chan, 0,
dev_priv->gart_info.aper_size,
NV_DMA_ACCESS_RO, &pushbuf,
NULL);
- chan->pushbuf_base = start;
+ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
} else
if (dev_priv->card_type != NV_04) {
ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_VIDMEM, &pushbuf);
- chan->pushbuf_base = start;
+ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
} else {
/* NV04 cmdbuf hack, from original ddx... not sure of its
* exact reason for existing :) PCI access to cmdbuf in
@@ -61,7 +66,7 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
dev_priv->fb_available_size,
NV_DMA_ACCESS_RO,
NV_DMA_TARGET_PCI, &pushbuf);
- chan->pushbuf_base = start;
+ chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
}
ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
@@ -275,9 +280,18 @@ nouveau_channel_free(struct nouveau_channel *chan)
*/
nouveau_fence_fini(chan);
- /* Ensure the channel is no longer active on the GPU */
+ /* This will prevent pfifo from switching channels. */
pfifo->reassign(dev, false);
+ /* We want to give pgraph a chance to idle and get rid of all potential
+ * errors. We need to do this before taking the lock; otherwise the irq
+ * handler is unable to process them.
+ */
+ if (pgraph->channel(dev) == chan)
+ nouveau_wait_for_idle(dev);
+
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
pgraph->fifo_access(dev, false);
if (pgraph->channel(dev) == chan)
pgraph->unload_context(dev);
@@ -293,6 +307,8 @@ nouveau_channel_free(struct nouveau_channel *chan)
pfifo->reassign(dev, true);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
/* Release the channel's resources */
nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
if (chan->pushbuf_bo) {
@@ -369,6 +385,14 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
return ret;
init->channel = chan->id;
+ if (chan->dma.ib_max)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+ NOUVEAU_GEM_DOMAIN_GART;
+ else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+ else
+ init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+
init->subchan[0].handle = NvM2MF;
if (dev_priv->card_type < NV_50)
init->subchan[0].grclass = 0x0039;
@@ -408,7 +432,6 @@ nouveau_ioctl_fifo_free(struct drm_device *dev, void *data,
***********************************/
struct drm_ioctl_desc nouveau_ioctls[] = {
- DRM_IOCTL_DEF(DRM_NOUVEAU_CARD_INIT, nouveau_ioctl_card_init, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
DRM_IOCTL_DEF(DRM_NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_AUTH),
@@ -418,13 +441,9 @@ struct drm_ioctl_desc nouveau_ioctls[] = {
DRM_IOCTL_DEF(DRM_NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL, nouveau_gem_ioctl_pushbuf_call, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PIN, nouveau_gem_ioctl_pin, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_UNPIN, nouveau_gem_ioctl_unpin, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_AUTH),
DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_NOUVEAU_GEM_PUSHBUF_CALL2, nouveau_gem_ioctl_pushbuf_call2, DRM_AUTH),
};
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
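Besides serialising channel teardown against the new context_switch_lock, nouveau_ioctl_fifo_alloc() now reports which memory domains the channel's push buffer accepts: IB-capable (NV50) channels take pushes from either VRAM or GART, other channels only from wherever the push buffer itself was placed. A minimal standalone model of that selection, with stand-in constants for the NOUVEAU_GEM_DOMAIN_* flags:

#include <stdio.h>

#define DOM_VRAM 0x1
#define DOM_GART 0x2

static unsigned pushbuf_domains(int has_ib, int pushbuf_in_vram)
{
	if (has_ib)
		return DOM_VRAM | DOM_GART;   /* IB ring: push from either aperture */
	return pushbuf_in_vram ? DOM_VRAM : DOM_GART;
}

int main(void)
{
	printf("ib channel:   0x%x\n", pushbuf_domains(1, 0));
	printf("vram pushbuf: 0x%x\n", pushbuf_domains(0, 1));
	printf("gart pushbuf: 0x%x\n", pushbuf_domains(0, 0));
	return 0;
}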
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index d2f63353ea9..24327f468c4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -218,7 +218,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
connector->interlace_allowed = true;
}
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
drm_connector_property_set_value(connector,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == OUTPUT_TMDS ?
@@ -236,15 +236,17 @@ nouveau_connector_detect(struct drm_connector *connector)
struct nouveau_i2c_chan *i2c;
int type, flags;
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS)
nv_encoder = find_encoder_by_type(connector, OUTPUT_LVDS);
if (nv_encoder && nv_connector->native_mode) {
+ unsigned status = connector_status_connected;
+
#ifdef CONFIG_ACPI
if (!nouveau_ignorelid && !acpi_lid_open())
- return connector_status_disconnected;
+ status = connector_status_unknown;
#endif
nouveau_connector_set_encoder(connector, nv_encoder);
- return connector_status_connected;
+ return status;
}
/* Cleanup the previous EDID block. */
@@ -279,7 +281,7 @@ nouveau_connector_detect(struct drm_connector *connector)
* same i2c channel so the value returned from ddc_detect
* isn't necessarily correct.
*/
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -321,11 +323,11 @@ detect_analog:
static void
nouveau_connector_force(struct drm_connector *connector)
{
- struct drm_device *dev = connector->dev;
+ struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
int type;
- if (connector->connector_type == DRM_MODE_CONNECTOR_DVII) {
+ if (nv_connector->dcb->type == DCB_CONNECTOR_DVI_I) {
if (connector->force == DRM_FORCE_ON_DIGITAL)
type = OUTPUT_TMDS;
else
@@ -335,7 +337,7 @@ nouveau_connector_force(struct drm_connector *connector)
nv_encoder = find_encoder_by_type(connector, type);
if (!nv_encoder) {
- NV_ERROR(dev, "can't find encoder to force %s on!\n",
+ NV_ERROR(connector->dev, "can't find encoder to force %s on!\n",
drm_get_connector_name(connector));
connector->status = connector_status_disconnected;
return;
@@ -369,7 +371,7 @@ nouveau_connector_set_property(struct drm_connector *connector,
}
/* LVDS always needs gpu scaling */
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
+ if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS &&
value == DRM_MODE_SCALE_NONE)
return -EINVAL;
@@ -535,7 +537,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
/* If we're not LVDS, destroy the previous native mode, the attached
* monitor could have changed.
*/
- if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+ if (nv_connector->dcb->type != DCB_CONNECTOR_LVDS &&
nv_connector->native_mode) {
drm_mode_destroy(dev, nv_connector->native_mode);
nv_connector->native_mode = NULL;
@@ -563,7 +565,7 @@ nouveau_connector_get_modes(struct drm_connector *connector)
ret = get_slave_funcs(nv_encoder)->
get_modes(to_drm_encoder(nv_encoder), connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
+ if (nv_encoder->dcb->type == OUTPUT_LVDS)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
@@ -613,6 +615,9 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
clock *= 3;
break;
+ default:
+ BUG_ON(1);
+ return MODE_BAD;
}
if (clock < min_clock)
@@ -680,7 +685,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
/* Firstly try getting EDID over DDC, if allowed and I2C channel
* is available.
*/
- if (!dev_priv->VBIOS.pub.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
+ if (!dev_priv->vbios.fp_no_ddc && nv_encoder->dcb->i2c_index < 0xf)
i2c = nouveau_i2c_find(dev, nv_encoder->dcb->i2c_index);
if (i2c) {
@@ -695,7 +700,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
*/
if (!nv_connector->edid && nouveau_bios_fp_mode(dev, &native) &&
(nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
- dev_priv->VBIOS.pub.fp_no_ddc)) {
+ dev_priv->vbios.fp_no_ddc)) {
nv_connector->native_mode = drm_mode_duplicate(dev, &native);
goto out;
}
@@ -704,7 +709,7 @@ nouveau_connector_create_lvds(struct drm_device *dev,
* stored for the panel stored in them.
*/
if (!nv_connector->edid && !nv_connector->native_mode &&
- !dev_priv->VBIOS.pub.fp_no_ddc) {
+ !dev_priv->vbios.fp_no_ddc) {
struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
@@ -739,46 +744,66 @@ out:
}
int
-nouveau_connector_create(struct drm_device *dev, int index, int type)
+nouveau_connector_create(struct drm_device *dev,
+ struct dcb_connector_table_entry *dcb)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
struct drm_encoder *encoder;
- int ret;
+ int ret, type;
NV_DEBUG_KMS(dev, "\n");
- nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
- if (!nv_connector)
- return -ENOMEM;
- nv_connector->dcb = nouveau_bios_connector_entry(dev, index);
- connector = &nv_connector->base;
-
- switch (type) {
- case DRM_MODE_CONNECTOR_VGA:
+ switch (dcb->type) {
+ case DCB_CONNECTOR_NONE:
+ return 0;
+ case DCB_CONNECTOR_VGA:
NV_INFO(dev, "Detected a VGA connector\n");
+ type = DRM_MODE_CONNECTOR_VGA;
break;
- case DRM_MODE_CONNECTOR_DVID:
- NV_INFO(dev, "Detected a DVI-D connector\n");
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ NV_INFO(dev, "Detected a TV connector\n");
+ type = DRM_MODE_CONNECTOR_TV;
break;
- case DRM_MODE_CONNECTOR_DVII:
+ case DCB_CONNECTOR_DVI_I:
NV_INFO(dev, "Detected a DVI-I connector\n");
+ type = DRM_MODE_CONNECTOR_DVII;
break;
- case DRM_MODE_CONNECTOR_LVDS:
- NV_INFO(dev, "Detected a LVDS connector\n");
+ case DCB_CONNECTOR_DVI_D:
+ NV_INFO(dev, "Detected a DVI-D connector\n");
+ type = DRM_MODE_CONNECTOR_DVID;
break;
- case DRM_MODE_CONNECTOR_TV:
- NV_INFO(dev, "Detected a TV connector\n");
+ case DCB_CONNECTOR_HDMI_0:
+ case DCB_CONNECTOR_HDMI_1:
+ NV_INFO(dev, "Detected an HDMI connector\n");
+ type = DRM_MODE_CONNECTOR_HDMIA;
+ break;
+ case DCB_CONNECTOR_LVDS:
+ NV_INFO(dev, "Detected a LVDS connector\n");
+ type = DRM_MODE_CONNECTOR_LVDS;
break;
- case DRM_MODE_CONNECTOR_DisplayPort:
+ case DCB_CONNECTOR_DP:
NV_INFO(dev, "Detected a DisplayPort connector\n");
+ type = DRM_MODE_CONNECTOR_DisplayPort;
break;
- default:
- NV_ERROR(dev, "Unknown connector, this is not good.\n");
+ case DCB_CONNECTOR_eDP:
+ NV_INFO(dev, "Detected an eDP connector\n");
+ type = DRM_MODE_CONNECTOR_eDP;
break;
+ default:
+ NV_ERROR(dev, "unknown connector type: 0x%02x!!\n", dcb->type);
+ return -EINVAL;
}
+ nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
+ if (!nv_connector)
+ return -ENOMEM;
+ nv_connector->dcb = dcb;
+ connector = &nv_connector->base;
+
/* defaults, will get overridden in detect() */
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
@@ -786,55 +811,65 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
drm_connector_init(dev, connector, &nouveau_connector_funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
+ /* attach encoders */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+ if (nv_encoder->dcb->connector != dcb->index)
+ continue;
+
+ if (get_slave_funcs(nv_encoder))
+ get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+
+ drm_mode_connector_attach_encoder(connector, encoder);
+ }
+
+ if (!connector->encoder_ids[0]) {
+ NV_WARN(dev, " no encoders, ignoring\n");
+ drm_connector_cleanup(connector);
+ kfree(connector);
+ return 0;
+ }
+
/* Init DVI-I specific properties */
- if (type == DRM_MODE_CONNECTOR_DVII) {
+ if (dcb->type == DCB_CONNECTOR_DVI_I) {
drm_mode_create_dvi_i_properties(dev);
drm_connector_attach_property(connector, dev->mode_config.dvi_i_subconnector_property, 0);
drm_connector_attach_property(connector, dev->mode_config.dvi_i_select_subconnector_property, 0);
}
- if (type != DRM_MODE_CONNECTOR_LVDS)
+ if (dcb->type != DCB_CONNECTOR_LVDS)
nv_connector->use_dithering = false;
- if (type == DRM_MODE_CONNECTOR_DVID ||
- type == DRM_MODE_CONNECTOR_DVII ||
- type == DRM_MODE_CONNECTOR_LVDS ||
- type == DRM_MODE_CONNECTOR_DisplayPort) {
- nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
-
- drm_connector_attach_property(connector, dev->mode_config.scaling_mode_property,
- nv_connector->scaling_mode);
- drm_connector_attach_property(connector, dev->mode_config.dithering_mode_property,
- nv_connector->use_dithering ? DRM_MODE_DITHERING_ON
- : DRM_MODE_DITHERING_OFF);
-
- } else {
- nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
-
- if (type == DRM_MODE_CONNECTOR_VGA &&
- dev_priv->card_type >= NV_50) {
+ switch (dcb->type) {
+ case DCB_CONNECTOR_VGA:
+ if (dev_priv->card_type >= NV_50) {
drm_connector_attach_property(connector,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
- }
-
- /* attach encoders */
- list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
-
- if (nv_encoder->dcb->connector != index)
- continue;
-
- if (get_slave_funcs(nv_encoder))
- get_slave_funcs(nv_encoder)->create_resources(encoder, connector);
+ /* fall-through */
+ case DCB_CONNECTOR_TV_0:
+ case DCB_CONNECTOR_TV_1:
+ case DCB_CONNECTOR_TV_3:
+ nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
+ break;
+ default:
+ nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
- drm_mode_connector_attach_encoder(connector, encoder);
+ drm_connector_attach_property(connector,
+ dev->mode_config.scaling_mode_property,
+ nv_connector->scaling_mode);
+ drm_connector_attach_property(connector,
+ dev->mode_config.dithering_mode_property,
+ nv_connector->use_dithering ?
+ DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
+ break;
}
drm_sysfs_connector_add(connector);
- if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) {
+ if (dcb->type == DCB_CONNECTOR_LVDS) {
ret = nouveau_connector_create_lvds(dev, connector);
if (ret) {
connector->funcs->destroy(connector);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index 728b8090e5f..4ef38abc2d9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -49,6 +49,7 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
-int nouveau_connector_create(struct drm_device *dev, int i2c_index, int type);
+int nouveau_connector_create(struct drm_device *,
+ struct dcb_connector_table_entry *);
#endif /* __NOUVEAU_CONNECTOR_H__ */
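nouveau_connector_create() now takes a DCB connector-table entry and derives the DRM connector type from the DCB type code, rather than having the caller pass a DRM type. A standalone sketch of that mapping; the DCB codes match the header added in this patch, while the returned strings stand in for the real DRM_MODE_CONNECTOR_* constants:

#include <stdio.h>

enum dcb_connector_type {
	DCB_CONNECTOR_VGA    = 0x00,
	DCB_CONNECTOR_TV_0   = 0x10,
	DCB_CONNECTOR_TV_1   = 0x11,
	DCB_CONNECTOR_TV_3   = 0x13,
	DCB_CONNECTOR_DVI_I  = 0x30,
	DCB_CONNECTOR_DVI_D  = 0x31,
	DCB_CONNECTOR_LVDS   = 0x40,
	DCB_CONNECTOR_DP     = 0x46,
	DCB_CONNECTOR_eDP    = 0x47,
	DCB_CONNECTOR_HDMI_0 = 0x60,
	DCB_CONNECTOR_HDMI_1 = 0x61,
	DCB_CONNECTOR_NONE   = 0xff
};

static const char *drm_type_for(enum dcb_connector_type t)
{
	switch (t) {
	case DCB_CONNECTOR_VGA:    return "VGA";
	case DCB_CONNECTOR_TV_0:
	case DCB_CONNECTOR_TV_1:
	case DCB_CONNECTOR_TV_3:   return "TV";
	case DCB_CONNECTOR_DVI_I:  return "DVI-I";
	case DCB_CONNECTOR_DVI_D:  return "DVI-D";
	case DCB_CONNECTOR_HDMI_0:
	case DCB_CONNECTOR_HDMI_1: return "HDMI-A";
	case DCB_CONNECTOR_LVDS:   return "LVDS";
	case DCB_CONNECTOR_DP:     return "DisplayPort";
	case DCB_CONNECTOR_eDP:    return "eDP";
	case DCB_CONNECTOR_NONE:   return "none (skip entry)";
	default:                   return "unknown (-EINVAL)";
	}
}

int main(void)
{
	printf("0x31 -> %s\n", drm_type_for(DCB_CONNECTOR_DVI_D));
	printf("0x47 -> %s\n", drm_type_for(DCB_CONNECTOR_eDP));
	return 0;
}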
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
index d79db3698f1..8ff9ef5d4b4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -47,12 +47,23 @@ nouveau_debugfs_channel_info(struct seq_file *m, void *data)
seq_printf(m, " cur: 0x%08x\n", chan->dma.cur << 2);
seq_printf(m, " put: 0x%08x\n", chan->dma.put << 2);
seq_printf(m, " free: 0x%08x\n", chan->dma.free << 2);
+ if (chan->dma.ib_max) {
+ seq_printf(m, " ib max: 0x%08x\n", chan->dma.ib_max);
+ seq_printf(m, " ib put: 0x%08x\n", chan->dma.ib_put);
+ seq_printf(m, " ib free: 0x%08x\n", chan->dma.ib_free);
+ }
seq_printf(m, "gpu fifo state:\n");
seq_printf(m, " get: 0x%08x\n",
nvchan_rd32(chan, chan->user_get));
seq_printf(m, " put: 0x%08x\n",
nvchan_rd32(chan, chan->user_put));
+ if (chan->dma.ib_max) {
+ seq_printf(m, " ib get: 0x%08x\n",
+ nvchan_rd32(chan, 0x88));
+ seq_printf(m, " ib put: 0x%08x\n",
+ nvchan_rd32(chan, 0x8c));
+ }
seq_printf(m, "last fence : %d\n", chan->fence.sequence);
seq_printf(m, "last signalled: %d\n", chan->fence.sequence_ack);
@@ -133,9 +144,22 @@ nouveau_debugfs_memory_info(struct seq_file *m, void *data)
return 0;
}
+static int
+nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
+ int i;
+
+ for (i = 0; i < dev_priv->vbios.length; i++)
+ seq_printf(m, "%c", dev_priv->vbios.data[i]);
+ return 0;
+}
+
static struct drm_info_list nouveau_debugfs_list[] = {
{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
{ "memory", nouveau_debugfs_memory_info, 0, NULL },
+ { "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },
};
#define NOUVEAU_DEBUGFS_ENTRIES ARRAY_SIZE(nouveau_debugfs_list)
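The new debugfs node exposes the raw VBIOS image to userspace. A minimal sketch for dumping it, assuming debugfs is mounted at /sys/kernel/debug and the card is DRM minor 0:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/dri/0/vbios.rom", "rb");
	unsigned char buf[4096];
	size_t n;

	if (!f) {
		perror("open vbios.rom");
		return 1;
	}
	/* copy the image byte-for-byte to stdout */
	while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
		fwrite(buf, 1, n, stdout);
	fclose(f);
	return 0;
}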
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dfc94391d71..cf1c5c0a0ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -39,11 +39,8 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
if (drm_fb->fbdev)
nouveau_fbcon_remove(dev, drm_fb);
- if (fb->nvbo) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(fb->nvbo->gem);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (fb->nvbo)
+ drm_gem_object_unreference_unlocked(fb->nvbo->gem);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index 50d9e67745a..c8482a108a7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -32,7 +32,22 @@
void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
- chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+ struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+ struct nouveau_bo *pushbuf = chan->pushbuf_bo;
+
+ if (dev_priv->card_type == NV_50) {
+ const int ib_size = pushbuf->bo.mem.size / 2;
+
+ chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2;
+ chan->dma.ib_max = (ib_size / 8) - 1;
+ chan->dma.ib_put = 0;
+ chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
+
+ chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2;
+ } else {
+ chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2;
+ }
+
chan->dma.put = 0;
chan->dma.cur = chan->dma.put;
chan->dma.free = chan->dma.max - chan->dma.cur;
@@ -162,12 +177,101 @@ READ_GET(struct nouveau_channel *chan, uint32_t *prev_get, uint32_t *timeout)
return (val - chan->pushbuf_base) >> 2;
}
+void
+nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
+ int delta, int length)
+{
+ struct nouveau_bo *pb = chan->pushbuf_bo;
+ uint64_t offset = bo->bo.offset + delta;
+ int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base;
+
+ BUG_ON(chan->dma.ib_free < 1);
+ nouveau_bo_wr32(pb, ip++, lower_32_bits(offset));
+ nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
+
+ chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
+ nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
+ chan->dma.ib_free--;
+}
+
+static int
+nv50_dma_push_wait(struct nouveau_channel *chan, int count)
+{
+ uint32_t cnt = 0, prev_get = 0;
+
+ while (chan->dma.ib_free < count) {
+ uint32_t get = nvchan_rd32(chan, 0x88);
+ if (get != prev_get) {
+ prev_get = get;
+ cnt = 0;
+ }
+
+ if ((++cnt & 0xff) == 0) {
+ DRM_UDELAY(1);
+ if (cnt > 100000)
+ return -EBUSY;
+ }
+
+ chan->dma.ib_free = get - chan->dma.ib_put;
+ if (chan->dma.ib_free <= 0)
+ chan->dma.ib_free += chan->dma.ib_max + 1;
+ }
+
+ return 0;
+}
+
+static int
+nv50_dma_wait(struct nouveau_channel *chan, int slots, int count)
+{
+ uint32_t cnt = 0, prev_get = 0;
+ int ret;
+
+ ret = nv50_dma_push_wait(chan, slots + 1);
+ if (unlikely(ret))
+ return ret;
+
+ while (chan->dma.free < count) {
+ int get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get < 0)) {
+ if (get == -EINVAL)
+ continue;
+
+ return get;
+ }
+
+ if (get <= chan->dma.cur) {
+ chan->dma.free = chan->dma.max - chan->dma.cur;
+ if (chan->dma.free >= count)
+ break;
+
+ FIRE_RING(chan);
+ do {
+ get = READ_GET(chan, &prev_get, &cnt);
+ if (unlikely(get < 0)) {
+ if (get == -EINVAL)
+ continue;
+ return get;
+ }
+ } while (get == 0);
+ chan->dma.cur = 0;
+ chan->dma.put = 0;
+ }
+
+ chan->dma.free = get - chan->dma.cur - 1;
+ }
+
+ return 0;
+}
+
int
-nouveau_dma_wait(struct nouveau_channel *chan, int size)
+nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size)
{
uint32_t prev_get = 0, cnt = 0;
int get;
+ if (chan->dma.ib_max)
+ return nv50_dma_wait(chan, slots, size);
+
while (chan->dma.free < size) {
get = READ_GET(chan, &prev_get, &cnt);
if (unlikely(get == -EBUSY))
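The nouveau_dma.c hunks above add NV50 indirect-buffer (IB) submission: nouveau_dma_pre_init() reserves the upper half of the push buffer for the IB ring, nv50_dma_push() appends two-word entries packing a GPU address and a byte length, and the put/free bookkeeping wraps against ib_max. A standalone model of the entry packing and wrap arithmetic, using simplified stand-in state rather than the driver's channel structure:

#include <stdint.h>
#include <stdio.h>

struct fake_ib {
	uint32_t entry[2 * 8];  /* room for ib_max + 1 == 8 two-word entries */
	int max;                /* ib_max: mask for the put pointer */
	int put;
	int free;
};

static void ib_push(struct fake_ib *ib, uint64_t offset, int length)
{
	int ip = ib->put * 2;

	/* low word: low 32 bits of the GPU address */
	ib->entry[ip + 0] = (uint32_t)offset;
	/* high word: upper address bits, with the byte length shifted in */
	ib->entry[ip + 1] = (uint32_t)(offset >> 32) | (uint32_t)length << 8;

	ib->put = (ib->put + 1) & ib->max;
	ib->free--;
}

/* same wrap rule nv50_dma_push_wait() uses when refreshing the free count */
static int ib_free_slots(const struct fake_ib *ib, int get)
{
	int free = get - ib->put;

	if (free <= 0)
		free += ib->max + 1;
	return free;
}

int main(void)
{
	struct fake_ib ib = { .max = 7, .put = 0, .free = 7 };

	ib_push(&ib, 0x12345678ull, 0x40);
	printf("entry: %08x %08x, put %d\n", ib.entry[0], ib.entry[1], ib.put);
	printf("free slots with get=3: %d\n", ib_free_slots(&ib, 3));
	return 0;
}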
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.h b/drivers/gpu/drm/nouveau/nouveau_dma.h
index dabfd655f93..8b05c15866d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.h
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.h
@@ -31,6 +31,9 @@
#define NOUVEAU_DMA_DEBUG 0
#endif
+void nv50_dma_push(struct nouveau_channel *, struct nouveau_bo *,
+ int delta, int length);
+
/*
* There's a hw race condition where you can't jump to your PUT offset,
* to avoid this we jump to offset + SKIPS and fill the difference with
@@ -96,13 +99,11 @@ enum {
static __must_check inline int
RING_SPACE(struct nouveau_channel *chan, int size)
{
- if (chan->dma.free < size) {
- int ret;
+ int ret;
- ret = nouveau_dma_wait(chan, size);
- if (ret)
- return ret;
- }
+ ret = nouveau_dma_wait(chan, 1, size);
+ if (ret)
+ return ret;
chan->dma.free -= size;
return 0;
@@ -146,7 +147,13 @@ FIRE_RING(struct nouveau_channel *chan)
return;
chan->accel_done = true;
- WRITE_PUT(chan->dma.cur);
+ if (chan->dma.ib_max) {
+ nv50_dma_push(chan, chan->pushbuf_bo, chan->dma.put << 2,
+ (chan->dma.cur - chan->dma.put) << 2);
+ } else {
+ WRITE_PUT(chan->dma.cur);
+ }
+
chan->dma.put = chan->dma.cur;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index da3b93b8450..30cc09e8a70 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -75,11 +75,11 @@ MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
-MODULE_PARM_DESC(noagp, "Disable all acceleration");
+MODULE_PARM_DESC(noaccel, "Disable all acceleration");
int nouveau_noaccel = 0;
module_param_named(noaccel, nouveau_noaccel, int, 0400);
-MODULE_PARM_DESC(noagp, "Disable fbcon acceleration");
+MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
@@ -135,7 +135,7 @@ nouveau_pci_remove(struct pci_dev *pdev)
drm_put_dev(dev);
}
-static int
+int
nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -233,7 +233,7 @@ out_abort:
return ret;
}
-static int
+int
nouveau_pci_resume(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
@@ -402,8 +402,10 @@ static int __init nouveau_init(void)
nouveau_modeset = 1;
}
- if (nouveau_modeset == 1)
+ if (nouveau_modeset == 1) {
driver.driver_features |= DRIVER_MODESET;
+ nouveau_register_dsm_handler();
+ }
return drm_init(&driver);
}
@@ -411,6 +413,7 @@ static int __init nouveau_init(void)
static void __exit nouveau_exit(void)
{
drm_exit(&driver);
+ nouveau_unregister_dsm_handler();
}
module_init(nouveau_init);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c15ef37b71..5f8d987af36 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -34,7 +34,7 @@
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
-#define DRIVER_PATCHLEVEL 15
+#define DRIVER_PATCHLEVEL 16
#define NOUVEAU_FAMILY 0x0000FFFF
#define NOUVEAU_FLAGS 0xFFFF0000
@@ -83,6 +83,7 @@ struct nouveau_bo {
struct drm_file *reserved_by;
struct list_head entry;
int pbbo_index;
+ bool validate_mapped;
struct nouveau_channel *channel;
@@ -239,6 +240,11 @@ struct nouveau_channel {
int cur;
int put;
/* access via pushbuf_bo */
+
+ int ib_base;
+ int ib_max;
+ int ib_free;
+ int ib_put;
} dma;
uint32_t sw_subchannel[8];
@@ -533,6 +539,9 @@ struct drm_nouveau_private {
struct nouveau_engine engine;
struct nouveau_channel *channel;
+ /* For PFIFO and PGRAPH. */
+ spinlock_t context_switch_lock;
+
/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
struct nouveau_gpuobj *ramht;
uint32_t ramin_rsvd_vram;
@@ -596,8 +605,7 @@ struct drm_nouveau_private {
struct list_head gpuobj_list;
- struct nvbios VBIOS;
- struct nouveau_bios_info *vbios;
+ struct nvbios vbios;
struct nv04_mode_state mode_reg;
struct nv04_mode_state saved_reg;
@@ -614,7 +622,6 @@ struct drm_nouveau_private {
} susres;
struct backlight_device *backlight;
- bool acpi_dsm;
struct nouveau_channel *evo;
@@ -682,6 +689,9 @@ extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
+extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
+extern int nouveau_pci_resume(struct pci_dev *pdev);
+
/* nouveau_state.c */
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
extern int nouveau_load(struct drm_device *, unsigned long flags);
@@ -696,12 +706,6 @@ extern bool nouveau_wait_until(struct drm_device *, uint64_t timeout,
uint32_t reg, uint32_t mask, uint32_t val);
extern bool nouveau_wait_for_idle(struct drm_device *);
extern int nouveau_card_init(struct drm_device *);
-extern int nouveau_ioctl_card_init(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_suspend(struct drm_device *, void *data,
- struct drm_file *);
-extern int nouveau_ioctl_resume(struct drm_device *, void *data,
- struct drm_file *);
/* nouveau_mem.c */
extern int nouveau_mem_init_heap(struct mem_block **, uint64_t start,
@@ -845,21 +849,15 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
/* nouveau_dma.c */
extern void nouveau_dma_pre_init(struct nouveau_channel *);
extern int nouveau_dma_init(struct nouveau_channel *);
-extern int nouveau_dma_wait(struct nouveau_channel *, int size);
+extern int nouveau_dma_wait(struct nouveau_channel *, int slots, int size);
/* nouveau_acpi.c */
-#ifdef CONFIG_ACPI
-extern int nouveau_hybrid_setup(struct drm_device *dev);
-extern bool nouveau_dsm_probe(struct drm_device *dev);
+#if defined(CONFIG_ACPI)
+void nouveau_register_dsm_handler(void);
+void nouveau_unregister_dsm_handler(void);
#else
-static inline int nouveau_hybrid_setup(struct drm_device *dev)
-{
- return 0;
-}
-static inline bool nouveau_dsm_probe(struct drm_device *dev)
-{
- return false;
-}
+static inline void nouveau_register_dsm_handler(void) {}
+static inline void nouveau_unregister_dsm_handler(void) {}
#endif
/* nouveau_backlight.c */
@@ -1027,6 +1025,7 @@ extern void nv50_graph_destroy_context(struct nouveau_channel *);
extern int nv50_graph_load_context(struct nouveau_channel *);
extern int nv50_graph_unload_context(struct drm_device *);
extern void nv50_graph_context_switch(struct drm_device *);
+extern int nv50_grctx_init(struct nouveau_grctx *);
/* nouveau_grctx.c */
extern int nouveau_grctx_prog_load(struct drm_device *);
@@ -1152,16 +1151,6 @@ extern int nouveau_gem_ioctl_new(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_ioctl_pushbuf(struct drm_device *, void *,
struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_pushbuf_call2(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_pin(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_unpin(struct drm_device *, void *,
- struct drm_file *);
-extern int nouveau_gem_ioctl_tile(struct drm_device *, void *,
- struct drm_file *);
extern int nouveau_gem_ioctl_cpu_prep(struct drm_device *, void *,
struct drm_file *);
extern int nouveau_gem_ioctl_cpu_fini(struct drm_device *, void *,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index ea879a2efef..68cedd9194f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -36,6 +36,7 @@
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/screen_info.h>
+#include <linux/vga_switcheroo.h>
#include "drmP.h"
#include "drm.h"
@@ -370,6 +371,7 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
nvbo->bo.offset, nvbo);
mutex_unlock(&dev->struct_mutex);
+ vga_switcheroo_client_fb_set(dev->pdev, info);
return 0;
out_unref:
@@ -401,10 +403,8 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
unregister_framebuffer(info);
nouveau_bo_unmap(nouveau_fb->nvbo);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(nouveau_fb->nvbo->gem);
+ drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
nouveau_fb->nvbo = NULL;
- mutex_unlock(&dev->struct_mutex);
if (par)
drm_fb_helper_free(&par->helper);
framebuffer_release(info);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 70cc30803e3..0d22f66f1c7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -167,12 +167,10 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(nvbo->gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(nvbo->gem);
if (ret)
- drm_gem_object_unreference(nvbo->gem);
+ drm_gem_object_unreference_unlocked(nvbo->gem);
return ret;
}
@@ -243,6 +241,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
nouveau_fence_unref((void *)&prev_fence);
}
+ if (unlikely(nvbo->validate_mapped)) {
+ ttm_bo_kunmap(&nvbo->kmap);
+ nvbo->validate_mapped = false;
+ }
+
list_del(&nvbo->entry);
nvbo->reserved_by = NULL;
ttm_bo_unreserve(&nvbo->bo);
@@ -302,11 +305,14 @@ retry:
if (ret == -EAGAIN)
ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
drm_gem_object_unreference(gem);
- if (ret)
+ if (ret) {
+ NV_ERROR(dev, "fail reserve\n");
return ret;
+ }
goto retry;
}
+ b->user_priv = (uint64_t)(unsigned long)nvbo;
nvbo->reserved_by = file_priv;
nvbo->pbbo_index = i;
if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -336,8 +342,10 @@ retry:
}
ret = ttm_bo_wait_cpu(&nvbo->bo, false);
- if (ret)
+ if (ret) {
+ NV_ERROR(dev, "fail wait_cpu\n");
return ret;
+ }
goto retry;
}
}
@@ -351,6 +359,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
{
struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
(void __force __user *)(uintptr_t)user_pbbo_ptr;
+ struct drm_device *dev = chan->dev;
struct nouveau_bo *nvbo;
int ret, relocs = 0;
@@ -362,39 +371,46 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
spin_lock(&nvbo->bo.lock);
ret = ttm_bo_wait(&nvbo->bo, false, false, false);
spin_unlock(&nvbo->bo.lock);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail wait other chan\n");
return ret;
+ }
}
ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail set_domain\n");
return ret;
+ }
nvbo->channel = chan;
ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
false, false);
nvbo->channel = NULL;
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "fail ttm_validate\n");
return ret;
+ }
- if (nvbo->bo.offset == b->presumed_offset &&
+ if (nvbo->bo.offset == b->presumed.offset &&
((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
- b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
(nvbo->bo.mem.mem_type == TTM_PL_TT &&
- b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
+ b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
continue;
if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
else
- b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
- b->presumed_offset = nvbo->bo.offset;
- b->presumed_ok = 0;
+ b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+ b->presumed.offset = nvbo->bo.offset;
+ b->presumed.valid = 0;
relocs++;
- if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
+ if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+ &b->presumed, sizeof(b->presumed)))
return -EFAULT;
}
@@ -408,6 +424,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
uint64_t user_buffers, int nr_buffers,
struct validate_op *op, int *apply_relocs)
{
+ struct drm_device *dev = chan->dev;
int ret, relocs = 0;
INIT_LIST_HEAD(&op->vram_list);
@@ -418,11 +435,14 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
return 0;
ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
- if (unlikely(ret))
+ if (unlikely(ret)) {
+ NV_ERROR(dev, "validate_init\n");
return ret;
+ }
ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
+ NV_ERROR(dev, "validate vram_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -430,6 +450,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
+ NV_ERROR(dev, "validate gart_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -437,6 +458,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
if (unlikely(ret < 0)) {
+ NV_ERROR(dev, "validate both_list\n");
validate_fini(op, NULL);
return ret;
}
@@ -465,59 +487,82 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
}
static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
- struct drm_nouveau_gem_pushbuf_bo *bo,
- unsigned nr_relocs, uint64_t ptr_relocs,
- unsigned nr_dwords, unsigned first_dword,
- uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+ struct drm_nouveau_gem_pushbuf *req,
+ struct drm_nouveau_gem_pushbuf_bo *bo)
{
struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
- struct drm_device *dev = chan->dev;
int ret = 0;
unsigned i;
- reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+ reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
if (IS_ERR(reloc))
return PTR_ERR(reloc);
- for (i = 0; i < nr_relocs; i++) {
+ for (i = 0; i < req->nr_relocs; i++) {
struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
struct drm_nouveau_gem_pushbuf_bo *b;
+ struct nouveau_bo *nvbo;
uint32_t data;
- if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
- r->reloc_index >= first_dword + nr_dwords) {
- NV_ERROR(dev, "Bad relocation %d\n", i);
- NV_ERROR(dev, " bo: %d max %d\n", r->bo_index, nr_bo);
- NV_ERROR(dev, " id: %d max %d\n", r->reloc_index, nr_dwords);
+ if (unlikely(r->bo_index > req->nr_buffers)) {
+ NV_ERROR(dev, "reloc bo index invalid\n");
ret = -EINVAL;
break;
}
b = &bo[r->bo_index];
- if (b->presumed_ok)
+ if (b->presumed.valid)
continue;
+ if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+ NV_ERROR(dev, "reloc container bo index invalid\n");
+ ret = -EINVAL;
+ break;
+ }
+ nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+ if (unlikely(r->reloc_bo_offset + 4 >
+ nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+ NV_ERROR(dev, "reloc outside of bo\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!nvbo->kmap.virtual) {
+ ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+ &nvbo->kmap);
+ if (ret) {
+ NV_ERROR(dev, "failed kmap for reloc\n");
+ break;
+ }
+ nvbo->validate_mapped = true;
+ }
+
if (r->flags & NOUVEAU_GEM_RELOC_LOW)
- data = b->presumed_offset + r->data;
+ data = b->presumed.offset + r->data;
else
if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
- data = (b->presumed_offset + r->data) >> 32;
+ data = (b->presumed.offset + r->data) >> 32;
else
data = r->data;
if (r->flags & NOUVEAU_GEM_RELOC_OR) {
- if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+ if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
data |= r->tor;
else
data |= r->vor;
}
- if (is_iomem)
- iowrite32_native(data, (void __force __iomem *)
- &pushbuf[r->reloc_index]);
- else
- pushbuf[r->reloc_index] = data;
+ spin_lock(&nvbo->bo.lock);
+ ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+ spin_unlock(&nvbo->bo.lock);
+ if (ret) {
+ NV_ERROR(dev, "reloc wait_idle failed: %d\n", ret);
+ break;
+ }
+
+ nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
}
kfree(reloc);
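The rewritten nouveau_gem_pushbuf_reloc_apply() above resolves each relocation against the buffer's presumed offset/domain and writes the result straight into the target BO through ttm_bo_kmap(), instead of patching a user-supplied dword array. A standalone model of how the relocation word itself is computed from the LOW/HIGH/OR flags; the flag values and struct layout below are illustrative, not the real ABI:

#include <stdint.h>
#include <stdio.h>

#define RELOC_LOW  0x1
#define RELOC_HIGH 0x2
#define RELOC_OR   0x4

struct fake_reloc {
	uint32_t flags;
	uint32_t data;  /* constant added to (or used instead of) the offset */
	uint32_t vor;   /* OR-ed in when the buffer ended up in VRAM */
	uint32_t tor;   /* OR-ed in when the buffer ended up in GART */
};

static uint32_t reloc_word(const struct fake_reloc *r,
			   uint64_t presumed_offset, int in_gart)
{
	uint32_t data;

	if (r->flags & RELOC_LOW)
		data = (uint32_t)(presumed_offset + r->data);
	else if (r->flags & RELOC_HIGH)
		data = (uint32_t)((presumed_offset + r->data) >> 32);
	else
		data = r->data;

	if (r->flags & RELOC_OR)
		data |= in_gart ? r->tor : r->vor;

	return data;
}

int main(void)
{
	struct fake_reloc r = { RELOC_LOW | RELOC_OR, 0x100, 0x2, 0x3 };

	/* buffer presumed at 0x40000000 in VRAM */
	printf("0x%08x\n", reloc_word(&r, 0x40000000ull, 0));
	return 0;
}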
@@ -528,127 +573,50 @@ int
nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
struct drm_nouveau_gem_pushbuf *req = data;
- struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
+ struct drm_nouveau_gem_pushbuf_push *push;
+ struct drm_nouveau_gem_pushbuf_bo *bo;
struct nouveau_channel *chan;
struct validate_op op;
- struct nouveau_fence* fence = 0;
- uint32_t *pushbuf = NULL;
- int ret = 0, do_reloc = 0, i;
+ struct nouveau_fence *fence = 0;
+ int i, j, ret = 0, do_reloc = 0;
NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
- if (req->nr_dwords >= chan->dma.max ||
- req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
- req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
- NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
- NV_ERROR(dev, " dwords : %d max %d\n", req->nr_dwords,
- chan->dma.max - 1);
- NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
- NOUVEAU_GEM_MAX_BUFFERS);
- NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
- NOUVEAU_GEM_MAX_RELOCS);
- return -EINVAL;
- }
-
- pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
- if (IS_ERR(pushbuf))
- return PTR_ERR(pushbuf);
-
- bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
- if (IS_ERR(bo)) {
- kfree(pushbuf);
- return PTR_ERR(bo);
- }
-
- mutex_lock(&dev->struct_mutex);
-
- /* Validate buffer list */
- ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
- req->nr_buffers, &op, &do_reloc);
- if (ret)
- goto out;
-
- /* Apply any relocations that are required */
- if (do_reloc) {
- ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
- bo, req->nr_relocs,
- req->relocs,
- req->nr_dwords, 0,
- pushbuf, false);
- if (ret)
- goto out;
- }
-
- /* Emit push buffer to the hw
- */
- ret = RING_SPACE(chan, req->nr_dwords);
- if (ret)
- goto out;
-
- OUT_RINGp(chan, pushbuf, req->nr_dwords);
+ req->vram_available = dev_priv->fb_aper_free;
+ req->gart_available = dev_priv->gart_info.aper_free;
+ if (unlikely(req->nr_push == 0))
+ goto out_next;
- ret = nouveau_fence_new(chan, &fence, true);
- if (ret) {
- NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
- WIND_RING(chan);
- goto out;
+ if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+ NV_ERROR(dev, "pushbuf push count exceeds limit: %d max %d\n",
+ req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+ return -EINVAL;
}
- if (nouveau_gem_pushbuf_sync(chan)) {
- ret = nouveau_fence_wait(fence, NULL, false, false);
- if (ret) {
- for (i = 0; i < req->nr_dwords; i++)
- NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
- NV_ERROR(dev, "^^ above push buffer is fail :(\n");
- }
+ if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+ NV_ERROR(dev, "pushbuf bo count exceeds limit: %d max %d\n",
+ req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+ return -EINVAL;
}
-out:
- validate_fini(&op, fence);
- nouveau_fence_unref((void**)&fence);
- mutex_unlock(&dev->struct_mutex);
- kfree(pushbuf);
- kfree(bo);
- return ret;
-}
-
-#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
-
-int
-nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_gem_pushbuf_call *req = data;
- struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
- struct nouveau_channel *chan;
- struct drm_gem_object *gem;
- struct nouveau_bo *pbbo;
- struct validate_op op;
- struct nouveau_fence* fence = 0;
- int i, ret = 0, do_reloc = 0;
-
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
- NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
- if (unlikely(req->handle == 0))
- goto out_next;
-
- if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
- req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
- NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
- NV_ERROR(dev, " buffers: %d max %d\n", req->nr_buffers,
- NOUVEAU_GEM_MAX_BUFFERS);
- NV_ERROR(dev, " relocs : %d max %d\n", req->nr_relocs,
- NOUVEAU_GEM_MAX_RELOCS);
+ if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+ NV_ERROR(dev, "pushbuf reloc count exceeds limit: %d max %d\n",
+ req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
return -EINVAL;
}
+ push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+ if (IS_ERR(push))
+ return PTR_ERR(push);
+
bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
- if (IS_ERR(bo))
+ if (IS_ERR(bo)) {
+ kfree(push);
return PTR_ERR(bo);
+ }
mutex_lock(&dev->struct_mutex);
@@ -660,122 +628,84 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
goto out;
}
- /* Validate DMA push buffer */
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem) {
- NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
- ret = -EINVAL;
- goto out;
- }
- pbbo = nouveau_gem_object(gem);
-
- if ((req->offset & 3) || req->nr_dwords < 2 ||
- (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
- (unsigned long)req->nr_dwords >
- ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
- NV_ERROR(dev, "pb call misaligned or out of bounds: "
- "%d + %d * 4 > %ld\n",
- req->offset, req->nr_dwords, pbbo->bo.mem.size);
- ret = -EINVAL;
- drm_gem_object_unreference(gem);
- goto out;
- }
-
- ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
- chan->fence.sequence);
- if (ret) {
- NV_ERROR(dev, "resv pb: %d\n", ret);
- drm_gem_object_unreference(gem);
- goto out;
- }
-
- nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
- ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
- if (ret) {
- NV_ERROR(dev, "validate pb: %d\n", ret);
- ttm_bo_unreserve(&pbbo->bo);
- drm_gem_object_unreference(gem);
- goto out;
- }
-
- list_add_tail(&pbbo->entry, &op.both_list);
-
- /* If presumed return address doesn't match, we need to map the
- * push buffer and fix it..
- */
- if (!PUSHBUF_CAL) {
- uint32_t retaddy;
-
- if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
- ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
- if (ret) {
- NV_ERROR(dev, "jmp_space: %d\n", ret);
- goto out;
- }
- }
-
- retaddy = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
- retaddy |= 0x20000000;
- if (retaddy != req->suffix0) {
- req->suffix0 = retaddy;
- do_reloc = 1;
- }
- }
-
/* Apply any relocations that are required */
if (do_reloc) {
- void *pbvirt;
- bool is_iomem;
- ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
- &pbbo->kmap);
+ ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
if (ret) {
- NV_ERROR(dev, "kmap pb: %d\n", ret);
+ NV_ERROR(dev, "reloc apply: %d\n", ret);
goto out;
}
+ }
- pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
- ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
- req->nr_relocs,
- req->relocs,
- req->nr_dwords,
- req->offset / 4,
- pbvirt, is_iomem);
-
- if (!PUSHBUF_CAL) {
- nouveau_bo_wr32(pbbo,
- req->offset / 4 + req->nr_dwords - 2,
- req->suffix0);
- }
-
- ttm_bo_kunmap(&pbbo->kmap);
+ if (chan->dma.ib_max) {
+ ret = nouveau_dma_wait(chan, req->nr_push + 1, 6);
if (ret) {
- NV_ERROR(dev, "reloc apply: %d\n", ret);
+ NV_INFO(dev, "nv50cal_space: %d\n", ret);
goto out;
}
- }
- if (PUSHBUF_CAL) {
- ret = RING_SPACE(chan, 2);
+ for (i = 0; i < req->nr_push; i++) {
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[push[i].bo_index].user_priv;
+
+ nv50_dma_push(chan, nvbo, push[i].offset,
+ push[i].length);
+ }
+ } else
+ if (dev_priv->card_type >= NV_20) {
+ ret = RING_SPACE(chan, req->nr_push * 2);
if (ret) {
NV_ERROR(dev, "cal_space: %d\n", ret);
goto out;
}
- OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
- req->offset) | 2);
- OUT_RING(chan, 0);
+
+ for (i = 0; i < req->nr_push; i++) {
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[push[i].bo_index].user_priv;
+ struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+
+ OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+ push[i].offset) | 2);
+ OUT_RING(chan, 0);
+ }
} else {
- ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+ ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
if (ret) {
NV_ERROR(dev, "jmp_space: %d\n", ret);
goto out;
}
- OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
- req->offset) | 0x20000000);
- OUT_RING(chan, 0);
- /* Space the jumps apart with NOPs. */
- for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+ for (i = 0; i < req->nr_push; i++) {
+ struct nouveau_bo *nvbo = (void *)(unsigned long)
+ bo[push[i].bo_index].user_priv;
+ struct drm_mm_node *mem = nvbo->bo.mem.mm_node;
+ uint32_t cmd;
+
+ cmd = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
+ cmd |= 0x20000000;
+ if (unlikely(cmd != req->suffix0)) {
+ if (!nvbo->kmap.virtual) {
+ ret = ttm_bo_kmap(&nvbo->bo, 0,
+ nvbo->bo.mem.
+ num_pages,
+ &nvbo->kmap);
+ if (ret) {
+ WIND_RING(chan);
+ goto out;
+ }
+ nvbo->validate_mapped = true;
+ }
+
+ nouveau_bo_wr32(nvbo, (push[i].offset +
+ push[i].length - 8) / 4, cmd);
+ }
+
+ OUT_RING(chan, ((mem->start << PAGE_SHIFT) +
+ push[i].offset) | 0x20000000);
OUT_RING(chan, 0);
+ for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+ OUT_RING(chan, 0);
+ }
}
ret = nouveau_fence_new(chan, &fence, true);
@@ -790,9 +720,14 @@ out:
nouveau_fence_unref((void**)&fence);
mutex_unlock(&dev->struct_mutex);
kfree(bo);
+ kfree(push);
out_next:
- if (PUSHBUF_CAL) {
+ if (chan->dma.ib_max) {
+ req->suffix0 = 0x00000000;
+ req->suffix1 = 0x00000000;
+ } else
+ if (dev_priv->card_type >= NV_20) {
req->suffix0 = 0x00020000;
req->suffix1 = 0x00000000;
} else {
@@ -804,19 +739,6 @@ out_next:
return ret;
}
-int
-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct drm_nouveau_gem_pushbuf_call *req = data;
-
- req->vram_available = dev_priv->fb_aper_free;
- req->gart_available = dev_priv->gart_info.aper_free;
-
- return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
-}
-
static inline uint32_t
domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
{
@@ -831,74 +753,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
}
int
-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_gem_pin *req = data;
- struct drm_gem_object *gem;
- struct nouveau_bo *nvbo;
- int ret = 0;
-
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
- return -EINVAL;
- }
-
- if (!DRM_SUSER(DRM_CURPROC))
- return -EPERM;
-
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem)
- return -EINVAL;
- nvbo = nouveau_gem_object(gem);
-
- ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
- if (ret)
- goto out;
-
- req->offset = nvbo->bo.offset;
- if (nvbo->bo.mem.mem_type == TTM_PL_TT)
- req->domain = NOUVEAU_GEM_DOMAIN_GART;
- else
- req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_nouveau_gem_pin *req = data;
- struct drm_gem_object *gem;
- int ret;
-
- NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
- if (drm_core_check_feature(dev, DRIVER_MODESET))
- return -EINVAL;
-
- gem = drm_gem_object_lookup(dev, file_priv, req->handle);
- if (!gem)
- return -EINVAL;
-
- ret = nouveau_bo_unpin(nouveau_gem_object(gem));
-
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
-int
nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
@@ -935,9 +789,7 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
}
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
@@ -965,9 +817,7 @@ nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
ret = 0;
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
@@ -986,9 +836,7 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
return -EINVAL;
ret = nouveau_gem_info(gem, req);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_hw.c b/drivers/gpu/drm/nouveau/nouveau_hw.c
index dc46792a5c9..7855b35effc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_hw.c
+++ b/drivers/gpu/drm/nouveau/nouveau_hw.c
@@ -160,7 +160,7 @@ static void
setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chip_version = dev_priv->vbios->chip_version;
+ int chip_version = dev_priv->vbios.chip_version;
uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
@@ -216,7 +216,7 @@ setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
struct nouveau_pll_vals *pv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int chip_version = dev_priv->vbios->chip_version;
+ int chip_version = dev_priv->vbios.chip_version;
bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
@@ -374,7 +374,7 @@ nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
struct nouveau_pll_vals *pv)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- int cv = dev_priv->vbios->chip_version;
+ int cv = dev_priv->vbios.chip_version;
if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
cv >= 0x40) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index 70e994d2812..88583e7bf65 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -254,16 +254,16 @@ struct nouveau_i2c_chan *
nouveau_i2c_find(struct drm_device *dev, int index)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
- if (index > DCB_MAX_NUM_I2C_ENTRIES)
+ if (index >= DCB_MAX_NUM_I2C_ENTRIES)
return NULL;
- if (!bios->bdcb.dcb.i2c[index].chan) {
- if (nouveau_i2c_init(dev, &bios->bdcb.dcb.i2c[index], index))
+ if (!bios->dcb.i2c[index].chan) {
+ if (nouveau_i2c_init(dev, &bios->dcb.i2c[index], index))
return NULL;
}
- return bios->bdcb.dcb.i2c[index].chan;
+ return bios->dcb.i2c[index].chan;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 447f9f69d6b..95220ddebb4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -691,11 +691,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
struct drm_device *dev = (struct drm_device *)arg;
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t status, fbdev_flags = 0;
+ unsigned long flags;
status = nv_rd32(dev, NV03_PMC_INTR_0);
if (!status)
return IRQ_NONE;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
if (dev_priv->fbdev_info) {
fbdev_flags = dev_priv->fbdev_info->flags;
dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -733,5 +736,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
if (dev_priv->fbdev_info)
dev_priv->fbdev_info->flags = fbdev_flags;
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
return IRQ_HANDLED;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index d99dc087f9b..9537f3e3011 100644
--- a/drivers/gpu/drm/nouveau/nouveau_notifier.c
+++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c
@@ -61,11 +61,8 @@ nouveau_notifier_init_channel(struct nouveau_channel *chan)
chan->notifier_bo = ntfy;
out_err:
- if (ret) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(ntfy->gem);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (ret)
+ drm_gem_object_unreference_unlocked(ntfy->gem);
return ret;
}
@@ -81,8 +78,8 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
nouveau_bo_unmap(chan->notifier_bo);
mutex_lock(&dev->struct_mutex);
nouveau_bo_unpin(chan->notifier_bo);
- drm_gem_object_unreference(chan->notifier_bo->gem);
mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
nouveau_mem_takedown(&chan->notifier_heap);
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4851af5b05..eb8f084d5f5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -29,6 +29,7 @@
#include "drm_sarea.h"
#include "drm_crtc_helper.h"
#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
#include "nouveau_drv.h"
#include "nouveau_drm.h"
@@ -371,6 +372,30 @@ out_err:
return ret;
}
+static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
+ enum vga_switcheroo_state state)
+{
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ printk(KERN_ERR "VGA switcheroo: switched nouveau on\n");
+ nouveau_pci_resume(pdev);
+ } else {
+ printk(KERN_ERR "VGA switcheroo: switched nouveau off\n");
+ nouveau_pci_suspend(pdev, pmm);
+ }
+}
+
+static bool nouveau_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
int
nouveau_card_init(struct drm_device *dev)
{
@@ -384,6 +409,8 @@ nouveau_card_init(struct drm_device *dev)
return 0;
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
+ vga_switcheroo_register_client(dev->pdev, nouveau_switcheroo_set_state,
+ nouveau_switcheroo_can_switch);
/* Initialise internal driver API hooks */
ret = nouveau_init_engine_ptrs(dev);
@@ -391,6 +418,7 @@ nouveau_card_init(struct drm_device *dev)
goto out;
engine = &dev_priv->engine;
dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+ spin_lock_init(&dev_priv->context_switch_lock);
/* Parse BIOS tables / Run init tables if card not POSTed */
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -617,11 +645,6 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
NV_DEBUG(dev, "vendor: 0x%X device: 0x%X class: 0x%X\n",
dev->pci_vendor, dev->pci_device, dev->pdev->class);
- dev_priv->acpi_dsm = nouveau_dsm_probe(dev);
-
- if (dev_priv->acpi_dsm)
- nouveau_hybrid_setup(dev);
-
dev_priv->wq = create_workqueue("nouveau");
if (!dev_priv->wq)
return -EINVAL;
@@ -776,13 +799,6 @@ int nouveau_unload(struct drm_device *dev)
return 0;
}
-int
-nouveau_ioctl_card_init(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return nouveau_card_init(dev);
-}
-
int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index d2f143ed97c..a1d1ebb073d 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -926,9 +926,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
nv_crtc->cursor.show(nv_crtc, true);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_dac.c b/drivers/gpu/drm/nouveau/nv04_dac.c
index 1d73b15d70d..1cb19e3acb5 100644
--- a/drivers/gpu/drm/nouveau/nv04_dac.c
+++ b/drivers/gpu/drm/nouveau/nv04_dac.c
@@ -230,13 +230,13 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
if (dcb->type == OUTPUT_TV) {
testval = RGB_TEST_DATA(0xa0, 0xa0, 0xa0);
- if (dev_priv->vbios->tvdactestval)
- testval = dev_priv->vbios->tvdactestval;
+ if (dev_priv->vbios.tvdactestval)
+ testval = dev_priv->vbios.tvdactestval;
} else {
testval = RGB_TEST_DATA(0x140, 0x140, 0x140); /* 0x94050140 */
- if (dev_priv->vbios->dactestval)
- testval = dev_priv->vbios->dactestval;
+ if (dev_priv->vbios.dactestval)
+ testval = dev_priv->vbios.dactestval;
}
saved_rtest_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c
index 483f875bdb6..41634d4752f 100644
--- a/drivers/gpu/drm/nouveau/nv04_dfp.c
+++ b/drivers/gpu/drm/nouveau/nv04_dfp.c
@@ -269,10 +269,10 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
regp->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1;
if (!nv_gf4_disp_arch(dev) ||
(output_mode->hsync_start - output_mode->hdisplay) >=
- dev_priv->vbios->digital_min_front_porch)
+ dev_priv->vbios.digital_min_front_porch)
regp->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay;
else
- regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios->digital_min_front_porch - 1;
+ regp->fp_horiz_regs[FP_CRTC] = output_mode->hsync_start - dev_priv->vbios.digital_min_front_porch - 1;
regp->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1;
regp->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1;
regp->fp_horiz_regs[FP_VALID_START] = output_mode->hskew;
diff --git a/drivers/gpu/drm/nouveau/nv04_display.c b/drivers/gpu/drm/nouveau/nv04_display.c
index ef77215fa5b..c7898b4f6df 100644
--- a/drivers/gpu/drm/nouveau/nv04_display.c
+++ b/drivers/gpu/drm/nouveau/nv04_display.c
@@ -93,10 +93,9 @@ int
nv04_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct parsed_dcb *dcb = dev_priv->vbios->dcb;
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
struct drm_encoder *encoder;
struct drm_crtc *crtc;
- uint16_t connector[16] = { 0 };
int i, ret;
NV_DEBUG_KMS(dev, "\n");
@@ -154,52 +153,10 @@ nv04_display_create(struct drm_device *dev)
if (ret)
continue;
-
- connector[dcbent->connector] |= (1 << dcbent->type);
}
- for (i = 0; i < dcb->entries; i++) {
- struct dcb_entry *dcbent = &dcb->entry[i];
- uint16_t encoders;
- int type;
-
- encoders = connector[dcbent->connector];
- if (!(encoders & (1 << dcbent->type)))
- continue;
- connector[dcbent->connector] = 0;
-
- switch (dcbent->type) {
- case OUTPUT_ANALOG:
- if (!MULTIPLE_ENCODERS(encoders))
- type = DRM_MODE_CONNECTOR_VGA;
- else
- type = DRM_MODE_CONNECTOR_DVII;
- break;
- case OUTPUT_TMDS:
- if (!MULTIPLE_ENCODERS(encoders))
- type = DRM_MODE_CONNECTOR_DVID;
- else
- type = DRM_MODE_CONNECTOR_DVII;
- break;
- case OUTPUT_LVDS:
- type = DRM_MODE_CONNECTOR_LVDS;
-#if 0
- /* don't create i2c adapter when lvds ddc not allowed */
- if (dcbent->lvdsconf.use_straps_for_mode ||
- dev_priv->vbios->fp_no_ddc)
- i2c_index = 0xf;
-#endif
- break;
- case OUTPUT_TV:
- type = DRM_MODE_CONNECTOR_TV;
- break;
- default:
- type = DRM_MODE_CONNECTOR_Unknown;
- continue;
- }
-
- nouveau_connector_create(dev, dcbent->connector, type);
- }
+ for (i = 0; i < dcb->connector.entries; i++)
+ nouveau_connector_create(dev, &dcb->connector.entry[i]);
/* Save previous state */
NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index fd01caabd5c..3da90c2c4e6 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,7 +118,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
- width = (image->width + 31) & ~31;
+ width = ALIGN(image->width, 32);
dsize = (width * image->height) >> 5;
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f31347b8c9b..66fe55983b6 100644
--- a/drivers/gpu/drm/nouveau/nv04_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv04_fifo.c
@@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
+ unsigned long flags;
int ret;
ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
@@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
/* Setup initial state */
dev_priv->engine.instmem.prepare_access(dev, true);
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
@@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv04_tv.c b/drivers/gpu/drm/nouveau/nv04_tv.c
index 9c63099e9c4..c4e3404337d 100644
--- a/drivers/gpu/drm/nouveau/nv04_tv.c
+++ b/drivers/gpu/drm/nouveau/nv04_tv.c
@@ -262,7 +262,7 @@ int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry)
nv_encoder->or = ffs(entry->or) - 1;
/* Run the slave-specific initialization */
- adap = &dev_priv->vbios->dcb->i2c[i2c_index].chan->adapter;
+ adap = &dev_priv->vbios.dcb.i2c[i2c_index].chan->adapter;
was_locked = NVLockVgaCrtcs(dev, false);
diff --git a/drivers/gpu/drm/nouveau/nv17_tv.c b/drivers/gpu/drm/nouveau/nv17_tv.c
index 21ac6e49b6e..74c880374fb 100644
--- a/drivers/gpu/drm/nouveau/nv17_tv.c
+++ b/drivers/gpu/drm/nouveau/nv17_tv.c
@@ -45,8 +45,8 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
- if (dev_priv->vbios->tvdactestval)
- testval = dev_priv->vbios->tvdactestval;
+ if (dev_priv->vbios.tvdactestval)
+ testval = dev_priv->vbios.tvdactestval;
dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
head = (dacclk & 0x100) >> 8;
@@ -367,7 +367,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
!enc->crtc &&
nv04_dfp_get_bound_head(dev, dcb) == head) {
nv04_dfp_bind_head(dev, dcb, head ^ 1,
- dev_priv->VBIOS.fp.dual_link);
+ dev_priv->vbios.fp.dual_link);
}
}
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index b4f19ccb8b4..6b2ef4a9fce 100644
--- a/drivers/gpu/drm/nouveau/nv40_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv40_fifo.c
@@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV40_RAMFC(chan->id);
+ unsigned long flags;
int ret;
ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
@@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
if (ret)
return ret;
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
dev_priv->engine.instmem.prepare_access(dev, true);
nv_wi32(dev, fc + 0, chan->pushbuf_base);
nv_wi32(dev, fc + 4, chan->pushbuf_base);
@@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_crtc.c b/drivers/gpu/drm/nouveau/nv50_crtc.c
index d1a651e3400..cfabeb974a5 100644
--- a/drivers/gpu/drm/nouveau/nv50_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv50_crtc.c
@@ -358,9 +358,7 @@ nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
nv_crtc->cursor.show(nv_crtc, true);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gem);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gem);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_dac.c b/drivers/gpu/drm/nouveau/nv50_dac.c
index f08f042a8e1..1fd9537beff 100644
--- a/drivers/gpu/drm/nouveau/nv50_dac.c
+++ b/drivers/gpu/drm/nouveau/nv50_dac.c
@@ -79,8 +79,8 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
}
/* Use bios provided value if possible. */
- if (dev_priv->vbios->dactestval) {
- load_pattern = dev_priv->vbios->dactestval;
+ if (dev_priv->vbios.dactestval) {
+ load_pattern = dev_priv->vbios.dactestval;
NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
load_pattern);
} else {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 90f0bf59fbc..61a89f2dc55 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -370,9 +370,7 @@ nv50_display_init(struct drm_device *dev)
struct nouveau_connector *conn = nouveau_connector(connector);
struct dcb_gpio_entry *gpio;
- if (connector->connector_type != DRM_MODE_CONNECTOR_DVII &&
- connector->connector_type != DRM_MODE_CONNECTOR_DVID &&
- connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ if (conn->dcb->gpio_tag == 0xff)
continue;
gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
@@ -465,8 +463,7 @@ static int nv50_display_disable(struct drm_device *dev)
int nv50_display_create(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct parsed_dcb *dcb = dev_priv->vbios->dcb;
- uint32_t connector[16] = {};
+ struct dcb_table *dcb = &dev_priv->vbios.dcb;
int ret, i;
NV_DEBUG_KMS(dev, "\n");
@@ -522,44 +519,13 @@ int nv50_display_create(struct drm_device *dev)
NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
continue;
}
-
- connector[entry->connector] |= (1 << entry->type);
}
- /* It appears that DCB 3.0+ VBIOS has a connector table, however,
- * I'm not 100% certain how to decode it correctly yet so just
- * look at what encoders are present on each connector index and
- * attempt to derive the connector type from that.
- */
- for (i = 0 ; i < dcb->entries; i++) {
- struct dcb_entry *entry = &dcb->entry[i];
- uint16_t encoders;
- int type;
-
- encoders = connector[entry->connector];
- if (!(encoders & (1 << entry->type)))
+ for (i = 0 ; i < dcb->connector.entries; i++) {
+ if (i != 0 && dcb->connector.entry[i].index ==
+ dcb->connector.entry[i - 1].index)
continue;
- connector[entry->connector] = 0;
-
- if (encoders & (1 << OUTPUT_DP)) {
- type = DRM_MODE_CONNECTOR_DisplayPort;
- } else if (encoders & (1 << OUTPUT_TMDS)) {
- if (encoders & (1 << OUTPUT_ANALOG))
- type = DRM_MODE_CONNECTOR_DVII;
- else
- type = DRM_MODE_CONNECTOR_DVID;
- } else if (encoders & (1 << OUTPUT_ANALOG)) {
- type = DRM_MODE_CONNECTOR_VGA;
- } else if (encoders & (1 << OUTPUT_LVDS)) {
- type = DRM_MODE_CONNECTOR_LVDS;
- } else {
- type = DRM_MODE_CONNECTOR_Unknown;
- }
-
- if (type == DRM_MODE_CONNECTOR_Unknown)
- continue;
-
- nouveau_connector_create(dev, entry->connector, type);
+ nouveau_connector_create(dev, &dcb->connector.entry[i]);
}
ret = nv50_display_init(dev);
@@ -667,8 +633,8 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
return -1;
}
- for (i = 0; i < dev_priv->vbios->dcb->entries; i++) {
- struct dcb_entry *dcbent = &dev_priv->vbios->dcb->entry[i];
+ for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
+ struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];
if (dcbent->type != type)
continue;
@@ -692,7 +658,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_connector *nv_connector = NULL;
struct drm_encoder *encoder;
- struct nvbios *bios = &dev_priv->VBIOS;
+ struct nvbios *bios = &dev_priv->vbios;
uint32_t mc, script = 0, or;
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
@@ -710,7 +676,7 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
switch (dcbent->type) {
case OUTPUT_LVDS:
script = (mc >> 8) & 0xf;
- if (bios->pub.fp_no_ddc) {
+ if (bios->fp_no_ddc) {
if (bios->fp.dual_link)
script |= 0x0100;
if (bios->fp.if_is_24bit)
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 0f57cdf7ccb..993c7126fbd 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -109,7 +109,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
- width = (image->width + 31) & ~31;
+ width = ALIGN(image->width, 32);
dwords = (width * image->height) >> 5;
BEGIN_RING(chan, NvSub2D, 0x0814, 2);
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 204a79ff10f..e20c0e2474f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fifo.c
+++ b/drivers/gpu/drm/nouveau/nv50_fifo.c
@@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramfc = NULL;
+ unsigned long flags;
int ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -278,19 +279,21 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
return ret;
}
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
dev_priv->engine.instmem.prepare_access(dev, true);
- nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
- nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
- nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
- nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);
+ nv_wo32(dev, ramfc, 0x3c/4, 0x403f6078);
+ nv_wo32(dev, ramfc, 0x50/4, chan->pushbuf_base +
+ chan->dma.ib_base * 4);
+ nv_wo32(dev, ramfc, 0x54/4, drm_order(chan->dma.ib_max + 1) << 16);
if (!IS_G80) {
nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
@@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
ret = nv50_fifo_channel_enable(dev, chan->id, false);
if (ret) {
NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
nouveau_gpuobj_ref_del(dev, &chan->ramfc);
return ret;
}
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 6d504801b51..857a09671a3 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -28,30 +28,7 @@
#include "drm.h"
#include "nouveau_drv.h"
-MODULE_FIRMWARE("nouveau/nv50.ctxprog");
-MODULE_FIRMWARE("nouveau/nv50.ctxvals");
-MODULE_FIRMWARE("nouveau/nv84.ctxprog");
-MODULE_FIRMWARE("nouveau/nv84.ctxvals");
-MODULE_FIRMWARE("nouveau/nv86.ctxprog");
-MODULE_FIRMWARE("nouveau/nv86.ctxvals");
-MODULE_FIRMWARE("nouveau/nv92.ctxprog");
-MODULE_FIRMWARE("nouveau/nv92.ctxvals");
-MODULE_FIRMWARE("nouveau/nv94.ctxprog");
-MODULE_FIRMWARE("nouveau/nv94.ctxvals");
-MODULE_FIRMWARE("nouveau/nv96.ctxprog");
-MODULE_FIRMWARE("nouveau/nv96.ctxvals");
-MODULE_FIRMWARE("nouveau/nv98.ctxprog");
-MODULE_FIRMWARE("nouveau/nv98.ctxvals");
-MODULE_FIRMWARE("nouveau/nva0.ctxprog");
-MODULE_FIRMWARE("nouveau/nva0.ctxvals");
-MODULE_FIRMWARE("nouveau/nva5.ctxprog");
-MODULE_FIRMWARE("nouveau/nva5.ctxvals");
-MODULE_FIRMWARE("nouveau/nva8.ctxprog");
-MODULE_FIRMWARE("nouveau/nva8.ctxvals");
-MODULE_FIRMWARE("nouveau/nvaa.ctxprog");
-MODULE_FIRMWARE("nouveau/nvaa.ctxvals");
-MODULE_FIRMWARE("nouveau/nvac.ctxprog");
-MODULE_FIRMWARE("nouveau/nvac.ctxvals");
+#include "nouveau_grctx.h"
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
@@ -111,9 +88,34 @@ nv50_graph_init_ctxctl(struct drm_device *dev)
NV_DEBUG(dev, "\n");
- nouveau_grctx_prog_load(dev);
- if (!dev_priv->engine.graph.ctxprog)
- dev_priv->engine.graph.accel_blocked = true;
+ if (nouveau_ctxfw) {
+ nouveau_grctx_prog_load(dev);
+ dev_priv->engine.graph.grctx_size = 0x70000;
+ }
+ if (!dev_priv->engine.graph.ctxprog) {
+ struct nouveau_grctx ctx = {};
+ uint32_t *cp = kmalloc(512 * 4, GFP_KERNEL);
+ int i;
+ if (!cp) {
+ NV_ERROR(dev, "Couldn't alloc ctxprog! Disabling acceleration.\n");
+ dev_priv->engine.graph.accel_blocked = true;
+ return 0;
+ }
+ ctx.dev = dev;
+ ctx.mode = NOUVEAU_GRCTX_PROG;
+ ctx.data = cp;
+ ctx.ctxprog_max = 512;
+ if (!nv50_grctx_init(&ctx)) {
+ dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
+
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
+ for (i = 0; i < ctx.ctxprog_len; i++)
+ nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
+ } else {
+ dev_priv->engine.graph.accel_blocked = true;
+ }
+ kfree(cp);
+ }
nv_wr32(dev, 0x400320, 4);
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
@@ -193,13 +195,13 @@ nv50_graph_create_context(struct nouveau_channel *chan)
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
struct nouveau_gpuobj *ctx;
- uint32_t grctx_size = 0x70000;
+ struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
int hdr, ret;
NV_DEBUG(dev, "ch%d\n", chan->id);
- ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, grctx_size, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC |
+ ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
+ 0x1000, NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &chan->ramin_grctx);
if (ret)
return ret;
@@ -209,7 +211,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.prepare_access(dev, true);
nv_wo32(dev, ramin, (hdr + 0x00)/4, 0x00190002);
nv_wo32(dev, ramin, (hdr + 0x04)/4, chan->ramin_grctx->instance +
- grctx_size - 1);
+ pgraph->grctx_size - 1);
nv_wo32(dev, ramin, (hdr + 0x08)/4, chan->ramin_grctx->instance);
nv_wo32(dev, ramin, (hdr + 0x0c)/4, 0);
nv_wo32(dev, ramin, (hdr + 0x10)/4, 0);
@@ -217,7 +219,15 @@ nv50_graph_create_context(struct nouveau_channel *chan)
dev_priv->engine.instmem.finish_access(dev);
dev_priv->engine.instmem.prepare_access(dev, true);
- nouveau_grctx_vals_load(dev, ctx);
+ if (!pgraph->ctxprog) {
+ struct nouveau_grctx ctx = {};
+ ctx.dev = chan->dev;
+ ctx.mode = NOUVEAU_GRCTX_VALS;
+ ctx.data = chan->ramin_grctx->gpuobj;
+ nv50_grctx_init(&ctx);
+ } else {
+ nouveau_grctx_vals_load(dev, ctx);
+ }
nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
if ((dev_priv->chipset & 0xf0) == 0xa0)
nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
new file mode 100644
index 00000000000..d105fcd42ca
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -0,0 +1,2367 @@
+/*
+ * Copyright 2009 Marcin Kościelnicki
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#define CP_FLAG_CLEAR 0
+#define CP_FLAG_SET 1
+#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
+#define CP_FLAG_SWAP_DIRECTION_LOAD 0
+#define CP_FLAG_SWAP_DIRECTION_SAVE 1
+#define CP_FLAG_UNK01 ((0 * 32) + 1)
+#define CP_FLAG_UNK01_CLEAR 0
+#define CP_FLAG_UNK01_SET 1
+#define CP_FLAG_UNK03 ((0 * 32) + 3)
+#define CP_FLAG_UNK03_CLEAR 0
+#define CP_FLAG_UNK03_SET 1
+#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
+#define CP_FLAG_USER_SAVE_NOT_PENDING 0
+#define CP_FLAG_USER_SAVE_PENDING 1
+#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
+#define CP_FLAG_USER_LOAD_NOT_PENDING 0
+#define CP_FLAG_USER_LOAD_PENDING 1
+#define CP_FLAG_UNK0B ((0 * 32) + 0xb)
+#define CP_FLAG_UNK0B_CLEAR 0
+#define CP_FLAG_UNK0B_SET 1
+#define CP_FLAG_UNK1D ((0 * 32) + 0x1d)
+#define CP_FLAG_UNK1D_CLEAR 0
+#define CP_FLAG_UNK1D_SET 1
+#define CP_FLAG_UNK20 ((1 * 32) + 0)
+#define CP_FLAG_UNK20_CLEAR 0
+#define CP_FLAG_UNK20_SET 1
+#define CP_FLAG_STATUS ((2 * 32) + 0)
+#define CP_FLAG_STATUS_BUSY 0
+#define CP_FLAG_STATUS_IDLE 1
+#define CP_FLAG_AUTO_SAVE ((2 * 32) + 4)
+#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
+#define CP_FLAG_AUTO_SAVE_PENDING 1
+#define CP_FLAG_AUTO_LOAD ((2 * 32) + 5)
+#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
+#define CP_FLAG_AUTO_LOAD_PENDING 1
+#define CP_FLAG_XFER ((2 * 32) + 11)
+#define CP_FLAG_XFER_IDLE 0
+#define CP_FLAG_XFER_BUSY 1
+#define CP_FLAG_NEWCTX ((2 * 32) + 12)
+#define CP_FLAG_NEWCTX_BUSY 0
+#define CP_FLAG_NEWCTX_DONE 1
+#define CP_FLAG_ALWAYS ((2 * 32) + 13)
+#define CP_FLAG_ALWAYS_FALSE 0
+#define CP_FLAG_ALWAYS_TRUE 1
+
+#define CP_CTX 0x00100000
+#define CP_CTX_COUNT 0x000f0000
+#define CP_CTX_COUNT_SHIFT 16
+#define CP_CTX_REG 0x00003fff
+#define CP_LOAD_SR 0x00200000
+#define CP_LOAD_SR_VALUE 0x000fffff
+#define CP_BRA 0x00400000
+#define CP_BRA_IP 0x0001ff00
+#define CP_BRA_IP_SHIFT 8
+#define CP_BRA_IF_CLEAR 0x00000080
+#define CP_BRA_FLAG 0x0000007f
+#define CP_WAIT 0x00500000
+#define CP_WAIT_SET 0x00000080
+#define CP_WAIT_FLAG 0x0000007f
+#define CP_SET 0x00700000
+#define CP_SET_1 0x00000080
+#define CP_SET_FLAG 0x0000007f
+#define CP_NEWCTX 0x00600004
+#define CP_NEXT_TO_SWAP 0x00600005
+#define CP_SET_CONTEXT_POINTER 0x00600006
+#define CP_SET_XFER_POINTER 0x00600007
+#define CP_ENABLE 0x00600009
+#define CP_END 0x0060000c
+#define CP_NEXT_TO_CURRENT 0x0060000d
+#define CP_DISABLE1 0x0090ffff
+#define CP_DISABLE2 0x0091ffff
+#define CP_XFER_1 0x008000ff
+#define CP_XFER_2 0x008800ff
+#define CP_SEEK_1 0x00c000ff
+#define CP_SEEK_2 0x00c800ff
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_grctx.h"
+
+/*
+ * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
+ * the GPU itself that does context-switching, but it needs a special
+ * microcode to do it. And it's the driver's task to supply this microcode,
+ * hereafter known as ctxprog, as well as the initial context values, known
+ * as ctxvals.
+ *
+ * Without ctxprog, you cannot switch contexts. Not even in software, since
+ * the majority of context [xfer strands] isn't accessible directly. You're
+ * stuck with a single channel, and you also suffer all the problems resulting
+ * from missing ctxvals, since you cannot load them.
+ *
+ * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to
+ * run 2d operations, but trying to utilise 3d or CUDA will just lock you up,
+ * since you don't have... some sort of needed setup.
+ *
+ * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since
+ * it's too much hassle to handle no-ctxprog as a special case.
+ */
+
+/*
+ * How ctxprogs work.
+ *
+ * The ctxprog is written in its own kind of microcode, with a very small and
+ * crappy set of available commands. You upload it to a small [512 insns]
+ * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to
+ * switch channels, or when the driver explicitly requests it. Stuff visible
+ * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands,
+ * the per-channel context save area in VRAM [known as ctxvals or grctx],
+ * 4 flags registers, a scratch register, two grctx pointers, plus many
+ * random poorly-understood details.
+ *
+ * When ctxprog runs, it's supposed to check what operations are asked of it,
+ * save old context if requested, optionally reset PGRAPH and switch to the
+ * new channel, and load the new context. Context consists of three major
+ * parts: a subset of MMIO registers and two "xfer areas".
+ */
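The defines above suggest a fixed instruction layout for ctxprog words; as a working assumption (not stated by the commit itself), the opcode nibble sits in bits 23:20 (CP_CTX = 0x1, CP_BRA = 0x4, ...), and the 7-bit flag field indexes the flag words listed at the top of this file (for example CP_FLAG_AUTO_SAVE is (2 * 32) + 4 = 68). A minimal decoding sketch under that assumption, with a hypothetical helper name, only to illustrate the bit layout:

static void __maybe_unused
cp_decode_word(struct drm_device *dev, uint32_t insn)
{
	switch (insn & 0x00f00000) {
	case CP_BRA:
		/* conditional branch within the 512-insn ctxprog area */
		NV_DEBUG(dev, "bra %s flag %d -> ip 0x%03x\n",
			 (insn & CP_BRA_IF_CLEAR) ? "if clear" : "if set",
			 (int)(insn & CP_BRA_FLAG),
			 (insn & CP_BRA_IP) >> CP_BRA_IP_SHIFT);
		break;
	case CP_CTX:
		/* save/restore a run of PGRAPH MMIO registers */
		NV_DEBUG(dev, "ctx reg 0x%04x count %d\n",
			 insn & CP_CTX_REG,
			 (int)((insn & CP_CTX_COUNT) >> CP_CTX_COUNT_SHIFT));
		break;
	default:
		NV_DEBUG(dev, "insn 0x%08x\n", insn);
		break;
	}
}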
+
+/* TODO:
+ * - document unimplemented bits compared to nvidia
+ * - NVAx: make a TP subroutine, use it.
+ * - use 0x4008fc instead of 0x1540?
+ */
+
+enum cp_label {
+ cp_check_load = 1,
+ cp_setup_auto_load,
+ cp_setup_load,
+ cp_setup_save,
+ cp_swap_state,
+ cp_prepare_exit,
+ cp_exit,
+};
+
+static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx);
+
+/* Main function: construct the ctxprog skeleton, call the other functions. */
+
+int
+nv50_grctx_init(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xa5:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ break;
+ default:
+ NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for "
+ "your NV%x card.\n", dev_priv->chipset);
+ NV_ERROR(ctx->dev, "Disabling acceleration. Please contact "
+ "the devs.\n");
+ return -ENOSYS;
+ }
+ /* decide whether we're loading/unloading the context */
+ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
+ cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
+
+ cp_name(ctx, cp_check_load);
+ cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
+ cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
+ cp_bra (ctx, ALWAYS, TRUE, cp_exit);
+
+ /* setup for context load */
+ cp_name(ctx, cp_setup_auto_load);
+ cp_out (ctx, CP_DISABLE1);
+ cp_out (ctx, CP_DISABLE2);
+ cp_out (ctx, CP_ENABLE);
+ cp_out (ctx, CP_NEXT_TO_SWAP);
+ cp_set (ctx, UNK01, SET);
+ cp_name(ctx, cp_setup_load);
+ cp_out (ctx, CP_NEWCTX);
+ cp_wait(ctx, NEWCTX, BUSY);
+ cp_set (ctx, UNK1D, CLEAR);
+ cp_set (ctx, SWAP_DIRECTION, LOAD);
+ cp_bra (ctx, UNK0B, SET, cp_prepare_exit);
+ cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
+
+ /* setup for context save */
+ cp_name(ctx, cp_setup_save);
+ cp_set (ctx, UNK1D, SET);
+ cp_wait(ctx, STATUS, BUSY);
+ cp_set (ctx, UNK01, SET);
+ cp_set (ctx, SWAP_DIRECTION, SAVE);
+
+ /* general PGRAPH state */
+ cp_name(ctx, cp_swap_state);
+ cp_set (ctx, UNK03, SET);
+ cp_pos (ctx, 0x00004/4);
+ cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */
+ cp_pos (ctx, 0x00100/4);
+ nv50_graph_construct_mmio(ctx);
+ nv50_graph_construct_xfer1(ctx);
+ nv50_graph_construct_xfer2(ctx);
+
+ cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
+
+ cp_set (ctx, UNK20, SET);
+ cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */
+ cp_lsr (ctx, ctx->ctxvals_base);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, 4);
+ cp_out (ctx, CP_SEEK_1);
+ cp_out (ctx, CP_XFER_1);
+ cp_wait(ctx, XFER, BUSY);
+
+ /* pre-exit state updates */
+ cp_name(ctx, cp_prepare_exit);
+ cp_set (ctx, UNK01, CLEAR);
+ cp_set (ctx, UNK03, CLEAR);
+ cp_set (ctx, UNK1D, CLEAR);
+
+ cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
+ cp_out (ctx, CP_NEXT_TO_CURRENT);
+
+ cp_name(ctx, cp_exit);
+ cp_set (ctx, USER_SAVE, NOT_PENDING);
+ cp_set (ctx, USER_LOAD, NOT_PENDING);
+ cp_out (ctx, CP_END);
+ ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */
+
+ return 0;
+}
+
+/*
+ * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which
+ * registers to save/restore and the default values for them.
+ */
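The cp_ctx() and gr_def() helpers driving the function below come from nouveau_grctx.h, which is not part of this diff. As a working assumption, cp_ctx(ctx, reg, n) marks n consecutive 32-bit PGRAPH registers starting at reg as part of the saved context, and gr_def(ctx, reg, val) records the initial ctxvals value for one register inside that range. A hedged usage sketch using the first pair of calls below:

	cp_ctx(ctx, 0x400808, 7);		/* 0x400808..0x400820 become part of the context */
	gr_def(ctx, 0x400814, 0x00000030);	/* initial ctxvals value for 0x400814 */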
+
+static void
+nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i, j;
+ int offset, base;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+
+ /* 0800 */
+ cp_ctx(ctx, 0x400808, 7);
+ gr_def(ctx, 0x400814, 0x00000030);
+ cp_ctx(ctx, 0x400834, 0x32);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, 0x400834, 0xff400040);
+ gr_def(ctx, 0x400838, 0xfff00080);
+ gr_def(ctx, 0x40083c, 0xfff70090);
+ gr_def(ctx, 0x400840, 0xffe806a8);
+ }
+ gr_def(ctx, 0x400844, 0x00000002);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ gr_def(ctx, 0x400894, 0x00001000);
+ gr_def(ctx, 0x4008e8, 0x00000003);
+ gr_def(ctx, 0x4008ec, 0x00001000);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x400908, 0xb);
+ else if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, 0x400908, 0xc);
+ else
+ cp_ctx(ctx, 0x400908, 0xe);
+
+ if (dev_priv->chipset >= 0xa0)
+ cp_ctx(ctx, 0x400b00, 0x1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ cp_ctx(ctx, 0x400b10, 0x1);
+ gr_def(ctx, 0x400b10, 0x0001629d);
+ cp_ctx(ctx, 0x400b20, 0x1);
+ gr_def(ctx, 0x400b20, 0x0001629d);
+ }
+
+ /* 0C00 */
+ cp_ctx(ctx, 0x400c08, 0x2);
+ gr_def(ctx, 0x400c08, 0x0000fe0c);
+
+ /* 1000 */
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x401008, 0x4);
+ gr_def(ctx, 0x401014, 0x00001000);
+ } else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa) {
+ cp_ctx(ctx, 0x401008, 0x5);
+ gr_def(ctx, 0x401018, 0x00001000);
+ } else {
+ cp_ctx(ctx, 0x401008, 0x5);
+ gr_def(ctx, 0x401018, 0x00004000);
+ }
+
+ /* 1400 */
+ cp_ctx(ctx, 0x401400, 0x8);
+ cp_ctx(ctx, 0x401424, 0x3);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x40142c, 0x0001fd87);
+ else
+ gr_def(ctx, 0x40142c, 0x00000187);
+ cp_ctx(ctx, 0x401540, 0x5);
+ gr_def(ctx, 0x401550, 0x00001018);
+
+ /* 1800 */
+ cp_ctx(ctx, 0x401814, 0x1);
+ gr_def(ctx, 0x401814, 0x000000ff);
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, 0x40181c, 0xe);
+ gr_def(ctx, 0x401850, 0x00000004);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x40181c, 0xf);
+ gr_def(ctx, 0x401854, 0x00000004);
+ } else {
+ cp_ctx(ctx, 0x40181c, 0x13);
+ gr_def(ctx, 0x401864, 0x00000004);
+ }
+
+ /* 1C00 */
+ cp_ctx(ctx, 0x401c00, 0x1);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, 0x401c00, 0x0001005f);
+ break;
+ case 0x84:
+ case 0x86:
+ case 0x94:
+ gr_def(ctx, 0x401c00, 0x044d00df);
+ break;
+ case 0x92:
+ case 0x96:
+ case 0x98:
+ case 0xa0:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, 0x401c00, 0x042500df);
+ break;
+ case 0xa5:
+ case 0xa8:
+ gr_def(ctx, 0x401c00, 0x142500df);
+ break;
+ }
+
+ /* 2400 */
+ cp_ctx(ctx, 0x402400, 0x1);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x402408, 0x1);
+ else
+ cp_ctx(ctx, 0x402408, 0x2);
+ gr_def(ctx, 0x402408, 0x00000600);
+
+ /* 2800 */
+ cp_ctx(ctx, 0x402800, 0x1);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x402800, 0x00000006);
+
+ /* 2C00 */
+ cp_ctx(ctx, 0x402c08, 0x6);
+ if (dev_priv->chipset != 0x50)
+ gr_def(ctx, 0x402c14, 0x01000000);
+ gr_def(ctx, 0x402c18, 0x000000ff);
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x402ca0, 0x1);
+ else
+ cp_ctx(ctx, 0x402ca0, 0x2);
+ if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x402ca0, 0x00000400);
+ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, 0x402ca0, 0x00000800);
+ else
+ gr_def(ctx, 0x402ca0, 0x00000400);
+ cp_ctx(ctx, 0x402cac, 0x4);
+
+ /* 3000 */
+ cp_ctx(ctx, 0x403004, 0x1);
+ gr_def(ctx, 0x403004, 0x00000001);
+
+ /* 3404 */
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, 0x403404, 0x1);
+ gr_def(ctx, 0x403404, 0x00000001);
+ }
+
+ /* 5000 */
+ cp_ctx(ctx, 0x405000, 0x1);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, 0x405000, 0x00300080);
+ break;
+ case 0x84:
+ case 0xa0:
+ case 0xa5:
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, 0x405000, 0x000e0080);
+ break;
+ case 0x86:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ gr_def(ctx, 0x405000, 0x00000080);
+ break;
+ }
+ cp_ctx(ctx, 0x405014, 0x1);
+ gr_def(ctx, 0x405014, 0x00000004);
+ cp_ctx(ctx, 0x40501c, 0x1);
+ cp_ctx(ctx, 0x405024, 0x1);
+ cp_ctx(ctx, 0x40502c, 0x1);
+
+ /* 5400 or maybe 4800 */
+ if (dev_priv->chipset == 0x50) {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xea);
+ } else if (dev_priv->chipset < 0x94) {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xcb);
+ } else if (dev_priv->chipset < 0xa0) {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xcc);
+ } else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ offset = 0x404800;
+ cp_ctx(ctx, 0x404800, 0xda);
+ } else {
+ offset = 0x405400;
+ cp_ctx(ctx, 0x405400, 0xd4);
+ }
+ gr_def(ctx, offset + 0x0c, 0x00000002);
+ gr_def(ctx, offset + 0x10, 0x00000001);
+ if (dev_priv->chipset >= 0x94)
+ offset += 4;
+ gr_def(ctx, offset + 0x1c, 0x00000001);
+ gr_def(ctx, offset + 0x20, 0x00000100);
+ gr_def(ctx, offset + 0x38, 0x00000002);
+ gr_def(ctx, offset + 0x3c, 0x00000001);
+ gr_def(ctx, offset + 0x40, 0x00000001);
+ gr_def(ctx, offset + 0x50, 0x00000001);
+ gr_def(ctx, offset + 0x54, 0x003fffff);
+ gr_def(ctx, offset + 0x58, 0x00001fff);
+ gr_def(ctx, offset + 0x60, 0x00000001);
+ gr_def(ctx, offset + 0x64, 0x00000001);
+ gr_def(ctx, offset + 0x6c, 0x00000001);
+ gr_def(ctx, offset + 0x70, 0x00000001);
+ gr_def(ctx, offset + 0x74, 0x00000001);
+ gr_def(ctx, offset + 0x78, 0x00000004);
+ gr_def(ctx, offset + 0x7c, 0x00000001);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ offset += 4;
+ gr_def(ctx, offset + 0x80, 0x00000001);
+ gr_def(ctx, offset + 0x84, 0x00000001);
+ gr_def(ctx, offset + 0x88, 0x00000007);
+ gr_def(ctx, offset + 0x8c, 0x00000001);
+ gr_def(ctx, offset + 0x90, 0x00000007);
+ gr_def(ctx, offset + 0x94, 0x00000001);
+ gr_def(ctx, offset + 0x98, 0x00000001);
+ gr_def(ctx, offset + 0x9c, 0x00000001);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, offset + 0xb0, 0x00000001);
+ gr_def(ctx, offset + 0xb4, 0x00000001);
+ gr_def(ctx, offset + 0xbc, 0x00000001);
+ gr_def(ctx, offset + 0xc0, 0x0000000a);
+ gr_def(ctx, offset + 0xd0, 0x00000040);
+ gr_def(ctx, offset + 0xd8, 0x00000002);
+ gr_def(ctx, offset + 0xdc, 0x00000100);
+ gr_def(ctx, offset + 0xe0, 0x00000001);
+ gr_def(ctx, offset + 0xe4, 0x00000100);
+ gr_def(ctx, offset + 0x100, 0x00000001);
+ gr_def(ctx, offset + 0x124, 0x00000004);
+ gr_def(ctx, offset + 0x13c, 0x00000001);
+ gr_def(ctx, offset + 0x140, 0x00000100);
+ gr_def(ctx, offset + 0x148, 0x00000001);
+ gr_def(ctx, offset + 0x154, 0x00000100);
+ gr_def(ctx, offset + 0x158, 0x00000001);
+ gr_def(ctx, offset + 0x15c, 0x00000100);
+ gr_def(ctx, offset + 0x164, 0x00000001);
+ gr_def(ctx, offset + 0x170, 0x00000100);
+ gr_def(ctx, offset + 0x174, 0x00000001);
+ gr_def(ctx, offset + 0x17c, 0x00000001);
+ gr_def(ctx, offset + 0x188, 0x00000002);
+ gr_def(ctx, offset + 0x190, 0x00000001);
+ gr_def(ctx, offset + 0x198, 0x00000001);
+ gr_def(ctx, offset + 0x1ac, 0x00000003);
+ offset += 0xd0;
+ } else {
+ gr_def(ctx, offset + 0xb0, 0x00000001);
+ gr_def(ctx, offset + 0xb4, 0x00000100);
+ gr_def(ctx, offset + 0xbc, 0x00000001);
+ gr_def(ctx, offset + 0xc8, 0x00000100);
+ gr_def(ctx, offset + 0xcc, 0x00000001);
+ gr_def(ctx, offset + 0xd0, 0x00000100);
+ gr_def(ctx, offset + 0xd8, 0x00000001);
+ gr_def(ctx, offset + 0xe4, 0x00000100);
+ }
+ gr_def(ctx, offset + 0xf8, 0x00000004);
+ gr_def(ctx, offset + 0xfc, 0x00000070);
+ gr_def(ctx, offset + 0x100, 0x00000080);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ offset += 4;
+ gr_def(ctx, offset + 0x114, 0x0000000c);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ gr_def(ctx, offset + 0x11c, 0x00000008);
+ gr_def(ctx, offset + 0x120, 0x00000014);
+ if (dev_priv->chipset == 0x50) {
+ gr_def(ctx, offset + 0x124, 0x00000026);
+ offset -= 0x18;
+ } else {
+ gr_def(ctx, offset + 0x128, 0x00000029);
+ gr_def(ctx, offset + 0x12c, 0x00000027);
+ gr_def(ctx, offset + 0x130, 0x00000026);
+ gr_def(ctx, offset + 0x134, 0x00000008);
+ gr_def(ctx, offset + 0x138, 0x00000004);
+ gr_def(ctx, offset + 0x13c, 0x00000027);
+ }
+ gr_def(ctx, offset + 0x148, 0x00000001);
+ gr_def(ctx, offset + 0x14c, 0x00000002);
+ gr_def(ctx, offset + 0x150, 0x00000003);
+ gr_def(ctx, offset + 0x154, 0x00000004);
+ gr_def(ctx, offset + 0x158, 0x00000005);
+ gr_def(ctx, offset + 0x15c, 0x00000006);
+ gr_def(ctx, offset + 0x160, 0x00000007);
+ gr_def(ctx, offset + 0x164, 0x00000001);
+ gr_def(ctx, offset + 0x1a8, 0x000000cf);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ gr_def(ctx, offset + 0x1d8, 0x00000080);
+ gr_def(ctx, offset + 0x1dc, 0x00000004);
+ gr_def(ctx, offset + 0x1e0, 0x00000004);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ else
+ gr_def(ctx, offset + 0x1e4, 0x00000003);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ gr_def(ctx, offset + 0x1ec, 0x00000003);
+ offset += 8;
+ }
+ gr_def(ctx, offset + 0x1e8, 0x00000001);
+ if (dev_priv->chipset == 0x50)
+ offset -= 4;
+ gr_def(ctx, offset + 0x1f4, 0x00000012);
+ gr_def(ctx, offset + 0x1f8, 0x00000010);
+ gr_def(ctx, offset + 0x1fc, 0x0000000c);
+ gr_def(ctx, offset + 0x200, 0x00000001);
+ gr_def(ctx, offset + 0x210, 0x00000004);
+ gr_def(ctx, offset + 0x214, 0x00000002);
+ gr_def(ctx, offset + 0x218, 0x00000004);
+ if (dev_priv->chipset >= 0xa0)
+ offset += 4;
+ gr_def(ctx, offset + 0x224, 0x003fffff);
+ gr_def(ctx, offset + 0x228, 0x00001fff);
+ if (dev_priv->chipset == 0x50)
+ offset -= 0x20;
+ else if (dev_priv->chipset >= 0xa0) {
+ gr_def(ctx, offset + 0x250, 0x00000001);
+ gr_def(ctx, offset + 0x254, 0x00000001);
+ gr_def(ctx, offset + 0x258, 0x00000002);
+ offset += 0x10;
+ }
+ gr_def(ctx, offset + 0x250, 0x00000004);
+ gr_def(ctx, offset + 0x254, 0x00000014);
+ gr_def(ctx, offset + 0x258, 0x00000001);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ offset += 4;
+ gr_def(ctx, offset + 0x264, 0x00000002);
+ if (dev_priv->chipset >= 0xa0)
+ offset += 8;
+ gr_def(ctx, offset + 0x270, 0x00000001);
+ gr_def(ctx, offset + 0x278, 0x00000002);
+ gr_def(ctx, offset + 0x27c, 0x00001000);
+ if (dev_priv->chipset == 0x50)
+ offset -= 0xc;
+ else {
+ gr_def(ctx, offset + 0x280, 0x00000e00);
+ gr_def(ctx, offset + 0x284, 0x00001000);
+ gr_def(ctx, offset + 0x288, 0x00001e00);
+ }
+ gr_def(ctx, offset + 0x290, 0x00000001);
+ gr_def(ctx, offset + 0x294, 0x00000001);
+ gr_def(ctx, offset + 0x298, 0x00000001);
+ gr_def(ctx, offset + 0x29c, 0x00000001);
+ gr_def(ctx, offset + 0x2a0, 0x00000001);
+ gr_def(ctx, offset + 0x2b0, 0x00000200);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ gr_def(ctx, offset + 0x2b4, 0x00000200);
+ offset += 4;
+ }
+ if (dev_priv->chipset < 0xa0) {
+ gr_def(ctx, offset + 0x2b8, 0x00000001);
+ gr_def(ctx, offset + 0x2bc, 0x00000070);
+ gr_def(ctx, offset + 0x2c0, 0x00000080);
+ gr_def(ctx, offset + 0x2cc, 0x00000001);
+ gr_def(ctx, offset + 0x2d0, 0x00000070);
+ gr_def(ctx, offset + 0x2d4, 0x00000080);
+ } else {
+ gr_def(ctx, offset + 0x2b8, 0x00000001);
+ gr_def(ctx, offset + 0x2bc, 0x000000f0);
+ gr_def(ctx, offset + 0x2c0, 0x000000ff);
+ gr_def(ctx, offset + 0x2cc, 0x00000001);
+ gr_def(ctx, offset + 0x2d0, 0x000000f0);
+ gr_def(ctx, offset + 0x2d4, 0x000000ff);
+ gr_def(ctx, offset + 0x2dc, 0x00000009);
+ offset += 4;
+ }
+ gr_def(ctx, offset + 0x2e4, 0x00000001);
+ gr_def(ctx, offset + 0x2e8, 0x000000cf);
+ gr_def(ctx, offset + 0x2f0, 0x00000001);
+ gr_def(ctx, offset + 0x300, 0x000000cf);
+ gr_def(ctx, offset + 0x308, 0x00000002);
+ gr_def(ctx, offset + 0x310, 0x00000001);
+ gr_def(ctx, offset + 0x318, 0x00000001);
+ gr_def(ctx, offset + 0x320, 0x000000cf);
+ gr_def(ctx, offset + 0x324, 0x000000cf);
+ gr_def(ctx, offset + 0x328, 0x00000001);
+
+ /* 6000? */
+ if (dev_priv->chipset == 0x50)
+ cp_ctx(ctx, 0x4063e0, 0x1);
+
+ /* 6800 */
+ if (dev_priv->chipset < 0x90) {
+ cp_ctx(ctx, 0x406814, 0x2b);
+ gr_def(ctx, 0x406818, 0x00000f80);
+ gr_def(ctx, 0x406860, 0x007f0080);
+ gr_def(ctx, 0x40689c, 0x007f0080);
+ } else {
+ cp_ctx(ctx, 0x406814, 0x4);
+ if (dev_priv->chipset == 0x98)
+ gr_def(ctx, 0x406818, 0x00000f80);
+ else
+ gr_def(ctx, 0x406818, 0x00001f80);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ gr_def(ctx, 0x40681c, 0x00000030);
+ cp_ctx(ctx, 0x406830, 0x3);
+ }
+
+ /* 7000: per-ROP group state */
+ for (i = 0; i < 8; i++) {
+ if (units & (1<<(i+16))) {
+ cp_ctx(ctx, 0x407000 + (i<<8), 3);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
+ else if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
+ else
+ gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
+ gr_def(ctx, 0x407004 + (i<<8), 0x89058001);
+
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, 0x407010 + (i<<8), 1);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x407010 + (i<<8), 2);
+ gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+ gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
+ } else {
+ cp_ctx(ctx, 0x407010 + (i<<8), 3);
+ gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
+ if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
+ else
+ gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
+ }
+
+ cp_ctx(ctx, 0x407080 + (i<<8), 4);
+ if (dev_priv->chipset != 0xa5)
+ gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
+ else
+ gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
+ else
+ gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
+ gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, 0x407094 + (i<<8), 1);
+ else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ cp_ctx(ctx, 0x407094 + (i<<8), 3);
+ else {
+ cp_ctx(ctx, 0x407094 + (i<<8), 4);
+ gr_def(ctx, 0x4070a0 + (i<<8), 1);
+ }
+ }
+ }
+
+ cp_ctx(ctx, 0x407c00, 0x3);
+ if (dev_priv->chipset < 0x90)
+ gr_def(ctx, 0x407c00, 0x00010040);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x407c00, 0x00390040);
+ else
+ gr_def(ctx, 0x407c00, 0x003d0040);
+ gr_def(ctx, 0x407c08, 0x00000022);
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, 0x407c10, 0x3);
+ cp_ctx(ctx, 0x407c20, 0x1);
+ cp_ctx(ctx, 0x407c2c, 0x1);
+ }
+
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, 0x407d00, 0x9);
+ } else {
+ cp_ctx(ctx, 0x407d00, 0x15);
+ }
+ if (dev_priv->chipset == 0x98)
+ gr_def(ctx, 0x407d08, 0x00380040);
+ else {
+ if (dev_priv->chipset < 0x90)
+ gr_def(ctx, 0x407d08, 0x00010040);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, 0x407d08, 0x00390040);
+ else
+ gr_def(ctx, 0x407d08, 0x003d0040);
+ gr_def(ctx, 0x407d0c, 0x00000022);
+ }
+
+ /* 8000+: per-TP state */
+ for (i = 0; i < 10; i++) {
+ if (units & (1<<i)) {
+ if (dev_priv->chipset < 0xa0)
+ base = 0x408000 + (i<<12);
+ else
+ base = 0x408000 + (i<<11);
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xc00;
+ else
+ offset = base + 0x80;
+ cp_ctx(ctx, offset + 0x00, 1);
+ gr_def(ctx, offset + 0x00, 0x0000ff0a);
+ cp_ctx(ctx, offset + 0x08, 1);
+
+ /* per-MP state */
+ for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
+ if (!(units & (1 << (j+24)))) continue;
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0x200 + (j<<7);
+ else
+ offset = base + 0x100 + (j<<7);
+ cp_ctx(ctx, offset, 0x20);
+ gr_def(ctx, offset + 0x00, 0x01800000);
+ gr_def(ctx, offset + 0x04, 0x00160000);
+ gr_def(ctx, offset + 0x08, 0x01800000);
+ gr_def(ctx, offset + 0x18, 0x0003ffff);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ gr_def(ctx, offset + 0x1c, 0x00080000);
+ break;
+ case 0x84:
+ gr_def(ctx, offset + 0x1c, 0x00880000);
+ break;
+ case 0x86:
+ gr_def(ctx, offset + 0x1c, 0x008c0000);
+ break;
+ case 0x92:
+ case 0x96:
+ case 0x98:
+ gr_def(ctx, offset + 0x1c, 0x118c0000);
+ break;
+ case 0x94:
+ gr_def(ctx, offset + 0x1c, 0x10880000);
+ break;
+ case 0xa0:
+ case 0xa5:
+ gr_def(ctx, offset + 0x1c, 0x310c0000);
+ break;
+ case 0xa8:
+ case 0xaa:
+ case 0xac:
+ gr_def(ctx, offset + 0x1c, 0x300c0000);
+ break;
+ }
+ gr_def(ctx, offset + 0x40, 0x00010401);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x48, 0x00000040);
+ else
+ gr_def(ctx, offset + 0x48, 0x00000078);
+ gr_def(ctx, offset + 0x50, 0x000000bf);
+ gr_def(ctx, offset + 0x58, 0x00001210);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x5c, 0x00000080);
+ else
+ gr_def(ctx, offset + 0x5c, 0x08000080);
+ if (dev_priv->chipset >= 0xa0)
+ gr_def(ctx, offset + 0x68, 0x0000003e);
+ }
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x300, 0x4);
+ else
+ cp_ctx(ctx, base + 0x300, 0x5);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, base + 0x304, 0x00007070);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, base + 0x304, 0x00027070);
+ else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, base + 0x304, 0x01127070);
+ else
+ gr_def(ctx, base + 0x304, 0x05127070);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x318, 1);
+ else
+ cp_ctx(ctx, base + 0x320, 1);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, base + 0x318, 0x0003ffff);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, base + 0x318, 0x03ffffff);
+ else
+ gr_def(ctx, base + 0x320, 0x07ffffff);
+
+ if (dev_priv->chipset < 0xa0)
+ cp_ctx(ctx, base + 0x324, 5);
+ else
+ cp_ctx(ctx, base + 0x328, 4);
+
+ if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, base + 0x340, 9);
+ offset = base + 0x340;
+ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ cp_ctx(ctx, base + 0x33c, 0xb);
+ offset = base + 0x344;
+ } else {
+ cp_ctx(ctx, base + 0x33c, 0xd);
+ offset = base + 0x344;
+ }
+ gr_def(ctx, offset + 0x0, 0x00120407);
+ gr_def(ctx, offset + 0x4, 0x05091507);
+ if (dev_priv->chipset == 0x84)
+ gr_def(ctx, offset + 0x8, 0x05100202);
+ else
+ gr_def(ctx, offset + 0x8, 0x05010202);
+ gr_def(ctx, offset + 0xc, 0x00030201);
+
+ cp_ctx(ctx, base + 0x400, 2);
+ gr_def(ctx, base + 0x404, 0x00000040);
+ cp_ctx(ctx, base + 0x40c, 2);
+ gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
+ gr_def(ctx, base + 0x410, 0x00141210);
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0x800;
+ else
+ offset = base + 0x500;
+ cp_ctx(ctx, offset, 6);
+ gr_def(ctx, offset + 0x0, 0x000001f0);
+ gr_def(ctx, offset + 0x4, 0x00000001);
+ gr_def(ctx, offset + 0x8, 0x00000003);
+ if (dev_priv->chipset == 0x50 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, offset + 0xc, 0x00008000);
+ gr_def(ctx, offset + 0x14, 0x00039e00);
+ cp_ctx(ctx, offset + 0x1c, 2);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x1c, 0x00000040);
+ else
+ gr_def(ctx, offset + 0x1c, 0x00000100);
+ gr_def(ctx, offset + 0x20, 0x00003800);
+
+ if (dev_priv->chipset >= 0xa0) {
+ cp_ctx(ctx, base + 0x54c, 2);
+ if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa)
+ gr_def(ctx, base + 0x54c, 0x003fe006);
+ else
+ gr_def(ctx, base + 0x54c, 0x003fe007);
+ gr_def(ctx, base + 0x550, 0x003fe000);
+ }
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xa00;
+ else
+ offset = base + 0x680;
+ cp_ctx(ctx, offset, 1);
+ gr_def(ctx, offset, 0x00404040);
+
+ if (dev_priv->chipset < 0xa0)
+ offset = base + 0xe00;
+ else
+ offset = base + 0x700;
+ cp_ctx(ctx, offset, 2);
+ if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, offset, 0x0077f005);
+ else if (dev_priv->chipset == 0xa5)
+ gr_def(ctx, offset, 0x6cf7f007);
+ else if (dev_priv->chipset == 0xa8)
+ gr_def(ctx, offset, 0x6cfff007);
+ else if (dev_priv->chipset == 0xac)
+ gr_def(ctx, offset, 0x0cfff007);
+ else
+ gr_def(ctx, offset, 0x0cf7f007);
+ if (dev_priv->chipset == 0x50)
+ gr_def(ctx, offset + 0x4, 0x00007fff);
+ else if (dev_priv->chipset < 0xa0)
+ gr_def(ctx, offset + 0x4, 0x003f7fff);
+ else
+ gr_def(ctx, offset + 0x4, 0x02bf7fff);
+ cp_ctx(ctx, offset + 0x2c, 1);
+ if (dev_priv->chipset == 0x50) {
+ cp_ctx(ctx, offset + 0x50, 9);
+ gr_def(ctx, offset + 0x54, 0x000003ff);
+ gr_def(ctx, offset + 0x58, 0x00000003);
+ gr_def(ctx, offset + 0x5c, 0x00000003);
+ gr_def(ctx, offset + 0x60, 0x000001ff);
+ gr_def(ctx, offset + 0x64, 0x0000001f);
+ gr_def(ctx, offset + 0x68, 0x0000000f);
+ gr_def(ctx, offset + 0x6c, 0x0000000f);
+ } else if (dev_priv->chipset < 0xa0) {
+ cp_ctx(ctx, offset + 0x50, 1);
+ cp_ctx(ctx, offset + 0x70, 1);
+ } else {
+ cp_ctx(ctx, offset + 0x50, 1);
+ cp_ctx(ctx, offset + 0x60, 5);
+ }
+ }
+ }
+}
+
+/*
+ * xfer areas. These are a pain.
+ *
+ * There are 2 xfer areas: the first one is big and contains all sorts of
+ * stuff, the second is small and contains some per-TP context.
+ *
+ * Each area is split into 8 "strands". The areas, when saved to grctx,
+ * are made of 8-word blocks. Each block contains a single word from
+ * each strand. The strands are independent of each other, their
+ * addresses are unrelated to each other, and data in them is closely
+ * packed together. The strand layout varies a bit between cards: here
+ * and there, a single word is thrown out in the middle and the whole
+ * strand is offset by a bit from the corresponding one on another chipset.
+ * For this reason, addresses of stuff in strands are almost useless.
+ * Knowing the sequence of stuff and the size of the gaps between them
+ * is much more useful, and that's how we build the strands in our generator.
+ *
+ * NVA0 takes this mess to a whole new level by cutting the old strands
+ * into a few dozen pieces [known as genes], rearranging them randomly,
+ * and putting them back together to make new strands. Hopefully these
+ * genes correspond more or less directly to the same PGRAPH subunits
+ * as in 400040 register.
+ *
+ * The most common value in default context is 0, and when the genes
+ * are separated by 0's, gene boundaries are quite speculative...
+ * some of them can be clearly deduced, others can be guessed, and yet
+ * others won't be resolved without figuring out the real meaning of
+ * a given ctxval. For the same reason, the ending point of each strand
+ * is unknown, except for strand 0, which is the longest strand and
+ * whose end corresponds to the end of the whole xfer.
+ *
+ * An unsolved mystery is the seek instruction: it takes an argument
+ * in bits 8-18, and that argument is clearly the place in strands to
+ * seek to... but the offsets don't seem to correspond to offsets as
+ * seen in grctx. Perhaps there's another, real, not randomly-changing
+ * addressing in strands, and the xfer insn just happens to skip over
+ * the unused bits? NV10-NV30 PIPE comes to mind...
+ *
+ * As far as I know, there's no way to access the xfer areas directly
+ * without the help of ctxprog.
+ */
+
+static inline void
+xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val)
+{
+ int i;
+ if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
+ for (i = 0; i < num; i++)
+ nv_wo32(ctx->dev, ctx->data, ctx->ctxvals_pos + (i << 3), val);
+ ctx->ctxvals_pos += num << 3;
+}
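
As a minimal sketch of the addressing described in the comment above: with 8 strands interleaved into 8-word blocks, word w of strand s lands at word offset start + w*8 + s in the saved grctx, which is what xf_emit() computes through ctxvals_pos (initialized to the strand's base and advanced by 8 per emitted word). The helper and its names (strand_word_offset, xfer_start) are illustrative only and are not part of the driver.

static inline unsigned int
strand_word_offset(unsigned int xfer_start, unsigned int strand,
		   unsigned int word)
{
	/* 8 words per block, one word from each of the 8 strands */
	return xfer_start + (word << 3) + strand;
}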
+
+/* Gene declarations... */
+
+static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
+static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);
+
+static void
+nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ int offset;
+ int size = 0;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+
+ offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ ctx->ctxvals_base = offset;
+
+ if (dev_priv->chipset < 0xa0) {
+ /* Strand 0 */
+ ctx->ctxvals_pos = offset;
+ switch (dev_priv->chipset) {
+ case 0x50:
+ xf_emit(ctx, 0x99, 0);
+ break;
+ case 0x84:
+ case 0x86:
+ xf_emit(ctx, 0x384, 0);
+ break;
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ case 0x98:
+ xf_emit(ctx, 0x380, 0);
+ break;
+ }
+ nv50_graph_construct_gene_m2mf (ctx);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x84:
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 0x4c4, 0);
+ break;
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0x984, 0);
+ break;
+ }
+ nv50_graph_construct_gene_unk5(ctx);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0xa, 0);
+ else
+ xf_emit(ctx, 0xb, 0);
+ nv50_graph_construct_gene_unk4(ctx);
+ nv50_graph_construct_gene_unk3(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1 */
+ ctx->ctxvals_pos = offset + 0x1;
+ nv50_graph_construct_gene_unk6(ctx);
+ nv50_graph_construct_gene_unk7(ctx);
+ nv50_graph_construct_gene_unk8(ctx);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ xf_emit(ctx, 0xfb, 0);
+ break;
+ case 0x84:
+ xf_emit(ctx, 0xd3, 0);
+ break;
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0xab, 0);
+ break;
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 0x6b, 0);
+ break;
+ }
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2 */
+ ctx->ctxvals_pos = offset + 0x2;
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x92:
+ xf_emit(ctx, 0xa80, 0);
+ break;
+ case 0x84:
+ xf_emit(ctx, 0xa7e, 0);
+ break;
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0xa7c, 0);
+ break;
+ case 0x86:
+ case 0x98:
+ xf_emit(ctx, 0xa7a, 0);
+ break;
+ }
+ xf_emit(ctx, 1, 0x3fffff);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1fff);
+ xf_emit(ctx, 0xe, 0);
+ nv50_graph_construct_gene_unk9(ctx);
+ nv50_graph_construct_gene_unk2(ctx);
+ nv50_graph_construct_gene_unk1(ctx);
+ nv50_graph_construct_gene_unk10(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3: per-ROP group state */
+ ctx->ctxvals_pos = offset + 3;
+ for (i = 0; i < 6; i++)
+ if (units & (1 << (i + 16)))
+ nv50_graph_construct_gene_ropc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strands 4-7: per-TP state */
+ for (i = 0; i < 4; i++) {
+ ctx->ctxvals_pos = offset + 4 + i;
+ if (units & (1 << (2 * i)))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << (2 * i + 1)))
+ nv50_graph_construct_xfer_tp(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ } else {
+ /* Strand 0 */
+ ctx->ctxvals_pos = offset;
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x385, 0);
+ else
+ xf_emit(ctx, 0x384, 0);
+ nv50_graph_construct_gene_m2mf(ctx);
+ xf_emit(ctx, 0x950, 0);
+ nv50_graph_construct_gene_unk10(ctx);
+ xf_emit(ctx, 1, 0x0fac6881);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ }
+ nv50_graph_construct_gene_unk8(ctx);
+ if (dev_priv->chipset == 0xa0)
+ xf_emit(ctx, 0x189, 0);
+ else if (dev_priv->chipset < 0xa8)
+ xf_emit(ctx, 0x99, 0);
+ else if (dev_priv->chipset == 0xaa)
+ xf_emit(ctx, 0x65, 0);
+ else
+ xf_emit(ctx, 0x6d, 0);
+ nv50_graph_construct_gene_unk9(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 1 */
+ ctx->ctxvals_pos = offset + 1;
+ nv50_graph_construct_gene_unk1(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 2 */
+ ctx->ctxvals_pos = offset + 2;
+ if (dev_priv->chipset == 0xa0) {
+ nv50_graph_construct_gene_unk2(ctx);
+ }
+ xf_emit(ctx, 0x36, 0);
+ nv50_graph_construct_gene_unk5(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 3 */
+ ctx->ctxvals_pos = offset + 3;
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ nv50_graph_construct_gene_unk6(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 4 */
+ ctx->ctxvals_pos = offset + 4;
+ if (dev_priv->chipset == 0xa0)
+ xf_emit(ctx, 0xa80, 0);
+ else
+ xf_emit(ctx, 0xa7a, 0);
+ xf_emit(ctx, 1, 0x3fffff);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1fff);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 5 */
+ ctx->ctxvals_pos = offset + 5;
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 0x4e3bfdf);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ for (i = 0; i < 8; i++)
+ if (units & (1<<(i+16)))
+ nv50_graph_construct_gene_ropc(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 6 */
+ ctx->ctxvals_pos = offset + 6;
+ nv50_graph_construct_gene_unk3(ctx);
+ xf_emit(ctx, 0xb, 0);
+ nv50_graph_construct_gene_unk4(ctx);
+ nv50_graph_construct_gene_unk7(ctx);
+ if (units & (1 << 0))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 1))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 2))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 3))
+ nv50_graph_construct_xfer_tp(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 7 */
+ ctx->ctxvals_pos = offset + 7;
+ if (dev_priv->chipset == 0xa0) {
+ if (units & (1 << 4))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 5))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 6))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 7))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 8))
+ nv50_graph_construct_xfer_tp(ctx);
+ if (units & (1 << 9))
+ nv50_graph_construct_xfer_tp(ctx);
+ } else {
+ nv50_graph_construct_gene_unk2(ctx);
+ }
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+
+ ctx->ctxvals_pos = offset + size * 8;
+ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ cp_lsr (ctx, offset);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, size);
+ cp_out (ctx, CP_SEEK_1);
+ cp_out (ctx, CP_XFER_1);
+ cp_wait(ctx, XFER, BUSY);
+}
+
+/*
+ * non-trivial demagiced parts of ctx init go here
+ */
+
+static void
+nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx)
+{
+ /* m2mf state */
+ xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */
+ xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */
+ xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */
+ xf_emit (ctx, 1, 0); /* OFFSET_IN */
+ xf_emit (ctx, 1, 0); /* OFFSET_OUT */
+ xf_emit (ctx, 1, 0); /* PITCH_IN */
+ xf_emit (ctx, 1, 0); /* PITCH_OUT */
+ xf_emit (ctx, 1, 0); /* LINE_LENGTH */
+ xf_emit (ctx, 1, 0); /* LINE_COUNT */
+ xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */
+ xf_emit (ctx, 1, 1); /* LINEAR_IN */
+ xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */
+ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */
+ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */
+ xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */
+ xf_emit (ctx, 1, 1); /* LINEAR_OUT */
+ xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
+ xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */
+ xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */
+ xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */
+ xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */
+ xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */
+ xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */
+}
+
+static void
+nv50_graph_construct_gene_unk1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ switch (dev_priv->chipset) {
+ case 0x50:
+ case 0x86:
+ case 0x98:
+ case 0xaa:
+ case 0xac:
+ xf_emit(ctx, 0x542, 0);
+ break;
+ case 0x84:
+ case 0x92:
+ case 0x94:
+ case 0x96:
+ xf_emit(ctx, 0x942, 0);
+ break;
+ case 0xa0:
+ xf_emit(ctx, 0x2042, 0);
+ break;
+ case 0xa5:
+ case 0xa8:
+ xf_emit(ctx, 0x842, 0);
+ break;
+ }
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x27);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x26);
+ xf_emit(ctx, 3, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk10(struct nouveau_grctx *ctx)
+{
+ /* end of area 2 on pre-NVA0, area 1 on NVAx */
+ xf_emit(ctx, 0x10, 0x04000000);
+ xf_emit(ctx, 0x24, 0);
+ xf_emit(ctx, 2, 0x04e3bfdf);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1fe21);
+}
+
+static void
+nv50_graph_construct_gene_unk2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x804);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x8100c12);
+ }
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x10);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 3, 0);
+ else
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x804);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x1a);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0x7f);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 6, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 0x38, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 0x38, 0);
+ xf_emit(ctx, 2, 0x88);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 0x16, 0);
+ xf_emit(ctx, 1, 0x26);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x3f800000);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 1, 0x10);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 0x28, 0);
+ else
+ xf_emit(ctx, 0x25, 0);
+ xf_emit(ctx, 1, 0x52);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x26);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk3(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* end of area 0 on pre-NVA0, beginning of area 6 on NVAx */
+ xf_emit(ctx, 1, 0x3f);
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 0x04000000);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x10, 0);
+ else
+ xf_emit(ctx, 0x11, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x1001);
+ xf_emit(ctx, 4, 0xffff);
+ xf_emit(ctx, 0x20, 0);
+ xf_emit(ctx, 0x10, 0x3f800000);
+ xf_emit(ctx, 1, 0x10);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0);
+ else
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 2, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk4(struct nouveau_grctx *ctx)
+{
+ /* middle of area 0 on pre-NVA0, middle of area 6 on NVAx */
+ xf_emit(ctx, 2, 0x04000000);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk5(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 0 on pre-NVA0 [after m2mf], end of area 2 on NVAx */
+ xf_emit(ctx, 2, 4);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x1c4d, 0);
+ else
+ xf_emit(ctx, 0x1c4b, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x8100c12);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x80c14);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 0x27);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x3c1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x16, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk6(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* beginning of area 1 on pre-NVA0 [after m2mf], area 3 on NVAx */
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 8, 0);
+ else
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x20);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x11, 0);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0xf, 0);
+ else
+ xf_emit(ctx, 0xe, 0);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 0xd, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 8);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ if (dev_priv->chipset == 0xa8)
+ xf_emit(ctx, 1, 0x1e00);
+ xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x125, 0);
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x126, 0);
+ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ xf_emit(ctx, 0x124, 0);
+ else
+ xf_emit(ctx, 0x1f7, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 3, 0);
+ else
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xa1, 0);
+ else
+ xf_emit(ctx, 0x5a, 0);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x834, 0);
+ else if (dev_priv->chipset == 0xa0)
+ xf_emit(ctx, 0x1873, 0);
+ else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x8ba, 0);
+ else
+ xf_emit(ctx, 0x833, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 0xf, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk7(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 6 on NVAx */
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0x100);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 8);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 1, 0xcf);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x15);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x4444480);
+ xf_emit(ctx, 0x37, 0);
+}
+
+static void
+nv50_graph_construct_gene_unk8(struct nouveau_grctx *ctx)
+{
+ /* middle of area 1 on pre-NVA0 [after m2mf], middle of area 0 on NVAx */
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x10001);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x10001);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x10001);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 2);
+}
+
+static void
+nv50_graph_construct_gene_unk9(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ /* middle of area 2 on pre-NVA0 [after m2mf], end of area 0 on NVAx */
+ xf_emit(ctx, 1, 0x3f800000);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x1a);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x12, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 0xf, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 3);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 0x04000000);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 5);
+ xf_emit(ctx, 1, 0x52);
+ if (dev_priv->chipset == 0x50) {
+ xf_emit(ctx, 0x13, 0);
+ } else {
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x11, 0);
+ else
+ xf_emit(ctx, 0x10, 0);
+ }
+ xf_emit(ctx, 0x10, 0x3f800000);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 0x26, 0);
+ xf_emit(ctx, 1, 0x8100c12);
+ xf_emit(ctx, 1, 5);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 4, 0xffff);
+ if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 3);
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x1f, 0);
+ else if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xc, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 1, 0x1a);
+ if (dev_priv->chipset != 0x50) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 3);
+ }
+ if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0x26, 0);
+ else
+ xf_emit(ctx, 0x3c, 0);
+ xf_emit(ctx, 1, 0x102);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 4, 4);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0x3ff);
+ else
+ xf_emit(ctx, 1, 0x7ff);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x102);
+ xf_emit(ctx, 9, 0);
+ xf_emit(ctx, 4, 4);
+ xf_emit(ctx, 0x2c, 0);
+}
+
+static void
+nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic2;
+ if (dev_priv->chipset == 0x50) {
+ magic2 = 0x00003e60;
+ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ magic2 = 0x001ffe67;
+ } else {
+ magic2 = 0x00087e67;
+ }
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 4, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 7, 0);
+ if (dev_priv->chipset >= 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0x15);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x400);
+ xf_emit(ctx, 1, 0x300);
+ xf_emit(ctx, 1, 0x1001);
+ if (dev_priv->chipset != 0xa0) {
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 0);
+ else
+ xf_emit(ctx, 1, 0x15);
+ }
+ xf_emit(ctx, 3, 0);
+ }
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x13, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 0x10, 0);
+ xf_emit(ctx, 0x10, 0x3f800000);
+ xf_emit(ctx, 0x19, 0);
+ xf_emit(ctx, 1, 0x10);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x3f);
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x1001);
+ xf_emit(ctx, 0xb, 0);
+ } else {
+ xf_emit(ctx, 0xc, 0);
+ }
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x11);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x18, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x16, 0);
+ } else {
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0x1b, 0);
+ else
+ xf_emit(ctx, 0x15, 0);
+ }
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 0x10, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 0x10, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 3, 0);
+ }
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x5b, 0);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x1(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic3;
+ if (dev_priv->chipset == 0x50)
+ magic3 = 0x1000;
+ else if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8)
+ magic3 = 0x1e00;
+ else
+ magic3 = 0;
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0x24, 0);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0x14, 0);
+ else
+ xf_emit(ctx, 0x15, 0);
+ xf_emit(ctx, 2, 4);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x03020100);
+ else
+ xf_emit(ctx, 1, 0x00608080);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 2, 4);
+ xf_emit(ctx, 1, 0x80);
+ if (magic3)
+ xf_emit(ctx, 1, magic3);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 0x24, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0x03020100);
+ xf_emit(ctx, 1, 3);
+ if (magic3)
+ xf_emit(ctx, 1, magic3);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
+ xf_emit(ctx, 0x1024, 0);
+ else if (dev_priv->chipset < 0xa0)
+ xf_emit(ctx, 0xa24, 0);
+ else if (dev_priv->chipset == 0xa0 || dev_priv->chipset >= 0xaa)
+ xf_emit(ctx, 0x214, 0);
+ else
+ xf_emit(ctx, 0x414, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 2, 0);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int magic1, magic2;
+ if (dev_priv->chipset == 0x50) {
+ magic1 = 0x3ff;
+ magic2 = 0x00003e60;
+ } else if (dev_priv->chipset <= 0xa0 || dev_priv->chipset >= 0xaa) {
+ magic1 = 0x7ff;
+ magic2 = 0x001ffe67;
+ } else {
+ magic1 = 0x7ff;
+ magic2 = 0x00087e67;
+ }
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0xc, 0);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 0xb, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 4, 0xffff);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 1, 0);
+ } else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 0x18, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 1, 0);
+ }
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 3, 0xcf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x11);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 1, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, magic1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x28, 0);
+ xf_emit(ctx, 8, 8);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 8, 0x400);
+ xf_emit(ctx, 8, 0x300);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x20);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x40);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 4, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 9, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x400);
+ xf_emit(ctx, 1, 0x300);
+ xf_emit(ctx, 1, 0x1001);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 4, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, 0xf);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 0x15, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 3, 0);
+ } else
+ xf_emit(ctx, 0x17, 0);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0x0fac6881);
+ xf_emit(ctx, 1, magic2);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 3, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 0);
+ else if (dev_priv->chipset != 0x50)
+ xf_emit(ctx, 1, 0);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x3(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 0);
+ else
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0x2a712488);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x4085c000);
+ xf_emit(ctx, 1, 0x40);
+ xf_emit(ctx, 1, 0x100);
+ xf_emit(ctx, 1, 0x10100);
+ xf_emit(ctx, 1, 0x02800000);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x4(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 2, 0x04e3bfdf);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 2, 1);
+ else
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 0x00ffff00);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0x30201000);
+ xf_emit(ctx, 1, 0x70605040);
+ xf_emit(ctx, 1, 0xb8a89888);
+ xf_emit(ctx, 1, 0xf8e8d8c8);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x1a);
+}
+
+static void
+nv50_graph_construct_xfer_tp_x5(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 0xfac6881);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 2, 0);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xb, 0);
+ else
+ xf_emit(ctx, 0xa, 0);
+ xf_emit(ctx, 8, 1);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0xfac6881);
+ xf_emit(ctx, 1, 0xf);
+ xf_emit(ctx, 7, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 1);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 6, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 6, 0);
+ } else {
+ xf_emit(ctx, 0xb, 0);
+ }
+}
+
+static void
+nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ if (dev_priv->chipset < 0xa0) {
+ nv50_graph_construct_xfer_tp_x1(ctx);
+ nv50_graph_construct_xfer_tp_x2(ctx);
+ nv50_graph_construct_xfer_tp_x3(ctx);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0xf, 0);
+ else
+ xf_emit(ctx, 0x12, 0);
+ nv50_graph_construct_xfer_tp_x4(ctx);
+ } else {
+ nv50_graph_construct_xfer_tp_x3(ctx);
+ if (dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 0xc, 0);
+ else
+ xf_emit(ctx, 0xa, 0);
+ nv50_graph_construct_xfer_tp_x2(ctx);
+ nv50_graph_construct_xfer_tp_x5(ctx);
+ nv50_graph_construct_xfer_tp_x4(ctx);
+ nv50_graph_construct_xfer_tp_x1(ctx);
+ }
+}
+
+static void
+nv50_graph_construct_xfer_tp2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i, mpcnt;
+ if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+ mpcnt = 1;
+ else if (dev_priv->chipset < 0xa0 || dev_priv->chipset >= 0xa8)
+ mpcnt = 2;
+ else
+ mpcnt = 3;
+ for (i = 0; i < mpcnt; i++) {
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x80);
+ xf_emit(ctx, 1, 0x80007004);
+ xf_emit(ctx, 1, 0x04000400);
+ if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 1, 0xc0);
+ xf_emit(ctx, 1, 0x1000);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa8) {
+ xf_emit(ctx, 1, 0xe00);
+ xf_emit(ctx, 1, 0x1e00);
+ }
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 2, 0x1000);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 2);
+ if (dev_priv->chipset >= 0xaa)
+ xf_emit(ctx, 0xb, 0);
+ else if (dev_priv->chipset >= 0xa0)
+ xf_emit(ctx, 0xc, 0);
+ else
+ xf_emit(ctx, 0xa, 0);
+ }
+ xf_emit(ctx, 1, 0x08100c12);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset >= 0xa0) {
+ xf_emit(ctx, 1, 0x1fe21);
+ }
+ xf_emit(ctx, 5, 0);
+ xf_emit(ctx, 4, 0xffff);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 2, 0x10001);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 0x1fe21);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 4, 0);
+ xf_emit(ctx, 1, 0x08100c12);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 8, 0);
+ xf_emit(ctx, 1, 0xfac6881);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa)
+ xf_emit(ctx, 1, 3);
+ xf_emit(ctx, 3, 0);
+ xf_emit(ctx, 1, 4);
+ xf_emit(ctx, 9, 0);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 2, 1);
+ xf_emit(ctx, 1, 2);
+ xf_emit(ctx, 3, 1);
+ xf_emit(ctx, 1, 0);
+ if (dev_priv->chipset > 0xa0 && dev_priv->chipset < 0xaa) {
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 0x10, 1);
+ xf_emit(ctx, 8, 2);
+ xf_emit(ctx, 0x18, 1);
+ xf_emit(ctx, 3, 0);
+ }
+ xf_emit(ctx, 1, 4);
+ if (dev_priv->chipset == 0x50)
+ xf_emit(ctx, 0x3a0, 0);
+ else if (dev_priv->chipset < 0x94)
+ xf_emit(ctx, 0x3a2, 0);
+ else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa)
+ xf_emit(ctx, 0x39f, 0);
+ else
+ xf_emit(ctx, 0x3a3, 0);
+ xf_emit(ctx, 1, 0x11);
+ xf_emit(ctx, 1, 0);
+ xf_emit(ctx, 1, 1);
+ xf_emit(ctx, 0x2d, 0);
+}
+
+static void
+nv50_graph_construct_xfer2(struct nouveau_grctx *ctx)
+{
+ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
+ int i;
+ uint32_t offset;
+ uint32_t units = nv_rd32 (ctx->dev, 0x1540);
+ int size = 0;
+
+ offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
+
+ if (dev_priv->chipset < 0xa0) {
+ for (i = 0; i < 8; i++) {
+ ctx->ctxvals_pos = offset + i;
+ if (i == 0)
+ xf_emit(ctx, 1, 0x08100c12);
+ if (units & (1 << i))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ } else {
+ /* Strand 0: TPs 0, 1 */
+ ctx->ctxvals_pos = offset;
+ xf_emit(ctx, 1, 0x08100c12);
+ if (units & (1 << 0))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 1))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 0: TPs 2, 3 */
+ ctx->ctxvals_pos = offset + 1;
+ if (units & (1 << 2))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 3))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 0: TPs 4, 5, 6 */
+ ctx->ctxvals_pos = offset + 2;
+ if (units & (1 << 4))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 5))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 6))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+
+ /* Strand 0: TPs 7, 8, 9 */
+ ctx->ctxvals_pos = offset + 3;
+ if (units & (1 << 7))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 8))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if (units & (1 << 9))
+ nv50_graph_construct_xfer_tp2(ctx);
+ if ((ctx->ctxvals_pos-offset)/8 > size)
+ size = (ctx->ctxvals_pos-offset)/8;
+ }
+ ctx->ctxvals_pos = offset + size * 8;
+ ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
+ cp_lsr (ctx, offset);
+ cp_out (ctx, CP_SET_XFER_POINTER);
+ cp_lsr (ctx, size);
+ cp_out (ctx, CP_SEEK_2);
+ cp_out (ctx, CP_XFER_2);
+ cp_wait(ctx, XFER, BUSY);
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c
index f0dc4e36ef0..de1f5b0062c 100644
--- a/drivers/gpu/drm/nouveau/nv50_instmem.c
+++ b/drivers/gpu/drm/nouveau/nv50_instmem.c
@@ -390,7 +390,7 @@ nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
if (gpuobj->im_backing)
return -EINVAL;
- *sz = (*sz + (NV50_INSTMEM_PAGE_SIZE-1)) & ~(NV50_INSTMEM_PAGE_SIZE-1);
+ *sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
if (*sz == 0)
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index 1cc7b937b1e..ed38262d998 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -30,6 +30,9 @@ $(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
$(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
$(call if_changed,mkregtable)
+$(obj)/r600_reg_safe.h: $(src)/reg_srcs/r600 $(obj)/mkregtable
+ $(call if_changed,mkregtable)
+
$(obj)/r100.o: $(obj)/r100_reg_safe.h $(obj)/rn50_reg_safe.h
$(obj)/r200.o: $(obj)/r200_reg_safe.h
@@ -42,6 +45,8 @@ $(obj)/r420.o: $(obj)/r420_reg_safe.h
$(obj)/rs600.o: $(obj)/rs600_reg_safe.h
+$(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
+
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
@@ -54,8 +59,10 @@ radeon-y += radeon_device.o radeon_kms.o \
radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
- r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o
+ r600_blit_kms.o radeon_pm.o atombios_dp.o r600_audio.o r600_hdmi.o \
+ evergreen.o
radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
+radeon-$(CONFIG_VGA_SWITCHEROO) += radeon_atpx_handler.o
obj-$(CONFIG_DRM_RADEON)+= radeon.o
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 7f152f66f19..d75788feac6 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -881,8 +881,6 @@ static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
- attr &= 0x38;
- attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src(ctx, attr, ptr);
@@ -897,8 +895,6 @@ static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg)
uint8_t attr = U8((*ptr)++), shift;
uint32_t saved, dst;
int dptr = *ptr;
- attr &= 0x38;
- attr |= atom_def_dst[attr >> 3] << 6;
SDEBUG(" dst: ");
dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
shift = atom_get_src(ctx, attr, ptr);
diff --git a/drivers/gpu/drm/radeon/atombios.h b/drivers/gpu/drm/radeon/atombios.h
index 91ad0d1c1b1..6732b5dd8ff 100644
--- a/drivers/gpu/drm/radeon/atombios.h
+++ b/drivers/gpu/drm/radeon/atombios.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2006-2007 Advanced Micro Devices, Inc.
+ * Copyright 2006-2007 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
@@ -20,10 +20,12 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-/****************************************************************************/
+
+/****************************************************************************/
/*Portion I: Definitions shared between VBIOS and Driver */
/****************************************************************************/
+
#ifndef _ATOMBIOS_H
#define _ATOMBIOS_H
@@ -40,39 +42,46 @@
#endif
#ifdef _H2INC
-#ifndef ULONG
-typedef unsigned long ULONG;
-#endif
+ #ifndef ULONG
+ typedef unsigned long ULONG;
+ #endif
-#ifndef UCHAR
-typedef unsigned char UCHAR;
-#endif
+ #ifndef UCHAR
+ typedef unsigned char UCHAR;
+ #endif
-#ifndef USHORT
-typedef unsigned short USHORT;
-#endif
+ #ifndef USHORT
+ typedef unsigned short USHORT;
+ #endif
#endif
-
-#define ATOM_DAC_A 0
+
+#define ATOM_DAC_A 0
#define ATOM_DAC_B 1
#define ATOM_EXT_DAC 2
#define ATOM_CRTC1 0
#define ATOM_CRTC2 1
+#define ATOM_CRTC3 2
+#define ATOM_CRTC4 3
+#define ATOM_CRTC5 4
+#define ATOM_CRTC6 5
+#define ATOM_CRTC_INVALID 0xFF
#define ATOM_DIGA 0
#define ATOM_DIGB 1
#define ATOM_PPLL1 0
#define ATOM_PPLL2 1
+#define ATOM_DCPLL 2
+#define ATOM_PPLL_INVALID 0xFF
#define ATOM_SCALER1 0
#define ATOM_SCALER2 1
-#define ATOM_SCALER_DISABLE 0
-#define ATOM_SCALER_CENTER 1
-#define ATOM_SCALER_EXPANSION 2
-#define ATOM_SCALER_MULTI_EX 3
+#define ATOM_SCALER_DISABLE 0
+#define ATOM_SCALER_CENTER 1
+#define ATOM_SCALER_EXPANSION 2
+#define ATOM_SCALER_MULTI_EX 3
#define ATOM_DISABLE 0
#define ATOM_ENABLE 1
@@ -82,6 +91,7 @@ typedef unsigned short USHORT;
#define ATOM_LCD_SELFTEST_START (ATOM_DISABLE+5)
#define ATOM_LCD_SELFTEST_STOP (ATOM_ENABLE+5)
#define ATOM_ENCODER_INIT (ATOM_DISABLE+7)
+#define ATOM_GET_STATUS (ATOM_DISABLE+8)
#define ATOM_BLANKING 1
#define ATOM_BLANKING_OFF 0
@@ -114,7 +124,7 @@ typedef unsigned short USHORT;
#define ATOM_DAC2_CV ATOM_DAC1_CV
#define ATOM_DAC2_NTSC ATOM_DAC1_NTSC
#define ATOM_DAC2_PAL ATOM_DAC1_PAL
-
+
#define ATOM_PM_ON 0
#define ATOM_PM_STANDBY 1
#define ATOM_PM_SUSPEND 2
@@ -134,6 +144,7 @@ typedef unsigned short USHORT;
#define ATOM_PANEL_MISC_TEMPORAL 0x00000040
#define ATOM_PANEL_MISC_API_ENABLED 0x00000080
+
#define MEMTYPE_DDR1 "DDR1"
#define MEMTYPE_DDR2 "DDR2"
#define MEMTYPE_DDR3 "DDR3"
@@ -145,19 +156,19 @@ typedef unsigned short USHORT;
/* Maximum size of that FireGL flag string */
-#define ATOM_FIREGL_FLAG_STRING "FGL" /* Flag used to enable FireGL Support */
-#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 /* sizeof( ATOM_FIREGL_FLAG_STRING ) */
+#define ATOM_FIREGL_FLAG_STRING "FGL" //Flag used to enable FireGL Support
+#define ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING 3 //sizeof( ATOM_FIREGL_FLAG_STRING )
-#define ATOM_FAKE_DESKTOP_STRING "DSK" /* Flag used to enable mobile ASIC on Desktop */
-#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
+#define ATOM_FAKE_DESKTOP_STRING "DSK" //Flag used to enable mobile ASIC on Desktop
+#define ATOM_MAX_SIZE_OF_FAKE_DESKTOP_STRING ATOM_MAX_SIZE_OF_FIREGL_FLAG_STRING
-#define ATOM_M54T_FLAG_STRING "M54T" /* Flag used to enable M54T Support */
-#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 /* sizeof( ATOM_M54T_FLAG_STRING ) */
+#define ATOM_M54T_FLAG_STRING "M54T" //Flag used to enable M54T Support
+#define ATOM_MAX_SIZE_OF_M54T_FLAG_STRING 4 //sizeof( ATOM_M54T_FLAG_STRING )
#define HW_ASSISTED_I2C_STATUS_FAILURE 2
#define HW_ASSISTED_I2C_STATUS_SUCCESS 1
-#pragma pack(1) /* BIOS data must use byte aligment */
+#pragma pack(1) /* BIOS data must use byte alignment */
/* Define offset to location of ROM header. */
@@ -165,367 +176,410 @@ typedef unsigned short USHORT;
#define OFFSET_TO_ATOM_ROM_IMAGE_SIZE 0x00000002L
#define OFFSET_TO_ATOMBIOS_ASIC_BUS_MEM_TYPE 0x94
-#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
+#define MAXSIZE_OF_ATOMBIOS_ASIC_BUS_MEM_TYPE 20 /* including the terminator 0x0! */
#define OFFSET_TO_GET_ATOMBIOS_STRINGS_NUMBER 0x002f
#define OFFSET_TO_GET_ATOMBIOS_STRINGS_START 0x006e
/* Common header for all ROM Data tables.
- Every table pointed _ATOM_MASTER_DATA_TABLE has this common header.
+ Every table pointed _ATOM_MASTER_DATA_TABLE has this common header.
And the pointer actually points to this header. */
-typedef struct _ATOM_COMMON_TABLE_HEADER {
- USHORT usStructureSize;
- UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
- UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
- /*Image can't be updated, while Driver needs to carry the new table! */
-} ATOM_COMMON_TABLE_HEADER;
-
-typedef struct _ATOM_ROM_HEADER {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
- atombios should init it as "ATOM", don't change the position */
- USHORT usBiosRuntimeSegmentAddress;
- USHORT usProtectedModeInfoOffset;
- USHORT usConfigFilenameOffset;
- USHORT usCRC_BlockOffset;
- USHORT usBIOS_BootupMessageOffset;
- USHORT usInt10Offset;
- USHORT usPciBusDevInitCode;
- USHORT usIoBaseAddress;
- USHORT usSubsystemVendorID;
- USHORT usSubsystemID;
- USHORT usPCI_InfoOffset;
- USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
- USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
- UCHAR ucExtendedFunctionCode;
- UCHAR ucReserved;
-} ATOM_ROM_HEADER;
+typedef struct _ATOM_COMMON_TABLE_HEADER
+{
+ USHORT usStructureSize;
+ UCHAR ucTableFormatRevision; /*Change it when the Parser is not backward compatible */
+ UCHAR ucTableContentRevision; /*Change it only when the table needs to change but the firmware */
+ /*Image can't be updated, while Driver needs to carry the new table! */
+}ATOM_COMMON_TABLE_HEADER;
+
+typedef struct _ATOM_ROM_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR uaFirmWareSignature[4]; /*Signature to distinguish between Atombios and non-atombios,
+ atombios should init it as "ATOM", don't change the position */
+ USHORT usBiosRuntimeSegmentAddress;
+ USHORT usProtectedModeInfoOffset;
+ USHORT usConfigFilenameOffset;
+ USHORT usCRC_BlockOffset;
+ USHORT usBIOS_BootupMessageOffset;
+ USHORT usInt10Offset;
+ USHORT usPciBusDevInitCode;
+ USHORT usIoBaseAddress;
+ USHORT usSubsystemVendorID;
+ USHORT usSubsystemID;
+ USHORT usPCI_InfoOffset;
+ USHORT usMasterCommandTableOffset; /*Offset for SW to get all command table offsets, Don't change the position */
+ USHORT usMasterDataTableOffset; /*Offset for SW to get all data table offsets, Don't change the position */
+ UCHAR ucExtendedFunctionCode;
+ UCHAR ucReserved;
+}ATOM_ROM_HEADER;
/*==============================Command Table Portion==================================== */
#ifdef UEFI_BUILD
-#define UTEMP USHORT
-#define USHORT void*
+ #define UTEMP USHORT
+ #define USHORT void*
#endif
-typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES {
- USHORT ASIC_Init; /* Function Table, used by various SW components,latest version 1.1 */
- USHORT GetDisplaySurfaceSize; /* Atomic Table, Used by Bios when enabling HW ICON */
- USHORT ASIC_RegistersInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT VRAM_BlockVenderDetection; /* Atomic Table, used only by Bios */
- USHORT DIGxEncoderControl; /* Only used by Bios */
- USHORT MemoryControllerInit; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT EnableCRTCMemReq; /* Function Table,directly used by various SW components,latest version 2.1 */
- USHORT MemoryParamAdjust; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed */
- USHORT DVOEncoderControl; /* Function Table,directly used by various SW components,latest version 1.2 */
- USHORT GPIOPinControl; /* Atomic Table, only used by Bios */
- USHORT SetEngineClock; /*Function Table,directly used by various SW components,latest version 1.1 */
- USHORT SetMemoryClock; /* Function Table,directly used by various SW components,latest version 1.1 */
- USHORT SetPixelClock; /*Function Table,directly used by various SW components,latest version 1.2 */
- USHORT DynamicClockGating; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT ResetMemoryDLL; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT ResetMemoryDevice; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT MemoryPLLInit;
- USHORT AdjustDisplayPll; /* only used by Bios */
- USHORT AdjustMemoryController; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT EnableASIC_StaticPwrMgt; /* Atomic Table, only used by Bios */
- USHORT ASIC_StaticPwrMgtStatusChange; /* Obsolete, only used by Bios */
- USHORT DAC_LoadDetection; /* Atomic Table, directly used by various SW components,latest version 1.2 */
- USHORT LVTMAEncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.3 */
- USHORT LCD1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DAC1EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DAC2EncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DVOOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT CV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT GetConditionalGoldenSetting; /* only used by Bios */
- USHORT TVEncoderControl; /* Function Table,directly used by various SW components,latest version 1.1 */
- USHORT TMDSAEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
- USHORT LVDSEncoderControl; /* Atomic Table, directly used by various SW components,latest version 1.3 */
- USHORT TV1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableScaler; /* Atomic Table, used only by Bios */
- USHORT BlankCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableCRTC; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT GetPixelClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableVGA_Render; /* Function Table,directly used by various SW components,latest version 1.1 */
- USHORT EnableVGA_Access; /* Obsolete , only used by Bios */
- USHORT SetCRTC_Timing; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetCRTC_OverScan; /* Atomic Table, used by various SW components,latest version 1.1 */
- USHORT SetCRTC_Replication; /* Atomic Table, used only by Bios */
- USHORT SelectCRTC_Source; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT EnableGraphSurfaces; /* Atomic Table, used only by Bios */
- USHORT UpdateCRTC_DoubleBufferRegisters;
- USHORT LUT_AutoFill; /* Atomic Table, only used by Bios */
- USHORT EnableHW_IconCursor; /* Atomic Table, only used by Bios */
- USHORT GetMemoryClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT GetEngineClock; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetCRTC_UsingDTDTiming; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT ExternalEncoderControl; /* Atomic Table, directly used by various SW components,latest version 2.1 */
- USHORT LVTMAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT VRAM_BlockDetectionByStrap; /* Atomic Table, used only by Bios */
- USHORT MemoryCleanUp; /* Atomic Table, only used by Bios */
- USHORT ProcessI2cChannelTransaction; /* Function Table,only used by Bios */
- USHORT WriteOneByteToHWAssistedI2C; /* Function Table,indirectly used by various SW components */
- USHORT ReadHWAssistedI2CStatus; /* Atomic Table, indirectly used by various SW components */
- USHORT SpeedFanControl; /* Function Table,indirectly used by various SW components,called from ASIC_Init */
- USHORT PowerConnectorDetection; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT MC_Synchronization; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT ComputeMemoryEnginePLL; /* Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock */
- USHORT MemoryRefreshConversion; /* Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock */
- USHORT VRAM_GetCurrentInfoBlock; /* Atomic Table, used only by Bios */
- USHORT DynamicMemorySettings; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT MemoryTraining; /* Atomic Table, used only by Bios */
- USHORT EnableSpreadSpectrumOnPPLL; /* Atomic Table, directly used by various SW components,latest version 1.2 */
- USHORT TMDSAOutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetVoltage; /* Function Table,directly and/or indirectly used by various SW components,latest version 1.1 */
- USHORT DAC1OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT DAC2OutputControl; /* Atomic Table, directly used by various SW components,latest version 1.1 */
- USHORT SetupHWAssistedI2CStatus; /* Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C" */
- USHORT ClockSource; /* Atomic Table, indirectly used by various SW components,called from ASIC_Init */
- USHORT MemoryDeviceInit; /* Atomic Table, indirectly used by various SW components,called from SetMemoryClock */
- USHORT EnableYUV; /* Atomic Table, indirectly used by various SW components,called from EnableVGARender */
- USHORT DIG1EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT DIG2EncoderControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT DIG1TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT DIG2TransmitterControl; /* Atomic Table,directly used by various SW components,latest version 1.1 */
- USHORT ProcessAuxChannelTransaction; /* Function Table,only used by Bios */
- USHORT DPEncoderService; /* Function Table,only used by Bios */
-} ATOM_MASTER_LIST_OF_COMMAND_TABLES;
-
-/* For backward compatible */
+typedef struct _ATOM_MASTER_LIST_OF_COMMAND_TABLES{
+ USHORT ASIC_Init; //Function Table, used by various SW components,latest version 1.1
+ USHORT GetDisplaySurfaceSize; //Atomic Table, Used by Bios when enabling HW ICON
+ USHORT ASIC_RegistersInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT VRAM_BlockVenderDetection; //Atomic Table, used only by Bios
+ USHORT DIGxEncoderControl; //Only used by Bios
+ USHORT MemoryControllerInit; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT EnableCRTCMemReq; //Function Table,directly used by various SW components,latest version 2.1
+ USHORT MemoryParamAdjust; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock if needed
+ USHORT DVOEncoderControl; //Function Table,directly used by various SW components,latest version 1.2
+ USHORT GPIOPinControl; //Atomic Table, only used by Bios
+ USHORT SetEngineClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetMemoryClock; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT SetPixelClock; //Function Table,directly used by various SW components,latest version 1.2
+ USHORT DynamicClockGating; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT ResetMemoryDLL; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ResetMemoryDevice; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT MemoryPLLInit;
+ USHORT AdjustDisplayPll; //only used by Bios
+ USHORT AdjustMemoryController; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableASIC_StaticPwrMgt; //Atomic Table, only used by Bios
+ USHORT ASIC_StaticPwrMgtStatusChange; //Obsolete , only used by Bios
+ USHORT DAC_LoadDetection; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT LVTMAEncoderControl; //Atomic Table,directly used by various SW components,latest version 1.3
+ USHORT LCD1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC1EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2EncoderControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DVOOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT CV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT GetConditionalGoldenSetting; //only used by Bios
+ USHORT TVEncoderControl; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT TMDSAEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
+ USHORT LVDSEncoderControl; //Atomic Table, directly used by various SW components,latest version 1.3
+ USHORT TV1OutputControl; //Atomic Table, Obsolete from Ry6xx, use DAC2 Output instead
+ USHORT EnableScaler; //Atomic Table, used only by Bios
+ USHORT BlankCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableCRTC; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT GetPixelClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableVGA_Render; //Function Table,directly used by various SW components,latest version 1.1
+ USHORT GetSCLKOverMCLKRatio; //Atomic Table, only used by Bios
+ USHORT SetCRTC_Timing; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetCRTC_OverScan; //Atomic Table, used by various SW components,latest version 1.1
+ USHORT SetCRTC_Replication; //Atomic Table, used only by Bios
+ USHORT SelectCRTC_Source; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT EnableGraphSurfaces; //Atomic Table, used only by Bios
+ USHORT UpdateCRTC_DoubleBufferRegisters;
+ USHORT LUT_AutoFill; //Atomic Table, only used by Bios
+ USHORT EnableHW_IconCursor; //Atomic Table, only used by Bios
+ USHORT GetMemoryClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT GetEngineClock; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetCRTC_UsingDTDTiming; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT ExternalEncoderControl; //Atomic Table, directly used by various SW components,latest version 2.1
+ USHORT LVTMAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT VRAM_BlockDetectionByStrap; //Atomic Table, used only by Bios
+ USHORT MemoryCleanUp; //Atomic Table, only used by Bios
+ USHORT ProcessI2cChannelTransaction; //Function Table,only used by Bios
+ USHORT WriteOneByteToHWAssistedI2C; //Function Table,indirectly used by various SW components
+ USHORT ReadHWAssistedI2CStatus; //Atomic Table, indirectly used by various SW components
+ USHORT SpeedFanControl; //Function Table,indirectly used by various SW components,called from ASIC_Init
+ USHORT PowerConnectorDetection; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT MC_Synchronization; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT ComputeMemoryEnginePLL; //Atomic Table, indirectly used by various SW components,called from SetMemory/EngineClock
+ USHORT MemoryRefreshConversion; //Atomic Table, indirectly used by various SW components,called from SetMemory or SetEngineClock
+ USHORT VRAM_GetCurrentInfoBlock; //Atomic Table, used only by Bios
+ USHORT DynamicMemorySettings; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT MemoryTraining; //Atomic Table, used only by Bios
+ USHORT EnableSpreadSpectrumOnPPLL; //Atomic Table, directly used by various SW components,latest version 1.2
+ USHORT TMDSAOutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetVoltage; //Function Table,directly and/or indirectly used by various SW components,latest version 1.1
+ USHORT DAC1OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT DAC2OutputControl; //Atomic Table, directly used by various SW components,latest version 1.1
+ USHORT SetupHWAssistedI2CStatus; //Function Table,only used by Bios, obsolete soon.Switch to use "ReadEDIDFromHWAssistedI2C"
+ USHORT ClockSource; //Atomic Table, indirectly used by various SW components,called from ASIC_Init
+ USHORT MemoryDeviceInit; //Atomic Table, indirectly used by various SW components,called from SetMemoryClock
+ USHORT EnableYUV; //Atomic Table, indirectly used by various SW components,called from EnableVGARender
+ USHORT DIG1EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2EncoderControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG1TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT DIG2TransmitterControl; //Atomic Table,directly used by various SW components,latest version 1.1
+ USHORT ProcessAuxChannelTransaction; //Function Table,only used by Bios
+ USHORT DPEncoderService; //Function Table,only used by Bios
+}ATOM_MASTER_LIST_OF_COMMAND_TABLES;
+
+// For backward compatible
#define ReadEDIDFromHWAssistedI2C ProcessI2cChannelTransaction
#define UNIPHYTransmitterControl DIG1TransmitterControl
#define LVTMATransmitterControl DIG2TransmitterControl
#define SetCRTC_DPM_State GetConditionalGoldenSetting
#define SetUniphyInstance ASIC_StaticPwrMgtStatusChange
+#define HPDInterruptService ReadHWAssistedI2CStatus
+#define EnableVGA_Access GetSCLKOverMCLKRatio
-typedef struct _ATOM_MASTER_COMMAND_TABLE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
-} ATOM_MASTER_COMMAND_TABLE;
-
-/****************************************************************************/
-/* Structures used in every command table */
-/****************************************************************************/
-typedef struct _ATOM_TABLE_ATTRIBUTE {
+typedef struct _ATOM_MASTER_COMMAND_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_COMMAND_TABLES ListOfCommandTables;
+}ATOM_MASTER_COMMAND_TABLE;
+
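/*
 * Illustrative sketch only, not part of this header or of this patch: driver code
 * does not walk ATOM_MASTER_COMMAND_TABLE by hand. It turns a field name from
 * ATOM_MASTER_LIST_OF_COMMAND_TABLES into a table index with the
 * GetIndexIntoMasterTable() macro defined later in this header, then hands that
 * index plus a parameter block to the ATOM interpreter. The names
 * atom_execute_table() and rdev->mode_info.atom_context are the radeon driver's
 * helpers, and sclk_10khz/mclk_10khz are placeholder clock values in 10 kHz units:
 *
 *   ASIC_INIT_PS_ALLOCATION args;
 *   int index = GetIndexIntoMasterTable(COMMAND, ASIC_Init);
 *
 *   memset(&args, 0, sizeof(args));
 *   args.sASICInitClocks.ulDefaultEngineClock = cpu_to_le32(sclk_10khz);
 *   args.sASICInitClocks.ulDefaultMemoryClock = cpu_to_le32(mclk_10khz);
 *   atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 */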
+/****************************************************************************/
+// Structures used in every command table
+/****************************************************************************/
+typedef struct _ATOM_TABLE_ATTRIBUTE
+{
#if ATOM_BIG_ENDIAN
- USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
- USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
- USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
+ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
+ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
#else
- USHORT WS_SizeInBytes:8; /* [7:0]=Size of workspace in Bytes (in multiple of a dword), */
- USHORT PS_SizeInBytes:7; /* [14:8]=Size of parameter space in Bytes (multiple of a dword), */
- USHORT UpdatedByUtility:1; /* [15]=Table updated by utility flag */
+ USHORT WS_SizeInBytes:8; //[7:0]=Size of workspace in Bytes (in multiple of a dword),
+ USHORT PS_SizeInBytes:7; //[14:8]=Size of parameter space in Bytes (multiple of a dword),
+ USHORT UpdatedByUtility:1; //[15]=Table updated by utility flag
#endif
-} ATOM_TABLE_ATTRIBUTE;
-
-typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS {
- ATOM_TABLE_ATTRIBUTE sbfAccess;
- USHORT susAccess;
-} ATOM_TABLE_ATTRIBUTE_ACCESS;
+}ATOM_TABLE_ATTRIBUTE;
-/****************************************************************************/
-/* Common header for all command tables. */
-/* Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header. */
-/* And the pointer actually points to this header. */
-/****************************************************************************/
-typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER {
- ATOM_COMMON_TABLE_HEADER CommonHeader;
- ATOM_TABLE_ATTRIBUTE TableAttribute;
-} ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
+typedef union _ATOM_TABLE_ATTRIBUTE_ACCESS
+{
+ ATOM_TABLE_ATTRIBUTE sbfAccess;
+ USHORT susAccess;
+}ATOM_TABLE_ATTRIBUTE_ACCESS;
+
+/****************************************************************************/
+// Common header for all command tables.
+// Every table pointed by _ATOM_MASTER_COMMAND_TABLE has this common header.
+// And the pointer actually points to this header.
+/****************************************************************************/
+typedef struct _ATOM_COMMON_ROM_COMMAND_TABLE_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER CommonHeader;
+ ATOM_TABLE_ATTRIBUTE TableAttribute;
+}ATOM_COMMON_ROM_COMMAND_TABLE_HEADER;
-/****************************************************************************/
-/* Structures used by ComputeMemoryEnginePLLTable */
-/****************************************************************************/
+/****************************************************************************/
+// Structures used by ComputeMemoryEnginePLLTable
+/****************************************************************************/
#define COMPUTE_MEMORY_PLL_PARAM 1
#define COMPUTE_ENGINE_PLL_PARAM 2
-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS {
- ULONG ulClock; /* When returen, it's the re-calculated clock based on given Fb_div Post_Div and ref_div */
- UCHAR ucAction; /* 0:reserved //1:Memory //2:Engine */
- UCHAR ucReserved; /* may expand to return larger Fbdiv later */
- UCHAR ucFbDiv; /* return value */
- UCHAR ucPostDiv; /* return value */
-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
-
-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 {
- ULONG ulClock; /* When return, [23:0] return real clock */
- UCHAR ucAction; /* 0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. it return ref_div to be written to register */
- USHORT usFbDiv; /* return Feedback value to be written to register */
- UCHAR ucPostDiv; /* return post div to be written to register */
-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
+{
+ ULONG ulClock; //When returned, it's the re-calculated clock based on given Fb_div Post_Div and ref_div
+ UCHAR ucAction; //0:reserved //1:Memory //2:Engine
+ UCHAR ucReserved; //may expand to return larger Fbdiv later
+ UCHAR ucFbDiv; //return value
+ UCHAR ucPostDiv; //return value
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS;
+
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2
+{
+ ULONG ulClock; //When return, [23:0] return real clock
+ UCHAR ucAction; //0:reserved;COMPUTE_MEMORY_PLL_PARAM:Memory;COMPUTE_ENGINE_PLL_PARAM:Engine. It returns ref_div to be written to the register
+ USHORT usFbDiv; //return Feedback value to be written to register
+ UCHAR ucPostDiv; //return post div to be written to register
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2;
#define COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS
-#define SET_CLOCK_FREQ_MASK 0x00FFFFFF /* Clock change tables only take bit [23:0] as the requested clock value */
-#define USE_NON_BUS_CLOCK_MASK 0x01000000 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
-#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
-#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
-#define FIRST_TIME_CHANGE_CLOCK 0x08000000 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
-#define SKIP_SW_PROGRAM_PLL 0x10000000 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
+
+#define SET_CLOCK_FREQ_MASK 0x00FFFFFF //Clock change tables only take bit [23:0] as the requested clock value
+#define USE_NON_BUS_CLOCK_MASK 0x01000000 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define USE_MEMORY_SELF_REFRESH_MASK 0x02000000 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04000000 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define FIRST_TIME_CHANGE_CLOCK 0x08000000 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define SKIP_SW_PROGRAM_PLL 0x10000000 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
#define USE_SS_ENABLED_PIXEL_CLOCK USE_NON_BUS_CLOCK_MASK
-#define b3USE_NON_BUS_CLOCK_MASK 0x01 /* Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa) */
-#define b3USE_MEMORY_SELF_REFRESH 0x02 /* Only applicable to memory clock change, when set, using memory self refresh during clock transition */
-#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 /* Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change */
-#define b3FIRST_TIME_CHANGE_CLOCK 0x08 /* Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup */
-#define b3SKIP_SW_PROGRAM_PLL 0x10 /* Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL */
+#define b3USE_NON_BUS_CLOCK_MASK 0x01 //Applicable to both memory and engine clock change, when set, it uses another clock as the temporary clock (engine uses memory and vice versa)
+#define b3USE_MEMORY_SELF_REFRESH 0x02 //Only applicable to memory clock change, when set, using memory self refresh during clock transition
+#define b3SKIP_INTERNAL_MEMORY_PARAMETER_CHANGE 0x04 //Only applicable to memory clock change, when set, the table will skip predefined internal memory parameter change
+#define b3FIRST_TIME_CHANGE_CLOCK 0x08 //Applicable to both memory and engine clock change,when set, it means this is 1st time to change clock after ASIC bootup
+#define b3SKIP_SW_PROGRAM_PLL 0x10 //Applicable to both memory and engine clock change, when set, it means the table will not program SPLL/MPLL
-typedef struct _ATOM_COMPUTE_CLOCK_FREQ {
+typedef struct _ATOM_COMPUTE_CLOCK_FREQ
+{
#if ATOM_BIG_ENDIAN
- ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
- ULONG ulClockFreq:24; /* in unit of 10kHz */
+ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
+ ULONG ulClockFreq:24; // in unit of 10kHz
#else
- ULONG ulClockFreq:24; /* in unit of 10kHz */
- ULONG ulComputeClockFlag:8; /* =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM */
+ ULONG ulClockFreq:24; // in unit of 10kHz
+ ULONG ulComputeClockFlag:8; // =1: COMPUTE_MEMORY_PLL_PARAM, =2: COMPUTE_ENGINE_PLL_PARAM
#endif
-} ATOM_COMPUTE_CLOCK_FREQ;
-
-typedef struct _ATOM_S_MPLL_FB_DIVIDER {
- USHORT usFbDivFrac;
- USHORT usFbDiv;
-} ATOM_S_MPLL_FB_DIVIDER;
+}ATOM_COMPUTE_CLOCK_FREQ;
-typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 {
- union {
- ATOM_COMPUTE_CLOCK_FREQ ulClock; /* Input Parameter */
- ATOM_S_MPLL_FB_DIVIDER ulFbDiv; /* Output Parameter */
- };
- UCHAR ucRefDiv; /* Output Parameter */
- UCHAR ucPostDiv; /* Output Parameter */
- UCHAR ucCntlFlag; /* Output Parameter */
- UCHAR ucReserved;
-} COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+typedef struct _ATOM_S_MPLL_FB_DIVIDER
+{
+ USHORT usFbDivFrac;
+ USHORT usFbDiv;
+}ATOM_S_MPLL_FB_DIVIDER;
-/* ucCntlFlag */
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3
+{
+ union
+ {
+ ATOM_COMPUTE_CLOCK_FREQ ulClock; //Input Parameter
+ ATOM_S_MPLL_FB_DIVIDER ulFbDiv; //Output Parameter
+ };
+ UCHAR ucRefDiv; //Output Parameter
+ UCHAR ucPostDiv; //Output Parameter
+ UCHAR ucCntlFlag; //Output Parameter
+ UCHAR ucReserved;
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3;
+
+// ucCntlFlag
#define ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN 1
#define ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE 2
#define ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE 4
+#define ATOM_PLL_CNTL_FLAG_SPLL_ISPARE_9 8
-typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER {
- ATOM_COMPUTE_CLOCK_FREQ ulClock;
- ULONG ulReserved[2];
-} DYNAMICE_MEMORY_SETTINGS_PARAMETER;
-
-typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER {
- ATOM_COMPUTE_CLOCK_FREQ ulClock;
- ULONG ulMemoryClock;
- ULONG ulReserved;
-} DYNAMICE_ENGINE_SETTINGS_PARAMETER;
-
-/****************************************************************************/
-/* Structures used by SetEngineClockTable */
-/****************************************************************************/
-typedef struct _SET_ENGINE_CLOCK_PARAMETERS {
- ULONG ulTargetEngineClock; /* In 10Khz unit */
-} SET_ENGINE_CLOCK_PARAMETERS;
-typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION {
- ULONG ulTargetEngineClock; /* In 10Khz unit */
- COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
-} SET_ENGINE_CLOCK_PS_ALLOCATION;
+// V4 are only used for APU which PLL outside GPU
+typedef struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4
+{
+#if ATOM_BIG_ENDIAN
+ ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
+ ULONG ulClock:24; //Input= target clock, output = actual clock
+#else
+ ULONG ulClock:24; //Input= target clock, output = actual clock
+ ULONG ucPostDiv; //return parameter: post divider which is used to program to register directly
+#endif
+}COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4;
-/****************************************************************************/
-/* Structures used by SetMemoryClockTable */
-/****************************************************************************/
-typedef struct _SET_MEMORY_CLOCK_PARAMETERS {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
-} SET_MEMORY_CLOCK_PARAMETERS;
+typedef struct _DYNAMICE_MEMORY_SETTINGS_PARAMETER
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulReserved[2];
+}DYNAMICE_MEMORY_SETTINGS_PARAMETER;
-typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
- COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
-} SET_MEMORY_CLOCK_PS_ALLOCATION;
+typedef struct _DYNAMICE_ENGINE_SETTINGS_PARAMETER
+{
+ ATOM_COMPUTE_CLOCK_FREQ ulClock;
+ ULONG ulMemoryClock;
+ ULONG ulReserved;
+}DYNAMICE_ENGINE_SETTINGS_PARAMETER;
+
+/****************************************************************************/
+// Structures used by SetEngineClockTable
+/****************************************************************************/
+typedef struct _SET_ENGINE_CLOCK_PARAMETERS
+{
+ ULONG ulTargetEngineClock; //In 10Khz unit
+}SET_ENGINE_CLOCK_PARAMETERS;
-/****************************************************************************/
-/* Structures used by ASIC_Init.ctb */
-/****************************************************************************/
-typedef struct _ASIC_INIT_PARAMETERS {
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
-} ASIC_INIT_PARAMETERS;
+typedef struct _SET_ENGINE_CLOCK_PS_ALLOCATION
+{
+ ULONG ulTargetEngineClock; //In 10Khz unit
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_ENGINE_CLOCK_PS_ALLOCATION;
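/*
 * Sketch only, not part of this patch: the radeon driver's
 * radeon_atom_set_engine_clock() follows essentially this pattern when it calls
 * the SetEngineClock command table. eng_clock is a placeholder value in 10 kHz
 * units; atom_execute_table() and rdev->mode_info.atom_context come from the
 * surrounding driver, not from this header:
 *
 *   SET_ENGINE_CLOCK_PS_ALLOCATION args;
 *   int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock);
 *
 *   memset(&args, 0, sizeof(args));
 *   args.ulTargetEngineClock = cpu_to_le32(eng_clock);
 *   atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 */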
+
+/****************************************************************************/
+// Structures used by SetMemoryClockTable
+/****************************************************************************/
+typedef struct _SET_MEMORY_CLOCK_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+}SET_MEMORY_CLOCK_PARAMETERS;
-typedef struct _ASIC_INIT_PS_ALLOCATION {
- ASIC_INIT_PARAMETERS sASICInitClocks;
- SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; /* Caller doesn't need to init this structure */
-} ASIC_INIT_PS_ALLOCATION;
+typedef struct _SET_MEMORY_CLOCK_PS_ALLOCATION
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+ COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_PS_ALLOCATION sReserved;
+}SET_MEMORY_CLOCK_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by ASIC_Init.ctb
+/****************************************************************************/
+typedef struct _ASIC_INIT_PARAMETERS
+{
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+}ASIC_INIT_PARAMETERS;
-/****************************************************************************/
-/* Structure used by DynamicClockGatingTable.ctb */
-/****************************************************************************/
-typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[3];
-} DYNAMIC_CLOCK_GATING_PARAMETERS;
+typedef struct _ASIC_INIT_PS_ALLOCATION
+{
+ ASIC_INIT_PARAMETERS sASICInitClocks;
+ SET_ENGINE_CLOCK_PS_ALLOCATION sReserved; //Caller doesn't need to init this structure
+}ASIC_INIT_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structure used by DynamicClockGatingTable.ctb
+/****************************************************************************/
+typedef struct _DYNAMIC_CLOCK_GATING_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}DYNAMIC_CLOCK_GATING_PARAMETERS;
#define DYNAMIC_CLOCK_GATING_PS_ALLOCATION DYNAMIC_CLOCK_GATING_PARAMETERS
-/****************************************************************************/
-/* Structure used by EnableASIC_StaticPwrMgtTable.ctb */
-/****************************************************************************/
-typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[3];
-} ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
+/****************************************************************************/
+// Structure used by EnableASIC_StaticPwrMgtTable.ctb
+/****************************************************************************/
+typedef struct _ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS;
#define ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION ENABLE_ASIC_STATIC_PWR_MGT_PARAMETERS
-/****************************************************************************/
-/* Structures used by DAC_LoadDetectionTable.ctb */
-/****************************************************************************/
-typedef struct _DAC_LOAD_DETECTION_PARAMETERS {
- USHORT usDeviceID; /* {ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT} */
- UCHAR ucDacType; /* {ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC} */
- UCHAR ucMisc; /* Valid only when table revision =1.3 and above */
-} DAC_LOAD_DETECTION_PARAMETERS;
+/****************************************************************************/
+// Structures used by DAC_LoadDetectionTable.ctb
+/****************************************************************************/
+typedef struct _DAC_LOAD_DETECTION_PARAMETERS
+{
+ USHORT usDeviceID; //{ATOM_DEVICE_CRTx_SUPPORT,ATOM_DEVICE_TVx_SUPPORT,ATOM_DEVICE_CVx_SUPPORT}
+ UCHAR ucDacType; //{ATOM_DAC_A,ATOM_DAC_B, ATOM_EXT_DAC}
+ UCHAR ucMisc; //Valid only when table revision =1.3 and above
+}DAC_LOAD_DETECTION_PARAMETERS;
-/* DAC_LOAD_DETECTION_PARAMETERS.ucMisc */
+// DAC_LOAD_DETECTION_PARAMETERS.ucMisc
#define DAC_LOAD_MISC_YPrPb 0x01
-typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION {
- DAC_LOAD_DETECTION_PARAMETERS sDacload;
- ULONG Reserved[2]; /* Don't set this one, allocation for EXT DAC */
-} DAC_LOAD_DETECTION_PS_ALLOCATION;
-
-/****************************************************************************/
-/* Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb */
-/****************************************************************************/
-typedef struct _DAC_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucDacStandard; /* See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0) */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
- /* 7: ATOM_ENCODER_INIT Initialize DAC */
-} DAC_ENCODER_CONTROL_PARAMETERS;
+typedef struct _DAC_LOAD_DETECTION_PS_ALLOCATION
+{
+ DAC_LOAD_DETECTION_PARAMETERS sDacload;
+ ULONG Reserved[2];// Don't set this one, allocation for EXT DAC
+}DAC_LOAD_DETECTION_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by DAC1EncoderControlTable.ctb and DAC2EncoderControlTable.ctb
+/****************************************************************************/
+typedef struct _DAC_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ UCHAR ucDacStandard; // See definition of ATOM_DACx_xxx, For DEC3.0, bit 7 used as internal flag to indicate DAC2 (==1) or DAC1 (==0)
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+ // 7: ATOM_ENCODER_INIT Initialize DAC
+}DAC_ENCODER_CONTROL_PARAMETERS;
#define DAC_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PARAMETERS
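/*
 * Sketch only, not part of this patch: a typical DAC1EncoderControl call fills the
 * structure above and runs it through the ATOM interpreter. pixel_clock_10khz is a
 * placeholder (pixel clock already converted to 10 kHz units); ATOM_ENABLE and
 * ATOM_DAC1_PS2 are defines from elsewhere in this header, and atom_execute_table()
 * is the radeon driver's helper:
 *
 *   DAC_ENCODER_CONTROL_PS_ALLOCATION args;
 *   int index = GetIndexIntoMasterTable(COMMAND, DAC1EncoderControl);
 *
 *   memset(&args, 0, sizeof(args));
 *   args.ucAction = ATOM_ENABLE;
 *   args.ucDacStandard = ATOM_DAC1_PS2;                // analog CRT standard
 *   args.usPixelClock = cpu_to_le16(pixel_clock_10khz);
 *   atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 */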
-/****************************************************************************/
-/* Structures used by DIG1EncoderControlTable */
-/* DIG2EncoderControlTable */
-/* ExternalEncoderControlTable */
-/****************************************************************************/
-typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucConfig;
- /* [2] Link Select: */
- /* =0: PHY linkA if bfLane<3 */
- /* =1: PHY linkB if bfLanes<3 */
- /* =0: PHY linkA+B if bfLanes=3 */
- /* [3] Transmitter Sel */
- /* =0: UNIPHY or PCIEPHY */
- /* =1: LVTMA */
- UCHAR ucAction; /* =0: turn off encoder */
- /* =1: turn on encoder */
- UCHAR ucEncoderMode;
- /* =0: DP encoder */
- /* =1: LVDS encoder */
- /* =2: DVI encoder */
- /* =3: HDMI encoder */
- /* =4: SDVO encoder */
- UCHAR ucLaneNum; /* how many lanes to enable */
- UCHAR ucReserved[2];
-} DIG_ENCODER_CONTROL_PARAMETERS;
+/****************************************************************************/
+// Structures used by DIG1EncoderControlTable
+// DIG2EncoderControlTable
+// ExternalEncoderControlTable
+/****************************************************************************/
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ UCHAR ucConfig;
+ // [2] Link Select:
+ // =0: PHY linkA if bfLane<3
+ // =1: PHY linkB if bfLanes<3
+ // =0: PHY linkA+B if bfLanes=3
+ // [3] Transmitter Sel
+ // =0: UNIPHY or PCIEPHY
+ // =1: LVTMA
+ UCHAR ucAction; // =0: turn off encoder
+ // =1: turn on encoder
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucReserved[2];
+}DIG_ENCODER_CONTROL_PARAMETERS;
#define DIG_ENCODER_CONTROL_PS_ALLOCATION DIG_ENCODER_CONTROL_PARAMETERS
#define EXTERNAL_ENCODER_CONTROL_PARAMETER DIG_ENCODER_CONTROL_PARAMETERS
-/* ucConfig */
+//ucConfig
#define ATOM_ENCODER_CONFIG_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ 0x01
@@ -539,52 +593,57 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS {
#define ATOM_ENCODER_CONFIG_LVTMA 0x08
#define ATOM_ENCODER_CONFIG_TRANSMITTER1 0x00
#define ATOM_ENCODER_CONFIG_TRANSMITTER2 0x08
-#define ATOM_ENCODER_CONFIG_DIGB 0x80 /* VBIOS Internal use, outside SW should set this bit=0 */
-/* ucAction */
-/* ATOM_ENABLE: Enable Encoder */
-/* ATOM_DISABLE: Disable Encoder */
+#define ATOM_ENCODER_CONFIG_DIGB 0x80 // VBIOS Internal use, outside SW should set this bit=0
+// ucAction
+// ATOM_ENABLE: Enable Encoder
+// ATOM_DISABLE: Disable Encoder
-/* ucEncoderMode */
+//ucEncoderMode
#define ATOM_ENCODER_MODE_DP 0
#define ATOM_ENCODER_MODE_LVDS 1
#define ATOM_ENCODER_MODE_DVI 2
#define ATOM_ENCODER_MODE_HDMI 3
#define ATOM_ENCODER_MODE_SDVO 4
+#define ATOM_ENCODER_MODE_DP_AUDIO 5
#define ATOM_ENCODER_MODE_TV 13
#define ATOM_ENCODER_MODE_CV 14
#define ATOM_ENCODER_MODE_CRT 15
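/*
 * Sketch only, not part of this patch: enabling a DIG encoder for single-link DVI
 * with the structure above typically looks like the following. The lane count and
 * mode are example values, pixel_clock_10khz is a placeholder, and the
 * atom_execute_table()/rdev names are the radeon driver's:
 *
 *   DIG_ENCODER_CONTROL_PS_ALLOCATION args;
 *   int index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
 *
 *   memset(&args, 0, sizeof(args));
 *   args.ucAction = ATOM_ENABLE;
 *   args.ucEncoderMode = ATOM_ENCODER_MODE_DVI;
 *   args.ucLaneNum = 4;                                // single-link TMDS
 *   args.usPixelClock = cpu_to_le16(pixel_clock_10khz);
 *   atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 */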
-typedef struct _ATOM_DIG_ENCODER_CONFIG_V2 {
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V2
+{
#if ATOM_BIG_ENDIAN
- UCHAR ucReserved1:2;
- UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
- UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
- UCHAR ucReserved:1;
- UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
+ UCHAR ucReserved1:2;
+ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
+ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
+ UCHAR ucReserved:1;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
#else
- UCHAR ucDPLinkRate:1; /* =0: 1.62Ghz, =1: 2.7Ghz */
- UCHAR ucReserved:1;
- UCHAR ucLinkSel:1; /* =0: linkA/C/E =1: linkB/D/F */
- UCHAR ucTransmitterSel:2; /* =0: UniphyAB, =1: UniphyCD =2: UniphyEF */
- UCHAR ucReserved1:2;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+ UCHAR ucReserved:1;
+ UCHAR ucLinkSel:1; // =0: linkA/C/E =1: linkB/D/F
+ UCHAR ucTransmitterSel:2; // =0: UniphyAB, =1: UniphyCD =2: UniphyEF
+ UCHAR ucReserved1:2;
#endif
-} ATOM_DIG_ENCODER_CONFIG_V2;
+}ATOM_DIG_ENCODER_CONFIG_V2;
-typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
- UCHAR ucAction;
- UCHAR ucEncoderMode;
- /* =0: DP encoder */
- /* =1: LVDS encoder */
- /* =2: DVI encoder */
- /* =3: HDMI encoder */
- /* =4: SDVO encoder */
- UCHAR ucLaneNum; /* how many lanes to enable */
- UCHAR ucReserved[2];
-} DIG_ENCODER_CONTROL_PARAMETERS_V2;
-/* ucConfig */
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ ATOM_DIG_ENCODER_CONFIG_V2 acConfig;
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucStatus; // = DP_LINK_TRAINING_COMPLETE or DP_LINK_TRAINING_INCOMPLETE, only used by VBIOS with command ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS
+ UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V2;
+
+//ucConfig
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_MASK 0x01
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_1_62GHZ 0x00
#define ATOM_ENCODER_CONFIG_V2_DPLINKRATE_2_70GHZ 0x01
@@ -596,58 +655,122 @@ typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V2 {
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER2 0x08
#define ATOM_ENCODER_CONFIG_V2_TRANSMITTER3 0x10
-/****************************************************************************/
-/* Structures used by UNIPHYTransmitterControlTable */
-/* LVTMATransmitterControlTable */
-/* DVOOutputControlTable */
-/****************************************************************************/
-typedef struct _ATOM_DP_VS_MODE {
- UCHAR ucLaneSel;
- UCHAR ucLaneSet;
-} ATOM_DP_VS_MODE;
-
-typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
- union {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */
- ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
+// ucAction:
+// ATOM_DISABLE
+// ATOM_ENABLE
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_START 0x08
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1 0x09
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2 0x0a
+#define ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE 0x0b
+#define ATOM_ENCODER_CMD_DP_VIDEO_OFF 0x0c
+#define ATOM_ENCODER_CMD_DP_VIDEO_ON 0x0d
+#define ATOM_ENCODER_CMD_QUERY_DP_LINK_TRAINING_STATUS 0x0e
+#define ATOM_ENCODER_CMD_SETUP 0x0f
+
+// ucStatus
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_COMPLETE 0x10
+#define ATOM_ENCODER_STATUS_LINK_TRAINING_INCOMPLETE 0x00
+
+// The ENABLE sub-function of the following table is used by the driver when TMDS/HDMI/LVDS is in use; the DISABLE sub-function is used by the driver as well
+typedef struct _ATOM_DIG_ENCODER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucReserved1:1;
+ UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucReserved:3;
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+#else
+ UCHAR ucDPLinkRate:1; // =0: 1.62Ghz, =1: 2.7Ghz
+ UCHAR ucReserved:3;
+ UCHAR ucDigSel:3; // =0: DIGA/B/C/D/E/F
+ UCHAR ucReserved1:1;
+#endif
+}ATOM_DIG_ENCODER_CONFIG_V3;
+
+#define ATOM_ENCODER_CONFIG_V3_ENCODER_SEL 0x70
+
+
+typedef struct _DIG_ENCODER_CONTROL_PARAMETERS_V3
+{
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ ATOM_DIG_ENCODER_CONFIG_V3 acConfig;
+ UCHAR ucAction;
+ UCHAR ucEncoderMode;
+ // =0: DP encoder
+ // =1: LVDS encoder
+ // =2: DVI encoder
+ // =3: HDMI encoder
+ // =4: SDVO encoder
+ // =5: DP audio
+ UCHAR ucLaneNum; // how many lanes to enable
+ UCHAR ucBitPerColor; // only valid for DP mode when ucAction = ATOM_ENCODER_CMD_SETUP
+ UCHAR ucReserved;
+}DIG_ENCODER_CONTROL_PARAMETERS_V3;
+
+
+// define ucBitPerColor:
+#define PANEL_BPC_UNDEFINE 0x00
+#define PANEL_6BIT_PER_COLOR 0x01
+#define PANEL_8BIT_PER_COLOR 0x02
+#define PANEL_10BIT_PER_COLOR 0x03
+#define PANEL_12BIT_PER_COLOR 0x04
+#define PANEL_16BIT_PER_COLOR 0x05
+
+/****************************************************************************/
+// Structures used by UNIPHYTransmitterControlTable
+// LVTMATransmitterControlTable
+// DVOOutputControlTable
+/****************************************************************************/
+typedef struct _ATOM_DP_VS_MODE
+{
+ UCHAR ucLaneSel;
+ UCHAR ucLaneSet;
+}ATOM_DP_VS_MODE;
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
};
- UCHAR ucConfig;
- /* [0]=0: 4 lane Link, */
- /* =1: 8 lane Link ( Dual Links TMDS ) */
- /* [1]=0: InCoherent mode */
- /* =1: Coherent Mode */
- /* [2] Link Select: */
- /* =0: PHY linkA if bfLane<3 */
- /* =1: PHY linkB if bfLanes<3 */
- /* =0: PHY linkA+B if bfLanes=3 */
- /* [5:4]PCIE lane Sel */
- /* =0: lane 0~3 or 0~7 */
- /* =1: lane 4~7 */
- /* =2: lane 8~11 or 8~15 */
- /* =3: lane 12~15 */
- UCHAR ucAction; /* =0: turn off encoder */
- /* =1: turn on encoder */
- UCHAR ucReserved[4];
-} DIG_TRANSMITTER_CONTROL_PARAMETERS;
-
-#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
-
-/* ucInitInfo */
-#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
-
-/* ucConfig */
+ UCHAR ucConfig;
+ // [0]=0: 4 lane Link,
+ // =1: 8 lane Link ( Dual Links TMDS )
+ // [1]=0: InCoherent mode
+ // =1: Coherent Mode
+ // [2] Link Select:
+ // =0: PHY linkA if bfLane<3
+ // =1: PHY linkB if bfLanes<3
+ // =0: PHY linkA+B if bfLanes=3
+ // [5:4]PCIE lane Sel
+ // =0: lane 0~3 or 0~7
+ // =1: lane 4~7
+ // =2: lane 8~11 or 8~15
+ // =3: lane 12~15
+ UCHAR ucAction; // =0: turn off encoder
+ // =1: turn on encoder
+ UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS;
+
+#define DIG_TRANSMITTER_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PARAMETERS
+
+//ucInitInfo
+#define ATOM_TRAMITTER_INITINFO_CONNECTOR_MASK 0x00ff
+
+//ucConfig
#define ATOM_TRANSMITTER_CONFIG_8LANE_LINK 0x01
#define ATOM_TRANSMITTER_CONFIG_COHERENT 0x02
#define ATOM_TRANSMITTER_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_TRANSMITTER_CONFIG_LINKA 0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB 0x04
-#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
+#define ATOM_TRANSMITTER_CONFIG_LINKA_B 0x00
#define ATOM_TRANSMITTER_CONFIG_LINKB_A 0x04
-#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
-#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
-#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 /* only used when ATOM_TRANSMITTER_ACTION_ENABLE */
+#define ATOM_TRANSMITTER_CONFIG_ENCODER_SEL_MASK 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER 0x00 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
+#define ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER 0x08 // only used when ATOM_TRANSMITTER_ACTION_ENABLE
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_MASK 0x30
#define ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL 0x00
@@ -661,7 +784,7 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
#define ATOM_TRANSMITTER_CONFIG_LANE_8_15 0x80
#define ATOM_TRANSMITTER_CONFIG_LANE_12_15 0xc0
-/* ucAction */
+//ucAction
#define ATOM_TRANSMITTER_ACTION_DISABLE 0
#define ATOM_TRANSMITTER_ACTION_ENABLE 1
#define ATOM_TRANSMITTER_ACTION_LCD_BLOFF 2
@@ -674,93 +797,168 @@ typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS {
#define ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT 9
#define ATOM_TRANSMITTER_ACTION_SETUP 10
#define ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH 11
+#define ATOM_TRANSMITTER_ACTION_POWER_ON 12
+#define ATOM_TRANSMITTER_ACTION_POWER_OFF 13
-/* Following are used for DigTransmitterControlTable ver1.2 */
-typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2 {
+// Following are used for DigTransmitterControlTable ver1.2
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V2
+{
#if ATOM_BIG_ENDIAN
- UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
- /* =1 Dig Transmitter 2 ( Uniphy CD ) */
- /* =2 Dig Transmitter 3 ( Uniphy EF ) */
- UCHAR ucReserved:1;
- UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */
- UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
- UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
- /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
-
- UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
- UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucReserved:1;
+ UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
#else
- UCHAR fDualLinkConnector:1; /* bit0=1: Dual Link DVI connector */
- UCHAR fCoherentMode:1; /* bit1=1: Coherent Mode ( for DVI/HDMI mode ) */
- UCHAR ucLinkSel:1; /* bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E */
- /* =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F */
- UCHAR ucEncoderSel:1; /* bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 ) */
- UCHAR fDPConnector:1; /* bit4=0: DP connector =1: None DP connector */
- UCHAR ucReserved:1;
- UCHAR ucTransmitterSel:2; /* bit7:6: =0 Dig Transmitter 1 ( Uniphy AB ) */
- /* =1 Dig Transmitter 2 ( Uniphy CD ) */
- /* =2 Dig Transmitter 3 ( Uniphy EF ) */
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA( DIG inst0 ). =1: Data/clk path source from DIGB ( DIG inst1 )
+ UCHAR fDPConnector:1; //bit4=0: DP connector =1: None DP connector
+ UCHAR ucReserved:1;
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
#endif
-} ATOM_DIG_TRANSMITTER_CONFIG_V2;
+}ATOM_DIG_TRANSMITTER_CONFIG_V2;
-/* ucConfig */
-/* Bit0 */
+//ucConfig
+//Bit0
#define ATOM_TRANSMITTER_CONFIG_V2_DUAL_LINK_CONNECTOR 0x01
-/* Bit1 */
+//Bit1
#define ATOM_TRANSMITTER_CONFIG_V2_COHERENT 0x02
-/* Bit2 */
+//Bit2
#define ATOM_TRANSMITTER_CONFIG_V2_LINK_SEL_MASK 0x04
-#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V2_LINKA 0x00
#define ATOM_TRANSMITTER_CONFIG_V2_LINKB 0x04
-/* Bit3 */
+// Bit3
#define ATOM_TRANSMITTER_CONFIG_V2_ENCODER_SEL_MASK 0x08
-#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
-#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 /* only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP */
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG1_ENCODER 0x00 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
+#define ATOM_TRANSMITTER_CONFIG_V2_DIG2_ENCODER 0x08 // only used when ucAction == ATOM_TRANSMITTER_ACTION_ENABLE or ATOM_TRANSMITTER_ACTION_SETUP
-/* Bit4 */
+// Bit4
#define ATOM_TRASMITTER_CONFIG_V2_DP_CONNECTOR 0x10
-/* Bit7:6 */
+// Bit7:6
#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER_SEL_MASK 0xC0
-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 /* AB */
-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 /* CD */
-#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 /* EF */
-
-typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 {
- union {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- USHORT usInitInfo; /* when init uniphy,lower 8bit is used for connector type defined in objectid.h */
- ATOM_DP_VS_MODE asMode; /* DP Voltage swing mode */
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V2_TRANSMITTER3 0x80 //EF
+
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V2
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
};
- ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
- UCHAR ucAction; /* define as ATOM_TRANSMITER_ACTION_XXX */
- UCHAR ucReserved[4];
-} DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
+ ATOM_DIG_TRANSMITTER_CONFIG_V2 acConfig;
+ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
+ UCHAR ucReserved[4];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V2;
-/****************************************************************************/
-/* Structures used by DAC1OuputControlTable */
-/* DAC2OuputControlTable */
-/* LVTMAOutputControlTable (Before DEC30) */
-/* TMDSAOutputControlTable (Before DEC30) */
-/****************************************************************************/
-typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
- UCHAR ucAction; /* Possible input:ATOM_ENABLE||ATOMDISABLE */
- /* When the display is LCD, in addition to above: */
- /* ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START|| */
- /* ATOM_LCD_SELFTEST_STOP */
+typedef struct _ATOM_DIG_TRANSMITTER_CONFIG_V3
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+#else
+ UCHAR fDualLinkConnector:1; //bit0=1: Dual Link DVI connector
+ UCHAR fCoherentMode:1; //bit1=1: Coherent Mode ( for DVI/HDMI mode )
+ UCHAR ucLinkSel:1; //bit2=0: Uniphy LINKA or C or E when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is A or C or E
+ // =1: Uniphy LINKB or D or F when fDualLinkConnector=0. when fDualLinkConnector=1, it means master link of dual link is B or D or F
+ UCHAR ucEncoderSel:1; //bit3=0: Data/Clk path source from DIGA/C/E. =1: Data/clk path source from DIGB/D/F
+ UCHAR ucRefClkSource:2; //bit5:4: PPLL1 =0, PPLL2=1, EXT_CLK=2
+ UCHAR ucTransmitterSel:2; //bit7:6: =0 Dig Transmitter 1 ( Uniphy AB )
+ // =1 Dig Transmitter 2 ( Uniphy CD )
+ // =2 Dig Transmitter 3 ( Uniphy EF )
+#endif
+}ATOM_DIG_TRANSMITTER_CONFIG_V3;
- UCHAR aucPadding[3]; /* padding to DWORD aligned */
-} DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
+typedef struct _DIG_TRANSMITTER_CONTROL_PARAMETERS_V3
+{
+ union
+ {
+ USHORT usPixelClock; // in 10KHz; for bios convenient
+ USHORT usInitInfo; // when init uniphy,lower 8bit is used for connector type defined in objectid.h
+ ATOM_DP_VS_MODE asMode; // DP Voltage swing mode
+ };
+ ATOM_DIG_TRANSMITTER_CONFIG_V3 acConfig;
+ UCHAR ucAction; // define as ATOM_TRANSMITER_ACTION_XXX
+ UCHAR ucLaneNum;
+ UCHAR ucReserved[3];
+}DIG_TRANSMITTER_CONTROL_PARAMETERS_V3;
+
+//ucConfig
+//Bit0
+#define ATOM_TRANSMITTER_CONFIG_V3_DUAL_LINK_CONNECTOR 0x01
+
+//Bit1
+#define ATOM_TRANSMITTER_CONFIG_V3_COHERENT 0x02
+
+//Bit2
+#define ATOM_TRANSMITTER_CONFIG_V3_LINK_SEL_MASK 0x04
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKA 0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_LINKB 0x04
+
+// Bit3
+#define ATOM_TRANSMITTER_CONFIG_V3_ENCODER_SEL_MASK 0x08
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG1_ENCODER 0x00
+#define ATOM_TRANSMITTER_CONFIG_V3_DIG2_ENCODER 0x08
+
+// Bit5:4
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SEL_MASK 0x30
+#define ATOM_TRASMITTER_CONFIG_V3_P1PLL 0x00
+#define ATOM_TRASMITTER_CONFIG_V3_P2PLL 0x10
+#define ATOM_TRASMITTER_CONFIG_V3_REFCLK_SRC_EXT 0x20
+
+// Bit7:6
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER_SEL_MASK 0xC0
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER1 0x00 //AB
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER2 0x40 //CD
+#define ATOM_TRANSMITTER_CONFIG_V3_TRANSMITTER3 0x80 //EF
+
+/****************************************************************************/
+// Structures used by DAC1OuputControlTable
+// DAC2OuputControlTable
+// LVTMAOutputControlTable (Before DEC30)
+// TMDSAOutputControlTable (Before DEC30)
+/****************************************************************************/
+typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+{
+ UCHAR ucAction; // Possible input:ATOM_ENABLE||ATOMDISABLE
+ // When the display is LCD, in addition to above:
+ // ATOM_LCD_BLOFF|| ATOM_LCD_BLON ||ATOM_LCD_BL_BRIGHTNESS_CONTROL||ATOM_LCD_SELFTEST_START||
+ // ATOM_LCD_SELFTEST_STOP
+
+ UCHAR aucPadding[3]; // padding to DWORD aligned
+}DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS;
#define DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
-#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+
+#define CRT1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT1_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
-#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
+#define CRT2_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
#define CRT2_OUTPUT_CONTROL_PS_ALLOCATION DISPLAY_DEVICE_OUTPUT_CONTROL_PS_ALLOCATION
#define CV1_OUTPUT_CONTROL_PARAMETERS DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS
@@ -782,397 +980,550 @@ typedef struct _DISPLAY_DEVICE_OUTPUT_CONTROL_PARAMETERS {
#define DVO_OUTPUT_CONTROL_PS_ALLOCATION DIG_TRANSMITTER_CONTROL_PS_ALLOCATION
#define DVO_OUTPUT_CONTROL_PARAMETERS_V3 DIG_TRANSMITTER_CONTROL_PARAMETERS
-/****************************************************************************/
-/* Structures used by BlankCRTCTable */
-/****************************************************************************/
-typedef struct _BLANK_CRTC_PARAMETERS {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucBlanking; /* ATOM_BLANKING or ATOM_BLANKINGOFF */
- USHORT usBlackColorRCr;
- USHORT usBlackColorGY;
- USHORT usBlackColorBCb;
-} BLANK_CRTC_PARAMETERS;
+/****************************************************************************/
+// Structures used by BlankCRTCTable
+/****************************************************************************/
+typedef struct _BLANK_CRTC_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucBlanking; // ATOM_BLANKING or ATOM_BLANKINGOFF
+ USHORT usBlackColorRCr;
+ USHORT usBlackColorGY;
+ USHORT usBlackColorBCb;
+}BLANK_CRTC_PARAMETERS;
#define BLANK_CRTC_PS_ALLOCATION BLANK_CRTC_PARAMETERS
-/****************************************************************************/
-/* Structures used by EnableCRTCTable */
-/* EnableCRTCMemReqTable */
-/* UpdateCRTC_DoubleBufferRegistersTable */
-/****************************************************************************/
-typedef struct _ENABLE_CRTC_PARAMETERS {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[2];
-} ENABLE_CRTC_PARAMETERS;
+/****************************************************************************/
+// Structures used by EnableCRTCTable
+// EnableCRTCMemReqTable
+// UpdateCRTC_DoubleBufferRegistersTable
+/****************************************************************************/
+typedef struct _ENABLE_CRTC_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[2];
+}ENABLE_CRTC_PARAMETERS;
#define ENABLE_CRTC_PS_ALLOCATION ENABLE_CRTC_PARAMETERS
-/****************************************************************************/
-/* Structures used by SetCRTC_OverScanTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_OVERSCAN_PARAMETERS {
- USHORT usOverscanRight; /* right */
- USHORT usOverscanLeft; /* left */
- USHORT usOverscanBottom; /* bottom */
- USHORT usOverscanTop; /* top */
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucPadding[3];
-} SET_CRTC_OVERSCAN_PARAMETERS;
+/****************************************************************************/
+// Structures used by SetCRTC_OverScanTable
+/****************************************************************************/
+typedef struct _SET_CRTC_OVERSCAN_PARAMETERS
+{
+ USHORT usOverscanRight; // right
+ USHORT usOverscanLeft; // left
+ USHORT usOverscanBottom; // bottom
+ USHORT usOverscanTop; // top
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding[3];
+}SET_CRTC_OVERSCAN_PARAMETERS;
#define SET_CRTC_OVERSCAN_PS_ALLOCATION SET_CRTC_OVERSCAN_PARAMETERS
-/****************************************************************************/
-/* Structures used by SetCRTC_ReplicationTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_REPLICATION_PARAMETERS {
- UCHAR ucH_Replication; /* horizontal replication */
- UCHAR ucV_Replication; /* vertical replication */
- UCHAR usCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucPadding;
-} SET_CRTC_REPLICATION_PARAMETERS;
+/****************************************************************************/
+// Structures used by SetCRTC_ReplicationTable
+/****************************************************************************/
+typedef struct _SET_CRTC_REPLICATION_PARAMETERS
+{
+ UCHAR ucH_Replication; // horizontal replication
+ UCHAR ucV_Replication; // vertical replication
+ UCHAR usCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding;
+}SET_CRTC_REPLICATION_PARAMETERS;
#define SET_CRTC_REPLICATION_PS_ALLOCATION SET_CRTC_REPLICATION_PARAMETERS
-/****************************************************************************/
-/* Structures used by SelectCRTC_SourceTable */
-/****************************************************************************/
-typedef struct _SELECT_CRTC_SOURCE_PARAMETERS {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucDevice; /* ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|.... */
- UCHAR ucPadding[2];
-} SELECT_CRTC_SOURCE_PARAMETERS;
+/****************************************************************************/
+// Structures used by SelectCRTC_SourceTable
+/****************************************************************************/
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucDevice; // ATOM_DEVICE_CRT1|ATOM_DEVICE_CRT2|....
+ UCHAR ucPadding[2];
+}SELECT_CRTC_SOURCE_PARAMETERS;
#define SELECT_CRTC_SOURCE_PS_ALLOCATION SELECT_CRTC_SOURCE_PARAMETERS
-typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2 {
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucEncoderID; /* DAC1/DAC2/TVOUT/DIG1/DIG2/DVO */
- UCHAR ucEncodeMode; /* Encoding mode, only valid when using DIG1/DIG2/DVO */
- UCHAR ucPadding;
-} SELECT_CRTC_SOURCE_PARAMETERS_V2;
-
-/* ucEncoderID */
-/* #define ASIC_INT_DAC1_ENCODER_ID 0x00 */
-/* #define ASIC_INT_TV_ENCODER_ID 0x02 */
-/* #define ASIC_INT_DIG1_ENCODER_ID 0x03 */
-/* #define ASIC_INT_DAC2_ENCODER_ID 0x04 */
-/* #define ASIC_EXT_TV_ENCODER_ID 0x06 */
-/* #define ASIC_INT_DVO_ENCODER_ID 0x07 */
-/* #define ASIC_INT_DIG2_ENCODER_ID 0x09 */
-/* #define ASIC_EXT_DIG_ENCODER_ID 0x05 */
-
-/* ucEncodeMode */
-/* #define ATOM_ENCODER_MODE_DP 0 */
-/* #define ATOM_ENCODER_MODE_LVDS 1 */
-/* #define ATOM_ENCODER_MODE_DVI 2 */
-/* #define ATOM_ENCODER_MODE_HDMI 3 */
-/* #define ATOM_ENCODER_MODE_SDVO 4 */
-/* #define ATOM_ENCODER_MODE_TV 13 */
-/* #define ATOM_ENCODER_MODE_CV 14 */
-/* #define ATOM_ENCODER_MODE_CRT 15 */
-
-/****************************************************************************/
-/* Structures used by SetPixelClockTable */
-/* GetPixelClockTable */
-/****************************************************************************/
-/* Major revision=1., Minor revision=1 */
-typedef struct _PIXEL_CLOCK_PARAMETERS {
- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
- /* 0 means disable PPLL */
- USHORT usRefDiv; /* Reference divider */
- USHORT usFbDiv; /* feedback divider */
- UCHAR ucPostDiv; /* post divider */
- UCHAR ucFracFbDiv; /* fractional feedback divider */
- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */
- UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */
- UCHAR ucCRTC; /* Which CRTC uses this Ppll */
- UCHAR ucPadding;
-} PIXEL_CLOCK_PARAMETERS;
-
-/* Major revision=1., Minor revision=2, add ucMiscIfno */
-/* ucMiscInfo: */
+typedef struct _SELECT_CRTC_SOURCE_PARAMETERS_V2
+{
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucEncoderID; // DAC1/DAC2/TVOUT/DIG1/DIG2/DVO
+ UCHAR ucEncodeMode; // Encoding mode, only valid when using DIG1/DIG2/DVO
+ UCHAR ucPadding;
+}SELECT_CRTC_SOURCE_PARAMETERS_V2;
+
+//ucEncoderID
+//#define ASIC_INT_DAC1_ENCODER_ID 0x00
+//#define ASIC_INT_TV_ENCODER_ID 0x02
+//#define ASIC_INT_DIG1_ENCODER_ID 0x03
+//#define ASIC_INT_DAC2_ENCODER_ID 0x04
+//#define ASIC_EXT_TV_ENCODER_ID 0x06
+//#define ASIC_INT_DVO_ENCODER_ID 0x07
+//#define ASIC_INT_DIG2_ENCODER_ID 0x09
+//#define ASIC_EXT_DIG_ENCODER_ID 0x05
+
+//ucEncodeMode
+//#define ATOM_ENCODER_MODE_DP 0
+//#define ATOM_ENCODER_MODE_LVDS 1
+//#define ATOM_ENCODER_MODE_DVI 2
+//#define ATOM_ENCODER_MODE_HDMI 3
+//#define ATOM_ENCODER_MODE_SDVO 4
+//#define ATOM_ENCODER_MODE_TV 13
+//#define ATOM_ENCODER_MODE_CV 14
+//#define ATOM_ENCODER_MODE_CRT 15
+
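For illustration only, a small sketch of a SELECT_CRTC_SOURCE_PARAMETERS_V2 request that routes CRTC1 to the internal DIG1 encoder in HDMI mode; the numeric values are taken from the commented-out defines above (the defines themselves are not emitted here), and ATOM_CRTC1 = 0 is an assumption.

static void example_select_crtc_source_v2(SELECT_CRTC_SOURCE_PARAMETERS_V2 *args)
{
	args->ucCRTC = 0;          /* assumed ATOM_CRTC1 */
	args->ucEncoderID = 0x03;  /* ASIC_INT_DIG1_ENCODER_ID per the comment above */
	args->ucEncodeMode = 3;    /* ATOM_ENCODER_MODE_HDMI per the comment above */
	args->ucPadding = 0;
}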
+/****************************************************************************/
+// Structures used by SetPixelClockTable
+// GetPixelClockTable
+/****************************************************************************/
+//Major revision=1, Minor revision=1
+typedef struct _PIXEL_CLOCK_PARAMETERS
+{
+ USHORT usPixelClock; // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATOM_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this Ppll
+ UCHAR ucPadding;
+}PIXEL_CLOCK_PARAMETERS;
+
+//Major revision=1, Minor revision=2, add ucMiscInfo
+//ucMiscInfo:
#define MISC_FORCE_REPROG_PIXEL_CLOCK 0x1
#define MISC_DEVICE_INDEX_MASK 0xF0
#define MISC_DEVICE_INDEX_SHIFT 4
-typedef struct _PIXEL_CLOCK_PARAMETERS_V2 {
- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
- /* 0 means disable PPLL */
- USHORT usRefDiv; /* Reference divider */
- USHORT usFbDiv; /* feedback divider */
- UCHAR ucPostDiv; /* post divider */
- UCHAR ucFracFbDiv; /* fractional feedback divider */
- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */
- UCHAR ucRefDivSrc; /* ATOM_PJITTER or ATO_NONPJITTER */
- UCHAR ucCRTC; /* Which CRTC uses this Ppll */
- UCHAR ucMiscInfo; /* Different bits for different purpose, bit [7:4] as device index, bit[0]=Force prog */
-} PIXEL_CLOCK_PARAMETERS_V2;
-
-/* Major revision=1., Minor revision=3, structure/definition change */
-/* ucEncoderMode: */
-/* ATOM_ENCODER_MODE_DP */
-/* ATOM_ENOCDER_MODE_LVDS */
-/* ATOM_ENOCDER_MODE_DVI */
-/* ATOM_ENOCDER_MODE_HDMI */
-/* ATOM_ENOCDER_MODE_SDVO */
-/* ATOM_ENCODER_MODE_TV 13 */
-/* ATOM_ENCODER_MODE_CV 14 */
-/* ATOM_ENCODER_MODE_CRT 15 */
-
-/* ucDVOConfig */
-/* #define DVO_ENCODER_CONFIG_RATE_SEL 0x01 */
-/* #define DVO_ENCODER_CONFIG_DDR_SPEED 0x00 */
-/* #define DVO_ENCODER_CONFIG_SDR_SPEED 0x01 */
-/* #define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c */
-/* #define DVO_ENCODER_CONFIG_LOW12BIT 0x00 */
-/* #define DVO_ENCODER_CONFIG_UPPER12BIT 0x04 */
-/* #define DVO_ENCODER_CONFIG_24BIT 0x08 */
-
-/* ucMiscInfo: also changed, see below */
+typedef struct _PIXEL_CLOCK_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucRefDivSrc; // ATOM_PJITTER or ATOM_NONPJITTER
+ UCHAR ucCRTC; // Which CRTC uses this Ppll
+ UCHAR ucMiscInfo; // Different bits for different purposes, bit [7:4] as device index, bit[0]=Force prog
+}PIXEL_CLOCK_PARAMETERS_V2;
+
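A minimal sketch of how the MISC_* masks above combine into the ucMiscInfo byte of PIXEL_CLOCK_PARAMETERS_V2; the device index passed in is an arbitrary example value.

static unsigned char example_pclk_v2_misc(unsigned char device_index)
{
	unsigned char misc = 0;

	misc |= MISC_FORCE_REPROG_PIXEL_CLOCK;                /* bit0: force reprogram */
	misc |= (device_index << MISC_DEVICE_INDEX_SHIFT) &
	        MISC_DEVICE_INDEX_MASK;                       /* bits 7:4: device index */
	return misc;
}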
+//Major revision=1., Minor revision=3, structure/definition change
+//ucEncoderMode:
+//ATOM_ENCODER_MODE_DP
+//ATOM_ENCODER_MODE_LVDS
+//ATOM_ENCODER_MODE_DVI
+//ATOM_ENCODER_MODE_HDMI
+//ATOM_ENCODER_MODE_SDVO
+//ATOM_ENCODER_MODE_TV 13
+//ATOM_ENCODER_MODE_CV 14
+//ATOM_ENCODER_MODE_CRT 15
+
+//ucDVOConfig
+//#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
+//#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
+//#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
+//#define DVO_ENCODER_CONFIG_OUTPUT_SEL 0x0c
+//#define DVO_ENCODER_CONFIG_LOW12BIT 0x00
+//#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
+//#define DVO_ENCODER_CONFIG_24BIT 0x08
+
+//ucMiscInfo: also changed, see below
#define PIXEL_CLOCK_MISC_FORCE_PROG_PPLL 0x01
#define PIXEL_CLOCK_MISC_VGA_MODE 0x02
#define PIXEL_CLOCK_MISC_CRTC_SEL_MASK 0x04
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC1 0x00
#define PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2 0x04
#define PIXEL_CLOCK_MISC_USE_ENGINE_FOR_DISPCLK 0x08
+#define PIXEL_CLOCK_MISC_REF_DIV_SRC 0x10
+// V1.4 for RoadRunner
+#define PIXEL_CLOCK_V4_MISC_SS_ENABLE 0x10
+#define PIXEL_CLOCK_V4_MISC_COHERENT_MODE 0x20
-typedef struct _PIXEL_CLOCK_PARAMETERS_V3 {
- USHORT usPixelClock; /* in 10kHz unit; for bios convenient = (RefClk*FB_Div)/(Ref_Div*Post_Div) */
- /* 0 means disable PPLL. For VGA PPLL,make sure this value is not 0. */
- USHORT usRefDiv; /* Reference divider */
- USHORT usFbDiv; /* feedback divider */
- UCHAR ucPostDiv; /* post divider */
- UCHAR ucFracFbDiv; /* fractional feedback divider */
- UCHAR ucPpll; /* ATOM_PPLL1 or ATOM_PPL2 */
- UCHAR ucTransmitterId; /* graphic encoder id defined in objectId.h */
- union {
- UCHAR ucEncoderMode; /* encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/ */
- UCHAR ucDVOConfig; /* when use DVO, need to know SDR/DDR, 12bit or 24bit */
+typedef struct _PIXEL_CLOCK_PARAMETERS_V3
+{
+ USHORT usPixelClock; // in 10kHz unit; for BIOS convenience = (RefClk*FB_Div)/(Ref_Div*Post_Div)
+ // 0 means disable PPLL. For VGA PPLL, make sure this value is not 0.
+ USHORT usRefDiv; // Reference divider
+ USHORT usFbDiv; // feedback divider
+ UCHAR ucPostDiv; // post divider
+ UCHAR ucFracFbDiv; // fractional feedback divider
+ UCHAR ucPpll; // ATOM_PPLL1 or ATOM_PPLL2
+ UCHAR ucTransmitterId; // graphic encoder id defined in objectId.h
+ union
+ {
+ UCHAR ucEncoderMode; // encoder type defined as ATOM_ENCODER_MODE_DP/DVI/HDMI/...
+ UCHAR ucDVOConfig; // when using DVO, need to know SDR/DDR, 12-bit or 24-bit
};
- UCHAR ucMiscInfo; /* bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel */
- /* bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source */
-} PIXEL_CLOCK_PARAMETERS_V3;
+ UCHAR ucMiscInfo; // bit[0]=Force program, bit[1]= set pclk for VGA, b[2]= CRTC sel
+ // bit[3]=0:use PPLL for dispclk source, =1: use engine clock for dispclock source
+ // bit[4]=0: use XTALIN as the source of the reference divider, =1: use the pre-defined clock as the source of the reference divider
+}PIXEL_CLOCK_PARAMETERS_V3;
#define PIXEL_CLOCK_PARAMETERS_LAST PIXEL_CLOCK_PARAMETERS_V2
#define GET_PIXEL_CLOCK_PS_ALLOCATION PIXEL_CLOCK_PARAMETERS_LAST
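The following sketch (not part of the patch) shows one way PIXEL_CLOCK_PARAMETERS_V3 and the PIXEL_CLOCK_MISC_* flags above could be filled for CRTC2; the divider numbers are placeholders a caller would normally derive from AdjustDisplayPll, and the ATOM_PPLL2 and HDMI mode values are assumptions.

static void example_fill_pclk_v3(PIXEL_CLOCK_PARAMETERS_V3 *args)
{
	args->usPixelClock = 14850;    /* 148.5 MHz, in 10 kHz units */
	args->usRefDiv = 2;            /* placeholder reference divider */
	args->usFbDiv = 110;           /* placeholder feedback divider */
	args->ucPostDiv = 4;           /* placeholder post divider */
	args->ucFracFbDiv = 0;
	args->ucPpll = 1;              /* assumed value of ATOM_PPLL2 */
	args->ucTransmitterId = 0;     /* encoder object id from objectId.h */
	args->ucEncoderMode = 3;       /* assumed ATOM_ENCODER_MODE_HDMI */
	args->ucMiscInfo = PIXEL_CLOCK_MISC_FORCE_PROG_PPLL |
	                   PIXEL_CLOCK_MISC_CRTC_SEL_CRTC2;
}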
-/****************************************************************************/
-/* Structures used by AdjustDisplayPllTable */
-/****************************************************************************/
-typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS {
+typedef struct _PIXEL_CLOCK_PARAMETERS_V5
+{
+ UCHAR ucCRTC; // ATOM_CRTC1~6, indicates the CRTC controller to
+ // drive the pixel clock; not used for the DCPLL case.
+ union{
+ UCHAR ucReserved;
+ UCHAR ucFracFbDiv; // [gphan] temporary to prevent build problem. remove it after driver code is changed.
+ };
+ USHORT usPixelClock; // target the pixel clock to drive the CRTC timing
+ // 0 means disable PPLL/DCPLL.
+ USHORT usFbDiv; // feedback divider integer part.
+ UCHAR ucPostDiv; // post divider.
+ UCHAR ucRefDiv; // Reference divider
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2/ATOM_DCPLL
+ UCHAR ucTransmitterID; // ASIC encoder id defined in objectId.h,
+ // indicates which graphics encoder will be used.
+ UCHAR ucEncoderMode; // Encoder mode:
+ UCHAR ucMiscInfo; // bit[0]= Force program PPLL
+ // bit[1]= when VGA timing is used.
+ // bit[3:2]= HDMI panel bit depth: =0: 24bpp =1:30bpp, =2:32bpp
+ // bit[4]= RefClock source for PPLL.
+ // =0: XTALIN ( default mode )
+ // =1: other external clock source, which is pre-defined
+ // by VBIOS depend on the feature required.
+ // bit[7:5]: reserved.
+ ULONG ulFbDivDecFrac; // 20 bit feedback divider decimal fraction part, range from 1~999999 ( 0.000001 to 0.999999 )
+
+}PIXEL_CLOCK_PARAMETERS_V5;
+
+#define PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL 0x01
+#define PIXEL_CLOCK_V5_MISC_VGA_MODE 0x02
+#define PIXEL_CLOCK_V5_MISC_HDMI_BPP_MASK 0x0c
+#define PIXEL_CLOCK_V5_MISC_HDMI_24BPP 0x00
+#define PIXEL_CLOCK_V5_MISC_HDMI_30BPP 0x04
+#define PIXEL_CLOCK_V5_MISC_HDMI_32BPP 0x08
+#define PIXEL_CLOCK_V5_MISC_REF_DIV_SRC 0x10
+
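A short sketch of composing the V5 ucMiscInfo byte from the PIXEL_CLOCK_V5_MISC_* flags above, e.g. force-programming the PLL for a 30bpp HDMI panel.

static unsigned char example_pclk_v5_misc(void)
{
	return PIXEL_CLOCK_V5_MISC_FORCE_PROG_PPLL |
	       PIXEL_CLOCK_V5_MISC_HDMI_30BPP;   /* bits 3:2 = 1 -> 30bpp */
}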
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2
+{
+ PIXEL_CLOCK_PARAMETERS_V3 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2
+{
+ UCHAR ucStatus;
+ UCHAR ucRefDivSrc; // =1: reference clock source from XTALIN, =0: source from PCIE ref clock
+ UCHAR ucReserved[2];
+}GET_DISP_PLL_STATUS_OUTPUT_PARAMETERS_V2;
+
+typedef struct _GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3
+{
+ PIXEL_CLOCK_PARAMETERS_V5 sDispClkInput;
+}GET_DISP_PLL_STATUS_INPUT_PARAMETERS_V3;
+
+/****************************************************************************/
+// Structures used by AdjustDisplayPllTable
+/****************************************************************************/
+typedef struct _ADJUST_DISPLAY_PLL_PARAMETERS
+{
USHORT usPixelClock;
UCHAR ucTransmitterID;
UCHAR ucEncodeMode;
- union {
- UCHAR ucDVOConfig; /* if DVO, need passing link rate and output 12bitlow or 24bit */
- UCHAR ucConfig; /* if none DVO, not defined yet */
+ union
+ {
+ UCHAR ucDVOConfig; //if DVO, need to pass the link rate and output 12-bit low or 24-bit
+ UCHAR ucConfig; //if not DVO, not defined yet
};
UCHAR ucReserved[3];
-} ADJUST_DISPLAY_PLL_PARAMETERS;
+}ADJUST_DISPLAY_PLL_PARAMETERS;
#define ADJUST_DISPLAY_CONFIG_SS_ENABLE 0x10
-
#define ADJUST_DISPLAY_PLL_PS_ALLOCATION ADJUST_DISPLAY_PLL_PARAMETERS
-/****************************************************************************/
-/* Structures used by EnableYUVTable */
-/****************************************************************************/
-typedef struct _ENABLE_YUV_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB) */
- UCHAR ucCRTC; /* Which CRTC needs this YUV or RGB format */
- UCHAR ucPadding[2];
-} ENABLE_YUV_PARAMETERS;
+typedef struct _ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3
+{
+ USHORT usPixelClock; // target pixel clock
+ UCHAR ucTransmitterID; // transmitter id defined in objectid.h
+ UCHAR ucEncodeMode; // encoder mode: CRT, LVDS, DP, TMDS or HDMI
+ UCHAR ucDispPllConfig; // display PLL configuration parameter, defined as DISPPLL_CONFIG_XXXX below
+ UCHAR ucReserved[3];
+}ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3;
+
+// ucDispPllConfig v1.2 for RoadRunner
+#define DISPPLL_CONFIG_DVO_RATE_SEL 0x0001 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_DDR_SPEED 0x0000 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_SDR_SPEED 0x0001 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_OUTPUT_SEL 0x000c // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_LOW12BIT 0x0000 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_UPPER12BIT 0x0004 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_DVO_24BIT 0x0008 // need only when ucTransmitterID = DVO
+#define DISPPLL_CONFIG_SS_ENABLE 0x0010 // Only used when ucEncoderMode = DP or LVDS
+#define DISPPLL_CONFIG_COHERENT_MODE 0x0020 // Only used when ucEncoderMode = TMDS or HDMI
+#define DISPPLL_CONFIG_DUAL_LINK 0x0040 // Only used when ucEncoderMode = TMDS or LVDS
+
+
+typedef struct _ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3
+{
+ ULONG ulDispPllFreq; // return display PPLL freq which is used to generate the pixel clock, and related idclk, symclk etc
+ UCHAR ucRefDiv; // if non-zero, it is used to calculate the other PPLL parameters fb_divider and post_div ( if they are not given )
+ UCHAR ucPostDiv; // if non-zero, it is used to calculate the other PPLL parameter fb_divider
+ UCHAR ucReserved[2];
+}ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3;
+
+typedef struct _ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3
+{
+ union
+ {
+ ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 sInput;
+ ADJUST_DISPLAY_PLL_OUTPUT_PARAMETERS_V3 sOutput;
+ };
+} ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3;
+
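For reference, a sketch of an AdjustDisplayPll V3 input for a coherent dual-link TMDS panel using the DISPPLL_CONFIG_* flags above; the ATOM_ENCODER_MODE_DVI value of 2 and the UNIPHY encoder object id 0x1E are assumptions defined outside the lines shown here.

static void example_fill_adjust_pll_v3(ADJUST_DISPLAY_PLL_INPUT_PARAMETERS_V3 *args)
{
	args->usPixelClock = 26970;          /* 269.7 MHz, in 10 kHz units */
	args->ucTransmitterID = 0x1E;        /* assumed internal UNIPHY object id */
	args->ucEncodeMode = 2;              /* assumed ATOM_ENCODER_MODE_DVI */
	args->ucDispPllConfig = DISPPLL_CONFIG_COHERENT_MODE |
	                        DISPPLL_CONFIG_DUAL_LINK;
	args->ucReserved[0] = args->ucReserved[1] = args->ucReserved[2] = 0;
}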
+/****************************************************************************/
+// Structures used by EnableYUVTable
+/****************************************************************************/
+typedef struct _ENABLE_YUV_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE:Enable YUV or ATOM_DISABLE:Disable YUV (RGB)
+ UCHAR ucCRTC; // Which CRTC needs this YUV or RGB format
+ UCHAR ucPadding[2];
+}ENABLE_YUV_PARAMETERS;
#define ENABLE_YUV_PS_ALLOCATION ENABLE_YUV_PARAMETERS
-/****************************************************************************/
-/* Structures used by GetMemoryClockTable */
-/****************************************************************************/
-typedef struct _GET_MEMORY_CLOCK_PARAMETERS {
- ULONG ulReturnMemoryClock; /* current memory speed in 10KHz unit */
+/****************************************************************************/
+// Structures used by GetMemoryClockTable
+/****************************************************************************/
+typedef struct _GET_MEMORY_CLOCK_PARAMETERS
+{
+ ULONG ulReturnMemoryClock; // current memory speed in 10KHz unit
} GET_MEMORY_CLOCK_PARAMETERS;
#define GET_MEMORY_CLOCK_PS_ALLOCATION GET_MEMORY_CLOCK_PARAMETERS
-/****************************************************************************/
-/* Structures used by GetEngineClockTable */
-/****************************************************************************/
-typedef struct _GET_ENGINE_CLOCK_PARAMETERS {
- ULONG ulReturnEngineClock; /* current engine speed in 10KHz unit */
+/****************************************************************************/
+// Structures used by GetEngineClockTable
+/****************************************************************************/
+typedef struct _GET_ENGINE_CLOCK_PARAMETERS
+{
+ ULONG ulReturnEngineClock; // current engine speed in 10KHz unit
} GET_ENGINE_CLOCK_PARAMETERS;
#define GET_ENGINE_CLOCK_PS_ALLOCATION GET_ENGINE_CLOCK_PARAMETERS
-/****************************************************************************/
-/* Following Structures and constant may be obsolete */
-/****************************************************************************/
-/* Maxium 8 bytes,the data read in will be placed in the parameter space. */
-/* Read operaion successeful when the paramter space is non-zero, otherwise read operation failed */
-typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS {
- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
- USHORT usVRAMAddress; /* Adress in Frame Buffer where to pace raw EDID */
- USHORT usStatus; /* When use output: lower byte EDID checksum, high byte hardware status */
- /* WHen use input: lower byte as 'byte to read':currently limited to 128byte or 1byte */
- UCHAR ucSlaveAddr; /* Read from which slave */
- UCHAR ucLineNumber; /* Read from which HW assisted line */
-} READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
+/****************************************************************************/
+// Following Structures and constant may be obsolete
+/****************************************************************************/
+//Maximum 8 bytes, the data read in will be placed in the parameter space.
+//Read operation is successful when the parameter space is non-zero, otherwise the read operation failed
+typedef struct _READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ USHORT usVRAMAddress; //Address in Frame Buffer where to place raw EDID
+ USHORT usStatus; //When used as output: lower byte EDID checksum, high byte hardware status
+ //When used as input: lower byte as 'byte to read': currently limited to 128 bytes or 1 byte
+ UCHAR ucSlaveAddr; //Read from which slave
+ UCHAR ucLineNumber; //Read from which HW assisted line
+}READ_EDID_FROM_HW_I2C_DATA_PARAMETERS;
#define READ_EDID_FROM_HW_I2C_DATA_PS_ALLOCATION READ_EDID_FROM_HW_I2C_DATA_PARAMETERS
+
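A hypothetical sketch of an EDID read request through this table; the 0xA0 DDC slave address and the prescale value are assumptions, not values specified by the header.

static void example_fill_edid_read(READ_EDID_FROM_HW_I2C_DATA_PARAMETERS *args)
{
	args->usPrescale = 0x7F;     /* engine clock / I2C clock ratio (assumed) */
	args->usVRAMAddress = 0;     /* frame-buffer offset that receives the raw EDID */
	args->usStatus = 128;        /* input: lower byte = number of bytes to read */
	args->ucSlaveAddr = 0xA0;    /* conventional DDC EDID slave address (assumed) */
	args->ucLineNumber = 0;      /* which HW-assisted I2C line to use */
}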
#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSDATABYTE 0
#define ATOM_WRITE_I2C_FORMAT_PSOFFSET_PSTWODATABYTES 1
#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_PSOFFSET_IDDATABLOCK 2
#define ATOM_WRITE_I2C_FORMAT_PSCOUNTER_IDOFFSET_PLUS_IDDATABLOCK 3
#define ATOM_WRITE_I2C_FORMAT_IDCOUNTER_IDOFFSET_IDDATABLOCK 4
-typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS {
- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
- USHORT usByteOffset; /* Write to which byte */
- /* Upper portion of usByteOffset is Format of data */
- /* 1bytePS+offsetPS */
- /* 2bytesPS+offsetPS */
- /* blockID+offsetPS */
- /* blockID+offsetID */
- /* blockID+counterID+offsetID */
- UCHAR ucData; /* PS data1 */
- UCHAR ucStatus; /* Status byte 1=success, 2=failure, Also is used as PS data2 */
- UCHAR ucSlaveAddr; /* Write to which slave */
- UCHAR ucLineNumber; /* Write from which HW assisted line */
-} WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
+typedef struct _WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ USHORT usByteOffset; //Write to which byte
+ //Upper portion of usByteOffset is Format of data
+ //1bytePS+offsetPS
+ //2bytesPS+offsetPS
+ //blockID+offsetPS
+ //blockID+offsetID
+ //blockID+counterID+offsetID
+ UCHAR ucData; //PS data1
+ UCHAR ucStatus; //Status byte: 1=success, 2=failure; also used as PS data2
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW assisted line
+}WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS;
#define WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS {
- USHORT usPrescale; /* Ratio between Engine clock and I2C clock */
- UCHAR ucSlaveAddr; /* Write to which slave */
- UCHAR ucLineNumber; /* Write from which HW assisted line */
-} SET_UP_HW_I2C_DATA_PARAMETERS;
+typedef struct _SET_UP_HW_I2C_DATA_PARAMETERS
+{
+ USHORT usPrescale; //Ratio between Engine clock and I2C clock
+ UCHAR ucSlaveAddr; //Write to which slave
+ UCHAR ucLineNumber; //Write from which HW assisted line
+}SET_UP_HW_I2C_DATA_PARAMETERS;
+
/**************************************************************************/
#define SPEED_FAN_CONTROL_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-/****************************************************************************/
-/* Structures used by PowerConnectorDetectionTable */
-/****************************************************************************/
-typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS {
- UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
- UCHAR ucPwrBehaviorId;
- USHORT usPwrBudget; /* how much power currently boot to in unit of watt */
-} POWER_CONNECTOR_DETECTION_PARAMETERS;
-
-typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION {
- UCHAR ucPowerConnectorStatus; /* Used for return value 0: detected, 1:not detected */
- UCHAR ucReserved;
- USHORT usPwrBudget; /* how much power currently boot to in unit of watt */
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-} POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
+/****************************************************************************/
+// Structures used by PowerConnectorDetectionTable
+/****************************************************************************/
+typedef struct _POWER_CONNECTOR_DETECTION_PARAMETERS
+{
+ UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected
+ UCHAR ucPwrBehaviorId;
+ USHORT usPwrBudget; //how much power the board currently boots to, in units of watts
+}POWER_CONNECTOR_DETECTION_PARAMETERS;
+
+typedef struct POWER_CONNECTOR_DETECTION_PS_ALLOCATION
+{
+ UCHAR ucPowerConnectorStatus; //Used for return value 0: detected, 1:not detected
+ UCHAR ucReserved;
+ USHORT usPwrBudget; //how much power the board currently boots to, in units of watts
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}POWER_CONNECTOR_DETECTION_PS_ALLOCATION;
/****************************LVDS SS Command Table Definitions**********************/
-/****************************************************************************/
-/* Structures used by EnableSpreadSpectrumOnPPLLTable */
-/****************************************************************************/
-typedef struct _ENABLE_LVDS_SS_PARAMETERS {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSpreadSpectrumStepSize_Delay; /* bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[3];
-} ENABLE_LVDS_SS_PARAMETERS;
-
-/* ucTableFormatRevision=1,ucTableContentRevision=2 */
-typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2 {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSpreadSpectrumStep; /* */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucSpreadSpectrumDelay;
- UCHAR ucSpreadSpectrumRange;
- UCHAR ucPadding;
-} ENABLE_LVDS_SS_PARAMETERS_V2;
-
-/* This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS. */
-typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSpreadSpectrumStep; /* */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucSpreadSpectrumDelay;
- UCHAR ucSpreadSpectrumRange;
- UCHAR ucPpll; /* ATOM_PPLL1/ATOM_PPLL2 */
-} ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+/****************************************************************************/
+// Structures used by EnableSpreadSpectrumOnPPLLTable
+/****************************************************************************/
+typedef struct _ENABLE_LVDS_SS_PARAMETERS
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+ UCHAR ucSpreadSpectrumStepSize_Delay; //bits3:2 SS_STEP_SIZE; bit 6:4 SS_DELAY
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[3];
+}ENABLE_LVDS_SS_PARAMETERS;
+
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _ENABLE_LVDS_SS_PARAMETERS_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; //Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; //ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPadding;
+}ENABLE_LVDS_SS_PARAMETERS_V2;
+
+//This new structure is based on ENABLE_LVDS_SS_PARAMETERS but expands to SS on PPLL, so other devices can use SS.
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; // Bit0=0 Down Spread, =1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD
+ UCHAR ucSpreadSpectrumStep; //
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucSpreadSpectrumDelay;
+ UCHAR ucSpreadSpectrumRange;
+ UCHAR ucPpll; // ATOM_PPLL1/ATOM_PPLL2
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL;
+
+typedef struct _ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2
+{
+ USHORT usSpreadSpectrumPercentage;
+ UCHAR ucSpreadSpectrumType; // Bit[0]: 0-Down Spread,1-Center Spread.
+ // Bit[1]: 1-Ext. 0-Int.
+ // Bit[3:2]: =0 P1PLL =1 P2PLL =2 DCPLL
+ // Bits[7:4] reserved
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usSpreadSpectrumAmount; // Includes SS_AMOUNT_FBDIV[7:0] and SS_AMOUNT_NFRAC_SLIP[11:8]
+ USHORT usSpreadSpectrumStep; // SS_STEP_SIZE_DSFRAC
+}ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2;
+
+#define ATOM_PPLL_SS_TYPE_V2_DOWN_SPREAD 0x00
+#define ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD 0x01
+#define ATOM_PPLL_SS_TYPE_V2_EXT_SPREAD 0x02
+#define ATOM_PPLL_SS_TYPE_V2_PPLL_SEL_MASK 0x0c
+#define ATOM_PPLL_SS_TYPE_V2_P1PLL 0x00
+#define ATOM_PPLL_SS_TYPE_V2_P2PLL 0x04
+#define ATOM_PPLL_SS_TYPE_V2_DCPLL 0x08
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK 0x00FF
+#define ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT 0
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK 0x0F00
+#define ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT 8
#define ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION ENABLE_SPREAD_SPECTRUM_ON_PPLL
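A minimal sketch of packing ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 with the V2 type and amount masks above; the percentage, step, and amount values are illustrative, and ATOM_ENABLE is assumed to be 1.

static void example_fill_ss_v2(ENABLE_SPREAD_SPECTRUM_ON_PPLL_V2 *args,
                               unsigned short fbdiv_amount,
                               unsigned short nfrac_amount)
{
	args->usSpreadSpectrumPercentage = 25;   /* example percentage value */
	args->ucSpreadSpectrumType = ATOM_PPLL_SS_TYPE_V2_CENTRE_SPREAD |
	                             ATOM_PPLL_SS_TYPE_V2_P1PLL;
	args->ucEnable = 1;                      /* assumed ATOM_ENABLE */
	args->usSpreadSpectrumAmount =
		((fbdiv_amount << ATOM_PPLL_SS_AMOUNT_V2_FBDIV_SHIFT) &
		  ATOM_PPLL_SS_AMOUNT_V2_FBDIV_MASK) |
		((nfrac_amount << ATOM_PPLL_SS_AMOUNT_V2_NFRAC_SHIFT) &
		  ATOM_PPLL_SS_AMOUNT_V2_NFRAC_MASK);
	args->usSpreadSpectrumStep = 0x20;       /* SS_STEP_SIZE_DSFRAC (example) */
}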
/**************************************************************************/
-typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION {
- PIXEL_CLOCK_PARAMETERS sPCLKInput;
- ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved; /* Caller doesn't need to init this portion */
-} SET_PIXEL_CLOCK_PS_ALLOCATION;
+typedef struct _SET_PIXEL_CLOCK_PS_ALLOCATION
+{
+ PIXEL_CLOCK_PARAMETERS sPCLKInput;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL sReserved;//Caller doesn't need to init this portion
+}SET_PIXEL_CLOCK_PS_ALLOCATION;
#define ENABLE_VGA_RENDER_PS_ALLOCATION SET_PIXEL_CLOCK_PS_ALLOCATION
-/****************************************************************************/
-/* Structures used by ### */
-/****************************************************************************/
-typedef struct _MEMORY_TRAINING_PARAMETERS {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
-} MEMORY_TRAINING_PARAMETERS;
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _MEMORY_TRAINING_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+}MEMORY_TRAINING_PARAMETERS;
#define MEMORY_TRAINING_PS_ALLOCATION MEMORY_TRAINING_PARAMETERS
+
/****************************LVDS and other encoder command table definitions **********************/
-/****************************************************************************/
-/* Structures used by LVDSEncoderControlTable (Before DCE30) */
-/* LVTMAEncoderControlTable (Before DCE30) */
-/* TMDSAEncoderControlTable (Before DCE30) */
-/****************************************************************************/
-typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucMisc; /* bit0=0: Enable single link */
- /* =1: Enable dual link */
- /* Bit1=0: 666RGB */
- /* =1: 888RGB */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
-} LVDS_ENCODER_CONTROL_PARAMETERS;
-#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
+/****************************************************************************/
+// Structures used by LVDSEncoderControlTable (Before DCE30)
+// LVTMAEncoderControlTable (Before DCE30)
+// TMDSAEncoderControlTable (Before DCE30)
+/****************************************************************************/
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucMisc; // bit0=0: Enable single link
+ // =1: Enable dual link
+ // Bit1=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}LVDS_ENCODER_CONTROL_PARAMETERS;
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION LVDS_ENCODER_CONTROL_PARAMETERS
+
#define TMDS1_ENCODER_CONTROL_PARAMETERS LVDS_ENCODER_CONTROL_PARAMETERS
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION TMDS1_ENCODER_CONTROL_PARAMETERS
#define TMDS2_ENCODER_CONTROL_PARAMETERS TMDS1_ENCODER_CONTROL_PARAMETERS
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION TMDS2_ENCODER_CONTROL_PARAMETERS
-/* ucTableFormatRevision=1,ucTableContentRevision=2 */
-typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucMisc; /* see PANEL_ENCODER_MISC_xx definitions below */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
- UCHAR ucTruncate; /* bit0=0: Disable truncate */
- /* =1: Enable truncate */
- /* bit4=0: 666RGB */
- /* =1: 888RGB */
- UCHAR ucSpatial; /* bit0=0: Disable spatial dithering */
- /* =1: Enable spatial dithering */
- /* bit4=0: 666RGB */
- /* =1: 888RGB */
- UCHAR ucTemporal; /* bit0=0: Disable temporal dithering */
- /* =1: Enable temporal dithering */
- /* bit4=0: 666RGB */
- /* =1: 888RGB */
- /* bit5=0: Gray level 2 */
- /* =1: Gray level 4 */
- UCHAR ucFRC; /* bit4=0: 25FRC_SEL pattern E */
- /* =1: 25FRC_SEL pattern F */
- /* bit6:5=0: 50FRC_SEL pattern A */
- /* =1: 50FRC_SEL pattern B */
- /* =2: 50FRC_SEL pattern C */
- /* =3: 50FRC_SEL pattern D */
- /* bit7=0: 75FRC_SEL pattern E */
- /* =1: 75FRC_SEL pattern F */
-} LVDS_ENCODER_CONTROL_PARAMETERS_V2;
-#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+//ucTableFormatRevision=1,ucTableContentRevision=2
+typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucMisc; // see PANEL_ENCODER_MISC_xx definitions below
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+ UCHAR ucTruncate; // bit0=0: Disable truncate
+ // =1: Enable truncate
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucSpatial; // bit0=0: Disable spatial dithering
+ // =1: Enable spatial dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ UCHAR ucTemporal; // bit0=0: Disable temporal dithering
+ // =1: Enable temporal dithering
+ // bit4=0: 666RGB
+ // =1: 888RGB
+ // bit5=0: Gray level 2
+ // =1: Gray level 4
+ UCHAR ucFRC; // bit4=0: 25FRC_SEL pattern E
+ // =1: 25FRC_SEL pattern F
+ // bit6:5=0: 50FRC_SEL pattern A
+ // =1: 50FRC_SEL pattern B
+ // =2: 50FRC_SEL pattern C
+ // =3: 50FRC_SEL pattern D
+ // bit7=0: 75FRC_SEL pattern E
+ // =1: 75FRC_SEL pattern F
+}LVDS_ENCODER_CONTROL_PARAMETERS_V2;
+#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
+
#define TMDS1_ENCODER_CONTROL_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS1_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
-
+
#define TMDS2_ENCODER_CONTROL_PARAMETERS_V2 TMDS1_ENCODER_CONTROL_PARAMETERS_V2
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V2 TMDS2_ENCODER_CONTROL_PARAMETERS_V2
@@ -1185,38 +1536,42 @@ typedef struct _LVDS_ENCODER_CONTROL_PARAMETERS_V2 {
#define TMDS2_ENCODER_CONTROL_PARAMETERS_V3 LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define TMDS2_ENCODER_CONTROL_PS_ALLOCATION_V3 TMDS2_ENCODER_CONTROL_PARAMETERS_V3
-/****************************************************************************/
-/* Structures used by ### */
-/****************************************************************************/
-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS {
- UCHAR ucEnable; /* Enable or Disable External TMDS encoder */
- UCHAR ucMisc; /* Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB} */
- UCHAR ucPadding[2];
-} ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
-
-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION {
- ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
-} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
+/****************************************************************************/
+// Structures used by ###
+/****************************************************************************/
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS
+{
+ UCHAR ucEnable; // Enable or Disable External TMDS encoder
+ UCHAR ucMisc; // Bit0=0:Enable Single link;=1:Enable Dual link;Bit1 {=0:666RGB, =1:888RGB}
+ UCHAR ucPadding[2];
+}ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS;
+
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION;
#define ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 LVDS_ENCODER_CONTROL_PARAMETERS_V2
-typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2 {
- ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
-} ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
+typedef struct _ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2
+{
+ ENABLE_EXTERNAL_TMDS_ENCODER_PARAMETERS_V2 sXTmdsEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}ENABLE_EXTERNAL_TMDS_ENCODER_PS_ALLOCATION_V2;
-typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
- DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-} EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
+typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION
+{
+ DIG_ENCODER_CONTROL_PARAMETERS sDigEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION;
-/****************************************************************************/
-/* Structures used by DVOEncoderControlTable */
-/****************************************************************************/
-/* ucTableFormatRevision=1,ucTableContentRevision=3 */
+/****************************************************************************/
+// Structures used by DVOEncoderControlTable
+/****************************************************************************/
+//ucTableFormatRevision=1,ucTableContentRevision=3
-/* ucDVOConfig: */
+//ucDVOConfig:
#define DVO_ENCODER_CONFIG_RATE_SEL 0x01
#define DVO_ENCODER_CONFIG_DDR_SPEED 0x00
#define DVO_ENCODER_CONFIG_SDR_SPEED 0x01
@@ -1225,21 +1580,22 @@ typedef struct _EXTERNAL_ENCODER_CONTROL_PS_ALLOCATION {
#define DVO_ENCODER_CONFIG_UPPER12BIT 0x04
#define DVO_ENCODER_CONFIG_24BIT 0x08
-typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
- USHORT usPixelClock;
- UCHAR ucDVOConfig;
- UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
- UCHAR ucReseved[4];
-} DVO_ENCODER_CONTROL_PARAMETERS_V3;
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3
+{
+ USHORT usPixelClock;
+ UCHAR ucDVOConfig;
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ UCHAR ucReseved[4];
+}DVO_ENCODER_CONTROL_PARAMETERS_V3;
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_V3 DVO_ENCODER_CONTROL_PARAMETERS_V3
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=3 structure is not changed but usMisc add bit 1 as another input for */
-/* bit1=0: non-coherent mode */
-/* =1: coherent mode */
+//ucTableFormatRevision=1
+//ucTableContentRevision=3 structure is not changed, but usMisc adds bit 1 as another input:
+// bit1=0: non-coherent mode
+// =1: coherent mode
-/* ========================================================================================== */
-/* Only change is here next time when changing encoder parameter definitions again! */
+//==========================================================================================
+//Only change is here next time when changing encoder parameter definitions again!
#define LVDS_ENCODER_CONTROL_PARAMETERS_LAST LVDS_ENCODER_CONTROL_PARAMETERS_V3
#define LVDS_ENCODER_CONTROL_PS_ALLOCATION_LAST LVDS_ENCODER_CONTROL_PARAMETERS_LAST
@@ -1252,7 +1608,7 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define DVO_ENCODER_CONTROL_PARAMETERS_LAST DVO_ENCODER_CONTROL_PARAMETERS
#define DVO_ENCODER_CONTROL_PS_ALLOCATION_LAST DVO_ENCODER_CONTROL_PS_ALLOCATION
-/* ========================================================================================== */
+//==========================================================================================
#define PANEL_ENCODER_MISC_DUAL 0x01
#define PANEL_ENCODER_MISC_COHERENT 0x02
#define PANEL_ENCODER_MISC_TMDS_LINKB 0x04
@@ -1281,159 +1637,159 @@ typedef struct _DVO_ENCODER_CONTROL_PARAMETERS_V3 {
#define PANEL_ENCODER_75FRC_E 0x00
#define PANEL_ENCODER_75FRC_F 0x80
-/****************************************************************************/
-/* Structures used by SetVoltageTable */
-/****************************************************************************/
+/****************************************************************************/
+// Structures used by SetVoltageTable
+/****************************************************************************/
#define SET_VOLTAGE_TYPE_ASIC_VDDC 1
#define SET_VOLTAGE_TYPE_ASIC_MVDDC 2
#define SET_VOLTAGE_TYPE_ASIC_MVDDQ 3
#define SET_VOLTAGE_TYPE_ASIC_VDDCI 4
#define SET_VOLTAGE_INIT_MODE 5
-#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 /* Gets the Max. voltage for the soldered Asic */
+#define SET_VOLTAGE_GET_MAX_VOLTAGE 6 //Gets the Max. voltage for the soldered Asic
#define SET_ASIC_VOLTAGE_MODE_ALL_SOURCE 0x1
#define SET_ASIC_VOLTAGE_MODE_SOURCE_A 0x2
#define SET_ASIC_VOLTAGE_MODE_SOURCE_B 0x4
#define SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE 0x0
-#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
+#define SET_ASIC_VOLTAGE_MODE_GET_GPIOVAL 0x1
#define SET_ASIC_VOLTAGE_MODE_GET_GPIOMASK 0x2
-typedef struct _SET_VOLTAGE_PARAMETERS {
- UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
- UCHAR ucVoltageMode; /* To set all, to set source A or source B or ... */
- UCHAR ucVoltageIndex; /* An index to tell which voltage level */
- UCHAR ucReserved;
-} SET_VOLTAGE_PARAMETERS;
-
-typedef struct _SET_VOLTAGE_PARAMETERS_V2 {
- UCHAR ucVoltageType; /* To tell which voltage to set up, VDDC/MVDDC/MVDDQ */
- UCHAR ucVoltageMode; /* Not used, maybe use for state machine for differen power mode */
- USHORT usVoltageLevel; /* real voltage level */
-} SET_VOLTAGE_PARAMETERS_V2;
-
-typedef struct _SET_VOLTAGE_PS_ALLOCATION {
- SET_VOLTAGE_PARAMETERS sASICSetVoltage;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
-} SET_VOLTAGE_PS_ALLOCATION;
-
-/****************************************************************************/
-/* Structures used by TVEncoderControlTable */
-/****************************************************************************/
-typedef struct _TV_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock; /* in 10KHz; for bios convenient */
- UCHAR ucTvStandard; /* See definition "ATOM_TV_NTSC ..." */
- UCHAR ucAction; /* 0: turn off encoder */
- /* 1: setup and turn on encoder */
-} TV_ENCODER_CONTROL_PARAMETERS;
-
-typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION {
- TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Don't set this one */
-} TV_ENCODER_CONTROL_PS_ALLOCATION;
-
-/* ==============================Data Table Portion==================================== */
-
-#ifdef UEFI_BUILD
-#define UTEMP USHORT
-#define USHORT void*
-#endif
-
-/****************************************************************************/
-/* Structure used in Data.mtb */
-/****************************************************************************/
-typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES {
- USHORT UtilityPipeLine; /* Offest for the utility to get parser info,Don't change this position! */
- USHORT MultimediaCapabilityInfo; /* Only used by MM Lib,latest version 1.1, not configuable from Bios, need to include the table to build Bios */
- USHORT MultimediaConfigInfo; /* Only used by MM Lib,latest version 2.1, not configuable from Bios, need to include the table to build Bios */
- USHORT StandardVESA_Timing; /* Only used by Bios */
- USHORT FirmwareInfo; /* Shared by various SW components,latest version 1.4 */
- USHORT DAC_Info; /* Will be obsolete from R600 */
- USHORT LVDS_Info; /* Shared by various SW components,latest version 1.1 */
- USHORT TMDS_Info; /* Will be obsolete from R600 */
- USHORT AnalogTV_Info; /* Shared by various SW components,latest version 1.1 */
- USHORT SupportedDevicesInfo; /* Will be obsolete from R600 */
- USHORT GPIO_I2C_Info; /* Shared by various SW components,latest version 1.2 will be used from R600 */
- USHORT VRAM_UsageByFirmware; /* Shared by various SW components,latest version 1.3 will be used from R600 */
- USHORT GPIO_Pin_LUT; /* Shared by various SW components,latest version 1.1 */
- USHORT VESA_ToInternalModeLUT; /* Only used by Bios */
- USHORT ComponentVideoInfo; /* Shared by various SW components,latest version 2.1 will be used from R600 */
- USHORT PowerPlayInfo; /* Shared by various SW components,latest version 2.1,new design from R600 */
- USHORT CompassionateData; /* Will be obsolete from R600 */
- USHORT SaveRestoreInfo; /* Only used by Bios */
- USHORT PPLL_SS_Info; /* Shared by various SW components,latest version 1.2, used to call SS_Info, change to new name because of int ASIC SS info */
- USHORT OemInfo; /* Defined and used by external SW, should be obsolete soon */
- USHORT XTMDS_Info; /* Will be obsolete from R600 */
- USHORT MclkSS_Info; /* Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used */
- USHORT Object_Header; /* Shared by various SW components,latest version 1.1 */
- USHORT IndirectIOAccess; /* Only used by Bios,this table position can't change at all!! */
- USHORT MC_InitParameter; /* Only used by command table */
- USHORT ASIC_VDDC_Info; /* Will be obsolete from R600 */
- USHORT ASIC_InternalSS_Info; /* New tabel name from R600, used to be called "ASIC_MVDDC_Info" */
- USHORT TV_VideoMode; /* Only used by command table */
- USHORT VRAM_Info; /* Only used by command table, latest version 1.3 */
- USHORT MemoryTrainingInfo; /* Used for VBIOS and Diag utility for memory training purpose since R600. the new table rev start from 2.1 */
- USHORT IntegratedSystemInfo; /* Shared by various SW components */
- USHORT ASIC_ProfilingInfo; /* New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600 */
- USHORT VoltageObjectInfo; /* Shared by various SW components, latest version 1.1 */
- USHORT PowerSourceInfo; /* Shared by various SW components, latest versoin 1.1 */
-} ATOM_MASTER_LIST_OF_DATA_TABLES;
-
-#ifdef UEFI_BUILD
-#define USHORT UTEMP
-#endif
+typedef struct _SET_VOLTAGE_PARAMETERS
+{
+ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // To set all, to set source A or source B or ...
+ UCHAR ucVoltageIndex; // An index to tell which voltage level
+ UCHAR ucReserved;
+}SET_VOLTAGE_PARAMETERS;
-typedef struct _ATOM_MASTER_DATA_TABLE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
-} ATOM_MASTER_DATA_TABLE;
+typedef struct _SET_VOLTAGE_PARAMETERS_V2
+{
+ UCHAR ucVoltageType; // To tell which voltage to set up, VDDC/MVDDC/MVDDQ
+ UCHAR ucVoltageMode; // Not used, maybe used for a state machine for different power modes
+ USHORT usVoltageLevel; // real voltage level
+}SET_VOLTAGE_PARAMETERS_V2;
-/****************************************************************************/
-/* Structure used in MultimediaCapabilityInfoTable */
-/****************************************************************************/
-typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulSignature; /* HW info table signature string "$ATI" */
- UCHAR ucI2C_Type; /* I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc) */
- UCHAR ucTV_OutInfo; /* Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7) */
- UCHAR ucVideoPortInfo; /* Provides the video port capabilities */
- UCHAR ucHostPortInfo; /* Provides host port configuration information */
-} ATOM_MULTIMEDIA_CAPABILITY_INFO;
+typedef struct _SET_VOLTAGE_PS_ALLOCATION
+{
+ SET_VOLTAGE_PARAMETERS sASICSetVoltage;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved;
+}SET_VOLTAGE_PS_ALLOCATION;
+
+/****************************************************************************/
+// Structures used by TVEncoderControlTable
+/****************************************************************************/
+typedef struct _TV_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock; // in 10KHz; for BIOS convenience
+ UCHAR ucTvStandard; // See definition "ATOM_TV_NTSC ..."
+ UCHAR ucAction; // 0: turn off encoder
+ // 1: setup and turn on encoder
+}TV_ENCODER_CONTROL_PARAMETERS;
-/****************************************************************************/
-/* Structure used in MultimediaConfigInfoTable */
-/****************************************************************************/
-typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulSignature; /* MM info table signature sting "$MMT" */
- UCHAR ucTunerInfo; /* Type of tuner installed on the adapter (4:0) and video input for tuner (7:5) */
- UCHAR ucAudioChipInfo; /* List the audio chip type (3:0) product type (4) and OEM revision (7:5) */
- UCHAR ucProductID; /* Defines as OEM ID or ATI board ID dependent on product type setting */
- UCHAR ucMiscInfo1; /* Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7) */
- UCHAR ucMiscInfo2; /* I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6) */
- UCHAR ucMiscInfo3; /* Video Decoder Type (3:0) Video In Standard/Crystal (7:4) */
- UCHAR ucMiscInfo4; /* Video Decoder Host Config (2:0) reserved (7:3) */
- UCHAR ucVideoInput0Info; /* Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput1Info; /* Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput2Info; /* Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput3Info; /* Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
- UCHAR ucVideoInput4Info; /* Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6) */
-} ATOM_MULTIMEDIA_CONFIG_INFO;
+typedef struct _TV_ENCODER_CONTROL_PS_ALLOCATION
+{
+ TV_ENCODER_CONTROL_PARAMETERS sTVEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; // Don't set this one
+}TV_ENCODER_CONTROL_PS_ALLOCATION;
-/****************************************************************************/
-/* Structures used in FirmwareInfoTable */
-/****************************************************************************/
+//==============================Data Table Portion====================================
-/* usBIOSCapability Definition: */
-/* Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted; */
-/* Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported; */
-/* Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported; */
-/* Others: Reserved */
+/****************************************************************************/
+// Structure used in Data.mtb
+/****************************************************************************/
+typedef struct _ATOM_MASTER_LIST_OF_DATA_TABLES
+{
+ USHORT UtilityPipeLine; // Offset for the utility to get parser info, don't change this position!
+ USHORT MultimediaCapabilityInfo; // Only used by MM Lib, latest version 1.1, not configurable from BIOS, need to include the table to build BIOS
+ USHORT MultimediaConfigInfo; // Only used by MM Lib, latest version 2.1, not configurable from BIOS, need to include the table to build BIOS
+ USHORT StandardVESA_Timing; // Only used by Bios
+ USHORT FirmwareInfo; // Shared by various SW components,latest version 1.4
+ USHORT DAC_Info; // Will be obsolete from R600
+ USHORT LVDS_Info; // Shared by various SW components,latest version 1.1
+ USHORT TMDS_Info; // Will be obsolete from R600
+ USHORT AnalogTV_Info; // Shared by various SW components,latest version 1.1
+ USHORT SupportedDevicesInfo; // Will be obsolete from R600
+ USHORT GPIO_I2C_Info; // Shared by various SW components,latest version 1.2 will be used from R600
+ USHORT VRAM_UsageByFirmware; // Shared by various SW components,latest version 1.3 will be used from R600
+ USHORT GPIO_Pin_LUT; // Shared by various SW components,latest version 1.1
+ USHORT VESA_ToInternalModeLUT; // Only used by Bios
+ USHORT ComponentVideoInfo; // Shared by various SW components,latest version 2.1 will be used from R600
+ USHORT PowerPlayInfo; // Shared by various SW components,latest version 2.1,new design from R600
+ USHORT CompassionateData; // Will be obsolete from R600
+ USHORT SaveRestoreInfo; // Only used by Bios
+ USHORT PPLL_SS_Info; // Shared by various SW components, latest version 1.2; used to be called SS_Info, changed to the new name because of internal ASIC SS info
+ USHORT OemInfo; // Defined and used by external SW, should be obsolete soon
+ USHORT XTMDS_Info; // Will be obsolete from R600
+ USHORT MclkSS_Info; // Shared by various SW components,latest version 1.1, only enabled when ext SS chip is used
+ USHORT Object_Header; // Shared by various SW components,latest version 1.1
+ USHORT IndirectIOAccess; // Only used by Bios,this table position can't change at all!!
+ USHORT MC_InitParameter; // Only used by command table
+ USHORT ASIC_VDDC_Info; // Will be obsolete from R600
+ USHORT ASIC_InternalSS_Info; // New table name from R600, used to be called "ASIC_MVDDC_Info"
+ USHORT TV_VideoMode; // Only used by command table
+ USHORT VRAM_Info; // Only used by command table, latest version 1.3
+ USHORT MemoryTrainingInfo; // Used for VBIOS and Diag utility for memory training purposes since R600; the new table rev starts from 2.1
+ USHORT IntegratedSystemInfo; // Shared by various SW components
+ USHORT ASIC_ProfilingInfo; // New table name from R600, used to be called "ASIC_VDDCI_Info" for pre-R600
+ USHORT VoltageObjectInfo; // Shared by various SW components, latest version 1.1
+ USHORT PowerSourceInfo; // Shared by various SW components, latest version 1.1
+}ATOM_MASTER_LIST_OF_DATA_TABLES;
+
+typedef struct _ATOM_MASTER_DATA_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_MASTER_LIST_OF_DATA_TABLES ListOfDataTables;
+}ATOM_MASTER_DATA_TABLE;
+
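Each USHORT in the master list above is, in practice, a byte offset to the corresponding data table. Below is a minimal sketch of resolving one entry, assuming the offsets are relative to the start of the VBIOS image and that 0 means the table is absent; atom_get_data_table() and the sample offset are hypothetical, not part of this header.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical helper, not part of atombios.h: resolve one entry of
 * ATOM_MASTER_LIST_OF_DATA_TABLES.  Assumes each USHORT is a byte offset
 * from the start of the VBIOS image and that 0 means "table not present". */
static const void *atom_get_data_table(const uint8_t *vbios_image,
                                       uint16_t table_offset)
{
        if (table_offset == 0)
                return NULL;
        return vbios_image + table_offset;
}

int main(void)
{
        uint8_t image[0x200] = { 0 };
        /* 0x40 is an arbitrary example offset, not a value from a real BIOS. */
        return atom_get_data_table(image, 0x40) ? 0 : 1;
}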
+/****************************************************************************/
+// Structure used in MultimediaCapabilityInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CAPABILITY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // HW info table signature string "$ATI"
+ UCHAR ucI2C_Type; // I2C type (normal GP_IO, ImpactTV GP_IO, Dedicated I2C pin, etc)
+ UCHAR ucTV_OutInfo; // Type of TV out supported (3:0) and video out crystal frequency (6:4) and TV data port (7)
+ UCHAR ucVideoPortInfo; // Provides the video port capabilities
+ UCHAR ucHostPortInfo; // Provides host port configuration information
+}ATOM_MULTIMEDIA_CAPABILITY_INFO;
+
+/****************************************************************************/
+// Structure used in MultimediaConfigInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulSignature; // MM info table signature sting "$MMT"
+ UCHAR ucTunerInfo; // Type of tuner installed on the adapter (4:0) and video input for tuner (7:5)
+ UCHAR ucAudioChipInfo; // List the audio chip type (3:0) product type (4) and OEM revision (7:5)
+ UCHAR ucProductID; // Defines as OEM ID or ATI board ID dependent on product type setting
+ UCHAR ucMiscInfo1; // Tuner voltage (1:0) HW teletext support (3:2) FM audio decoder (5:4) reserved (6) audio scrambling (7)
+ UCHAR ucMiscInfo2; // I2S input config (0) I2S output config (1) I2S Audio Chip (4:2) SPDIF Output Config (5) reserved (7:6)
+ UCHAR ucMiscInfo3; // Video Decoder Type (3:0) Video In Standard/Crystal (7:4)
+ UCHAR ucMiscInfo4; // Video Decoder Host Config (2:0) reserved (7:3)
+ UCHAR ucVideoInput0Info;// Video Input 0 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput1Info;// Video Input 1 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput2Info;// Video Input 2 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput3Info;// Video Input 3 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+ UCHAR ucVideoInput4Info;// Video Input 4 Type (1:0) F/B setting (2) physical connector ID (5:3) reserved (7:6)
+}ATOM_MULTIMEDIA_CONFIG_INFO;
+
+/****************************************************************************/
+// Structures used in FirmwareInfoTable
+/****************************************************************************/
+
+// usBIOSCapability Definition:
+// Bit 0 = 0: Bios image is not Posted, =1:Bios image is Posted;
+// Bit 1 = 0: Dual CRTC is not supported, =1: Dual CRTC is supported;
+// Bit 2 = 0: Extended Desktop is not supported, =1: Extended Desktop is supported;
+// Others: Reserved
#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED 0x0001
#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT 0x0002
#define ATOM_BIOS_INFO_EXTENDED_DESKTOP_SUPPORT 0x0004
-#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008
-#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT 0x0008 // (valid from v1.1 ~v1.4):=1: memclk SS enable, =0 memclk SS disable.
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT 0x0010 // (valid from v1.1 ~v1.4):=1: engclk SS enable, =0 engclk SS disable.
#define ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU 0x0020
#define ATOM_BIOS_INFO_WMI_SUPPORT 0x0040
#define ATOM_BIOS_INFO_PPMODE_ASSIGNGED_BY_SYSTEM 0x0080
@@ -1441,242 +1797,292 @@ typedef struct _ATOM_MULTIMEDIA_CONFIG_INFO {
#define ATOM_BIOS_INFO_HYPERMEMORY_SIZE_MASK 0x1E00
#define ATOM_BIOS_INFO_VPOST_WITHOUT_FIRST_MODE_SET 0x2000
#define ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE 0x4000
+#define ATOM_BIOS_INFO_MEMORY_CLOCK_EXT_SS_SUPPORT 0x0008 // (valid from v2.1 ): =1: memclk ss enable with external ss chip
+#define ATOM_BIOS_INFO_ENGINE_CLOCK_EXT_SS_SUPPORT 0x0010 // (valid from v2.1 ): =1: engclk ss enable with external ss chip
#ifndef _H2INC
-/* Please don't add or expand this bitfield structure below, this one will retire soon.! */
-typedef struct _ATOM_FIRMWARE_CAPABILITY {
+//Please don't add or expand this bitfield structure below, this one will retire soon!
+typedef struct _ATOM_FIRMWARE_CAPABILITY
+{
#if ATOM_BIG_ENDIAN
- USHORT Reserved:3;
- USHORT HyperMemory_Size:4;
- USHORT HyperMemory_Support:1;
- USHORT PPMode_Assigned:1;
- USHORT WMI_SUPPORT:1;
- USHORT GPUControlsBL:1;
- USHORT EngineClockSS_Support:1;
- USHORT MemoryClockSS_Support:1;
- USHORT ExtendedDesktopSupport:1;
- USHORT DualCRTC_Support:1;
- USHORT FirmwarePosted:1;
+ USHORT Reserved:3;
+ USHORT HyperMemory_Size:4;
+ USHORT HyperMemory_Support:1;
+ USHORT PPMode_Assigned:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT GPUControlsBL:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT DualCRTC_Support:1;
+ USHORT FirmwarePosted:1;
#else
- USHORT FirmwarePosted:1;
- USHORT DualCRTC_Support:1;
- USHORT ExtendedDesktopSupport:1;
- USHORT MemoryClockSS_Support:1;
- USHORT EngineClockSS_Support:1;
- USHORT GPUControlsBL:1;
- USHORT WMI_SUPPORT:1;
- USHORT PPMode_Assigned:1;
- USHORT HyperMemory_Support:1;
- USHORT HyperMemory_Size:4;
- USHORT Reserved:3;
+ USHORT FirmwarePosted:1;
+ USHORT DualCRTC_Support:1;
+ USHORT ExtendedDesktopSupport:1;
+ USHORT MemoryClockSS_Support:1;
+ USHORT EngineClockSS_Support:1;
+ USHORT GPUControlsBL:1;
+ USHORT WMI_SUPPORT:1;
+ USHORT PPMode_Assigned:1;
+ USHORT HyperMemory_Support:1;
+ USHORT HyperMemory_Size:4;
+ USHORT Reserved:3;
#endif
-} ATOM_FIRMWARE_CAPABILITY;
+}ATOM_FIRMWARE_CAPABILITY;
-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
- ATOM_FIRMWARE_CAPABILITY sbfAccess;
- USHORT susAccess;
-} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+ ATOM_FIRMWARE_CAPABILITY sbfAccess;
+ USHORT susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
#else
-typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS {
- USHORT susAccess;
-} ATOM_FIRMWARE_CAPABILITY_ACCESS;
+typedef union _ATOM_FIRMWARE_CAPABILITY_ACCESS
+{
+ USHORT susAccess;
+}ATOM_FIRMWARE_CAPABILITY_ACCESS;
#endif
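The union above is just a convenience wrapper: the same 16-bit word can be read through the bitfield view or tested against the ATOM_BIOS_INFO_* masks defined earlier. A minimal, self-contained sketch of the mask-based access follows; the helper and the sample value are hypothetical, and only a few of the masks are repeated so the snippet compiles on its own.

#include <stdint.h>
#include <stdio.h>

/* Masks repeated from the defines above so the sketch compiles on its own. */
#define ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED     0x0001
#define ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT        0x0002
#define ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT  0x0008

/* Hypothetical helper: report a few capability bits from the raw
 * usFirmwareCapability word (equivalent to reading susAccess). */
static void dump_firmware_caps(uint16_t caps)
{
        printf("posted:    %d\n", !!(caps & ATOM_BIOS_INFO_ATOM_FIRMWARE_POSTED));
        printf("dual CRTC: %d\n", !!(caps & ATOM_BIOS_INFO_DUAL_CRTC_SUPPORT));
        printf("memclk SS: %d\n", !!(caps & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT));
}

int main(void)
{
        dump_firmware_caps(0x000B);  /* posted + dual CRTC + memclk SS set */
        return 0;
}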
-typedef struct _ATOM_FIRMWARE_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucPadding[3]; /* Don't use them */
- ULONG aulReservedForBIOS[3]; /* Don't use them */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit, the definitions above can't change!!! */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucMinAllowedBL_Level;
- UCHAR ucPadding[2]; /* Don't use them */
- ULONG aulReservedForBIOS[2]; /* Don't use them */
- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO_V1_2;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_3 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucMinAllowedBL_Level;
- UCHAR ucPadding[2]; /* Don't use them */
- ULONG aulReservedForBIOS; /* Don't use them */
- ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO_V1_3;
-
-typedef struct _ATOM_FIRMWARE_INFO_V1_4 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulFirmwareRevision;
- ULONG ulDefaultEngineClock; /* In 10Khz unit */
- ULONG ulDefaultMemoryClock; /* In 10Khz unit */
- ULONG ulDriverTargetEngineClock; /* In 10Khz unit */
- ULONG ulDriverTargetMemoryClock; /* In 10Khz unit */
- ULONG ulMaxEngineClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxMemoryClockPLL_Output; /* In 10Khz unit */
- ULONG ulMaxPixelClockPLL_Output; /* In 10Khz unit */
- ULONG ulASICMaxEngineClock; /* In 10Khz unit */
- ULONG ulASICMaxMemoryClock; /* In 10Khz unit */
- UCHAR ucASICMaxTemperature;
- UCHAR ucMinAllowedBL_Level;
- USHORT usBootUpVDDCVoltage; /* In MV unit */
- USHORT usLcdMinPixelClockPLL_Output; /* In MHz unit */
- USHORT usLcdMaxPixelClockPLL_Output; /* In MHz unit */
- ULONG ul3DAccelerationEngineClock; /* In 10Khz unit */
- ULONG ulMinPixelClockPLL_Output; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxEngineClockPLL_Input; /* In 10Khz unit */
- USHORT usMinEngineClockPLL_Output; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxMemoryClockPLL_Input; /* In 10Khz unit */
- USHORT usMinMemoryClockPLL_Output; /* In 10Khz unit */
- USHORT usMaxPixelClock; /* In 10Khz unit, Max. Pclk */
- USHORT usMinPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMaxPixelClockPLL_Input; /* In 10Khz unit */
- USHORT usMinPixelClockPLL_Output; /* In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output */
- ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
- USHORT usReferenceClock; /* In 10Khz unit */
- USHORT usPM_RTS_Location; /* RTS PM4 starting location in ROM in 1Kb unit */
- UCHAR ucPM_RTS_StreamSize; /* RTS PM4 packets in Kb unit */
- UCHAR ucDesign_ID; /* Indicate what is the board design */
- UCHAR ucMemoryModule_ID; /* Indicate what is the board design */
-} ATOM_FIRMWARE_INFO_V1_4;
-
-#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V1_4
-
-/****************************************************************************/
-/* Structures used in IntegratedSystemInfoTable */
-/****************************************************************************/
+typedef struct _ATOM_FIRMWARE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucPadding[3]; //Don't use them
+ ULONG aulReservedForBIOS[3]; //Don't use them
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit, the definitions above can't change!!!
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; //Don't use them
+ ULONG aulReservedForBIOS[2]; //Don't use them
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_2;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ UCHAR ucPadding[2]; //Don't use them
+ ULONG aulReservedForBIOS; //Don't use them
+ ULONG ul3DAccelerationEngineClock;//In 10Khz unit
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_3;
+
+typedef struct _ATOM_FIRMWARE_INFO_V1_4
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulDriverTargetEngineClock; //In 10Khz unit
+ ULONG ulDriverTargetMemoryClock; //In 10Khz unit
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulASICMaxEngineClock; //In 10Khz unit
+ ULONG ulASICMaxMemoryClock; //In 10Khz unit
+ UCHAR ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ul3DAccelerationEngineClock;//In 10Khz unit
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usReferenceClock; //In 10Khz unit
+ USHORT usPM_RTS_Location; //RTS PM4 starting location in ROM in 1Kb unit
+ UCHAR ucPM_RTS_StreamSize; //RTS PM4 packets in Kb unit
+ UCHAR ucDesign_ID; //Indicate what is the board design
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+}ATOM_FIRMWARE_INFO_V1_4;
+
+// The structure below is to be used from Cypress
+typedef struct _ATOM_FIRMWARE_INFO_V2_1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulFirmwareRevision;
+ ULONG ulDefaultEngineClock; //In 10Khz unit
+ ULONG ulDefaultMemoryClock; //In 10Khz unit
+ ULONG ulReserved1;
+ ULONG ulReserved2;
+ ULONG ulMaxEngineClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxMemoryClockPLL_Output; //In 10Khz unit
+ ULONG ulMaxPixelClockPLL_Output; //In 10Khz unit
+ ULONG ulBinaryAlteredInfo; //Was ulASICMaxEngineClock
+ ULONG ulDefaultDispEngineClkFreq; //In 10Khz unit
+ UCHAR ucReserved1; //Was ucASICMaxTemperature;
+ UCHAR ucMinAllowedBL_Level;
+ USHORT usBootUpVDDCVoltage; //In MV unit
+ USHORT usLcdMinPixelClockPLL_Output; // In MHz unit
+ USHORT usLcdMaxPixelClockPLL_Output; // In MHz unit
+ ULONG ulReserved4; //Was ulAsicMaximumVoltage
+ ULONG ulMinPixelClockPLL_Output; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMaxEngineClockPLL_Input; //In 10Khz unit
+ USHORT usMinEngineClockPLL_Output; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMaxMemoryClockPLL_Input; //In 10Khz unit
+ USHORT usMinMemoryClockPLL_Output; //In 10Khz unit
+ USHORT usMaxPixelClock; //In 10Khz unit, Max. Pclk
+ USHORT usMinPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMaxPixelClockPLL_Input; //In 10Khz unit
+ USHORT usMinPixelClockPLL_Output; //In 10Khz unit - lower 16bit of ulMinPixelClockPLL_Output
+ ATOM_FIRMWARE_CAPABILITY_ACCESS usFirmwareCapability;
+ USHORT usCoreReferenceClock; //In 10Khz unit
+ USHORT usMemoryReferenceClock; //In 10Khz unit
+ USHORT usUniphyDPModeExtClkFreq; //In 10Khz unit; if it is 0, in DP mode the Uniphy input clock comes from the internal PPLL, otherwise from the external spread-spectrum clock
+ UCHAR ucMemoryModule_ID; //Indicate what is the board design
+ UCHAR ucReserved4[3];
+}ATOM_FIRMWARE_INFO_V2_1;
+
+
+#define ATOM_FIRMWARE_INFO_LAST ATOM_FIRMWARE_INFO_V2_1
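Most clock fields in the firmware info structures above are stored in 10 kHz units (e.g. ulDefaultEngineClock), so converting to kHz is a multiplication by 10. A trivial sketch with a hypothetical helper:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper: ATOM clock fields such as ulDefaultEngineClock are
 * stored in 10 kHz units, so a raw value of 60000 means 600 MHz. */
static uint32_t atom_10khz_to_khz(uint32_t value_10khz)
{
        return value_10khz * 10;
}

int main(void)
{
        printf("%u kHz\n", atom_10khz_to_khz(60000));  /* prints 600000 */
        return 0;
}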
+
+/****************************************************************************/
+// Structures used in IntegratedSystemInfoTable
+/****************************************************************************/
#define IGP_CAP_FLAG_DYNAMIC_CLOCK_EN 0x2
#define IGP_CAP_FLAG_AC_CARD 0x4
#define IGP_CAP_FLAG_SDVO_CARD 0x8
#define IGP_CAP_FLAG_POSTDIV_BY_2_MODE 0x10
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulBootUpEngineClock; /* in 10kHz unit */
- ULONG ulBootUpMemoryClock; /* in 10kHz unit */
- ULONG ulMaxSystemMemoryClock; /* in 10kHz unit */
- ULONG ulMinSystemMemoryClock; /* in 10kHz unit */
- UCHAR ucNumberOfCyclesInPeriodHi;
- UCHAR ucLCDTimingSel; /* =0:not valid.!=0 sel this timing descriptor from LCD EDID. */
- USHORT usReserved1;
- USHORT usInterNBVoltageLow; /* An intermidiate PMW value to set the voltage */
- USHORT usInterNBVoltageHigh; /* Another intermidiate PMW value to set the voltage */
- ULONG ulReserved[2];
-
- USHORT usFSBClock; /* In MHz unit */
- USHORT usCapabilityFlag; /* Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable */
- /* Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card */
- /* Bit[4]==1: P/2 mode, ==0: P/1 mode */
- USHORT usPCIENBCfgReg7; /* bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal */
- USHORT usK8MemoryClock; /* in MHz unit */
- USHORT usK8SyncStartDelay; /* in 0.01 us unit */
- USHORT usK8DataReturnTime; /* in 0.01 us unit */
- UCHAR ucMaxNBVoltage;
- UCHAR ucMinNBVoltage;
- UCHAR ucMemoryType; /* [7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved */
- UCHAR ucNumberOfCyclesInPeriod; /* CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod */
- UCHAR ucStartingPWM_HighTime; /* CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime */
- UCHAR ucHTLinkWidth; /* 16 bit vs. 8 bit */
- UCHAR ucMaxNBVoltageHigh;
- UCHAR ucMinNBVoltageHigh;
-} ATOM_INTEGRATED_SYSTEM_INFO;
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulBootUpMemoryClock; //in 10kHz unit
+ ULONG ulMaxSystemMemoryClock; //in 10kHz unit
+ ULONG ulMinSystemMemoryClock; //in 10kHz unit
+ UCHAR ucNumberOfCyclesInPeriodHi;
+ UCHAR ucLCDTimingSel; //=0:not valid.!=0 sel this timing descriptor from LCD EDID.
+ USHORT usReserved1;
+ USHORT usInterNBVoltageLow; //An intermediate PWM value to set the voltage
+ USHORT usInterNBVoltageHigh; //Another intermediate PWM value to set the voltage
+ ULONG ulReserved[2];
+
+ USHORT usFSBClock; //In MHz unit
+ USHORT usCapabilityFlag; //Bit0=1 indicates the fake HDMI support,Bit1=0/1 for Dynamic clocking dis/enable
+ //Bit[3:2]== 0:No PCIE card, 1:AC card, 2:SDVO card
+ //Bit[4]==1: P/2 mode, ==0: P/1 mode
+ USHORT usPCIENBCfgReg7; //bit[7:0]=MUX_Sel, bit[9:8]=MUX_SEL_LEVEL2, bit[10]=Lane_Reversal
+ USHORT usK8MemoryClock; //in MHz unit
+ USHORT usK8SyncStartDelay; //in 0.01 us unit
+ USHORT usK8DataReturnTime; //in 0.01 us unit
+ UCHAR ucMaxNBVoltage;
+ UCHAR ucMinNBVoltage;
+ UCHAR ucMemoryType; //[7:4]=1:DDR1;=2:DDR2;=3:DDR3.[3:0] is reserved
+ UCHAR ucNumberOfCyclesInPeriod; //CG.FVTHROT_PWM_CTRL_REG0.NumberOfCyclesInPeriod
+ UCHAR ucStartingPWM_HighTime; //CG.FVTHROT_PWM_CTRL_REG0.StartingPWM_HighTime
+ UCHAR ucHTLinkWidth; //16 bit vs. 8 bit
+ UCHAR ucMaxNBVoltageHigh;
+ UCHAR ucMinNBVoltageHigh;
+}ATOM_INTEGRATED_SYSTEM_INFO;
/* Explanation on entries in ATOM_INTEGRATED_SYSTEM_INFO
-ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
+ulBootUpMemoryClock: For Intel IGP,it's the UMA system memory clock
For AMD IGP,it's 0 if no SidePort memory installed or it's the boot-up SidePort memory clock
ulMaxSystemMemoryClock: For Intel IGP,it's the Max freq from memory SPD if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
For AMD IGP,for now this can be 0
-ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
+ulMinSystemMemoryClock: For Intel IGP,it's 133MHz if memory runs in ASYNC mode or otherwise (SYNC mode) it's 0
For AMD IGP,for now this can be 0
-usFSBClock: For Intel IGP,it's FSB Freq
+usFSBClock: For Intel IGP,it's FSB Freq
For AMD IGP,it's HT Link Speed
usK8MemoryClock: For AMD IGP only. For RevF CPU, set it to 200
@@ -1687,98 +2093,113 @@ VC:Voltage Control
ucMaxNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltage: Voltage regulator dependent PWM value. Low 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
-ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
-ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
+ucNumberOfCyclesInPeriod: Indicate how many cycles when PWM duty is 100%. low 8 bits of the value.
+ucNumberOfCyclesInPeriodHi: Indicate how many cycles when PWM duty is 100%. high 8 bits of the value.If the PWM has an inverter,set bit [7]==1,otherwise set it 0
ucMaxNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the max voltage.Set this one to 0xFF if VC without PWM. Set this to 0x0 if no VC at all.
ucMinNBVoltageHigh: Voltage regulator dependent PWM value. High 8 bits of the value for the min voltage.Set this one to 0x00 if VC without PWM or no VC at all.
+
usInterNBVoltageLow: Voltage regulator dependent PWM value. The value makes the voltage >=Min NB voltage but <=InterNBVoltageHigh. Set this to 0x0000 if VC without PWM or no VC at all.
usInterNBVoltageHigh: Voltage regulator dependent PWM value. The value makes the voltage >=InterNBVoltageLow but <=Max NB voltage. Set this to 0x0000 if VC without PWM or no VC at all.
*/
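A small sketch of how the split PWM period fields described above could be recombined. It assumes, per the comment, that ucNumberOfCyclesInPeriodHi carries the inverter flag in bit 7 and that its remaining bits are the high part of the cycle count; decode_pwm_period() and the sample values are hypothetical, not part of the header.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: recombine the split PWM period fields described above.
 * Assumes bit 7 of the high byte is the inverter flag and the remaining
 * 15 bits form the number of cycles at 100% duty. */
static void decode_pwm_period(uint8_t lo, uint8_t hi)
{
        unsigned int cycles   = ((unsigned int)(hi & 0x7F) << 8) | lo;
        unsigned int inverter = (hi >> 7) & 0x1;

        printf("cycles in period: %u, inverter: %u\n", cycles, inverter);
}

int main(void)
{
        decode_pwm_period(0x40, 0x81);  /* 0x0140 cycles, inverter present */
        return 0;
}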
+
/*
The following IGP table is introduced from RS780, which is supposed to be put by SBIOS in FB before IGP VBIOS starts VPOST;
-Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
+Then VBIOS will copy the whole structure to its image so all GPU SW components can access this data structure to get whatever they need.
The reservation should be large enough that table revisions never need to change. Whenever needed, a GPU SW component can use the reserved portion for new data entries.
SW components can access the IGP system info structure in the same way as before
*/
-typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ULONG ulBootUpEngineClock; /* in 10kHz unit */
- ULONG ulReserved1[2]; /* must be 0x0 for the reserved */
- ULONG ulBootUpUMAClock; /* in 10kHz unit */
- ULONG ulBootUpSidePortClock; /* in 10kHz unit */
- ULONG ulMinSidePortClock; /* in 10kHz unit */
- ULONG ulReserved2[6]; /* must be 0x0 for the reserved */
- ULONG ulSystemConfig; /* see explanation below */
- ULONG ulBootUpReqDisplayVector;
- ULONG ulOtherDisplayMisc;
- ULONG ulDDISlot1Config;
- ULONG ulDDISlot2Config;
- UCHAR ucMemoryType; /* [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved */
- UCHAR ucUMAChannelNumber;
- UCHAR ucDockingPinBit;
- UCHAR ucDockingPinPolarity;
- ULONG ulDockingPinCFGInfo;
- ULONG ulCPUCapInfo;
- USHORT usNumberOfCyclesInPeriod;
- USHORT usMaxNBVoltage;
- USHORT usMinNBVoltage;
- USHORT usBootUpNBVoltage;
- ULONG ulHTLinkFreq; /* in 10Khz */
- USHORT usMinHTLinkWidth;
- USHORT usMaxHTLinkWidth;
- USHORT usUMASyncStartDelay;
- USHORT usUMADataReturnTime;
- USHORT usLinkStatusZeroTime;
- USHORT usReserved;
- ULONG ulHighVoltageHTLinkFreq; /* in 10Khz */
- ULONG ulLowVoltageHTLinkFreq; /* in 10Khz */
- USHORT usMaxUpStreamHTLinkWidth;
- USHORT usMaxDownStreamHTLinkWidth;
- USHORT usMinUpStreamHTLinkWidth;
- USHORT usMinDownStreamHTLinkWidth;
- ULONG ulReserved3[97]; /* must be 0x0 */
-} ATOM_INTEGRATED_SYSTEM_INFO_V2;
+
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulReserved1[2]; //must be 0x0 for the reserved
+ ULONG ulBootUpUMAClock; //in 10kHz unit
+ ULONG ulBootUpSidePortClock; //in 10kHz unit
+ ULONG ulMinSidePortClock; //in 10kHz unit
+ ULONG ulReserved2[6]; //must be 0x0 for the reserved
+ ULONG ulSystemConfig; //see explanation below
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulDDISlot1Config;
+ ULONG ulDDISlot2Config;
+ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+ UCHAR ucUMAChannelNumber;
+ UCHAR ucDockingPinBit;
+ UCHAR ucDockingPinPolarity;
+ ULONG ulDockingPinCFGInfo;
+ ULONG ulCPUCapInfo;
+ USHORT usNumberOfCyclesInPeriod;
+ USHORT usMaxNBVoltage;
+ USHORT usMinNBVoltage;
+ USHORT usBootUpNBVoltage;
+ ULONG ulHTLinkFreq; //in 10Khz
+ USHORT usMinHTLinkWidth;
+ USHORT usMaxHTLinkWidth;
+ USHORT usUMASyncStartDelay;
+ USHORT usUMADataReturnTime;
+ USHORT usLinkStatusZeroTime;
+ USHORT usDACEfuse; //for storing bandgap value (for RS880 only)
+ ULONG ulHighVoltageHTLinkFreq; // in 10Khz
+ ULONG ulLowVoltageHTLinkFreq; // in 10Khz
+ USHORT usMaxUpStreamHTLinkWidth;
+ USHORT usMaxDownStreamHTLinkWidth;
+ USHORT usMinUpStreamHTLinkWidth;
+ USHORT usMinDownStreamHTLinkWidth;
+ USHORT usFirmwareVersion; //0 means FW is not supported. Otherwise it's the FW version loaded by SBIOS and driver should enable FW.
+ USHORT usFullT0Time; // Input to calculate minimum HT link change time required by NB P-State. Unit is 0.01us.
+ ULONG ulReserved3[96]; //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V2;
/*
ulBootUpEngineClock: Boot-up Engine Clock in 10Khz;
ulBootUpUMAClock: Boot-up UMA Clock in 10Khz; it must be 0x0 when UMA is not present
ulBootUpSidePortClock: Boot-up SidePort Clock in 10Khz; it must be 0x0 when SidePort Memory is not present,this could be equal to or less than maximum supported Sideport memory clock
-ulSystemConfig:
-Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
+ulSystemConfig:
+Bit[0]=1: PowerExpress mode =0 Non-PowerExpress mode;
Bit[1]=1: system boots up in the AMD overdriven state or a user customized mode. In this case, the driver will just stick to this boot-up mode. No other PowerPlay state
=0: system boots up at driver control state. Power state depends on PowerPlay table.
Bit[2]=1: PWM method is used on NB voltage control. =0: GPIO method is used.
Bit[3]=1: Only one power state(Performance) will be supported.
=0: Multiple power states supported from PowerPlay table.
-Bit[4]=1: CLMC is supported and enabled on current system.
- =0: CLMC is not supported or enabled on current system. SBIOS need to support HT link/freq change through ATIF interface.
-Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
+Bit[4]=1: CLMC is supported and enabled on current system.
+ =0: CLMC is not supported or enabled on current system. SBIOS needs to support HT link/freq change through the ATIF interface.
+Bit[5]=1: Enable CDLW for all driver control power states. Max HT width is from SBIOS, while Min HT width is determined by display requirement.
=0: CDLW is disabled. If CLMC is enabled case, Min HT width will be set equal to Max HT width. If CLMC disabled case, Max HT width will be applied.
Bit[6]=1: High Voltage requested for all power states. In this case, voltage will be forced at 1.1v and powerplay table voltage drop/throttling request will be ignored.
 =0: Voltage setting is determined by the powerplay table.
Bit[7]=1: Enable CLMC as hybrid Mode. CDLD and CILR will be disabled in this case and we're using legacy C1E. This is workaround for CPU(Griffin) performance issue.
=0: Enable CLMC as regular mode, CDLD and CILR will be enabled.
+Bit[8]=1: CDLF is supported and enabled on current system.
+ =0: CDLF is not supported or enabled on current system.
+Bit[9]=1: DLL Shut Down feature is enabled on current system.
+ =0: DLL Shut Down feature is not enabled or supported on current system.
ulBootUpReqDisplayVector: This dword is a bit vector indicates what display devices are requested during boot-up. Refer to ATOM_DEVICE_xxx_SUPPORT for the bit vector definitions.
ulOtherDisplayMisc: [15:8]- Bootup LCD Expansion selection; 0-center, 1-full panel size expansion;
- [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSuppportedStd definition;
+ [7:0] - BootupTV standard selection; This is a bit vector to indicate what TV standards are supported by the system. Refer to ucTVSupportedStd definition;
ulDDISlot1Config: Describes the PCIE lane configuration on this DDI PCIE slot (ADD2 card) or connector (Mobile design).
[3:0] - Bit vector to indicate PCIE lane config of the DDI slot/connector on chassis (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
- [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 0=1 lane 3:0; bit 1=1 lane 7:4; bit 2=1 lane 11:8; bit 3=1 lane 15:12)
- [15:8] - Lane configuration attribute;
+ [7:4] - Bit vector to indicate PCIE lane config of the same DDI slot/connector on docking station (bit 4=1 lane 3:0; bit 5=1 lane 7:4; bit 6=1 lane 11:8; bit 7=1 lane 15:12)
+ When a DDI connector is not "paired" (meaning two mutually exclusive connections, one on chassis and one on docking, of which only one can be connected at a time)
+ in both chassis and docking, SBIOS has to duplicate the same PCIE lane info from chassis to docking or vice versa. For example: if
+ one DDI connector is only populated in docking with PCIE lane 8-11, but there is no paired connection on chassis, SBIOS has to copy bit 6 to bit 2.
+
+ [15:8] - Lane configuration attribute;
[23:16]- Connector type, possible value:
CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D
CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D
CONNECTOR_OBJECT_ID_HDMI_TYPE_A
CONNECTOR_OBJECT_ID_DISPLAYPORT
+ CONNECTOR_OBJECT_ID_eDP
[31:24]- Reserved
ulDDISlot2Config: Same as Slot1.
@@ -1787,29 +2208,31 @@ For IGP, Hypermemory is the only memory type showed in CCC.
ucUMAChannelNumber: how many channels for the UMA;
-ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
+ulDockingPinCFGInfo: [15:0]-Bus/Device/Function # to CFG to read this Docking Pin; [31:16]-reg offset in CFG to read this pin
ucDockingPinBit: which bit in this register to read the pin status;
ucDockingPinPolarity:Polarity of the pin when docked;
ulCPUCapInfo: [7:0]=1:Griffin;[7:0]=2:Greyhound;[7:0]=3:K8, other bits reserved for now and must be 0x0
usNumberOfCyclesInPeriod:Indicate how many cycles when PWM duty is 100%.
-usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
+
+usMaxNBVoltage:Max. voltage control value in either PWM or GPIO mode.
usMinNBVoltage:Min. voltage control value in either PWM or GPIO mode.
GPIO mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=0
PWM mode: both usMaxNBVoltage & usMinNBVoltage have a valid value ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE=1
GPU SW don't control mode: usMaxNBVoltage & usMinNBVoltage=0 and no care about ulSystemConfig.SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE
+
usBootUpNBVoltage:Boot-up voltage regulator dependent PWM value.
ulHTLinkFreq: Bootup HT link Frequency in 10Khz.
-usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
- If CDLW enabled, both upstream and downstream width should be the same during bootup.
-usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+usMinHTLinkWidth: Bootup minimum HT link width. If CDLW disabled, this is equal to usMaxHTLinkWidth.
If CDLW enabled, both upstream and downstream width should be the same during bootup.
+usMaxHTLinkWidth: Bootup maximum HT link width. If CDLW disabled, this is equal to usMinHTLinkWidth.
+ If CDLW enabled, both upstream and downstream width should be the same during bootup.
-usUMASyncStartDelay: Memory access latency, required for watermark calculation
+usUMASyncStartDelay: Memory access latency, required for watermark calculation
usUMADataReturnTime: Memory access latency, required for watermark calculation
-usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in 0.01 the unit of us
+usLinkStatusZeroTime:Memory access latency required for watermark calculation, set this to 0x0 for K8 CPU, set a proper value in units of 0.01 us
for Griffin or Greyhound. SBIOS needs to convert to actual time by:
if T0Ttime [5:4]=00b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.1us (0.0 to 1.5us)
if T0Ttime [5:4]=01b, then usLinkStatusZeroTime=T0Ttime [3:0]*0.5us (0.0 to 7.5us)
@@ -1817,7 +2240,7 @@ for Griffin or Greyhound. SBIOS needs to convert to actual time by:
if T0Ttime [5:4]=11b, and T0Ttime [3:0]=0x0 to 0xa, then usLinkStatusZeroTime=T0Ttime [3:0]*20us (0.0 to 200us)
ulHighVoltageHTLinkFreq: HT link frequency for power state with low voltage. If boot up runs in HT1, this must be 0.
- This must be less than or equal to ulHTLinkFreq(bootup frequency).
+ This must be less than or equal to ulHTLinkFreq(bootup frequency).
ulLowVoltageHTLinkFreq: HT link frequency for power state with low voltage or voltage scaling 1.0v~1.1v. If boot up runs in HT1, this must be 0.
This must be less than or equal to ulHighVoltageHTLinkFreq.
@@ -1827,14 +2250,17 @@ usMinUpStreamHTLinkWidth: Asymmetric link width support in the future, to rep
usMinDownStreamHTLinkWidth: same as above.
*/
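The usLinkStatusZeroTime conversion described above is just a lookup on T0Ttime bits [5:4] with a per-range step size. The sketch below writes out that arithmetic, covering only the encodings quoted in this hunk; the helper name and the -1 return for unhandled encodings are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: convert the T0Ttime encoding described above into the
 * 0.01 us units used by usLinkStatusZeroTime.  Only the cases quoted in the
 * comment are handled; other encodings follow the same table in the text. */
static int t0t_to_link_status_zero_time(uint8_t t0t)
{
        unsigned int scale = (t0t >> 4) & 0x3;   /* T0Ttime[5:4] */
        unsigned int val   = t0t & 0xF;          /* T0Ttime[3:0] */

        switch (scale) {
        case 0x0: return (int)(val * 10);    /* 0.1 us steps -> 0.0 .. 1.5 us */
        case 0x1: return (int)(val * 50);    /* 0.5 us steps -> 0.0 .. 7.5 us */
        case 0x3: return val <= 0xA ? (int)(val * 2000) : -1; /* 20 us steps */
        default:  return -1;                 /* encoding not quoted in this hunk */
        }
}

int main(void)
{
        printf("%d\n", t0t_to_link_status_zero_time(0x15)); /* 01b, 5 -> 2.5 us -> 250 */
        return 0;
}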
+
#define SYSTEM_CONFIG_POWEREXPRESS_ENABLE 0x00000001
#define SYSTEM_CONFIG_RUN_AT_OVERDRIVE_ENGINE 0x00000002
-#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
+#define SYSTEM_CONFIG_USE_PWM_ON_VOLTAGE 0x00000004
#define SYSTEM_CONFIG_PERFORMANCE_POWERSTATE_ONLY 0x00000008
#define SYSTEM_CONFIG_CLMC_ENABLED 0x00000010
#define SYSTEM_CONFIG_CDLW_ENABLED 0x00000020
#define SYSTEM_CONFIG_HIGH_VOLTAGE_REQUESTED 0x00000040
#define SYSTEM_CONFIG_CLMC_HYBRID_MODE_ENABLED 0x00000080
+#define SYSTEM_CONFIG_CDLF_ENABLED 0x00000100
+#define SYSTEM_CONFIG_DLL_SHUTDOWN_ENABLED 0x00000200
#define IGP_DDI_SLOT_LANE_CONFIG_MASK 0x000000FF
@@ -1851,6 +2277,41 @@ usMinDownStreamHTLinkWidth: same as above.
#define IGP_DDI_SLOT_CONNECTOR_TYPE_MASK 0x00FF0000
+// IntegratedSystemInfoTable new Rev is V5 after V2, because the real rev of V2 is v1.4. This rev is used for RR
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V5
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock; //in 10kHz unit
+ ULONG ulDentistVCOFreq; //Dentist VCO clock in 10kHz unit, the source of GPU SCLK, LCLK, UCLK and VCLK.
+ ULONG ulLClockFreq; //GPU Lclk freq in 10kHz unit, have relationship with NCLK in NorthBridge
+ ULONG ulBootUpUMAClock; //in 10kHz unit
+ ULONG ulReserved1[8]; //must be 0x0 for the reserved
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulReserved2[4]; //must be 0x0 for the reserved
+ ULONG ulSystemConfig; //TBD
+ ULONG ulCPUCapInfo; //TBD
+ USHORT usMaxNBVoltage; //high NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+ USHORT usMinNBVoltage; //low NB voltage, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse;
+ USHORT usBootUpNBVoltage; //boot up NB voltage
+ UCHAR ucHtcTmpLmt; //bit [22:16] of D24F3x64 Hardware Thermal Control (HTC) Register, may not be needed, TBD
+ UCHAR ucTjOffset; //bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed, TBD
+ ULONG ulReserved3[4]; //must be 0x0 for the reserved
+ ULONG ulDDISlot1Config; //see above ulDDISlot1Config definition
+ ULONG ulDDISlot2Config;
+ ULONG ulDDISlot3Config;
+ ULONG ulDDISlot4Config;
+ ULONG ulReserved4[4]; //must be 0x0 for the reserved
+ UCHAR ucMemoryType; //[3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved
+ UCHAR ucUMAChannelNumber;
+ USHORT usReserved;
+ ULONG ulReserved5[4]; //must be 0x0 for the reserved
+ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];//arrays with values for CSR M3 arbiter for default
+ ULONG ulCSR_M3_ARB_CNTL_UVD[10]; //arrays with values for CSR M3 arbiter for UVD playback
+ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];//arrays with values for CSR M3 arbiter for Full Screen 3D applications
+ ULONG ulReserved6[61]; //must be 0x0
+}ATOM_INTEGRATED_SYSTEM_INFO_V5;
+
#define ATOM_CRT_INT_ENCODER1_INDEX 0x00000000
#define ATOM_LCD_INT_ENCODER1_INDEX 0x00000001
#define ATOM_TV_INT_ENCODER1_INDEX 0x00000002
@@ -1866,8 +2327,8 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DFP_INT_ENCODER3_INDEX 0x0000000C
#define ATOM_DFP_INT_ENCODER4_INDEX 0x0000000D
-/* define ASIC internal encoder id ( bit vector ) */
-#define ASIC_INT_DAC1_ENCODER_ID 0x00
+// define ASIC internal encoder id ( bit vector ), used for CRTC_SourceSelTable
+#define ASIC_INT_DAC1_ENCODER_ID 0x00
#define ASIC_INT_TV_ENCODER_ID 0x02
#define ASIC_INT_DIG1_ENCODER_ID 0x03
#define ASIC_INT_DAC2_ENCODER_ID 0x04
@@ -1875,10 +2336,24 @@ usMinDownStreamHTLinkWidth: same as above.
#define ASIC_INT_DVO_ENCODER_ID 0x07
#define ASIC_INT_DIG2_ENCODER_ID 0x09
#define ASIC_EXT_DIG_ENCODER_ID 0x05
+#define ASIC_EXT_DIG2_ENCODER_ID 0x08
+#define ASIC_INT_DIG3_ENCODER_ID 0x0a
+#define ASIC_INT_DIG4_ENCODER_ID 0x0b
+#define ASIC_INT_DIG5_ENCODER_ID 0x0c
+#define ASIC_INT_DIG6_ENCODER_ID 0x0d
-/* define Encoder attribute */
+//define Encoder attribute
#define ATOM_ANALOG_ENCODER 0
-#define ATOM_DIGITAL_ENCODER 1
+#define ATOM_DIGITAL_ENCODER 1
+#define ATOM_DP_ENCODER 2
+
+#define ATOM_ENCODER_ENUM_MASK 0x70
+#define ATOM_ENCODER_ENUM_ID1 0x00
+#define ATOM_ENCODER_ENUM_ID2 0x10
+#define ATOM_ENCODER_ENUM_ID3 0x20
+#define ATOM_ENCODER_ENUM_ID4 0x30
+#define ATOM_ENCODER_ENUM_ID5 0x40
+#define ATOM_ENCODER_ENUM_ID6 0x50
#define ATOM_DEVICE_CRT1_INDEX 0x00000000
#define ATOM_DEVICE_LCD1_INDEX 0x00000001
@@ -1886,45 +2361,40 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_DFP1_INDEX 0x00000003
#define ATOM_DEVICE_CRT2_INDEX 0x00000004
#define ATOM_DEVICE_LCD2_INDEX 0x00000005
-#define ATOM_DEVICE_TV2_INDEX 0x00000006
+#define ATOM_DEVICE_DFP6_INDEX 0x00000006
#define ATOM_DEVICE_DFP2_INDEX 0x00000007
#define ATOM_DEVICE_CV_INDEX 0x00000008
-#define ATOM_DEVICE_DFP3_INDEX 0x00000009
-#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
-#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
+#define ATOM_DEVICE_DFP3_INDEX 0x00000009
+#define ATOM_DEVICE_DFP4_INDEX 0x0000000A
+#define ATOM_DEVICE_DFP5_INDEX 0x0000000B
+
#define ATOM_DEVICE_RESERVEDC_INDEX 0x0000000C
#define ATOM_DEVICE_RESERVEDD_INDEX 0x0000000D
#define ATOM_DEVICE_RESERVEDE_INDEX 0x0000000E
#define ATOM_DEVICE_RESERVEDF_INDEX 0x0000000F
#define ATOM_MAX_SUPPORTED_DEVICE_INFO (ATOM_DEVICE_DFP3_INDEX+1)
#define ATOM_MAX_SUPPORTED_DEVICE_INFO_2 ATOM_MAX_SUPPORTED_DEVICE_INFO
-#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1)
+#define ATOM_MAX_SUPPORTED_DEVICE_INFO_3 (ATOM_DEVICE_DFP5_INDEX + 1 )
#define ATOM_MAX_SUPPORTED_DEVICE (ATOM_DEVICE_RESERVEDF_INDEX+1)
-#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX)
-#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX)
-#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX)
-#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX)
-#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX)
-#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX)
-#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
-#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX)
-#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX)
-#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX)
-#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
-#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX)
-
-#define ATOM_DEVICE_CRT_SUPPORT \
- (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
-#define ATOM_DEVICE_DFP_SUPPORT \
- (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | \
- ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | \
- ATOM_DEVICE_DFP5_SUPPORT)
-#define ATOM_DEVICE_TV_SUPPORT \
- (ATOM_DEVICE_TV1_SUPPORT | ATOM_DEVICE_TV2_SUPPORT)
-#define ATOM_DEVICE_LCD_SUPPORT \
- (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
+#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << ATOM_DEVICE_CRT1_INDEX )
+#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << ATOM_DEVICE_LCD1_INDEX )
+#define ATOM_DEVICE_TV1_SUPPORT (0x1L << ATOM_DEVICE_TV1_INDEX )
+#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << ATOM_DEVICE_DFP1_INDEX )
+#define ATOM_DEVICE_CRT2_SUPPORT (0x1L << ATOM_DEVICE_CRT2_INDEX )
+#define ATOM_DEVICE_LCD2_SUPPORT (0x1L << ATOM_DEVICE_LCD2_INDEX )
+#define ATOM_DEVICE_DFP6_SUPPORT (0x1L << ATOM_DEVICE_DFP6_INDEX )
+#define ATOM_DEVICE_DFP2_SUPPORT (0x1L << ATOM_DEVICE_DFP2_INDEX )
+#define ATOM_DEVICE_CV_SUPPORT (0x1L << ATOM_DEVICE_CV_INDEX )
+#define ATOM_DEVICE_DFP3_SUPPORT (0x1L << ATOM_DEVICE_DFP3_INDEX )
+#define ATOM_DEVICE_DFP4_SUPPORT (0x1L << ATOM_DEVICE_DFP4_INDEX )
+#define ATOM_DEVICE_DFP5_SUPPORT (0x1L << ATOM_DEVICE_DFP5_INDEX )
+
+#define ATOM_DEVICE_CRT_SUPPORT (ATOM_DEVICE_CRT1_SUPPORT | ATOM_DEVICE_CRT2_SUPPORT)
+#define ATOM_DEVICE_DFP_SUPPORT (ATOM_DEVICE_DFP1_SUPPORT | ATOM_DEVICE_DFP2_SUPPORT | ATOM_DEVICE_DFP3_SUPPORT | ATOM_DEVICE_DFP4_SUPPORT | ATOM_DEVICE_DFP5_SUPPORT | ATOM_DEVICE_DFP6_SUPPORT)
+#define ATOM_DEVICE_TV_SUPPORT (ATOM_DEVICE_TV1_SUPPORT)
+#define ATOM_DEVICE_LCD_SUPPORT (ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_LCD2_SUPPORT)
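Fields such as ulBootUpReqDisplayVector earlier in this table are bit vectors built from these ATOM_DEVICE_xxx_SUPPORT masks, so testing for a device is a simple AND. A minimal sketch with a hypothetical helper; only three of the masks are repeated so it compiles on its own.

#include <stdio.h>

/* Masks repeated from the defines above; the shift counts match the
 * ATOM_DEVICE_CRT1/LCD1/DFP1_INDEX values. */
#define ATOM_DEVICE_CRT1_SUPPORT (0x1L << 0)
#define ATOM_DEVICE_LCD1_SUPPORT (0x1L << 1)
#define ATOM_DEVICE_DFP1_SUPPORT (0x1L << 3)

/* Hypothetical helper: report which displays a boot-up vector such as
 * ulBootUpReqDisplayVector requests. */
static void report_bootup_displays(unsigned long vector)
{
        if (vector & ATOM_DEVICE_CRT1_SUPPORT) printf("CRT1 requested\n");
        if (vector & ATOM_DEVICE_LCD1_SUPPORT) printf("LCD1 requested\n");
        if (vector & ATOM_DEVICE_DFP1_SUPPORT) printf("DFP1 requested\n");
}

int main(void)
{
        report_bootup_displays(ATOM_DEVICE_LCD1_SUPPORT | ATOM_DEVICE_DFP1_SUPPORT);
        return 0;
}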
#define ATOM_DEVICE_CONNECTOR_TYPE_MASK 0x000000F0
#define ATOM_DEVICE_CONNECTOR_TYPE_SHIFT 0x00000004
@@ -1942,6 +2412,7 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_CONNECTOR_CASE_1 0x0000000E
#define ATOM_DEVICE_CONNECTOR_DISPLAYPORT 0x0000000F
+
#define ATOM_DEVICE_DAC_INFO_MASK 0x0000000F
#define ATOM_DEVICE_DAC_INFO_SHIFT 0x00000000
#define ATOM_DEVICE_DAC_INFO_NODAC 0x00000000
@@ -1958,139 +2429,150 @@ usMinDownStreamHTLinkWidth: same as above.
#define ATOM_DEVICE_I2C_ID_SHIFT 0x00000004
#define ATOM_DEVICE_I2C_ID_IS_FOR_NON_MM_USE 0x00000001
#define ATOM_DEVICE_I2C_ID_IS_FOR_MM_USE 0x00000002
-#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 /* For IGP RS600 */
-#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 /* For IGP RS690 */
+#define ATOM_DEVICE_I2C_ID_IS_FOR_SDVO_USE 0x00000003 //For IGP RS600
+#define ATOM_DEVICE_I2C_ID_IS_FOR_DAC_SCL 0x00000004 //For IGP RS690
#define ATOM_DEVICE_I2C_HARDWARE_CAP_MASK 0x00000080
#define ATOM_DEVICE_I2C_HARDWARE_CAP_SHIFT 0x00000007
#define ATOM_DEVICE_USES_SOFTWARE_ASSISTED_I2C 0x00000000
#define ATOM_DEVICE_USES_HARDWARE_ASSISTED_I2C 0x00000001
-/* usDeviceSupport: */
-/* Bits0 = 0 - no CRT1 support= 1- CRT1 is supported */
-/* Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported */
-/* Bit 2 = 0 - no TV1 support= 1- TV1 is supported */
-/* Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported */
-/* Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported */
-/* Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported */
-/* Bit 6 = 0 - no TV2 support= 1- TV2 is supported */
-/* Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported */
-/* Bit 8 = 0 - no CV support= 1- CV is supported */
-/* Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported */
-/* Byte1 (Supported Device Info) */
-/* Bit 0 = = 0 - no CV support= 1- CV is supported */
-/* */
-/* */
-
-/* ucI2C_ConfigID */
-/* [7:0] - I2C LINE Associate ID */
-/* = 0 - no I2C */
-/* [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection) */
-/* = 0, [6:0]=SW assisted I2C ID */
-/* [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use */
-/* = 2, HW engine for Multimedia use */
-/* = 3-7 Reserved for future I2C engines */
-/* [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C */
-
-typedef struct _ATOM_I2C_ID_CONFIG {
-#if ATOM_BIG_ENDIAN
- UCHAR bfHW_Capable:1;
- UCHAR bfHW_EngineID:3;
- UCHAR bfI2C_LineMux:4;
-#else
- UCHAR bfI2C_LineMux:4;
- UCHAR bfHW_EngineID:3;
- UCHAR bfHW_Capable:1;
-#endif
-} ATOM_I2C_ID_CONFIG;
-
-typedef union _ATOM_I2C_ID_CONFIG_ACCESS {
- ATOM_I2C_ID_CONFIG sbfAccess;
- UCHAR ucAccess;
-} ATOM_I2C_ID_CONFIG_ACCESS;
+// usDeviceSupport:
+// Bits0 = 0 - no CRT1 support= 1- CRT1 is supported
+// Bit 1 = 0 - no LCD1 support= 1- LCD1 is supported
+// Bit 2 = 0 - no TV1 support= 1- TV1 is supported
+// Bit 3 = 0 - no DFP1 support= 1- DFP1 is supported
+// Bit 4 = 0 - no CRT2 support= 1- CRT2 is supported
+// Bit 5 = 0 - no LCD2 support= 1- LCD2 is supported
+// Bit 6 = 0 - no DFP6 support= 1- DFP6 is supported
+// Bit 7 = 0 - no DFP2 support= 1- DFP2 is supported
+// Bit 8 = 0 - no CV support= 1- CV is supported
+// Bit 9 = 0 - no DFP3 support= 1- DFP3 is supported
+// Bit 10 = 0 - no DFP4 support= 1- DFP4 is supported
+// Bit 11 = 0 - no DFP5 support= 1- DFP5 is supported
+//
+//
/****************************************************************************/
-/* Structure used in GPIO_I2C_InfoTable */
+/* Structure used in MclkSS_InfoTable */
/****************************************************************************/
-typedef struct _ATOM_GPIO_I2C_ASSIGMENT {
- USHORT usClkMaskRegisterIndex;
- USHORT usClkEnRegisterIndex;
- USHORT usClkY_RegisterIndex;
- USHORT usClkA_RegisterIndex;
- USHORT usDataMaskRegisterIndex;
- USHORT usDataEnRegisterIndex;
- USHORT usDataY_RegisterIndex;
- USHORT usDataA_RegisterIndex;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
- UCHAR ucClkMaskShift;
- UCHAR ucClkEnShift;
- UCHAR ucClkY_Shift;
- UCHAR ucClkA_Shift;
- UCHAR ucDataMaskShift;
- UCHAR ucDataEnShift;
- UCHAR ucDataY_Shift;
- UCHAR ucDataA_Shift;
- UCHAR ucReserved1;
- UCHAR ucReserved2;
-} ATOM_GPIO_I2C_ASSIGMENT;
-
-typedef struct _ATOM_GPIO_I2C_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
-} ATOM_GPIO_I2C_INFO;
+// ucI2C_ConfigID
+// [7:0] - I2C LINE Associate ID
+// = 0 - no I2C
+// [7] - HW_Cap = 1, [6:0]=HW assisted I2C ID(HW line selection)
+// = 0, [6:0]=SW assisted I2C ID
+// [6-4] - HW_ENGINE_ID = 1, HW engine for NON multimedia use
+// = 2, HW engine for Multimedia use
+// = 3-7 Reserved for future I2C engines
+// [3-0] - I2C_LINE_MUX = A Mux number when it's HW assisted I2C or GPIO ID when it's SW I2C
+
+typedef struct _ATOM_I2C_ID_CONFIG
+{
+#if ATOM_BIG_ENDIAN
+ UCHAR bfHW_Capable:1;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfI2C_LineMux:4;
+#else
+ UCHAR bfI2C_LineMux:4;
+ UCHAR bfHW_EngineID:3;
+ UCHAR bfHW_Capable:1;
+#endif
+}ATOM_I2C_ID_CONFIG;
-/****************************************************************************/
-/* Common Structure used in other structures */
-/****************************************************************************/
+typedef union _ATOM_I2C_ID_CONFIG_ACCESS
+{
+ ATOM_I2C_ID_CONFIG sbfAccess;
+ UCHAR ucAccess;
+}ATOM_I2C_ID_CONFIG_ACCESS;
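A sketch of decoding a raw I2C ID byte (the ucAccess view of the union above) according to the ucI2C_ConfigID layout spelled out earlier: bit 7 is HW-capable, bits [6:4] are the HW engine ID, bits [3:0] are the line mux or GPIO ID. decode_i2c_id() and the sample value are hypothetical, not part of the header.

#include <stdint.h>
#include <stdio.h>

/* Illustration only: decode a raw ucAccess byte according to the
 * ucI2C_ConfigID layout described above, without relying on the
 * compiler's bitfield ordering. */
static void decode_i2c_id(uint8_t id)
{
        unsigned int hw_capable = (id >> 7) & 0x1;
        unsigned int engine_id  = (id >> 4) & 0x7;
        unsigned int line_mux   =  id       & 0xF;

        if (id == 0) {
                printf("no I2C line associated\n");
                return;
        }
        printf("hw_capable=%u engine=%u mux/gpio=%u\n",
               hw_capable, engine_id, line_mux);
}

int main(void)
{
        decode_i2c_id(0x91);   /* HW assisted, engine 1, mux 1 */
        return 0;
}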
+
+
+/****************************************************************************/
+// Structure used in GPIO_I2C_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_I2C_ASSIGMENT
+{
+ USHORT usClkMaskRegisterIndex;
+ USHORT usClkEnRegisterIndex;
+ USHORT usClkY_RegisterIndex;
+ USHORT usClkA_RegisterIndex;
+ USHORT usDataMaskRegisterIndex;
+ USHORT usDataEnRegisterIndex;
+ USHORT usDataY_RegisterIndex;
+ USHORT usDataA_RegisterIndex;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+ UCHAR ucClkMaskShift;
+ UCHAR ucClkEnShift;
+ UCHAR ucClkY_Shift;
+ UCHAR ucClkA_Shift;
+ UCHAR ucDataMaskShift;
+ UCHAR ucDataEnShift;
+ UCHAR ucDataY_Shift;
+ UCHAR ucDataA_Shift;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+}ATOM_GPIO_I2C_ASSIGMENT;
+
+typedef struct _ATOM_GPIO_I2C_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_I2C_ASSIGMENT asGPIO_Info[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_GPIO_I2C_INFO;
+
+/****************************************************************************/
+// Common Structure used in other structures
+/****************************************************************************/
#ifndef _H2INC
-
-/* Please don't add or expand this bitfield structure below, this one will retire soon.! */
-typedef struct _ATOM_MODE_MISC_INFO {
+
+//Please don't add or expand this bitfield structure below, this one will retire soon!
+typedef struct _ATOM_MODE_MISC_INFO
+{
#if ATOM_BIG_ENDIAN
- USHORT Reserved:6;
- USHORT RGB888:1;
- USHORT DoubleClock:1;
- USHORT Interlace:1;
- USHORT CompositeSync:1;
- USHORT V_ReplicationBy2:1;
- USHORT H_ReplicationBy2:1;
- USHORT VerticalCutOff:1;
- USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT HorizontalCutOff:1;
+ USHORT Reserved:6;
+ USHORT RGB888:1;
+ USHORT DoubleClock:1;
+ USHORT Interlace:1;
+ USHORT CompositeSync:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT VerticalCutOff:1;
+ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT HorizontalCutOff:1;
#else
- USHORT HorizontalCutOff:1;
- USHORT HSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT VSyncPolarity:1; /* 0=Active High, 1=Active Low */
- USHORT VerticalCutOff:1;
- USHORT H_ReplicationBy2:1;
- USHORT V_ReplicationBy2:1;
- USHORT CompositeSync:1;
- USHORT Interlace:1;
- USHORT DoubleClock:1;
- USHORT RGB888:1;
- USHORT Reserved:6;
+ USHORT HorizontalCutOff:1;
+ USHORT HSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT VSyncPolarity:1; //0=Active High, 1=Active Low
+ USHORT VerticalCutOff:1;
+ USHORT H_ReplicationBy2:1;
+ USHORT V_ReplicationBy2:1;
+ USHORT CompositeSync:1;
+ USHORT Interlace:1;
+ USHORT DoubleClock:1;
+ USHORT RGB888:1;
+ USHORT Reserved:6;
#endif
-} ATOM_MODE_MISC_INFO;
-
-typedef union _ATOM_MODE_MISC_INFO_ACCESS {
- ATOM_MODE_MISC_INFO sbfAccess;
- USHORT usAccess;
-} ATOM_MODE_MISC_INFO_ACCESS;
-
+}ATOM_MODE_MISC_INFO;
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+ ATOM_MODE_MISC_INFO sbfAccess;
+ USHORT usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
#else
-
-typedef union _ATOM_MODE_MISC_INFO_ACCESS {
- USHORT usAccess;
-} ATOM_MODE_MISC_INFO_ACCESS;
-
+
+typedef union _ATOM_MODE_MISC_INFO_ACCESS
+{
+ USHORT usAccess;
+}ATOM_MODE_MISC_INFO_ACCESS;
+
#endif
-/* usModeMiscInfo- */
+// usModeMiscInfo-
#define ATOM_H_CUTOFF 0x01
-#define ATOM_HSYNC_POLARITY 0x02 /* 0=Active High, 1=Active Low */
-#define ATOM_VSYNC_POLARITY 0x04 /* 0=Active High, 1=Active Low */
+#define ATOM_HSYNC_POLARITY 0x02 //0=Active High, 1=Active Low
+#define ATOM_VSYNC_POLARITY 0x04 //0=Active High, 1=Active Low
#define ATOM_V_CUTOFF 0x08
#define ATOM_H_REPLICATIONBY2 0x10
#define ATOM_V_REPLICATIONBY2 0x20
@@ -2099,10 +2581,10 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
#define ATOM_DOUBLE_CLOCK_MODE 0x100
#define ATOM_RGB888_MODE 0x200
-/* usRefreshRate- */
+//usRefreshRate-
#define ATOM_REFRESH_43 43
#define ATOM_REFRESH_47 47
-#define ATOM_REFRESH_56 56
+#define ATOM_REFRESH_56 56
#define ATOM_REFRESH_60 60
#define ATOM_REFRESH_65 65
#define ATOM_REFRESH_70 70
@@ -2110,192 +2592,233 @@ typedef union _ATOM_MODE_MISC_INFO_ACCESS {
#define ATOM_REFRESH_75 75
#define ATOM_REFRESH_85 85
-/* ATOM_MODE_TIMING data are exactly the same as VESA timing data. */
-/* Translation from EDID to ATOM_MODE_TIMING, use the following formula. */
-/* */
-/* VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK */
-/* = EDID_HA + EDID_HBL */
-/* VESA_HDISP = VESA_ACTIVE = EDID_HA */
-/* VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH */
-/* = EDID_HA + EDID_HSO */
-/* VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW */
-/* VESA_BORDER = EDID_BORDER */
-
-/****************************************************************************/
-/* Structure used in SetCRTC_UsingDTDTimingTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS {
- USHORT usH_Size;
- USHORT usH_Blanking_Time;
- USHORT usV_Size;
- USHORT usV_Blanking_Time;
- USHORT usH_SyncOffset;
- USHORT usH_SyncWidth;
- USHORT usV_SyncOffset;
- USHORT usV_SyncWidth;
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- UCHAR ucH_Border; /* From DFP EDID */
- UCHAR ucV_Border;
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucPadding[3];
-} SET_CRTC_USING_DTD_TIMING_PARAMETERS;
-
-/****************************************************************************/
-/* Structure used in SetCRTC_TimingTable */
-/****************************************************************************/
-typedef struct _SET_CRTC_TIMING_PARAMETERS {
- USHORT usH_Total; /* horizontal total */
- USHORT usH_Disp; /* horizontal display */
- USHORT usH_SyncStart; /* horozontal Sync start */
- USHORT usH_SyncWidth; /* horizontal Sync width */
- USHORT usV_Total; /* vertical total */
- USHORT usV_Disp; /* vertical display */
- USHORT usV_SyncStart; /* vertical Sync start */
- USHORT usV_SyncWidth; /* vertical Sync width */
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- UCHAR ucCRTC; /* ATOM_CRTC1 or ATOM_CRTC2 */
- UCHAR ucOverscanRight; /* right */
- UCHAR ucOverscanLeft; /* left */
- UCHAR ucOverscanBottom; /* bottom */
- UCHAR ucOverscanTop; /* top */
- UCHAR ucReserved;
-} SET_CRTC_TIMING_PARAMETERS;
+// ATOM_MODE_TIMING data are exactly the same as VESA timing data.
+// Translation from EDID to ATOM_MODE_TIMING, use the following formula.
+//
+// VESA_HTOTAL = VESA_ACTIVE + 2* VESA_BORDER + VESA_BLANK
+// = EDID_HA + EDID_HBL
+// VESA_HDISP = VESA_ACTIVE = EDID_HA
+// VESA_HSYNC_START = VESA_ACTIVE + VESA_BORDER + VESA_FRONT_PORCH
+// = EDID_HA + EDID_HSO
+// VESA_HSYNC_WIDTH = VESA_HSYNC_TIME = EDID_HSPW
+// VESA_BORDER = EDID_BORDER
+
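As a worked example of the translation above (editorial sketch, not part of this patch): the horizontal CRTC fields follow directly from the EDID detailed-timing values. The struct and function names below are hypothetical; only the formulas quoted in the comment are used.

/* Hypothetical EDID detailed-timing inputs (horizontal portion only). */
struct edid_h_timing {
    unsigned short ha;     /* EDID_HA  : horizontal active      */
    unsigned short hbl;    /* EDID_HBL : horizontal blanking    */
    unsigned short hso;    /* EDID_HSO : horizontal sync offset */
    unsigned short hspw;   /* EDID_HSPW: horizontal sync width  */
};

static void edid_to_vesa_h(const struct edid_h_timing *e,
                           unsigned short *h_total, unsigned short *h_disp,
                           unsigned short *h_sync_start, unsigned short *h_sync_width)
{
    *h_total      = e->ha + e->hbl;   /* VESA_HTOTAL      = EDID_HA + EDID_HBL */
    *h_disp       = e->ha;            /* VESA_HDISP       = EDID_HA            */
    *h_sync_start = e->ha + e->hso;   /* VESA_HSYNC_START = EDID_HA + EDID_HSO */
    *h_sync_width = e->hspw;          /* VESA_HSYNC_WIDTH = EDID_HSPW          */
}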
+/****************************************************************************/
+// Structure used in SetCRTC_UsingDTDTimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_USING_DTD_TIMING_PARAMETERS
+{
+ USHORT usH_Size;
+ USHORT usH_Blanking_Time;
+ USHORT usV_Size;
+ USHORT usV_Blanking_Time;
+ USHORT usH_SyncOffset;
+ USHORT usH_SyncWidth;
+ USHORT usV_SyncOffset;
+ USHORT usV_SyncWidth;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucH_Border; // From DFP EDID
+ UCHAR ucV_Border;
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucPadding[3];
+}SET_CRTC_USING_DTD_TIMING_PARAMETERS;
+
+/****************************************************************************/
+// Structure used in SetCRTC_TimingTable
+/****************************************************************************/
+typedef struct _SET_CRTC_TIMING_PARAMETERS
+{
+ USHORT usH_Total; // horizontal total
+ USHORT usH_Disp; // horizontal display
+  USHORT                      usH_SyncStart;       // horizontal Sync start
+ USHORT usH_SyncWidth; // horizontal Sync width
+ USHORT usV_Total; // vertical total
+ USHORT usV_Disp; // vertical display
+ USHORT usV_SyncStart; // vertical Sync start
+ USHORT usV_SyncWidth; // vertical Sync width
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucCRTC; // ATOM_CRTC1 or ATOM_CRTC2
+ UCHAR ucOverscanRight; // right
+ UCHAR ucOverscanLeft; // left
+ UCHAR ucOverscanBottom; // bottom
+ UCHAR ucOverscanTop; // top
+ UCHAR ucReserved;
+}SET_CRTC_TIMING_PARAMETERS;
#define SET_CRTC_TIMING_PARAMETERS_PS_ALLOCATION SET_CRTC_TIMING_PARAMETERS
-/****************************************************************************/
-/* Structure used in StandardVESA_TimingTable */
-/* AnalogTV_InfoTable */
-/* ComponentVideoInfoTable */
-/****************************************************************************/
-typedef struct _ATOM_MODE_TIMING {
- USHORT usCRTC_H_Total;
- USHORT usCRTC_H_Disp;
- USHORT usCRTC_H_SyncStart;
- USHORT usCRTC_H_SyncWidth;
- USHORT usCRTC_V_Total;
- USHORT usCRTC_V_Disp;
- USHORT usCRTC_V_SyncStart;
- USHORT usCRTC_V_SyncWidth;
- USHORT usPixelClock; /* in 10Khz unit */
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- USHORT usCRTC_OverscanRight;
- USHORT usCRTC_OverscanLeft;
- USHORT usCRTC_OverscanBottom;
- USHORT usCRTC_OverscanTop;
- USHORT usReserve;
- UCHAR ucInternalModeNumber;
- UCHAR ucRefreshRate;
-} ATOM_MODE_TIMING;
-
-typedef struct _ATOM_DTD_FORMAT {
- USHORT usPixClk;
- USHORT usHActive;
- USHORT usHBlanking_Time;
- USHORT usVActive;
- USHORT usVBlanking_Time;
- USHORT usHSyncOffset;
- USHORT usHSyncWidth;
- USHORT usVSyncOffset;
- USHORT usVSyncWidth;
- USHORT usImageHSize;
- USHORT usImageVSize;
- UCHAR ucHBorder;
- UCHAR ucVBorder;
- ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
- UCHAR ucInternalModeNumber;
- UCHAR ucRefreshRate;
-} ATOM_DTD_FORMAT;
-
-/****************************************************************************/
-/* Structure used in LVDS_InfoTable */
-/* * Need a document to describe this table */
-/****************************************************************************/
+/****************************************************************************/
+// Structure used in StandardVESA_TimingTable
+// AnalogTV_InfoTable
+// ComponentVideoInfoTable
+/****************************************************************************/
+typedef struct _ATOM_MODE_TIMING
+{
+ USHORT usCRTC_H_Total;
+ USHORT usCRTC_H_Disp;
+ USHORT usCRTC_H_SyncStart;
+ USHORT usCRTC_H_SyncWidth;
+ USHORT usCRTC_V_Total;
+ USHORT usCRTC_V_Disp;
+ USHORT usCRTC_V_SyncStart;
+ USHORT usCRTC_V_SyncWidth;
+  USHORT                      usPixelClock;        //in 10kHz units
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ USHORT usCRTC_OverscanRight;
+ USHORT usCRTC_OverscanLeft;
+ USHORT usCRTC_OverscanBottom;
+ USHORT usCRTC_OverscanTop;
+ USHORT usReserve;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+}ATOM_MODE_TIMING;
+
+typedef struct _ATOM_DTD_FORMAT
+{
+ USHORT usPixClk;
+ USHORT usHActive;
+ USHORT usHBlanking_Time;
+ USHORT usVActive;
+ USHORT usVBlanking_Time;
+ USHORT usHSyncOffset;
+ USHORT usHSyncWidth;
+ USHORT usVSyncOffset;
+ USHORT usVSyncWidth;
+ USHORT usImageHSize;
+ USHORT usImageVSize;
+ UCHAR ucHBorder;
+ UCHAR ucVBorder;
+ ATOM_MODE_MISC_INFO_ACCESS susModeMiscInfo;
+ UCHAR ucInternalModeNumber;
+ UCHAR ucRefreshRate;
+}ATOM_DTD_FORMAT;
+
+/****************************************************************************/
+// Structure used in LVDS_InfoTable
+// * Need a document to describe this table
+/****************************************************************************/
#define SUPPORTED_LCD_REFRESHRATE_30Hz 0x0004
#define SUPPORTED_LCD_REFRESHRATE_40Hz 0x0008
#define SUPPORTED_LCD_REFRESHRATE_50Hz 0x0010
#define SUPPORTED_LCD_REFRESHRATE_60Hz 0x0020
-/* Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12. */
-/* Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL */
-#define LCDPANEL_CAP_READ_EDID 0x1
-
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_LVDS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_DTD_FORMAT sLCDTiming;
- USHORT usModePatchTableOffset;
- USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
- USHORT usOffDelayInMs;
- UCHAR ucPowerSequenceDigOntoDEin10Ms;
- UCHAR ucPowerSequenceDEtoBLOnin10Ms;
- UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
- /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
- /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
- /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
- UCHAR ucPanelDefaultRefreshRate;
- UCHAR ucPanelIdentification;
- UCHAR ucSS_Id;
-} ATOM_LVDS_INFO;
-
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_LVDS_INFO_V12 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_DTD_FORMAT sLCDTiming;
- USHORT usExtInfoTableOffset;
- USHORT usSupportedRefreshRate; /* Refer to panel info table in ATOMBIOS extension Spec. */
- USHORT usOffDelayInMs;
- UCHAR ucPowerSequenceDigOntoDEin10Ms;
- UCHAR ucPowerSequenceDEtoBLOnin10Ms;
- UCHAR ucLVDS_Misc; /* Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level} */
- /* Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888} */
- /* Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled} */
- /* Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled} */
- UCHAR ucPanelDefaultRefreshRate;
- UCHAR ucPanelIdentification;
- UCHAR ucSS_Id;
- USHORT usLCDVenderID;
- USHORT usLCDProductID;
- UCHAR ucLCDPanel_SpecialHandlingCap;
- UCHAR ucPanelInfoSize; /* start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable */
- UCHAR ucReserved[2];
-} ATOM_LVDS_INFO_V12;
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_LVDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usModePatchTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+}ATOM_LVDS_INFO;
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_LVDS_INFO_V12
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT sLCDTiming;
+ USHORT usExtInfoTableOffset;
+ USHORT usSupportedRefreshRate; //Refer to panel info table in ATOMBIOS extension Spec.
+ USHORT usOffDelayInMs;
+ UCHAR ucPowerSequenceDigOntoDEin10Ms;
+ UCHAR ucPowerSequenceDEtoBLOnin10Ms;
+ UCHAR ucLVDS_Misc; // Bit0:{=0:single, =1:dual},Bit1 {=0:666RGB, =1:888RGB},Bit2:3:{Grey level}
+ // Bit4:{=0:LDI format for RGB888, =1 FPDI format for RGB888}
+ // Bit5:{=0:Spatial Dithering disabled;1 Spatial Dithering enabled}
+ // Bit6:{=0:Temporal Dithering disabled;1 Temporal Dithering enabled}
+ UCHAR ucPanelDefaultRefreshRate;
+ UCHAR ucPanelIdentification;
+ UCHAR ucSS_Id;
+ USHORT usLCDVenderID;
+ USHORT usLCDProductID;
+ UCHAR ucLCDPanel_SpecialHandlingCap;
+ UCHAR ucPanelInfoSize; // start from ATOM_DTD_FORMAT to end of panel info, include ExtInfoTable
+ UCHAR ucReserved[2];
+}ATOM_LVDS_INFO_V12;
+
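The ucLVDS_Misc bit comments above pack several panel properties into one byte; below is a short editorial sketch (not part of this patch, with hypothetical field and function names) of pulling them apart:

/* Hypothetical decode of ucLVDS_Misc per the bit comments above. */
struct lvds_misc {
    int dual_link;          /* Bit0: 0 = single, 1 = dual           */
    int rgb888;             /* Bit1: 0 = 666RGB, 1 = 888RGB         */
    int grey_level;         /* Bits 3:2                             */
    int fpdi;               /* Bit4: 0 = LDI, 1 = FPDI (for RGB888) */
    int spatial_dither;     /* Bit5                                 */
    int temporal_dither;    /* Bit6                                 */
};

static struct lvds_misc decode_lvds_misc(unsigned char misc)
{
    struct lvds_misc m;

    m.dual_link       = (misc >> 0) & 1;
    m.rgb888          = (misc >> 1) & 1;
    m.grey_level      = (misc >> 2) & 3;
    m.fpdi            = (misc >> 4) & 1;
    m.spatial_dither  = (misc >> 5) & 1;
    m.temporal_dither = (misc >> 6) & 1;
    return m;
}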
+//Definitions for ucLCDPanel_SpecialHandlingCap:
+
+//Once DAL sees this CAP is set, it will read EDID from LCD on its own instead of using sLCDTiming in ATOM_LVDS_INFO_V12.
+//Other entries in ATOM_LVDS_INFO_V12 are still valid/useful to DAL
+#define LCDPANEL_CAP_READ_EDID 0x1
+
+//If a design supports DRR (dynamic refresh rate) on internal panels (LVDS or EDP), this cap is set in ucLCDPanel_SpecialHandlingCap together
+//with multiple supported refresh rates @usSupportedRefreshRate. This cap should not be set when only a slow refresh rate is supported (static
+//refresh rate switched by SW). This is only valid from ATOM_LVDS_INFO_V12 onward.
+#define LCDPANEL_CAP_DRR_SUPPORTED 0x2
+
+//Use this cap bit for a quick reference whether an embedded panel (LCD1) is LVDS or eDP.
+#define LCDPANEL_CAP_eDP 0x4
+
+
+//Color Bit Depth definition in EDID V1.4 @BYTE 14h
+//Bit 6 5 4
+ // 0 0 0 - Color bit depth is undefined
+ // 0 0 1 - 6 Bits per Primary Color
+ // 0 1 0 - 8 Bits per Primary Color
+ // 0 1 1 - 10 Bits per Primary Color
+ // 1 0 0 - 12 Bits per Primary Color
+ // 1 0 1 - 14 Bits per Primary Color
+ // 1 1 0 - 16 Bits per Primary Color
+ // 1 1 1 - Reserved
+
+#define PANEL_COLOR_BIT_DEPTH_MASK 0x70
+
+// Bit7:{=0:Random Dithering disabled;1 Random Dithering enabled}
+#define PANEL_RANDOM_DITHER 0x80
+#define PANEL_RANDOM_DITHER_MASK 0x80
+
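The bit table above maps the masked EDID byte 14h value to a bits-per-primary-color figure; a small editorial sketch (not part of this patch, hypothetical helper name) of that mapping:

/* Hypothetical helper: bits per primary color from the EDID 1.4 color-depth
 * field (byte 14h), per the bit table above.
 * Returns 0 when the depth is undefined (000b) or the code is reserved (111b). */
static int panel_color_bit_depth(unsigned char edid_byte_14h)
{
    unsigned char code = (edid_byte_14h & PANEL_COLOR_BIT_DEPTH_MASK) >> 4;

    if (code == 0 || code == 7)
        return 0;

    return 4 + 2 * code;   /* 001b->6, 010b->8, ... 110b->16 bits per primary color */
}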
#define ATOM_LVDS_INFO_LAST ATOM_LVDS_INFO_V12
-typedef struct _ATOM_PATCH_RECORD_MODE {
- UCHAR ucRecordType;
- USHORT usHDisp;
- USHORT usVDisp;
-} ATOM_PATCH_RECORD_MODE;
+typedef struct _ATOM_PATCH_RECORD_MODE
+{
+ UCHAR ucRecordType;
+ USHORT usHDisp;
+ USHORT usVDisp;
+}ATOM_PATCH_RECORD_MODE;
-typedef struct _ATOM_LCD_RTS_RECORD {
- UCHAR ucRecordType;
- UCHAR ucRTSValue;
-} ATOM_LCD_RTS_RECORD;
+typedef struct _ATOM_LCD_RTS_RECORD
+{
+ UCHAR ucRecordType;
+ UCHAR ucRTSValue;
+}ATOM_LCD_RTS_RECORD;
-/* !! If the record below exits, it shoud always be the first record for easy use in command table!!! */
-typedef struct _ATOM_LCD_MODE_CONTROL_CAP {
- UCHAR ucRecordType;
- USHORT usLCDCap;
-} ATOM_LCD_MODE_CONTROL_CAP;
+//!! If the record below exists, it should always be the first record for easy use in command table!!!
+// The record below is only used when LVDS_Info is present. From ATOM_LVDS_INFO_V12, use ucLCDPanel_SpecialHandlingCap instead.
+typedef struct _ATOM_LCD_MODE_CONTROL_CAP
+{
+ UCHAR ucRecordType;
+ USHORT usLCDCap;
+}ATOM_LCD_MODE_CONTROL_CAP;
#define LCD_MODE_CAP_BL_OFF 1
#define LCD_MODE_CAP_CRTC_OFF 2
#define LCD_MODE_CAP_PANEL_OFF 4
-typedef struct _ATOM_FAKE_EDID_PATCH_RECORD {
- UCHAR ucRecordType;
- UCHAR ucFakeEDIDLength;
- UCHAR ucFakeEDIDString[1]; /* This actually has ucFakeEdidLength elements. */
+typedef struct _ATOM_FAKE_EDID_PATCH_RECORD
+{
+ UCHAR ucRecordType;
+ UCHAR ucFakeEDIDLength;
+ UCHAR ucFakeEDIDString[1]; // This actually has ucFakeEdidLength elements.
} ATOM_FAKE_EDID_PATCH_RECORD;
-typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
- UCHAR ucRecordType;
- USHORT usHSize;
- USHORT usVSize;
-} ATOM_PANEL_RESOLUTION_PATCH_RECORD;
+typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD
+{
+ UCHAR ucRecordType;
+ USHORT usHSize;
+ USHORT usVSize;
+}ATOM_PANEL_RESOLUTION_PATCH_RECORD;
#define LCD_MODE_PATCH_RECORD_MODE_TYPE 1
#define LCD_RTS_RECORD_TYPE 2
@@ -2306,21 +2829,25 @@ typedef struct _ATOM_PANEL_RESOLUTION_PATCH_RECORD {
/****************************Spread Spectrum Info Table Definitions **********************/
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
- USHORT usSpreadSpectrumPercentage;
- UCHAR ucSpreadSpectrumType; /* Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Others:TBD */
- UCHAR ucSS_Step;
- UCHAR ucSS_Delay;
- UCHAR ucSS_Id;
- UCHAR ucRecommendedRef_Div;
- UCHAR ucSS_Range; /* it was reserved for V11 */
-} ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
+//ucTableFormatRevision=1
+//ucTableContentRevision=2
+typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT
+{
+ USHORT usSpreadSpectrumPercentage;
+  UCHAR                       ucSpreadSpectrumType;    //Bit1=0 Down Spread,=1 Center Spread. Bit1=1 Ext. =0 Int. Bit2=1: PCIE REFCLK SS, =0: internal PPLL SS. Others: TBD
+ UCHAR ucSS_Step;
+ UCHAR ucSS_Delay;
+ UCHAR ucSS_Id;
+ UCHAR ucRecommendedRef_Div;
+ UCHAR ucSS_Range; //it was reserved for V11
+}ATOM_SPREAD_SPECTRUM_ASSIGNMENT;
#define ATOM_MAX_SS_ENTRY 16
-#define ATOM_DP_SS_ID1 0x0f1 /* SS modulation freq=30k */
-#define ATOM_DP_SS_ID2 0x0f2 /* SS modulation freq=33k */
+#define ATOM_DP_SS_ID1 0x0f1 // SS ID for internal DP stream at 2.7Ghz. if ATOM_DP_SS_ID2 does not exist in SS_InfoTable, it is used for internal DP stream at 1.62Ghz as well.
+#define ATOM_DP_SS_ID2 0x0f2 // SS ID for internal DP stream at 1.62Ghz, if it exists in SS_InfoTable.
+#define ATOM_LVLINK_2700MHz_SS_ID 0x0f3 // SS ID for LV link translator chip at 2.7Ghz
+#define ATOM_LVLINK_1620MHz_SS_ID 0x0f4 // SS ID for LV link translator chip at 1.62Ghz
+
#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
@@ -2329,29 +2856,30 @@ typedef struct _ATOM_SPREAD_SPECTRUM_ASSIGNMENT {
#define ATOM_INTERNAL_SS_MASK 0x00000000
#define ATOM_EXTERNAL_SS_MASK 0x00000002
#define EXEC_SS_STEP_SIZE_SHIFT 2
-#define EXEC_SS_DELAY_SHIFT 4
+#define EXEC_SS_DELAY_SHIFT 4
#define ACTIVEDATA_TO_BLON_DELAY_SHIFT 4
-typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
-} ATOM_SPREAD_SPECTRUM_INFO;
-
-/****************************************************************************/
-/* Structure used in AnalogTV_InfoTable (Top level) */
-/****************************************************************************/
-/* ucTVBootUpDefaultStd definiton: */
-
-/* ATOM_TV_NTSC 1 */
-/* ATOM_TV_NTSCJ 2 */
-/* ATOM_TV_PAL 3 */
-/* ATOM_TV_PALM 4 */
-/* ATOM_TV_PALCN 5 */
-/* ATOM_TV_PALN 6 */
-/* ATOM_TV_PAL60 7 */
-/* ATOM_TV_SECAM 8 */
-
-/* ucTVSuppportedStd definition: */
+typedef struct _ATOM_SPREAD_SPECTRUM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_SPREAD_SPECTRUM_ASSIGNMENT asSS_Info[ATOM_MAX_SS_ENTRY];
+}ATOM_SPREAD_SPECTRUM_INFO;
+
+/****************************************************************************/
+// Structure used in AnalogTV_InfoTable (Top level)
+/****************************************************************************/
+//ucTVBootUpDefaultStd definiton:
+
+//ATOM_TV_NTSC 1
+//ATOM_TV_NTSCJ 2
+//ATOM_TV_PAL 3
+//ATOM_TV_PALM 4
+//ATOM_TV_PALCN 5
+//ATOM_TV_PALN 6
+//ATOM_TV_PAL60 7
+//ATOM_TV_SECAM 8
+
+//ucTVSupportedStd definition:
#define NTSC_SUPPORT 0x1
#define NTSCJ_SUPPORT 0x2
@@ -2364,46 +2892,58 @@ typedef struct _ATOM_SPREAD_SPECTRUM_INFO {
#define MAX_SUPPORTED_TV_TIMING 2
-typedef struct _ATOM_ANALOG_TV_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucTV_SupportedStandard;
- UCHAR ucTV_BootUpDefaultStandard;
- UCHAR ucExt_TV_ASIC_ID;
- UCHAR ucExt_TV_ASIC_SlaveAddr;
- /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING]; */
- ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
-} ATOM_ANALOG_TV_INFO;
+typedef struct _ATOM_ANALOG_TV_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTV_SupportedStandard;
+ UCHAR ucTV_BootUpDefaultStandard;
+ UCHAR ucExt_TV_ASIC_ID;
+ UCHAR ucExt_TV_ASIC_SlaveAddr;
+ /*ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];*/
+ ATOM_MODE_TIMING aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO;
#define MAX_SUPPORTED_TV_TIMING_V1_2 3
-typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucTV_SupportedStandard;
- UCHAR ucTV_BootUpDefaultStandard;
- UCHAR ucExt_TV_ASIC_ID;
- UCHAR ucExt_TV_ASIC_SlaveAddr;
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
-} ATOM_ANALOG_TV_INFO_V1_2;
+typedef struct _ATOM_ANALOG_TV_INFO_V1_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTV_SupportedStandard;
+ UCHAR ucTV_BootUpDefaultStandard;
+ UCHAR ucExt_TV_ASIC_ID;
+ UCHAR ucExt_TV_ASIC_SlaveAddr;
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_TV_TIMING];
+}ATOM_ANALOG_TV_INFO_V1_2;
+
+typedef struct _ATOM_DPCD_INFO
+{
+ UCHAR ucRevisionNumber; //10h : Revision 1.0; 11h : Revision 1.1
+ UCHAR ucMaxLinkRate; //06h : 1.62Gbps per lane; 0Ah = 2.7Gbps per lane
+ UCHAR ucMaxLane; //Bits 4:0 = MAX_LANE_COUNT (1/2/4). Bit 7 = ENHANCED_FRAME_CAP
+ UCHAR ucMaxDownSpread; //Bit0 = 0: No Down spread; Bit0 = 1: 0.5% (Subject to change according to DP spec)
+}ATOM_DPCD_INFO;
+
+#define ATOM_DPCD_MAX_LANE_MASK 0x1F
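For illustration (editorial sketch, not part of this patch): the two rate values quoted above (06h -> 1.62Gbps, 0Ah -> 2.7Gbps) follow the DisplayPort convention of 0.27Gbps units per code, so a raw link-bandwidth estimate can be derived as below; the helper name is hypothetical.

/* Hypothetical helper: raw link bandwidth (in Mbps) from an ATOM_DPCD_INFO entry. */
static unsigned int dpcd_max_bandwidth_mbps(const ATOM_DPCD_INFO *dpcd)
{
    unsigned int lanes     = dpcd->ucMaxLane & ATOM_DPCD_MAX_LANE_MASK;  /* bits 4:0 */
    unsigned int rate_mbps = dpcd->ucMaxLinkRate * 270;                  /* per lane */

    return lanes * rate_mbps;   /* e.g. 4 lanes x 2.7Gbps = 10800 Mbps raw */
}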
/**************************************************************************/
-/* VRAM usage and their definitions */
+// VRAM usage and their definitions
-/* One chunk of VRAM used by Bios are for HWICON surfaces,EDID data. */
-/* Current Mode timing and Dail Timing and/or STD timing data EACH device. They can be broken down as below. */
-/* All the addresses below are the offsets from the frame buffer start.They all MUST be Dword aligned! */
-/* To driver: The physical address of this memory portion=mmFB_START(4K aligned)+ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR */
-/* To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR+ATOM_x_ADDR->MM_INDEX */
+// One chunk of VRAM used by the Bios is for HWICON surfaces and EDID data,
+// plus current Mode timing and Detail Timing and/or STD timing data for EACH device. They can be broken down as below.
+// All the addresses below are offsets from the frame buffer start. They all MUST be Dword aligned!
+// To driver: The physical address of this memory portion = mmFB_START(4K aligned) + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR
+// To Bios: ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR -> MM_INDEX
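A one-line editorial sketch (not part of this patch) of the driver-side formula above; fb_start_phys stands for the 4K-aligned mmFB_START value, atom_x_addr for one of the ATOM_x_ADDR offsets defined below, and ATOMBIOS_VRAM_USAGE_START_ADDR is assumed to be defined elsewhere in this header:

static unsigned long atom_vram_block_phys_addr(unsigned long fb_start_phys,
                                               unsigned long atom_x_addr)
{
    /* physical = mmFB_START (4K aligned) + ATOMBIOS_VRAM_USAGE_START_ADDR + ATOM_x_ADDR */
    return fb_start_phys + ATOMBIOS_VRAM_USAGE_START_ADDR + atom_x_addr;
}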
#ifndef VESA_MEMORY_IN_64K_BLOCK
-#define VESA_MEMORY_IN_64K_BLOCK 0x100 /* 256*64K=16Mb (Max. VESA memory is 16Mb!) */
+#define VESA_MEMORY_IN_64K_BLOCK        0x100       //256*64K=16MB (Max. VESA memory is 16MB!)
#endif
-#define ATOM_EDID_RAW_DATASIZE 256 /* In Bytes */
-#define ATOM_HWICON_SURFACE_SIZE 4096 /* In Bytes */
+#define ATOM_EDID_RAW_DATASIZE 256 //In Bytes
+#define ATOM_HWICON_SURFACE_SIZE 4096 //In Bytes
#define ATOM_HWICON_INFOTABLE_SIZE 32
#define MAX_DTD_MODE_IN_VRAM 6
-#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) /* 28= (SIZEOF ATOM_DTD_FORMAT) */
-#define ATOM_STD_MODE_SUPPORT_TBL_SIZE (32*8) /* 32 is a predefined number,8= (SIZEOF ATOM_STD_FORMAT) */
+#define ATOM_DTD_MODE_SUPPORT_TBL_SIZE (MAX_DTD_MODE_IN_VRAM*28) //28= (SIZEOF ATOM_DTD_FORMAT)
+#define ATOM_STD_MODE_SUPPORT_TBL_SIZE  (32*8)      //32 is a predefined number, 8 = (SIZEOF ATOM_STD_FORMAT)
#define DFP_ENCODER_TYPE_OFFSET 0x80
#define DP_ENCODER_LANE_NUM_OFFSET 0x84
#define DP_ENCODER_LINK_RATE_OFFSET 0x88
@@ -2417,7 +2957,7 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
#define ATOM_LCD1_EDID_ADDR (ATOM_CRT1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_LCD1_DTD_MODE_TBL_ADDR (ATOM_LCD1_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
-#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD1_STD_MODE_TBL_ADDR (ATOM_LCD1_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_TV1_DTD_MODE_TBL_ADDR (ATOM_LCD1_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
@@ -2431,13 +2971,13 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
#define ATOM_LCD2_EDID_ADDR (ATOM_CRT2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_LCD2_DTD_MODE_TBL_ADDR (ATOM_LCD2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
-#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_LCD2_STD_MODE_TBL_ADDR (ATOM_LCD2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_TV2_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_TV2_DTD_MODE_TBL_ADDR (ATOM_TV2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
-#define ATOM_TV2_STD_MODE_TBL_ADDR (ATOM_TV2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_EDID_ADDR (ATOM_LCD2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP6_DTD_MODE_TBL_ADDR (ATOM_DFP6_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
+#define ATOM_DFP6_STD_MODE_TBL_ADDR (ATOM_DFP6_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_DFP2_EDID_ADDR (ATOM_TV2_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DFP2_EDID_ADDR (ATOM_DFP6_STD_MODE_TBL_ADDR + ATOM_STD_MODE_SUPPORT_TBL_SIZE)
#define ATOM_DFP2_DTD_MODE_TBL_ADDR (ATOM_DFP2_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
#define ATOM_DFP2_STD_MODE_TBL_ADDR (ATOM_DFP2_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
@@ -2457,533 +2997,850 @@ typedef struct _ATOM_ANALOG_TV_INFO_V1_2 {
#define ATOM_DFP5_DTD_MODE_TBL_ADDR (ATOM_DFP5_EDID_ADDR + ATOM_EDID_RAW_DATASIZE)
#define ATOM_DFP5_STD_MODE_TBL_ADDR (ATOM_DFP5_DTD_MODE_TBL_ADDR + ATOM_DTD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
+#define ATOM_DP_TRAINING_TBL_ADDR (ATOM_DFP5_STD_MODE_TBL_ADDR+ATOM_STD_MODE_SUPPORT_TBL_SIZE)
-#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR + 256)
-#define ATOM_STACK_STORAGE_END (ATOM_STACK_STORAGE_START + 512)
+#define ATOM_STACK_STORAGE_START (ATOM_DP_TRAINING_TBL_ADDR+256)
+#define ATOM_STACK_STORAGE_END          (ATOM_STACK_STORAGE_START + 512)
-/* The size below is in Kb! */
+//The size below is in KB!
#define ATOM_VRAM_RESERVE_SIZE ((((ATOM_STACK_STORAGE_END - ATOM_HWICON1_SURFACE_ADDR)>>10)+4)&0xFFFC)
-
+
#define ATOM_VRAM_OPERATION_FLAGS_MASK 0xC0000000L
#define ATOM_VRAM_OPERATION_FLAGS_SHIFT 30
#define ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION 0x1
#define ATOM_VRAM_BLOCK_NEEDS_RESERVATION 0x0
-/***********************************************************************************/
-/* Structure used in VRAM_UsageByFirmwareTable */
-/* Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm */
-/* at running time. */
-/* note2: From RV770, the memory is more than 32bit addressable, so we will change */
-/* ucTableFormatRevision=1,ucTableContentRevision=4, the strcuture remains */
-/* exactly same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware */
-/* (in offset to start of memory address) is KB aligned instead of byte aligend. */
-/***********************************************************************************/
+/***********************************************************************************/
+// Structure used in VRAM_UsageByFirmwareTable
+// Note1: This table is filled by SetBiosReservationStartInFB in CoreCommSubs.asm
+// at run time.
+// Note2: From RV770, the memory is more than 32bit addressable, so we will change
+// ucTableFormatRevision=1, ucTableContentRevision=4; the structure remains
+// exactly the same as 1.1 and 1.2 (1.3 is never in use), but ulStartAddrUsedByFirmware
+// (an offset from the start of the memory address space) is KB aligned instead of byte aligned.
+/***********************************************************************************/
+// Note3:
+/* If we change usReserved to "usFBUsedbyDrvInKB", then to VBIOS this usFBUsedbyDrvInKB is a predefined, unchanged constant across VGA and non-VGA adapters.
+For CAIL, the size of the FB access area is known; the only thing missing is the offset of the FB access area, so we can have:
+
+If (ulStartAddrUsedByFirmware!=0)
+FBAccessAreaOffset= ulStartAddrUsedByFirmware - usFBUsedbyDrvInKB;
+Reserved area has been claimed by VBIOS including this FB access area; CAIL doesn't need to reserve any extra area for this purpose
+else //Non VGA case
+ if (FB_Size<=2Gb)
+ FBAccessAreaOffset= FB_Size - usFBUsedbyDrvInKB;
+ else
+ FBAccessAreaOffset= Aper_Size - usFBUsedbyDrvInKB
+
+CAIL needs to claim a reserved area defined by FBAccessAreaOffset and usFBUsedbyDrvInKB in the non-VGA case.*/
+
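Restating the Note3 pseudo-code as C (editorial sketch, not part of this patch): all quantities are normalized to KB here, which is an assumption made only for the sketch; fb_size_kb and aper_size_kb are hypothetical inputs the driver already knows.

/* Sketch of the CAIL logic in Note3 above, with every quantity in KB. */
static unsigned long fb_access_area_offset_kb(unsigned long start_addr_used_by_fw_kb,
                                              unsigned long fb_used_by_drv_kb,
                                              unsigned long fb_size_kb,
                                              unsigned long aper_size_kb)
{
    if (start_addr_used_by_fw_kb != 0)
        /* VGA case: VBIOS already reserved the area, including the FB access space */
        return start_addr_used_by_fw_kb - fb_used_by_drv_kb;

    /* Non-VGA case: CAIL must claim the area itself */
    if (fb_size_kb <= 2UL * 1024 * 1024)       /* FB_Size <= 2GB */
        return fb_size_kb - fb_used_by_drv_kb;

    return aper_size_kb - fb_used_by_drv_kb;
}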
#define ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO 1
-typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO {
- ULONG ulStartAddrUsedByFirmware;
- USHORT usFirmwareUseInKb;
- USHORT usReserved;
-} ATOM_FIRMWARE_VRAM_RESERVE_INFO;
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO
+{
+ ULONG ulStartAddrUsedByFirmware;
+ USHORT usFirmwareUseInKb;
+ USHORT usReserved;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO;
-typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_FIRMWARE_VRAM_RESERVE_INFO
- asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
-} ATOM_VRAM_USAGE_BY_FIRMWARE;
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_FIRMWARE_VRAM_RESERVE_INFO asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE;
-/****************************************************************************/
-/* Structure used in GPIO_Pin_LUTTable */
-/****************************************************************************/
-typedef struct _ATOM_GPIO_PIN_ASSIGNMENT {
- USHORT usGpioPin_AIndex;
- UCHAR ucGpioPinBitShift;
- UCHAR ucGPIO_ID;
-} ATOM_GPIO_PIN_ASSIGNMENT;
+// version changed to 1.5, to allow the driver to allocate the vram area for command table access.
+typedef struct _ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5
+{
+ ULONG ulStartAddrUsedByFirmware;
+ USHORT usFirmwareUseInKb;
+ USHORT usFBUsedByDrvInKb;
+}ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5;
-typedef struct _ATOM_GPIO_PIN_LUT {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
-} ATOM_GPIO_PIN_LUT;
+typedef struct _ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_FIRMWARE_VRAM_RESERVE_INFO_V1_5 asFirmwareVramReserveInfo[ATOM_MAX_FIRMWARE_VRAM_USAGE_INFO];
+}ATOM_VRAM_USAGE_BY_FIRMWARE_V1_5;
+
+/****************************************************************************/
+// Structure used in GPIO_Pin_LUTTable
+/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_ASSIGNMENT
+{
+ USHORT usGpioPin_AIndex;
+ UCHAR ucGpioPinBitShift;
+ UCHAR ucGPIO_ID;
+}ATOM_GPIO_PIN_ASSIGNMENT;
-/****************************************************************************/
-/* Structure used in ComponentVideoInfoTable */
-/****************************************************************************/
+typedef struct _ATOM_GPIO_PIN_LUT
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_GPIO_PIN_ASSIGNMENT asGPIO_Pin[1];
+}ATOM_GPIO_PIN_LUT;
+
+/****************************************************************************/
+// Structure used in ComponentVideoInfoTable
+/****************************************************************************/
#define GPIO_PIN_ACTIVE_HIGH 0x1
#define MAX_SUPPORTED_CV_STANDARDS 5
-/* definitions for ATOM_D_INFO.ucSettings */
-#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F /* [4:0] */
-#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 /* [6:5] = must be zeroed out */
-#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 /* [7] */
+// definitions for ATOM_D_INFO.ucSettings
+#define ATOM_GPIO_SETTINGS_BITSHIFT_MASK 0x1F // [4:0]
+#define ATOM_GPIO_SETTINGS_RESERVED_MASK 0x60 // [6:5] = must be zeroed out
+#define ATOM_GPIO_SETTINGS_ACTIVE_MASK 0x80 // [7]
-typedef struct _ATOM_GPIO_INFO {
- USHORT usAOffset;
- UCHAR ucSettings;
- UCHAR ucReserved;
-} ATOM_GPIO_INFO;
+typedef struct _ATOM_GPIO_INFO
+{
+ USHORT usAOffset;
+ UCHAR ucSettings;
+ UCHAR ucReserved;
+}ATOM_GPIO_INFO;
-/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector) */
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucMiscInfo (bit vector)
#define ATOM_CV_RESTRICT_FORMAT_SELECTION 0x2
-/* definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i */
-#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 /* [7]; */
-#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F /* [6:0] */
-
-/* definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode */
-/* Line 3 out put 5V. */
-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 /* represent gpio 3 state for 16:9 */
-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 /* represent gpio 4 state for 16:9 */
-#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
-
-/* Line 3 out put 2.2V */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 /* represent gpio 3 state for 4:3 Letter box */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 /* represent gpio 4 state for 4:3 Letter box */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
-
-/* Line 3 out put 0V */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 /* represent gpio 3 state for 4:3 */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 /* represent gpio 4 state for 4:3 */
-#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
-
-#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F /* bit [5:0] */
-
-#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 /* bit 7 */
-
-/* GPIO bit index in gpio setting per mode value, also represend the block no. in gpio blocks. */
-#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A 3 /* bit 3 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */
-#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B 4 /* bit 4 in uc480i/uc480p/uc720p/uc1080i, which represend the default gpio bit setting for the mode. */
-
-typedef struct _ATOM_COMPONENT_VIDEO_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMask_PinRegisterIndex;
- USHORT usEN_PinRegisterIndex;
- USHORT usY_PinRegisterIndex;
- USHORT usA_PinRegisterIndex;
- UCHAR ucBitShift;
- UCHAR ucPinActiveState; /* ucPinActiveState: Bit0=1 active high, =0 active low */
- ATOM_DTD_FORMAT sReserved; /* must be zeroed out */
- UCHAR ucMiscInfo;
- UCHAR uc480i;
- UCHAR uc480p;
- UCHAR uc720p;
- UCHAR uc1080i;
- UCHAR ucLetterBoxMode;
- UCHAR ucReserved[3];
- UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */
- ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
-} ATOM_COMPONENT_VIDEO_INFO;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucMiscInfo;
- UCHAR uc480i;
- UCHAR uc480p;
- UCHAR uc720p;
- UCHAR uc1080i;
- UCHAR ucReserved;
- UCHAR ucLetterBoxMode;
- UCHAR ucNumOfWbGpioBlocks; /* For Component video D-Connector support. If zere, NTSC type connector */
- ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
- ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
-} ATOM_COMPONENT_VIDEO_INFO_V21;
+// definitions for ATOM_COMPONENT_VIDEO_INFO.uc480i/uc480p/uc720p/uc1080i
+#define ATOM_GPIO_DEFAULT_MODE_EN 0x80 //[7];
+#define ATOM_GPIO_SETTING_PERMODE_MASK 0x7F //[6:0]
+
+// definitions for ATOM_COMPONENT_VIDEO_INFO.ucLetterBoxMode
+//Line 3 out put 5V.
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_A 0x01 //represent gpio 3 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_B 0x02 //represent gpio 4 state for 16:9
+#define ATOM_CV_LINE3_ASPECTRATIO_16_9_GPIO_SHIFT 0x0
+
+//Line 3 out put 2.2V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_A 0x04 //represent gpio 3 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_B 0x08 //represent gpio 4 state for 4:3 Letter box
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_LETBOX_GPIO_SHIFT 0x2
+
+//Line 3 out put 0V
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_A 0x10 //represent gpio 3 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_B 0x20 //represent gpio 4 state for 4:3
+#define ATOM_CV_LINE3_ASPECTRATIO_4_3_GPIO_SHIFT 0x4
+
+#define ATOM_CV_LINE3_ASPECTRATIO_MASK 0x3F // bit [5:0]
+
+#define ATOM_CV_LINE3_ASPECTRATIO_EXIST 0x80 //bit 7
+
+//GPIO bit index in gpio setting per mode value, also represents the block no. in gpio blocks.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_A   3   //bit 3 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+#define ATOM_GPIO_INDEX_LINE3_ASPECRATIO_GPIO_B   4   //bit 4 in uc480i/uc480p/uc720p/uc1080i, which represents the default gpio bit setting for the mode.
+
+
+typedef struct _ATOM_COMPONENT_VIDEO_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMask_PinRegisterIndex;
+ USHORT usEN_PinRegisterIndex;
+ USHORT usY_PinRegisterIndex;
+ USHORT usA_PinRegisterIndex;
+ UCHAR ucBitShift;
+ UCHAR ucPinActiveState; //ucPinActiveState: Bit0=1 active high, =0 active low
+ ATOM_DTD_FORMAT sReserved; // must be zeroed out
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucLetterBoxMode;
+ UCHAR ucReserved[3];
+  UCHAR                       ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_COMPONENT_VIDEO_INFO_V21
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucMiscInfo;
+ UCHAR uc480i;
+ UCHAR uc480p;
+ UCHAR uc720p;
+ UCHAR uc1080i;
+ UCHAR ucReserved;
+ UCHAR ucLetterBoxMode;
+  UCHAR                       ucNumOfWbGpioBlocks; //For Component video D-Connector support. If zero, NTSC type connector
+ ATOM_GPIO_INFO aWbGpioStateBlock[MAX_SUPPORTED_CV_STANDARDS];
+ ATOM_DTD_FORMAT aModeTimings[MAX_SUPPORTED_CV_STANDARDS];
+}ATOM_COMPONENT_VIDEO_INFO_V21;
#define ATOM_COMPONENT_VIDEO_INFO_LAST ATOM_COMPONENT_VIDEO_INFO_V21
-/****************************************************************************/
-/* Structure used in object_InfoTable */
-/****************************************************************************/
-typedef struct _ATOM_OBJECT_HEADER {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- USHORT usConnectorObjectTableOffset;
- USHORT usRouterObjectTableOffset;
- USHORT usEncoderObjectTableOffset;
- USHORT usProtectionObjectTableOffset; /* only available when Protection block is independent. */
- USHORT usDisplayPathTableOffset;
-} ATOM_OBJECT_HEADER;
-
-typedef struct _ATOM_DISPLAY_OBJECT_PATH {
- USHORT usDeviceTag; /* supported device */
- USHORT usSize; /* the size of ATOM_DISPLAY_OBJECT_PATH */
- USHORT usConnObjectId; /* Connector Object ID */
- USHORT usGPUObjectId; /* GPU ID */
- USHORT usGraphicObjIds[1]; /* 1st Encoder Obj source from GPU to last Graphic Obj destinate to connector. */
-} ATOM_DISPLAY_OBJECT_PATH;
-
-typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE {
- UCHAR ucNumOfDispPath;
- UCHAR ucVersion;
- UCHAR ucPadding[2];
- ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
-} ATOM_DISPLAY_OBJECT_PATH_TABLE;
-
-typedef struct _ATOM_OBJECT /* each object has this structure */
-{
- USHORT usObjectID;
- USHORT usSrcDstTableOffset;
- USHORT usRecordOffset; /* this pointing to a bunch of records defined below */
- USHORT usReserved;
-} ATOM_OBJECT;
-
-typedef struct _ATOM_OBJECT_TABLE /* Above 4 object table offset pointing to a bunch of objects all have this structure */
-{
- UCHAR ucNumberOfObjects;
- UCHAR ucPadding[3];
- ATOM_OBJECT asObjects[1];
-} ATOM_OBJECT_TABLE;
-
-typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT /* usSrcDstTableOffset pointing to this structure */
-{
- UCHAR ucNumberOfSrc;
- USHORT usSrcObjectID[1];
- UCHAR ucNumberOfDst;
- USHORT usDstObjectID[1];
-} ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
-
-/* Related definitions, all records are differnt but they have a commond header */
-typedef struct _ATOM_COMMON_RECORD_HEADER {
- UCHAR ucRecordType; /* An emun to indicate the record type */
- UCHAR ucRecordSize; /* The size of the whole record in byte */
-} ATOM_COMMON_RECORD_HEADER;
-
-#define ATOM_I2C_RECORD_TYPE 1
+/****************************************************************************/
+// Structure used in object_InfoTable
+/****************************************************************************/
+typedef struct _ATOM_OBJECT_HEADER
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ USHORT usConnectorObjectTableOffset;
+ USHORT usRouterObjectTableOffset;
+ USHORT usEncoderObjectTableOffset;
+ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
+ USHORT usDisplayPathTableOffset;
+}ATOM_OBJECT_HEADER;
+
+typedef struct _ATOM_OBJECT_HEADER_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ USHORT usConnectorObjectTableOffset;
+ USHORT usRouterObjectTableOffset;
+ USHORT usEncoderObjectTableOffset;
+ USHORT usProtectionObjectTableOffset; //only available when Protection block is independent.
+ USHORT usDisplayPathTableOffset;
+ USHORT usMiscObjectTableOffset;
+}ATOM_OBJECT_HEADER_V3;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH
+{
+ USHORT usDeviceTag; //supported device
+ USHORT usSize; //the size of ATOM_DISPLAY_OBJECT_PATH
+ USHORT usConnObjectId; //Connector Object ID
+ USHORT usGPUObjectId; //GPU ID
+  USHORT                    usGraphicObjIds[1];    //1st Encoder Obj source from GPU to last Graphic Obj destined to the connector.
+}ATOM_DISPLAY_OBJECT_PATH;
+
+typedef struct _ATOM_DISPLAY_OBJECT_PATH_TABLE
+{
+ UCHAR ucNumOfDispPath;
+ UCHAR ucVersion;
+ UCHAR ucPadding[2];
+ ATOM_DISPLAY_OBJECT_PATH asDispPath[1];
+}ATOM_DISPLAY_OBJECT_PATH_TABLE;
+
+
+typedef struct _ATOM_OBJECT //each object has this structure
+{
+ USHORT usObjectID;
+ USHORT usSrcDstTableOffset;
+ USHORT usRecordOffset; //this pointing to a bunch of records defined below
+ USHORT usReserved;
+}ATOM_OBJECT;
+
+typedef struct _ATOM_OBJECT_TABLE //Above 4 object table offset pointing to a bunch of objects all have this structure
+{
+ UCHAR ucNumberOfObjects;
+ UCHAR ucPadding[3];
+ ATOM_OBJECT asObjects[1];
+}ATOM_OBJECT_TABLE;
+
+typedef struct _ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT //usSrcDstTableOffset pointing to this structure
+{
+ UCHAR ucNumberOfSrc;
+ USHORT usSrcObjectID[1];
+ UCHAR ucNumberOfDst;
+ USHORT usDstObjectID[1];
+}ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT;
+
+
+//Two definitions below are for OPM on MXM module designs
+
+#define EXT_HPDPIN_LUTINDEX_0 0
+#define EXT_HPDPIN_LUTINDEX_1 1
+#define EXT_HPDPIN_LUTINDEX_2 2
+#define EXT_HPDPIN_LUTINDEX_3 3
+#define EXT_HPDPIN_LUTINDEX_4 4
+#define EXT_HPDPIN_LUTINDEX_5 5
+#define EXT_HPDPIN_LUTINDEX_6 6
+#define EXT_HPDPIN_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES (EXT_HPDPIN_LUTINDEX_7+1)
+
+#define EXT_AUXDDC_LUTINDEX_0 0
+#define EXT_AUXDDC_LUTINDEX_1 1
+#define EXT_AUXDDC_LUTINDEX_2 2
+#define EXT_AUXDDC_LUTINDEX_3 3
+#define EXT_AUXDDC_LUTINDEX_4 4
+#define EXT_AUXDDC_LUTINDEX_5 5
+#define EXT_AUXDDC_LUTINDEX_6 6
+#define EXT_AUXDDC_LUTINDEX_7 7
+#define MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES (EXT_AUXDDC_LUTINDEX_7+1)
+
+typedef struct _EXT_DISPLAY_PATH
+{
+ USHORT usDeviceTag; //A bit vector to show what devices are supported
+ USHORT usDeviceACPIEnum; //16bit device ACPI id.
+ USHORT usDeviceConnector; //A physical connector for displays to plug in, using object connector definitions
+ UCHAR ucExtAUXDDCLutIndex; //An index into external AUX/DDC channel LUT
+ UCHAR ucExtHPDPINLutIndex; //An index into external HPD pin LUT
+ USHORT usExtEncoderObjId; //external encoder object id
+ USHORT usReserved[3];
+}EXT_DISPLAY_PATH;
+
+#define NUMBER_OF_UCHAR_FOR_GUID 16
+#define MAX_NUMBER_OF_EXT_DISPLAY_PATH 7
+
+typedef struct _ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucGuid [NUMBER_OF_UCHAR_FOR_GUID]; // a GUID is a 16 byte long string
+ EXT_DISPLAY_PATH sPath[MAX_NUMBER_OF_EXT_DISPLAY_PATH]; // total of fixed 7 entries.
+ UCHAR ucChecksum; // a simple Checksum of the sum of whole structure equal to 0x0.
+ UCHAR Reserved [7]; // for potential expansion
+}ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO;
+
+//Related definitions, all records are different but they have a common header
+typedef struct _ATOM_COMMON_RECORD_HEADER
+{
+  UCHAR               ucRecordType;      //An enum to indicate the record type
+ UCHAR ucRecordSize; //The size of the whole record in byte
+}ATOM_COMMON_RECORD_HEADER;
+
+
+#define ATOM_I2C_RECORD_TYPE 1
#define ATOM_HPD_INT_RECORD_TYPE 2
#define ATOM_OUTPUT_PROTECTION_RECORD_TYPE 3
#define ATOM_CONNECTOR_DEVICE_TAG_RECORD_TYPE 4
-#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
-#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD_TYPE 5 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
+#define ATOM_ENCODER_FPGA_CONTROL_RECORD_TYPE 6 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
#define ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD_TYPE 7
-#define ATOM_JTAG_RECORD_TYPE 8 /* Obsolete, switch to use GPIO_CNTL_RECORD_TYPE */
+#define ATOM_JTAG_RECORD_TYPE 8 //Obsolete, switch to use GPIO_CNTL_RECORD_TYPE
#define ATOM_OBJECT_GPIO_CNTL_RECORD_TYPE 9
#define ATOM_ENCODER_DVO_CF_RECORD_TYPE 10
#define ATOM_CONNECTOR_CF_RECORD_TYPE 11
#define ATOM_CONNECTOR_HARDCODE_DTD_RECORD_TYPE 12
#define ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE 13
-#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
-#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
-
-/* Must be updated when new record type is added,equal to that record definition! */
-#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_CF_RECORD_TYPE
-
-typedef struct _ATOM_I2C_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ATOM_I2C_ID_CONFIG sucI2cId;
- UCHAR ucI2CAddr; /* The slave address, it's 0 when the record is attached to connector for DDC */
-} ATOM_I2C_RECORD;
-
-typedef struct _ATOM_HPD_INT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucHPDIntGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucPlugged_PinState;
-} ATOM_HPD_INT_RECORD;
-
-typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucProtectionFlag;
- UCHAR ucReserved;
-} ATOM_OUTPUT_PROTECTION_RECORD;
-
-typedef struct _ATOM_CONNECTOR_DEVICE_TAG {
- ULONG ulACPIDeviceEnum; /* Reserved for now */
- USHORT usDeviceID; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT" */
- USHORT usPadding;
-} ATOM_CONNECTOR_DEVICE_TAG;
-
-typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucNumberOfDevice;
- UCHAR ucReserved;
- ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; /* This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation */
-} ATOM_CONNECTOR_DEVICE_TAG_RECORD;
-
-typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucConfigGPIOID;
- UCHAR ucConfigGPIOState; /* Set to 1 when it's active high to enable external flow in */
- UCHAR ucFlowinGPIPID;
- UCHAR ucExtInGPIPID;
-} ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
-
-typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucCTL1GPIO_ID;
- UCHAR ucCTL1GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTL2GPIO_ID;
- UCHAR ucCTL2GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTL3GPIO_ID;
- UCHAR ucCTL3GPIOState; /* Set to 1 when it's active high */
- UCHAR ucCTLFPGA_IN_ID;
- UCHAR ucPadding[3];
-} ATOM_ENCODER_FPGA_CONTROL_RECORD;
-
-typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucGPIOID; /* Corresponding block in GPIO_PIN_INFO table gives the pin info */
- UCHAR ucTVActiveState; /* Indicating when the pin==0 or 1 when TV is connected */
-} ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
-
-typedef struct _ATOM_JTAG_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucTMSGPIO_ID;
- UCHAR ucTMSGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTCKGPIO_ID;
- UCHAR ucTCKGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTDOGPIO_ID;
- UCHAR ucTDOGPIOState; /* Set to 1 when it's active high */
- UCHAR ucTDIGPIO_ID;
- UCHAR ucTDIGPIOState; /* Set to 1 when it's active high */
- UCHAR ucPadding[2];
-} ATOM_JTAG_RECORD;
-
-/* The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually */
-typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR {
- UCHAR ucGPIOID; /* GPIO_ID, find the corresponding ID in GPIO_LUT table */
- UCHAR ucGPIO_PinState; /* Pin state showing how to set-up the pin */
-} ATOM_GPIO_PIN_CONTROL_PAIR;
-
-typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucFlags; /* Future expnadibility */
- UCHAR ucNumberOfPins; /* Number of GPIO pins used to control the object */
- ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; /* the real gpio pin pair determined by number of pins ucNumberOfPins */
-} ATOM_OBJECT_GPIO_CNTL_RECORD;
-
-/* Definitions for GPIO pin state */
+#define ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE 14
+#define ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE 15
+#define ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE 16 //This is for the case when connectors are not known to object table
+#define ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE 17 //This is for the case when connectors are not known to object table
+#define ATOM_OBJECT_LINK_RECORD_TYPE                   18 //Once this record is present under one object, it indicates the object is linked to another object described by the record
+#define ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE 19
+
+
+//Must be updated when new record type is added,equal to that record definition!
+#define ATOM_MAX_OBJECT_RECORD_NUMBER ATOM_CONNECTOR_REMOTE_CAP_RECORD_TYPE
+
+typedef struct _ATOM_I2C_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG sucI2cId;
+ UCHAR ucI2CAddr; //The slave address, it's 0 when the record is attached to connector for DDC
+}ATOM_I2C_RECORD;
+
+typedef struct _ATOM_HPD_INT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDIntGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucPlugged_PinState;
+}ATOM_HPD_INT_RECORD;
+
+
+typedef struct _ATOM_OUTPUT_PROTECTION_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucProtectionFlag;
+ UCHAR ucReserved;
+}ATOM_OUTPUT_PROTECTION_RECORD;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG
+{
+ ULONG ulACPIDeviceEnum; //Reserved for now
+ USHORT usDeviceID; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT"
+ USHORT usPadding;
+}ATOM_CONNECTOR_DEVICE_TAG;
+
+typedef struct _ATOM_CONNECTOR_DEVICE_TAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucNumberOfDevice;
+ UCHAR ucReserved;
+ ATOM_CONNECTOR_DEVICE_TAG asDeviceTag[1]; //This Id is same as "ATOM_DEVICE_XXX_SUPPORT", 1 is only for allocation
+}ATOM_CONNECTOR_DEVICE_TAG_RECORD;
+
+
+typedef struct _ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucConfigGPIOID;
+ UCHAR ucConfigGPIOState; //Set to 1 when it's active high to enable external flow in
+ UCHAR ucFlowinGPIPID;
+ UCHAR ucExtInGPIPID;
+}ATOM_CONNECTOR_DVI_EXT_INPUT_RECORD;
+
+typedef struct _ATOM_ENCODER_FPGA_CONTROL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucCTL1GPIO_ID;
+ UCHAR ucCTL1GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL2GPIO_ID;
+ UCHAR ucCTL2GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTL3GPIO_ID;
+ UCHAR ucCTL3GPIOState; //Set to 1 when it's active high
+ UCHAR ucCTLFPGA_IN_ID;
+ UCHAR ucPadding[3];
+}ATOM_ENCODER_FPGA_CONTROL_RECORD;
+
+typedef struct _ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucGPIOID; //Corresponding block in GPIO_PIN_INFO table gives the pin info
+ UCHAR ucTVActiveState; //Indicating when the pin==0 or 1 when TV is connected
+}ATOM_CONNECTOR_CVTV_SHARE_DIN_RECORD;
+
+typedef struct _ATOM_JTAG_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucTMSGPIO_ID;
+ UCHAR ucTMSGPIOState; //Set to 1 when it's active high
+ UCHAR ucTCKGPIO_ID;
+ UCHAR ucTCKGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDOGPIO_ID;
+ UCHAR ucTDOGPIOState; //Set to 1 when it's active high
+ UCHAR ucTDIGPIO_ID;
+ UCHAR ucTDIGPIOState; //Set to 1 when it's active high
+ UCHAR ucPadding[2];
+}ATOM_JTAG_RECORD;
+
+
+//The following generic object gpio pin control record type will replace JTAG_RECORD/FPGA_CONTROL_RECORD/DVI_EXT_INPUT_RECORD above gradually
+typedef struct _ATOM_GPIO_PIN_CONTROL_PAIR
+{
+ UCHAR ucGPIOID; // GPIO_ID, find the corresponding ID in GPIO_LUT table
+ UCHAR ucGPIO_PinState; // Pin state showing how to set-up the pin
+}ATOM_GPIO_PIN_CONTROL_PAIR;
+
+typedef struct _ATOM_OBJECT_GPIO_CNTL_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+  UCHAR                       ucFlags;                  // Future expandability
+ UCHAR ucNumberOfPins; // Number of GPIO pins used to control the object
+ ATOM_GPIO_PIN_CONTROL_PAIR asGpio[1]; // the real gpio pin pair determined by number of pins ucNumberOfPins
+}ATOM_OBJECT_GPIO_CNTL_RECORD;
+
+//Definitions for GPIO pin state
#define GPIO_PIN_TYPE_INPUT 0x00
#define GPIO_PIN_TYPE_OUTPUT 0x10
#define GPIO_PIN_TYPE_HW_CONTROL 0x20
-/* For GPIO_PIN_TYPE_OUTPUT the following is defined */
+//For GPIO_PIN_TYPE_OUTPUT the following is defined
#define GPIO_PIN_OUTPUT_STATE_MASK 0x01
#define GPIO_PIN_OUTPUT_STATE_SHIFT 0
#define GPIO_PIN_STATE_ACTIVE_LOW 0x0
#define GPIO_PIN_STATE_ACTIVE_HIGH 0x1
-typedef struct _ATOM_ENCODER_DVO_CF_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ULONG ulStrengthControl; /* DVOA strength control for CF */
- UCHAR ucPadding[2];
-} ATOM_ENCODER_DVO_CF_RECORD;
+// Indexes to GPIO array in GLSync record
+#define ATOM_GPIO_INDEX_GLSYNC_REFCLK 0
+#define ATOM_GPIO_INDEX_GLSYNC_HSYNC 1
+#define ATOM_GPIO_INDEX_GLSYNC_VSYNC 2
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_REQ 3
+#define ATOM_GPIO_INDEX_GLSYNC_SWAP_GNT 4
+#define ATOM_GPIO_INDEX_GLSYNC_INTERRUPT 5
+#define ATOM_GPIO_INDEX_GLSYNC_V_RESET 6
+#define ATOM_GPIO_INDEX_GLSYNC_MAX 7
+
+typedef struct _ATOM_ENCODER_DVO_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ULONG ulStrengthControl; // DVOA strength control for CF
+ UCHAR ucPadding[2];
+}ATOM_ENCODER_DVO_CF_RECORD;
-/* value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle */
+// value for ATOM_CONNECTOR_CF_RECORD.ucConnectedDvoBundle
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_UPPER12BITBUNDLEA 1
#define ATOM_CONNECTOR_CF_RECORD_CONNECTED_LOWER12BITBUNDLEB 2
-typedef struct _ATOM_CONNECTOR_CF_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- USHORT usMaxPixClk;
- UCHAR ucFlowCntlGpioId;
- UCHAR ucSwapCntlGpioId;
- UCHAR ucConnectedDvoBundle;
- UCHAR ucPadding;
-} ATOM_CONNECTOR_CF_RECORD;
-
-typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- ATOM_DTD_FORMAT asTiming;
-} ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
-
-typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader; /* ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE */
- UCHAR ucSubConnectorType; /* CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A */
- UCHAR ucReserved;
-} ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
-
-typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucMuxType; /* decide the number of ucMuxState, =0, no pin state, =1: single state with complement, >1: multiple state */
- UCHAR ucMuxControlPin;
- UCHAR ucMuxState[2]; /* for alligment purpose */
-} ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
-
-typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD {
- ATOM_COMMON_RECORD_HEADER sheader;
- UCHAR ucMuxType;
- UCHAR ucMuxControlPin;
- UCHAR ucMuxState[2]; /* for alligment purpose */
-} ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
-
-/* define ucMuxType */
+typedef struct _ATOM_CONNECTOR_CF_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usMaxPixClk;
+ UCHAR ucFlowCntlGpioId;
+ UCHAR ucSwapCntlGpioId;
+ UCHAR ucConnectedDvoBundle;
+ UCHAR ucPadding;
+}ATOM_CONNECTOR_CF_RECORD;
+
+typedef struct _ATOM_CONNECTOR_HARDCODE_DTD_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_DTD_FORMAT asTiming;
+}ATOM_CONNECTOR_HARDCODE_DTD_RECORD;
+
+typedef struct _ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader; //ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD_TYPE
+ UCHAR ucSubConnectorType; //CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D|X_ID_DUAL_LINK_DVI_D|HDMI_TYPE_A
+ UCHAR ucReserved;
+}ATOM_CONNECTOR_PCIE_SUBCONNECTOR_RECORD;
+
+
+typedef struct _ATOM_ROUTER_DDC_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType; //decides the number of ucMuxState entries: =0: no pin state, =1: single state with complement, >1: multiple states
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alignment purposes
+}ATOM_ROUTER_DDC_PATH_SELECT_RECORD;
+
+typedef struct _ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucMuxType;
+ UCHAR ucMuxControlPin;
+ UCHAR ucMuxState[2]; //for alignment purposes
+}ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD;
+
+// define ucMuxType
#define ATOM_ROUTER_MUX_PIN_STATE_MASK 0x0f
#define ATOM_ROUTER_MUX_PIN_SINGLE_STATE_COMPLEMENT 0x01
-/****************************************************************************/
-/* ASIC voltage data table */
-/****************************************************************************/
-typedef struct _ATOM_VOLTAGE_INFO_HEADER {
- USHORT usVDDCBaseLevel; /* In number of 50mv unit */
- USHORT usReserved; /* For possible extension table offset */
- UCHAR ucNumOfVoltageEntries;
- UCHAR ucBytesPerVoltageEntry;
- UCHAR ucVoltageStep; /* Indicating in how many mv increament is one step, 0.5mv unit */
- UCHAR ucDefaultVoltageEntry;
- UCHAR ucVoltageControlI2cLine;
- UCHAR ucVoltageControlAddress;
- UCHAR ucVoltageControlOffset;
-} ATOM_VOLTAGE_INFO_HEADER;
-
-typedef struct _ATOM_VOLTAGE_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VOLTAGE_INFO_HEADER viHeader;
- UCHAR ucVoltageEntries[64]; /* 64 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries*ucBytesPerVoltageEntry */
-} ATOM_VOLTAGE_INFO;
-
-typedef struct _ATOM_VOLTAGE_FORMULA {
- USHORT usVoltageBaseLevel; /* In number of 1mv unit */
- USHORT usVoltageStep; /* Indicating in how many mv increament is one step, 1mv unit */
- UCHAR ucNumOfVoltageEntries; /* Number of Voltage Entry, which indicate max Voltage */
- UCHAR ucFlag; /* bit0=0 :step is 1mv =1 0.5mv */
- UCHAR ucBaseVID; /* if there is no lookup table, VID= BaseVID + ( Vol - BaseLevle ) /VoltageStep */
- UCHAR ucReserved;
- UCHAR ucVIDAdjustEntries[32]; /* 32 is for allocation, the actual number of entry is present at ucNumOfVoltageEntries */
-} ATOM_VOLTAGE_FORMULA;
-
-typedef struct _ATOM_VOLTAGE_CONTROL {
- UCHAR ucVoltageControlId; /* Indicate it is controlled by I2C or GPIO or HW state machine */
- UCHAR ucVoltageControlI2cLine;
- UCHAR ucVoltageControlAddress;
- UCHAR ucVoltageControlOffset;
- USHORT usGpioPin_AIndex; /* GPIO_PAD register index */
- UCHAR ucGpioPinBitShift[9]; /* at most 8 pin support 255 VIDs, termintate with 0xff */
- UCHAR ucReserved;
-} ATOM_VOLTAGE_CONTROL;
-
-/* Define ucVoltageControlId */
+typedef struct _ATOM_CONNECTOR_HPDPIN_LUT_RECORD //record for ATOM_CONNECTOR_HPDPIN_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ UCHAR ucHPDPINMap[MAX_NUMBER_OF_EXT_HPDPIN_LUT_ENTRIES]; //A fixed-size array which maps external pins to the internal GPIO_PIN_INFO table
+}ATOM_CONNECTOR_HPDPIN_LUT_RECORD;
+
+typedef struct _ATOM_CONNECTOR_AUXDDC_LUT_RECORD //record for ATOM_CONNECTOR_AUXDDC_LUT_RECORD_TYPE
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ ATOM_I2C_ID_CONFIG ucAUXDDCMap[MAX_NUMBER_OF_EXT_AUXDDC_LUT_ENTRIES]; //A fixed-size array which maps external pins to an internal DDC ID
+}ATOM_CONNECTOR_AUXDDC_LUT_RECORD;
+
+typedef struct _ATOM_OBJECT_LINK_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usObjectID; //could be a connector, encoder or other object in object.h
+}ATOM_OBJECT_LINK_RECORD;
+
+typedef struct _ATOM_CONNECTOR_REMOTE_CAP_RECORD
+{
+ ATOM_COMMON_RECORD_HEADER sheader;
+ USHORT usReserved;
+}ATOM_CONNECTOR_REMOTE_CAP_RECORD;
+
+/****************************************************************************/
+// ASIC voltage data table
+/****************************************************************************/
+typedef struct _ATOM_VOLTAGE_INFO_HEADER
+{
+ USHORT usVDDCBaseLevel; //In number of 50mv unit
+ USHORT usReserved; //For possible extension table offset
+ UCHAR ucNumOfVoltageEntries;
+ UCHAR ucBytesPerVoltageEntry;
+ UCHAR ucVoltageStep; //Indicates the mV increment of one step, in 0.5mv units
+ UCHAR ucDefaultVoltageEntry;
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+}ATOM_VOLTAGE_INFO_HEADER;
+
+typedef struct _ATOM_VOLTAGE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_INFO_HEADER viHeader;
+ UCHAR ucVoltageEntries[64]; //64 is for allocation, the actual number of entries is given by ucNumOfVoltageEntries*ucBytesPerVoltageEntry
+}ATOM_VOLTAGE_INFO;
+
+
+typedef struct _ATOM_VOLTAGE_FORMULA
+{
+ USHORT usVoltageBaseLevel; // In number of 1mv unit
+ USHORT usVoltageStep; // Indicates the mV increment of one step, 1mv unit
+ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage
+ UCHAR ucFlag; // bit0=0 :step is 1mv =1 0.5mv
+ UCHAR ucBaseVID; // if there is no lookup table, VID = BaseVID + ( Vol - BaseLevel ) / VoltageStep
+ UCHAR ucReserved;
+ UCHAR ucVIDAdjustEntries[32]; // 32 is for allocation, the actual number of entries is given by ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA;
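
The ucBaseVID comment above defines a linear voltage-to-VID mapping. A minimal sketch of that conversion, assuming the simple case with no ucVIDAdjustEntries lookup and ucFlag bit0 = 0 (1mv steps); the helper name is illustrative and not part of this header:

static UCHAR atom_voltage_to_vid(const ATOM_VOLTAGE_FORMULA *f, USHORT vol_mv)
{
    /* VID = BaseVID + (Vol - BaseLevel) / VoltageStep, all values in 1mv units */
    return (UCHAR)(f->ucBaseVID +
                   (vol_mv - f->usVoltageBaseLevel) / f->usVoltageStep);
}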
+
+typedef struct _VOLTAGE_LUT_ENTRY
+{
+ USHORT usVoltageCode; // The Voltage ID, either GPIO or I2C code
+ USHORT usVoltageValue; // The corresponding Voltage Value, in mV
+}VOLTAGE_LUT_ENTRY;
+
+typedef struct _ATOM_VOLTAGE_FORMULA_V2
+{
+ UCHAR ucNumOfVoltageEntries; // Number of Voltage Entry, which indicate max Voltage
+ UCHAR ucReserved[3];
+ VOLTAGE_LUT_ENTRY asVIDAdjustEntries[32];// 32 is for allocation, the actual number of entries is in ucNumOfVoltageEntries
+}ATOM_VOLTAGE_FORMULA_V2;
+
+typedef struct _ATOM_VOLTAGE_CONTROL
+{
+ UCHAR ucVoltageControlId; //Indicate it is controlled by I2C or GPIO or HW state machine
+ UCHAR ucVoltageControlI2cLine;
+ UCHAR ucVoltageControlAddress;
+ UCHAR ucVoltageControlOffset;
+ USHORT usGpioPin_AIndex; //GPIO_PAD register index
+ UCHAR ucGpioPinBitShift[9]; //at most 8 pins support 255 VIDs, terminate with 0xff
+ UCHAR ucReserved;
+}ATOM_VOLTAGE_CONTROL;
+
+// Define ucVoltageControlId
#define VOLTAGE_CONTROLLED_BY_HW 0x00
#define VOLTAGE_CONTROLLED_BY_I2C_MASK 0x7F
#define VOLTAGE_CONTROLLED_BY_GPIO 0x80
-#define VOLTAGE_CONTROL_ID_LM64 0x01 /* I2C control, used for R5xx Core Voltage */
-#define VOLTAGE_CONTROL_ID_DAC 0x02 /* I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI */
-#define VOLTAGE_CONTROL_ID_VT116xM 0x03 /* I2C control, used for R6xx Core Voltage */
-#define VOLTAGE_CONTROL_ID_DS4402 0x04
-
-typedef struct _ATOM_VOLTAGE_OBJECT {
- UCHAR ucVoltageType; /* Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI */
- UCHAR ucSize; /* Size of Object */
- ATOM_VOLTAGE_CONTROL asControl; /* describ how to control */
- ATOM_VOLTAGE_FORMULA asFormula; /* Indicate How to convert real Voltage to VID */
-} ATOM_VOLTAGE_OBJECT;
-
-typedef struct _ATOM_VOLTAGE_OBJECT_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VOLTAGE_OBJECT asVoltageObj[3]; /* Info for Voltage control */
-} ATOM_VOLTAGE_OBJECT_INFO;
-
-typedef struct _ATOM_LEAKID_VOLTAGE {
- UCHAR ucLeakageId;
- UCHAR ucReserved;
- USHORT usVoltage;
-} ATOM_LEAKID_VOLTAGE;
-
-typedef struct _ATOM_ASIC_PROFILE_VOLTAGE {
- UCHAR ucProfileId;
- UCHAR ucReserved;
- USHORT usSize;
- USHORT usEfuseSpareStartAddr;
- USHORT usFuseIndex[8]; /* from LSB to MSB, Max 8bit,end of 0xffff if less than 8 efuse id, */
- ATOM_LEAKID_VOLTAGE asLeakVol[2]; /* Leakid and relatd voltage */
-} ATOM_ASIC_PROFILE_VOLTAGE;
-
-/* ucProfileId */
-#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
+#define VOLTAGE_CONTROL_ID_LM64 0x01 //I2C control, used for R5xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DAC 0x02 //I2C control, used for R5xx/R6xx MVDDC,MVDDQ or VDDCI
+#define VOLTAGE_CONTROL_ID_VT116xM 0x03 //I2C control, used for R6xx Core Voltage
+#define VOLTAGE_CONTROL_ID_DS4402 0x04
+
+typedef struct _ATOM_VOLTAGE_OBJECT
+{
+ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of Object
+ ATOM_VOLTAGE_CONTROL asControl; //describes how to control
+ ATOM_VOLTAGE_FORMULA asFormula; //Indicate How to convert real Voltage to VID
+}ATOM_VOLTAGE_OBJECT;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_V2
+{
+ UCHAR ucVoltageType; //Indicate Voltage Source: VDDC, MVDDC, MVDDQ or MVDDCI
+ UCHAR ucSize; //Size of Object
+ ATOM_VOLTAGE_CONTROL asControl; //describes how to control
+ ATOM_VOLTAGE_FORMULA_V2 asFormula; //Indicate How to convert real Voltage to VID
+}ATOM_VOLTAGE_OBJECT_V2;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO;
+
+typedef struct _ATOM_VOLTAGE_OBJECT_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VOLTAGE_OBJECT_V2 asVoltageObj[3]; //Info for Voltage control
+}ATOM_VOLTAGE_OBJECT_INFO_V2;
+
+typedef struct _ATOM_LEAKID_VOLTAGE
+{
+ UCHAR ucLeakageId;
+ UCHAR ucReserved;
+ USHORT usVoltage;
+}ATOM_LEAKID_VOLTAGE;
+
+typedef struct _ATOM_ASIC_PROFILE_VOLTAGE
+{
+ UCHAR ucProfileId;
+ UCHAR ucReserved;
+ USHORT usSize;
+ USHORT usEfuseSpareStartAddr;
+ USHORT usFuseIndex[8]; //from LSB to MSB, max 8 bits; ends with 0xffff if there are fewer than 8 efuse IDs
+ ATOM_LEAKID_VOLTAGE asLeakVol[2]; //Leakage ID and related voltage
+}ATOM_ASIC_PROFILE_VOLTAGE;
+
+//ucProfileId
+#define ATOM_ASIC_PROFILE_ID_EFUSE_VOLTAGE 1
#define ATOM_ASIC_PROFILE_ID_EFUSE_PERFORMANCE_VOLTAGE 1
#define ATOM_ASIC_PROFILE_ID_EFUSE_THERMAL_VOLTAGE 2
-typedef struct _ATOM_ASIC_PROFILING_INFO {
- ATOM_COMMON_TABLE_HEADER asHeader;
- ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
-} ATOM_ASIC_PROFILING_INFO;
-
-typedef struct _ATOM_POWER_SOURCE_OBJECT {
- UCHAR ucPwrSrcId; /* Power source */
- UCHAR ucPwrSensorType; /* GPIO, I2C or none */
- UCHAR ucPwrSensId; /* if GPIO detect, it is GPIO id, if I2C detect, it is I2C id */
- UCHAR ucPwrSensSlaveAddr; /* Slave address if I2C detect */
- UCHAR ucPwrSensRegIndex; /* I2C register Index if I2C detect */
- UCHAR ucPwrSensRegBitMask; /* detect which bit is used if I2C detect */
- UCHAR ucPwrSensActiveState; /* high active or low active */
- UCHAR ucReserve[3]; /* reserve */
- USHORT usSensPwr; /* in unit of watt */
-} ATOM_POWER_SOURCE_OBJECT;
-
-typedef struct _ATOM_POWER_SOURCE_INFO {
- ATOM_COMMON_TABLE_HEADER asHeader;
- UCHAR asPwrbehave[16];
- ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
-} ATOM_POWER_SOURCE_INFO;
-
-/* Define ucPwrSrcId */
+typedef struct _ATOM_ASIC_PROFILING_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ ATOM_ASIC_PROFILE_VOLTAGE asVoltage;
+}ATOM_ASIC_PROFILING_INFO;
+
+typedef struct _ATOM_POWER_SOURCE_OBJECT
+{
+ UCHAR ucPwrSrcId; // Power source
+ UCHAR ucPwrSensorType; // GPIO, I2C or none
+ UCHAR ucPwrSensId; // if GPIO detect, it is GPIO id, if I2C detect, it is I2C id
+ UCHAR ucPwrSensSlaveAddr; // Slave address if I2C detect
+ UCHAR ucPwrSensRegIndex; // I2C register Index if I2C detect
+ UCHAR ucPwrSensRegBitMask; // detect which bit is used if I2C detect
+ UCHAR ucPwrSensActiveState; // high active or low active
+ UCHAR ucReserve[3]; // reserve
+ USHORT usSensPwr; // in unit of watt
+}ATOM_POWER_SOURCE_OBJECT;
+
+typedef struct _ATOM_POWER_SOURCE_INFO
+{
+ ATOM_COMMON_TABLE_HEADER asHeader;
+ UCHAR asPwrbehave[16];
+ ATOM_POWER_SOURCE_OBJECT asPwrObj[1];
+}ATOM_POWER_SOURCE_INFO;
+
+
+//Define ucPwrSrcId
#define POWERSOURCE_PCIE_ID1 0x00
#define POWERSOURCE_6PIN_CONNECTOR_ID1 0x01
#define POWERSOURCE_8PIN_CONNECTOR_ID1 0x02
#define POWERSOURCE_6PIN_CONNECTOR_ID2 0x04
#define POWERSOURCE_8PIN_CONNECTOR_ID2 0x08
-/* define ucPwrSensorId */
+//define ucPwrSensorId
#define POWER_SENSOR_ALWAYS 0x00
#define POWER_SENSOR_GPIO 0x01
#define POWER_SENSOR_I2C 0x02
+typedef struct _ATOM_INTEGRATED_SYSTEM_INFO_V6
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ULONG ulBootUpEngineClock;
+ ULONG ulDentistVCOFreq;
+ ULONG ulBootUpUMAClock;
+ ULONG ulReserved1[8];
+ ULONG ulBootUpReqDisplayVector;
+ ULONG ulOtherDisplayMisc;
+ ULONG ulGPUCapInfo;
+ ULONG ulReserved2[3];
+ ULONG ulSystemConfig;
+ ULONG ulCPUCapInfo;
+ USHORT usMaxNBVoltage;
+ USHORT usMinNBVoltage;
+ USHORT usBootUpNBVoltage;
+ USHORT usExtDispConnInfoOffset;
+ UCHAR ucHtcTmpLmt;
+ UCHAR ucTjOffset;
+ UCHAR ucMemoryType;
+ UCHAR ucUMAChannelNumber;
+ ULONG ulCSR_M3_ARB_CNTL_DEFAULT[10];
+ ULONG ulCSR_M3_ARB_CNTL_UVD[10];
+ ULONG ulCSR_M3_ARB_CNTL_FS3D[10];
+ ULONG ulReserved3[42];
+ ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO sExtDispConnInfo;
+}ATOM_INTEGRATED_SYSTEM_INFO_V6;
+
+/**********************************************************************************************************************
+// ATOM_INTEGRATED_SYSTEM_INFO_V6 Description
+//ulBootUpEngineClock: VBIOS bootup Engine clock frequency, in 10kHz unit.
+//ulDentistVCOFreq: Dentist VCO clock in 10kHz unit.
+//ulBootUpUMAClock: System memory boot up clock frequency in 10Khz unit.
+//ulReserved1[8] Reserved by now, must be 0x0.
+//ulBootUpReqDisplayVector VBIOS boot up display IDs
+// ATOM_DEVICE_CRT1_SUPPORT 0x0001
+// ATOM_DEVICE_CRT2_SUPPORT 0x0010
+// ATOM_DEVICE_DFP1_SUPPORT 0x0008
+// ATOM_DEVICE_DFP6_SUPPORT 0x0040
+// ATOM_DEVICE_DFP2_SUPPORT 0x0080
+// ATOM_DEVICE_DFP3_SUPPORT 0x0200
+// ATOM_DEVICE_DFP4_SUPPORT 0x0400
+// ATOM_DEVICE_DFP5_SUPPORT 0x0800
+// ATOM_DEVICE_LCD1_SUPPORT 0x0002
+//ulOtherDisplayMisc Other display related flags, not defined yet.
+//ulGPUCapInfo TBD
+//ulReserved2[3] must be 0x0 for the reserved.
+//ulSystemConfig TBD
+//ulCPUCapInfo TBD
+//usMaxNBVoltage High NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
+//usMinNBVoltage Low NB voltage in unit of mv, calculated using current VDDNB (D24F2xDC) and VDDNB offset fuse.
+//usBootUpNBVoltage Boot up NB voltage in unit of mv.
+//ucHtcTmpLmt Bit [22:16] of D24F3x64 Thermal Control (HTC) Register.
+//ucTjOffset Bit [28:22] of D24F3xE4 Thermtrip Status Register,may not be needed.
+//ucMemoryType [3:0]=1:DDR1;=2:DDR2;=3:DDR3.[7:4] is reserved.
+//ucUMAChannelNumber System memory channel numbers.
+//usExtDispConnInfoOffset ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO offset relative to beginning of this table.
+//ulCSR_M3_ARB_CNTL_DEFAULT[10] Arrays with values for CSR M3 arbiter for default
+//ulCSR_M3_ARB_CNTL_UVD[10] Arrays with values for CSR M3 arbiter for UVD playback.
+//ulCSR_M3_ARB_CNTL_FS3D[10] Arrays with values for CSR M3 arbiter for Full Screen 3D applications.
+**********************************************************************************************************************/
+
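Two practical details from the description above: the clock fields are stored in 10kHz units, and usExtDispConnInfoOffset is relative to the beginning of the table. A small illustrative sketch (hypothetical helper, not part of this header):

static void atom_parse_sysinfo_v6(const ATOM_INTEGRATED_SYSTEM_INFO_V6 *info,
                                  ULONG *eng_clk_khz,
                                  const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO **ext)
{
    *eng_clk_khz = info->ulBootUpEngineClock * 10;  /* 10kHz units -> kHz */
    *ext = (const ATOM_EXTERNAL_DISPLAY_CONNECTION_INFO *)
           ((const UCHAR *)info + info->usExtDispConnInfoOffset);
}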
/**************************************************************************/
-/* This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design */
-/* Memory SS Info Table */
-/* Define Memory Clock SS chip ID */
+// This portion is only used when ext thermal chip or engine/memory clock SS chip is populated on a design
+//Memory SS Info Table
+//Define Memory Clock SS chip ID
#define ICS91719 1
#define ICS91720 2
-/* Define one structure to inform SW a "block of data" writing to external SS chip via I2C protocol */
-typedef struct _ATOM_I2C_DATA_RECORD {
- UCHAR ucNunberOfBytes; /* Indicates how many bytes SW needs to write to the external ASIC for one block, besides to "Start" and "Stop" */
- UCHAR ucI2CData[1]; /* I2C data in bytes, should be less than 16 bytes usually */
-} ATOM_I2C_DATA_RECORD;
-
-/* Define one structure to inform SW how many blocks of data writing to external SS chip via I2C protocol, in addition to other information */
-typedef struct _ATOM_I2C_DEVICE_SETUP_INFO {
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* I2C line and HW/SW assisted cap. */
- UCHAR ucSSChipID; /* SS chip being used */
- UCHAR ucSSChipSlaveAddr; /* Slave Address to set up this SS chip */
- UCHAR ucNumOfI2CDataRecords; /* number of data block */
- ATOM_I2C_DATA_RECORD asI2CData[1];
-} ATOM_I2C_DEVICE_SETUP_INFO;
-
-/* ========================================================================================== */
-typedef struct _ATOM_ASIC_MVDD_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
-} ATOM_ASIC_MVDD_INFO;
-
-/* ========================================================================================== */
+//Define one structure to inform SW of a "block of data" to write to the external SS chip via the I2C protocol
+typedef struct _ATOM_I2C_DATA_RECORD
+{
+ UCHAR ucNunberOfBytes; //Indicates how many bytes SW needs to write to the external ASIC for one block, besides "Start" and "Stop"
+ UCHAR ucI2CData[1]; //I2C data in bytes, should be less than 16 bytes usually
+}ATOM_I2C_DATA_RECORD;
+
+
+//Define one structure to inform SW how many blocks of data to write to the external SS chip via the I2C protocol, in addition to other information
+typedef struct _ATOM_I2C_DEVICE_SETUP_INFO
+{
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //I2C line and HW/SW assisted cap.
+ UCHAR ucSSChipID; //SS chip being used
+ UCHAR ucSSChipSlaveAddr; //Slave Address to set up this SS chip
+ UCHAR ucNumOfI2CDataRecords; //number of data block
+ ATOM_I2C_DATA_RECORD asI2CData[1];
+}ATOM_I2C_DEVICE_SETUP_INFO;
+
+//==========================================================================================
+typedef struct _ATOM_ASIC_MVDD_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_DEVICE_SETUP_INFO asI2CSetup[1];
+}ATOM_ASIC_MVDD_INFO;
+
+//==========================================================================================
#define ATOM_MCLK_SS_INFO ATOM_ASIC_MVDD_INFO
-/* ========================================================================================== */
+//==========================================================================================
/**************************************************************************/
-typedef struct _ATOM_ASIC_SS_ASSIGNMENT {
- ULONG ulTargetClockRange; /* Clock Out frequence (VCO ), in unit of 10Khz */
- USHORT usSpreadSpectrumPercentage; /* in unit of 0.01% */
- USHORT usSpreadRateInKhz; /* in unit of kHz, modulation freq */
- UCHAR ucClockIndication; /* Indicate which clock source needs SS */
- UCHAR ucSpreadSpectrumMode; /* Bit1=0 Down Spread,=1 Center Spread. */
- UCHAR ucReserved[2];
-} ATOM_ASIC_SS_ASSIGNMENT;
-
-/* Define ucSpreadSpectrumType */
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT
+{
+ ULONG ulTargetClockRange; //Clock Out frequency (VCO ), in unit of 10Khz
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
+ USHORT usSpreadRateInKhz; //in unit of kHz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit1=0 Down Spread,=1 Center Spread.
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT;
+
+//Define ucClockIndication, SW uses the IDs below to search if the SS is required/enabled on a clock branch/signal type.
+//SS is not required or enabled if a match is not found.
#define ASIC_INTERNAL_MEMORY_SS 1
#define ASIC_INTERNAL_ENGINE_SS 2
-#define ASIC_INTERNAL_UVD_SS 3
+#define ASIC_INTERNAL_UVD_SS 3
+#define ASIC_INTERNAL_SS_ON_TMDS 4
+#define ASIC_INTERNAL_SS_ON_HDMI 5
+#define ASIC_INTERNAL_SS_ON_LVDS 6
+#define ASIC_INTERNAL_SS_ON_DP 7
+#define ASIC_INTERNAL_SS_ON_DCPLL 8
+
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V2
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequency (VCO ), in unit of 10Khz
+ //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
+ USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V2;
+
+//ucSpreadSpectrumMode
+//#define ATOM_SS_DOWN_SPREAD_MODE_MASK 0x00000000
+//#define ATOM_SS_DOWN_SPREAD_MODE 0x00000000
+//#define ATOM_SS_CENTRE_SPREAD_MODE_MASK 0x00000001
+//#define ATOM_SS_CENTRE_SPREAD_MODE 0x00000001
+//#define ATOM_INTERNAL_SS_MASK 0x00000000
+//#define ATOM_EXTERNAL_SS_MASK 0x00000002
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
+}ATOM_ASIC_INTERNAL_SS_INFO;
-typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_ASIC_SS_ASSIGNMENT asSpreadSpectrum[4];
-} ATOM_ASIC_INTERNAL_SS_INFO;
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V2 asSpreadSpectrum[1]; //this is pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V2;
-/* ==============================Scratch Pad Definition Portion=============================== */
+typedef struct _ATOM_ASIC_SS_ASSIGNMENT_V3
+{
+ ULONG ulTargetClockRange; //For mem/engine/uvd, Clock Out frequency (VCO ), in unit of 10Khz
+ //For TMDS/HDMI/LVDS, it is pixel clock , for DP, it is link clock ( 27000 or 16200 )
+ USHORT usSpreadSpectrumPercentage; //in unit of 0.01%
+ USHORT usSpreadRateIn10Hz; //in unit of 10Hz, modulation freq
+ UCHAR ucClockIndication; //Indicate which clock source needs SS
+ UCHAR ucSpreadSpectrumMode; //Bit0=0 Down Spread,=1 Center Spread, bit1=0: internal SS bit1=1: external SS
+ UCHAR ucReserved[2];
+}ATOM_ASIC_SS_ASSIGNMENT_V3;
+
+typedef struct _ATOM_ASIC_INTERNAL_SS_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_ASIC_SS_ASSIGNMENT_V3 asSpreadSpectrum[1]; //this is pointer only.
+}ATOM_ASIC_INTERNAL_SS_INFO_V3;
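
Per the comments above, software scans the spread spectrum assignments for a matching ucClockIndication and treats a miss as "SS not required/enabled". A sketch of that lookup, including the unit conversions; the entry count must be supplied by the caller (derived from the table header size), since asSpreadSpectrum[1] is only a placeholder per the "pointer only" comment:

static int atom_find_ss(const ATOM_ASIC_INTERNAL_SS_INFO_V2 *tbl, int num_entries,
                        UCHAR clock_id, USHORT *percent_x100, ULONG *rate_hz)
{
    int i;
    for (i = 0; i < num_entries; i++) {
        const ATOM_ASIC_SS_ASSIGNMENT_V2 *e = &tbl->asSpreadSpectrum[i];
        if (e->ucClockIndication != clock_id)
            continue;
        *percent_x100 = e->usSpreadSpectrumPercentage; /* in 0.01% units */
        *rate_hz = (ULONG)e->usSpreadRateIn10Hz * 10;  /* 10Hz units -> Hz */
        return 1;                                      /* SS required/enabled */
    }
    return 0; /* no match: SS not required or enabled on this clock */
}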
+
+
+//==============================Scratch Pad Definition Portion===============================
#define ATOM_DEVICE_CONNECT_INFO_DEF 0
#define ATOM_ROM_LOCATION_DEF 1
#define ATOM_TV_STANDARD_DEF 2
@@ -2995,7 +3852,8 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_I2C_CHANNEL_STATUS_DEF 8
#define ATOM_I2C_CHANNEL_STATUS1_DEF 9
-/* BIOS_0_SCRATCH Definition */
+
+// BIOS_0_SCRATCH Definition
#define ATOM_S0_CRT1_MONO 0x00000001L
#define ATOM_S0_CRT1_COLOR 0x00000002L
#define ATOM_S0_CRT1_MASK (ATOM_S0_CRT1_MONO+ATOM_S0_CRT1_COLOR)
@@ -3008,6 +3866,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_CV_DIN_A 0x00000020L
#define ATOM_S0_CV_MASK_A (ATOM_S0_CV_A+ATOM_S0_CV_DIN_A)
+
#define ATOM_S0_CRT2_MONO 0x00000100L
#define ATOM_S0_CRT2_COLOR 0x00000200L
#define ATOM_S0_CRT2_MASK (ATOM_S0_CRT2_MONO+ATOM_S0_CRT2_COLOR)
@@ -3025,28 +3884,27 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_DFP2 0x00020000L
#define ATOM_S0_LCD1 0x00040000L
#define ATOM_S0_LCD2 0x00080000L
-#define ATOM_S0_TV2 0x00100000L
-#define ATOM_S0_DFP3 0x00200000L
-#define ATOM_S0_DFP4 0x00400000L
-#define ATOM_S0_DFP5 0x00800000L
+#define ATOM_S0_DFP6 0x00100000L
+#define ATOM_S0_DFP3 0x00200000L
+#define ATOM_S0_DFP4 0x00400000L
+#define ATOM_S0_DFP5 0x00800000L
-#define ATOM_S0_DFP_MASK \
- (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5)
+#define ATOM_S0_DFP_MASK (ATOM_S0_DFP1 | ATOM_S0_DFP2 | ATOM_S0_DFP3 | ATOM_S0_DFP4 | ATOM_S0_DFP5 | ATOM_S0_DFP6)
-#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L /* If set, indicates we are running a PCIE asic with */
- /* the FAD/HDP reg access bug. Bit is read by DAL */
+#define ATOM_S0_FAD_REGISTER_BUG 0x02000000L // If set, indicates we are running a PCIE asic with
+ // the FAD/HDP reg access bug. Bit is read by DAL, this is obsolete from RV5xx
#define ATOM_S0_THERMAL_STATE_MASK 0x1C000000L
#define ATOM_S0_THERMAL_STATE_SHIFT 26
#define ATOM_S0_SYSTEM_POWER_STATE_MASK 0xE0000000L
-#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
+#define ATOM_S0_SYSTEM_POWER_STATE_SHIFT 29
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_AC 1
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_DC 2
#define ATOM_S0_SYSTEM_POWER_STATE_VALUE_LITEAC 3
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S0_CRT1_MONOb0 0x01
#define ATOM_S0_CRT1_COLORb0 0x02
#define ATOM_S0_CRT1_MASKb0 (ATOM_S0_CRT1_MONOb0+ATOM_S0_CRT1_COLORb0)
@@ -3076,8 +3934,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_DFP2b2 0x02
#define ATOM_S0_LCD1b2 0x04
#define ATOM_S0_LCD2b2 0x08
-#define ATOM_S0_TV2b2 0x10
-#define ATOM_S0_DFP3b2 0x20
+#define ATOM_S0_DFP6b2 0x10
+#define ATOM_S0_DFP3b2 0x20
+#define ATOM_S0_DFP4b2 0x40
+#define ATOM_S0_DFP5b2 0x80
+
#define ATOM_S0_THERMAL_STATE_MASKb3 0x1C
#define ATOM_S0_THERMAL_STATE_SHIFTb3 2
@@ -3085,43 +3946,20 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S0_SYSTEM_POWER_STATE_MASKb3 0xE0
#define ATOM_S0_LCD1_SHIFT 18
-/* BIOS_1_SCRATCH Definition */
+// BIOS_1_SCRATCH Definition
#define ATOM_S1_ROM_LOCATION_MASK 0x0000FFFFL
#define ATOM_S1_PCI_BUS_DEV_MASK 0xFFFF0000L
-/* BIOS_2_SCRATCH Definition */
+// BIOS_2_SCRATCH Definition
#define ATOM_S2_TV1_STANDARD_MASK 0x0000000FL
#define ATOM_S2_CURRENT_BL_LEVEL_MASK 0x0000FF00L
#define ATOM_S2_CURRENT_BL_LEVEL_SHIFT 8
-#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
-#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
-#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
-#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
-#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
-#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
-#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
-#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
-#define ATOM_S2_CV_DPMS_STATE 0x01000000L
-#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
-#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
-#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
-
-#define ATOM_S2_DFP_DPM_STATE \
- (ATOM_S2_DFP1_DPMS_STATE | ATOM_S2_DFP2_DPMS_STATE | \
- ATOM_S2_DFP3_DPMS_STATE | ATOM_S2_DFP4_DPMS_STATE | \
- ATOM_S2_DFP5_DPMS_STATE)
-
-#define ATOM_S2_DEVICE_DPMS_STATE \
- (ATOM_S2_CRT1_DPMS_STATE + ATOM_S2_LCD1_DPMS_STATE + \
- ATOM_S2_TV1_DPMS_STATE + ATOM_S2_DFP_DPMS_STATE + \
- ATOM_S2_CRT2_DPMS_STATE + ATOM_S2_LCD2_DPMS_STATE + \
- ATOM_S2_TV2_DPMS_STATE + ATOM_S2_CV_DPMS_STATE)
-
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK 0x0C000000L
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASK_SHIFT 26
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_CHANGE 0x10000000L
+#define ATOM_S2_DEVICE_DPMS_STATE 0x00010000L
#define ATOM_S2_VRI_BRIGHT_ENABLE 0x20000000L
#define ATOM_S2_DISPLAY_ROTATION_0_DEGREE 0x0
@@ -3131,21 +3969,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S2_DISPLAY_ROTATION_DEGREE_SHIFT 30
#define ATOM_S2_DISPLAY_ROTATION_ANGLE_MASK 0xC0000000L
-/* Byte aligned definition for BIOS usage */
+
+//Byte aligned definition for BIOS usage
#define ATOM_S2_TV1_STANDARD_MASKb0 0x0F
#define ATOM_S2_CURRENT_BL_LEVEL_MASKb1 0xFF
-#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
-#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
-#define ATOM_S2_TV1_DPMS_STATEb2 0x04
-#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
-#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
-#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
-#define ATOM_S2_TV2_DPMS_STATEb2 0x40
-#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
-#define ATOM_S2_CV_DPMS_STATEb3 0x01
-#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
-#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
-#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
+#define ATOM_S2_DEVICE_DPMS_STATEb2 0x01
#define ATOM_S2_DEVICE_DPMS_MASKw1 0x3FF
#define ATOM_S2_FORCEDLOWPWRMODE_STATE_MASKb3 0x0C
@@ -3153,21 +3981,22 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S2_VRI_BRIGHT_ENABLEb3 0x20
#define ATOM_S2_ROTATION_STATE_MASKb3 0xC0
-/* BIOS_3_SCRATCH Definition */
+
+// BIOS_3_SCRATCH Definition
#define ATOM_S3_CRT1_ACTIVE 0x00000001L
#define ATOM_S3_LCD1_ACTIVE 0x00000002L
#define ATOM_S3_TV1_ACTIVE 0x00000004L
#define ATOM_S3_DFP1_ACTIVE 0x00000008L
#define ATOM_S3_CRT2_ACTIVE 0x00000010L
#define ATOM_S3_LCD2_ACTIVE 0x00000020L
-#define ATOM_S3_TV2_ACTIVE 0x00000040L
+#define ATOM_S3_DFP6_ACTIVE 0x00000040L
#define ATOM_S3_DFP2_ACTIVE 0x00000080L
#define ATOM_S3_CV_ACTIVE 0x00000100L
#define ATOM_S3_DFP3_ACTIVE 0x00000200L
#define ATOM_S3_DFP4_ACTIVE 0x00000400L
#define ATOM_S3_DFP5_ACTIVE 0x00000800L
-#define ATOM_S3_DEVICE_ACTIVE_MASK 0x000003FFL
+#define ATOM_S3_DEVICE_ACTIVE_MASK 0x00000FFFL
#define ATOM_S3_LCD_FULLEXPANSION_ACTIVE 0x00001000L
#define ATOM_S3_LCD_EXPANSION_ASPEC_RATIO_ACTIVE 0x00002000L
@@ -3178,7 +4007,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_DFP1_CRTC_ACTIVE 0x00080000L
#define ATOM_S3_CRT2_CRTC_ACTIVE 0x00100000L
#define ATOM_S3_LCD2_CRTC_ACTIVE 0x00200000L
-#define ATOM_S3_TV2_CRTC_ACTIVE 0x00400000L
+#define ATOM_S3_DFP6_CRTC_ACTIVE 0x00400000L
#define ATOM_S3_DFP2_CRTC_ACTIVE 0x00800000L
#define ATOM_S3_CV_CRTC_ACTIVE 0x01000000L
#define ATOM_S3_DFP3_CRTC_ACTIVE 0x02000000L
@@ -3187,17 +4016,18 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_DEVICE_CRTC_ACTIVE_MASK 0x0FFF0000L
#define ATOM_S3_ASIC_GUI_ENGINE_HUNG 0x20000000L
+//Below two definitions are not supported in pplib, but in the old powerplay in DAL
#define ATOM_S3_ALLOW_FAST_PWR_SWITCH 0x40000000L
#define ATOM_S3_RQST_GPU_USE_MIN_PWR 0x80000000L
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S3_CRT1_ACTIVEb0 0x01
#define ATOM_S3_LCD1_ACTIVEb0 0x02
#define ATOM_S3_TV1_ACTIVEb0 0x04
#define ATOM_S3_DFP1_ACTIVEb0 0x08
#define ATOM_S3_CRT2_ACTIVEb0 0x10
#define ATOM_S3_LCD2_ACTIVEb0 0x20
-#define ATOM_S3_TV2_ACTIVEb0 0x40
+#define ATOM_S3_DFP6_ACTIVEb0 0x40
#define ATOM_S3_DFP2_ACTIVEb0 0x80
#define ATOM_S3_CV_ACTIVEb1 0x01
#define ATOM_S3_DFP3_ACTIVEb1 0x02
@@ -3212,7 +4042,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_DFP1_CRTC_ACTIVEb2 0x08
#define ATOM_S3_CRT2_CRTC_ACTIVEb2 0x10
#define ATOM_S3_LCD2_CRTC_ACTIVEb2 0x20
-#define ATOM_S3_TV2_CRTC_ACTIVEb2 0x40
+#define ATOM_S3_DFP6_CRTC_ACTIVEb2 0x40
#define ATOM_S3_DFP2_CRTC_ACTIVEb2 0x80
#define ATOM_S3_CV_CRTC_ACTIVEb3 0x01
#define ATOM_S3_DFP3_CRTC_ACTIVEb3 0x02
@@ -3221,35 +4051,31 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S3_ACTIVE_CRTC2w1 0xFFF
-#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
-#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
-#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
-
-/* BIOS_4_SCRATCH Definition */
+// BIOS_4_SCRATCH Definition
#define ATOM_S4_LCD1_PANEL_ID_MASK 0x000000FFL
#define ATOM_S4_LCD1_REFRESH_MASK 0x0000FF00L
#define ATOM_S4_LCD1_REFRESH_SHIFT 8
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S4_LCD1_PANEL_ID_MASKb0 0x0FF
#define ATOM_S4_LCD1_REFRESH_MASKb1 ATOM_S4_LCD1_PANEL_ID_MASKb0
#define ATOM_S4_VRAM_INFO_MASKb2 ATOM_S4_LCD1_PANEL_ID_MASKb0
-/* BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!! */
+// BIOS_5_SCRATCH Definition, BIOS_5_SCRATCH is used by Firmware only !!!!
#define ATOM_S5_DOS_REQ_CRT1b0 0x01
#define ATOM_S5_DOS_REQ_LCD1b0 0x02
#define ATOM_S5_DOS_REQ_TV1b0 0x04
#define ATOM_S5_DOS_REQ_DFP1b0 0x08
#define ATOM_S5_DOS_REQ_CRT2b0 0x10
#define ATOM_S5_DOS_REQ_LCD2b0 0x20
-#define ATOM_S5_DOS_REQ_TV2b0 0x40
+#define ATOM_S5_DOS_REQ_DFP6b0 0x40
#define ATOM_S5_DOS_REQ_DFP2b0 0x80
#define ATOM_S5_DOS_REQ_CVb1 0x01
#define ATOM_S5_DOS_REQ_DFP3b1 0x02
#define ATOM_S5_DOS_REQ_DFP4b1 0x04
#define ATOM_S5_DOS_REQ_DFP5b1 0x08
-#define ATOM_S5_DOS_REQ_DEVICEw0 0x03FF
+#define ATOM_S5_DOS_REQ_DEVICEw0 0x0FFF
#define ATOM_S5_DOS_REQ_CRT1 0x0001
#define ATOM_S5_DOS_REQ_LCD1 0x0002
@@ -3257,22 +4083,21 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S5_DOS_REQ_DFP1 0x0008
#define ATOM_S5_DOS_REQ_CRT2 0x0010
#define ATOM_S5_DOS_REQ_LCD2 0x0020
-#define ATOM_S5_DOS_REQ_TV2 0x0040
+#define ATOM_S5_DOS_REQ_DFP6 0x0040
#define ATOM_S5_DOS_REQ_DFP2 0x0080
#define ATOM_S5_DOS_REQ_CV 0x0100
-#define ATOM_S5_DOS_REQ_DFP3 0x0200
-#define ATOM_S5_DOS_REQ_DFP4 0x0400
-#define ATOM_S5_DOS_REQ_DFP5 0x0800
+#define ATOM_S5_DOS_REQ_DFP3 0x0200
+#define ATOM_S5_DOS_REQ_DFP4 0x0400
+#define ATOM_S5_DOS_REQ_DFP5 0x0800
#define ATOM_S5_DOS_FORCE_CRT1b2 ATOM_S5_DOS_REQ_CRT1b0
#define ATOM_S5_DOS_FORCE_TV1b2 ATOM_S5_DOS_REQ_TV1b0
#define ATOM_S5_DOS_FORCE_CRT2b2 ATOM_S5_DOS_REQ_CRT2b0
#define ATOM_S5_DOS_FORCE_CVb3 ATOM_S5_DOS_REQ_CVb1
-#define ATOM_S5_DOS_FORCE_DEVICEw1 \
- (ATOM_S5_DOS_FORCE_CRT1b2 + ATOM_S5_DOS_FORCE_TV1b2 + \
- ATOM_S5_DOS_FORCE_CRT2b2 + (ATOM_S5_DOS_FORCE_CVb3 << 8))
+#define ATOM_S5_DOS_FORCE_DEVICEw1 (ATOM_S5_DOS_FORCE_CRT1b2+ATOM_S5_DOS_FORCE_TV1b2+ATOM_S5_DOS_FORCE_CRT2b2+\
+ (ATOM_S5_DOS_FORCE_CVb3<<8))
-/* BIOS_6_SCRATCH Definition */
+// BIOS_6_SCRATCH Definition
#define ATOM_S6_DEVICE_CHANGE 0x00000001L
#define ATOM_S6_SCALER_CHANGE 0x00000002L
#define ATOM_S6_LID_CHANGE 0x00000004L
@@ -3285,11 +4110,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_HW_I2C_BUSY_STATE 0x00000200L
#define ATOM_S6_THERMAL_STATE_CHANGE 0x00000400L
#define ATOM_S6_INTERRUPT_SET_BY_BIOS 0x00000800L
-#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L /* Normal expansion Request bit for LCD */
-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L /* Aspect ratio expansion Request bit for LCD */
+#define ATOM_S6_REQ_LCD_EXPANSION_FULL 0x00001000L //Normal expansion Request bit for LCD
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIO 0x00002000L //Aspect ratio expansion Request bit for LCD
-#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L /* This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion */
-#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L /* This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion */
+#define ATOM_S6_DISPLAY_STATE_CHANGE 0x00004000L //This bit is recycled when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_H_expansion
+#define ATOM_S6_I2C_STATE_CHANGE 0x00008000L //This bit is recycled,when ATOM_BIOS_INFO_BIOS_SCRATCH6_SCL2_REDEFINE is set,previously it's SCL2_V_expansion
#define ATOM_S6_ACC_REQ_CRT1 0x00010000L
#define ATOM_S6_ACC_REQ_LCD1 0x00020000L
@@ -3297,7 +4122,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_ACC_REQ_DFP1 0x00080000L
#define ATOM_S6_ACC_REQ_CRT2 0x00100000L
#define ATOM_S6_ACC_REQ_LCD2 0x00200000L
-#define ATOM_S6_ACC_REQ_TV2 0x00400000L
+#define ATOM_S6_ACC_REQ_DFP6 0x00400000L
#define ATOM_S6_ACC_REQ_DFP2 0x00800000L
#define ATOM_S6_ACC_REQ_CV 0x01000000L
#define ATOM_S6_ACC_REQ_DFP3 0x02000000L
@@ -3310,7 +4135,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_VRI_BRIGHTNESS_CHANGE 0x40000000L
#define ATOM_S6_CONFIG_DISPLAY_CHANGE_MASK 0x80000000L
-/* Byte aligned definition for BIOS usage */
+//Byte aligned definition for BIOS usage
#define ATOM_S6_DEVICE_CHANGEb0 0x01
#define ATOM_S6_SCALER_CHANGEb0 0x02
#define ATOM_S6_LID_CHANGEb0 0x04
@@ -3320,11 +4145,11 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_LID_STATEb0 0x40
#define ATOM_S6_DOCK_STATEb0 0x80
#define ATOM_S6_CRITICAL_STATEb1 0x01
-#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
+#define ATOM_S6_HW_I2C_BUSY_STATEb1 0x02
#define ATOM_S6_THERMAL_STATE_CHANGEb1 0x04
#define ATOM_S6_INTERRUPT_SET_BY_BIOSb1 0x08
-#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
-#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
+#define ATOM_S6_REQ_LCD_EXPANSION_FULLb1 0x10
+#define ATOM_S6_REQ_LCD_EXPANSION_ASPEC_RATIOb1 0x20
#define ATOM_S6_ACC_REQ_CRT1b2 0x01
#define ATOM_S6_ACC_REQ_LCD1b2 0x02
@@ -3332,12 +4157,12 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_ACC_REQ_DFP1b2 0x08
#define ATOM_S6_ACC_REQ_CRT2b2 0x10
#define ATOM_S6_ACC_REQ_LCD2b2 0x20
-#define ATOM_S6_ACC_REQ_TV2b2 0x40
+#define ATOM_S6_ACC_REQ_DFP6b2 0x40
#define ATOM_S6_ACC_REQ_DFP2b2 0x80
#define ATOM_S6_ACC_REQ_CVb3 0x01
-#define ATOM_S6_ACC_REQ_DFP3b3 0x02
-#define ATOM_S6_ACC_REQ_DFP4b3 0x04
-#define ATOM_S6_ACC_REQ_DFP5b3 0x08
+#define ATOM_S6_ACC_REQ_DFP3b3 0x02
+#define ATOM_S6_ACC_REQ_DFP4b3 0x04
+#define ATOM_S6_ACC_REQ_DFP5b3 0x08
#define ATOM_S6_ACC_REQ_DEVICEw1 ATOM_S5_DOS_REQ_DEVICEw0
#define ATOM_S6_SYSTEM_POWER_MODE_CHANGEb3 0x10
@@ -3366,7 +4191,7 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT 30
#define ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT 31
-/* BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!! */
+// BIOS_7_SCRATCH Definition, BIOS_7_SCRATCH is used by Firmware only !!!!
#define ATOM_S7_DOS_MODE_TYPEb0 0x03
#define ATOM_S7_DOS_MODE_VGAb0 0x00
#define ATOM_S7_DOS_MODE_VESAb0 0x01
@@ -3378,220 +4203,194 @@ typedef struct _ATOM_ASIC_INTERNAL_SS_INFO {
#define ATOM_S7_DOS_8BIT_DAC_EN_SHIFT 8
-/* BIOS_8_SCRATCH Definition */
+// BIOS_8_SCRATCH Definition
#define ATOM_S8_I2C_CHANNEL_BUSY_MASK 0x00000FFFF
-#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
+#define ATOM_S8_I2C_HW_ENGINE_BUSY_MASK 0x0FFFF0000
#define ATOM_S8_I2C_CHANNEL_BUSY_SHIFT 0
#define ATOM_S8_I2C_ENGINE_BUSY_SHIFT 16
-/* BIOS_9_SCRATCH Definition */
-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
+// BIOS_9_SCRATCH Definition
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_MASK
#define ATOM_S9_I2C_CHANNEL_COMPLETED_MASK 0x0000FFFF
#endif
-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_MASK
#define ATOM_S9_I2C_CHANNEL_ABORTED_MASK 0xFFFF0000
#endif
-#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
+#ifndef ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT
#define ATOM_S9_I2C_CHANNEL_COMPLETED_SHIFT 0
#endif
-#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
+#ifndef ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT
#define ATOM_S9_I2C_CHANNEL_ABORTED_SHIFT 16
#endif
+
#define ATOM_FLAG_SET 0x20
#define ATOM_FLAG_CLEAR 0
-#define CLEAR_ATOM_S6_ACC_MODE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
-#define SET_ATOM_S6_DEVICE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SCALER_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_LID_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_LID_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) |\
- ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_LID_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_DOCK_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8)| \
- ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_DOCK_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_DOCK_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_THERMAL_STATE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
-
-#define SET_ATOM_S6_CRITICAL_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_CRITICAL_STATE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
-
-#define SET_ATOM_S6_REQ_SCALER \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S6_REQ_SCALER \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_REQ_SCALER_ARATIO \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
-
-#define SET_ATOM_S6_I2C_STATE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DISPLAY_STATE_CHANGE \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
-
-#define SET_ATOM_S6_DEVICE_RECONFIG \
- ((ATOM_ACC_CHANGE_INFO_DEF << 8) | \
- ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
-#define CLEAR_ATOM_S0_LCD1 \
- ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 ) | \
- ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
-#define SET_ATOM_S7_DOS_8BIT_DAC_EN \
- ((ATOM_DOS_MODE_INFO_DEF << 8) | \
- ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
-#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN \
- ((ATOM_DOS_MODE_INFO_DEF << 8) | \
- ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
+#define CLEAR_ATOM_S6_ACC_MODE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_ACC_MODE_SHIFT | ATOM_FLAG_CLEAR)
+#define SET_ATOM_S6_DEVICE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DEVICE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_VRI_BRIGHTNESS_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_VRI_BRIGHTNESS_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SCALER_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SCALER_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_LID_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_CHANGE_SHIFT | ATOM_FLAG_SET)
-/****************************************************************************/
-/* Portion II: Definitinos only used in Driver */
+#define SET_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_LID_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_LID_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_DOCK_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCKING_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_DOCK_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DOCK_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_THERMAL_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_THERMAL_STATE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_SYSTEM_POWER_MODE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_SYSTEM_POWER_MODE_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define SET_ATOM_S6_INTERRUPT_SET_BY_BIOS ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_INTERRUPT_SET_BY_BIOS_SHIFT | ATOM_FLAG_SET)
+
+#define SET_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_CRITICAL_STATE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CRITICAL_STATE_SHIFT | ATOM_FLAG_CLEAR)
+
+#define SET_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S6_REQ_SCALER ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S6_REQ_SCALER_ARATIO ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_REQ_SCALER_ARATIO_SHIFT | ATOM_FLAG_CLEAR )
+
+#define SET_ATOM_S6_I2C_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_I2C_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DISPLAY_STATE_CHANGE ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_DISPLAY_STATE_CHANGE_SHIFT | ATOM_FLAG_SET )
+
+#define SET_ATOM_S6_DEVICE_RECONFIG ((ATOM_ACC_CHANGE_INFO_DEF << 8 )|ATOM_S6_CONFIG_DISPLAY_CHANGE_SHIFT | ATOM_FLAG_SET)
+#define CLEAR_ATOM_S0_LCD1 ((ATOM_DEVICE_CONNECT_INFO_DEF << 8 )| ATOM_S0_LCD1_SHIFT | ATOM_FLAG_CLEAR )
+#define SET_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_SET )
+#define CLEAR_ATOM_S7_DOS_8BIT_DAC_EN ((ATOM_DOS_MODE_INFO_DEF << 8 )|ATOM_S7_DOS_8BIT_DAC_EN_SHIFT | ATOM_FLAG_CLEAR )
+
+/****************************************************************************/
+//Portion II: Definitions only used in Driver
/****************************************************************************/
-/* Macros used by driver */
+// Macros used by driver
+#ifdef __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) ((reinterpret_cast<char*>(&(static_cast<ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*>(0))->FieldName)-static_cast<char*>(0))/sizeof(USHORT))
-#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char *)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES *)0)->FieldName)-(char *)0)/sizeof(USHORT))
+#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableFormatRevision )&0x3F)
+#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) (((static_cast<ATOM_COMMON_TABLE_HEADER*>(TABLE_HEADER_OFFSET))->ucTableContentRevision)&0x3F)
+#else // not __cplusplus
+#define GetIndexIntoMasterTable(MasterOrData, FieldName) (((char*)(&((ATOM_MASTER_LIST_OF_##MasterOrData##_TABLES*)0)->FieldName)-(char*)0)/sizeof(USHORT))
#define GET_COMMAND_TABLE_COMMANDSET_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableFormatRevision)&0x3F)
#define GET_COMMAND_TABLE_PARAMETER_REVISION(TABLE_HEADER_OFFSET) ((((ATOM_COMMON_TABLE_HEADER*)TABLE_HEADER_OFFSET)->ucTableContentRevision)&0x3F)
+#endif // __cplusplus
#define GET_DATA_TABLE_MAJOR_REVISION GET_COMMAND_TABLE_COMMANDSET_REVISION
#define GET_DATA_TABLE_MINOR_REVISION GET_COMMAND_TABLE_PARAMETER_REVISION
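
A brief usage sketch of the macros above. FirmwareInfo is assumed to be one of the USHORT members of ATOM_MASTER_LIST_OF_DATA_TABLES defined earlier in this header, and the helper itself is hypothetical:

static void atom_table_revision_example(ATOM_COMMON_TABLE_HEADER *hdr,
                                        int *idx, int *major, int *minor)
{
    *idx   = GetIndexIntoMasterTable(DATA, FirmwareInfo); /* USHORT slot index in the master list */
    *major = GET_DATA_TABLE_MAJOR_REVISION(hdr);          /* ucTableFormatRevision & 0x3F */
    *minor = GET_DATA_TABLE_MINOR_REVISION(hdr);          /* ucTableContentRevision & 0x3F */
}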
-/****************************************************************************/
-/* Portion III: Definitinos only used in VBIOS */
+/****************************************************************************/
+//Portion III: Definitions only used in VBIOS
/****************************************************************************/
#define ATOM_DAC_SRC 0x80
#define ATOM_SRC_DAC1 0
#define ATOM_SRC_DAC2 0x80
-#ifdef UEFI_BUILD
-#define USHORT UTEMP
-#endif
-
-typedef struct _MEMORY_PLLINIT_PARAMETERS {
- ULONG ulTargetMemoryClock; /* In 10Khz unit */
- UCHAR ucAction; /* not define yet */
- UCHAR ucFbDiv_Hi; /* Fbdiv Hi byte */
- UCHAR ucFbDiv; /* FB value */
- UCHAR ucPostDiv; /* Post div */
-} MEMORY_PLLINIT_PARAMETERS;
+typedef struct _MEMORY_PLLINIT_PARAMETERS
+{
+ ULONG ulTargetMemoryClock; //In 10Khz unit
+ UCHAR ucAction; //not define yet
+ UCHAR ucFbDiv_Hi; //Fbdiv Hi byte
+ UCHAR ucFbDiv; //FB value
+ UCHAR ucPostDiv; //Post div
+}MEMORY_PLLINIT_PARAMETERS;
#define MEMORY_PLLINIT_PS_ALLOCATION MEMORY_PLLINIT_PARAMETERS
-#define GPIO_PIN_WRITE 0x01
+
+#define GPIO_PIN_WRITE 0x01
#define GPIO_PIN_READ 0x00
-typedef struct _GPIO_PIN_CONTROL_PARAMETERS {
- UCHAR ucGPIO_ID; /* return value, read from GPIO pins */
- UCHAR ucGPIOBitShift; /* define which bit in uGPIOBitVal need to be update */
- UCHAR ucGPIOBitVal; /* Set/Reset corresponding bit defined in ucGPIOBitMask */
- UCHAR ucAction; /* =GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write */
-} GPIO_PIN_CONTROL_PARAMETERS;
-
-typedef struct _ENABLE_SCALER_PARAMETERS {
- UCHAR ucScaler; /* ATOM_SCALER1, ATOM_SCALER2 */
- UCHAR ucEnable; /* ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION */
- UCHAR ucTVStandard; /* */
- UCHAR ucPadding[1];
-} ENABLE_SCALER_PARAMETERS;
-#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
-
-/* ucEnable: */
+typedef struct _GPIO_PIN_CONTROL_PARAMETERS
+{
+ UCHAR ucGPIO_ID; //return value, read from GPIO pins
+ UCHAR ucGPIOBitShift; //define which bit in uGPIOBitVal need to be update
+ UCHAR ucGPIOBitVal; //Set/Reset corresponding bit defined in ucGPIOBitMask
+ UCHAR ucAction; //=GPIO_PIN_WRITE: Read; =GPIO_PIN_READ: Write
+}GPIO_PIN_CONTROL_PARAMETERS;
+
+typedef struct _ENABLE_SCALER_PARAMETERS
+{
+ UCHAR ucScaler; // ATOM_SCALER1, ATOM_SCALER2
+ UCHAR ucEnable; // ATOM_SCALER_DISABLE or ATOM_SCALER_CENTER or ATOM_SCALER_EXPANSION
+ UCHAR ucTVStandard; //
+ UCHAR ucPadding[1];
+}ENABLE_SCALER_PARAMETERS;
+#define ENABLE_SCALER_PS_ALLOCATION ENABLE_SCALER_PARAMETERS
+
+//ucEnable:
#define SCALER_BYPASS_AUTO_CENTER_NO_REPLICATION 0
#define SCALER_BYPASS_AUTO_CENTER_AUTO_REPLICATION 1
#define SCALER_ENABLE_2TAP_ALPHA_MODE 2
#define SCALER_ENABLE_MULTITAP_MODE 3
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS {
- ULONG usHWIconHorzVertPosn; /* Hardware Icon Vertical position */
- UCHAR ucHWIconVertOffset; /* Hardware Icon Vertical offset */
- UCHAR ucHWIconHorzOffset; /* Hardware Icon Horizontal offset */
- UCHAR ucSelection; /* ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2 */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
-} ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
-
-typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION {
- ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
- ENABLE_CRTC_PARAMETERS sReserved;
-} ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS {
- USHORT usHight; /* Image Hight */
- USHORT usWidth; /* Image Width */
- UCHAR ucSurface; /* Surface 1 or 2 */
- UCHAR ucPadding[3];
-} ENABLE_GRAPH_SURFACE_PARAMETERS;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2 {
- USHORT usHight; /* Image Hight */
- USHORT usWidth; /* Image Width */
- UCHAR ucSurface; /* Surface 1 or 2 */
- UCHAR ucEnable; /* ATOM_ENABLE or ATOM_DISABLE */
- UCHAR ucPadding[2];
-} ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
-
-typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION {
- ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
- ENABLE_YUV_PS_ALLOCATION sReserved; /* Don't set this one */
-} ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
-
-typedef struct _MEMORY_CLEAN_UP_PARAMETERS {
- USHORT usMemoryStart; /* in 8Kb boundry, offset from memory base address */
- USHORT usMemorySize; /* 8Kb blocks aligned */
-} MEMORY_CLEAN_UP_PARAMETERS;
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS
+{
+ ULONG usHWIconHorzVertPosn; // Hardware Icon Vertical position
+ UCHAR ucHWIconVertOffset; // Hardware Icon Vertical offset
+ UCHAR ucHWIconHorzOffset; // Hardware Icon Horizontal offset
+ UCHAR ucSelection; // ATOM_CURSOR1 or ATOM_ICON1 or ATOM_CURSOR2 or ATOM_ICON2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+}ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS;
+
+typedef struct _ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION
+{
+ ENABLE_HARDWARE_ICON_CURSOR_PARAMETERS sEnableIcon;
+ ENABLE_CRTC_PARAMETERS sReserved;
+}ENABLE_HARDWARE_ICON_CURSOR_PS_ALLOCATION;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucPadding[3];
+}ENABLE_GRAPH_SURFACE_PARAMETERS;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ UCHAR ucPadding[2];
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_2;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3
+{
+ USHORT usHight; // Image Height
+ USHORT usWidth; // Image Width
+ UCHAR ucSurface; // Surface 1 or 2
+ UCHAR ucEnable; // ATOM_ENABLE or ATOM_DISABLE
+ USHORT usDeviceId; // Active Device Id for this surface. If no device, set to 0.
+}ENABLE_GRAPH_SURFACE_PARAMETERS_V1_3;
+
+typedef struct _ENABLE_GRAPH_SURFACE_PS_ALLOCATION
+{
+ ENABLE_GRAPH_SURFACE_PARAMETERS sSetSurface;
+ ENABLE_YUV_PS_ALLOCATION sReserved; // Don't set this one
+}ENABLE_GRAPH_SURFACE_PS_ALLOCATION;
+
+typedef struct _MEMORY_CLEAN_UP_PARAMETERS
+{
+ USHORT usMemoryStart; //in 8Kb boundary, offset from memory base address
+ USHORT usMemorySize; //8Kb blocks aligned
+}MEMORY_CLEAN_UP_PARAMETERS;
#define MEMORY_CLEAN_UP_PS_ALLOCATION MEMORY_CLEAN_UP_PARAMETERS
-typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS {
- USHORT usX_Size; /* When use as input parameter, usX_Size indicates which CRTC */
- USHORT usY_Size;
-} GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
+typedef struct _GET_DISPLAY_SURFACE_SIZE_PARAMETERS
+{
+ USHORT usX_Size; //When use as input parameter, usX_Size indicates which CRTC
+ USHORT usY_Size;
+}GET_DISPLAY_SURFACE_SIZE_PARAMETERS;
-typedef struct _INDIRECT_IO_ACCESS {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR IOAccessSequence[256];
+typedef struct _INDIRECT_IO_ACCESS
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR IOAccessSequence[256];
} INDIRECT_IO_ACCESS;
#define INDIRECT_READ 0x00
@@ -3615,93 +4414,108 @@ typedef struct _INDIRECT_IO_ACCESS {
#define INDIRECT_IO_NBMISC_READ INDIRECT_IO_NBMISC | INDIRECT_READ
#define INDIRECT_IO_NBMISC_WRITE INDIRECT_IO_NBMISC | INDIRECT_WRITE
-typedef struct _ATOM_OEM_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
-} ATOM_OEM_INFO;
-
-typedef struct _ATOM_TV_MODE {
- UCHAR ucVMode_Num; /* Video mode number */
- UCHAR ucTV_Mode_Num; /* Internal TV mode number */
-} ATOM_TV_MODE;
-
-typedef struct _ATOM_BIOS_INT_TVSTD_MODE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usTV_Mode_LUT_Offset; /* Pointer to standard to internal number conversion table */
- USHORT usTV_FIFO_Offset; /* Pointer to FIFO entry table */
- USHORT usNTSC_Tbl_Offset; /* Pointer to SDTV_Mode_NTSC table */
- USHORT usPAL_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
- USHORT usCV_Tbl_Offset; /* Pointer to SDTV_Mode_PAL table */
-} ATOM_BIOS_INT_TVSTD_MODE;
-
-typedef struct _ATOM_TV_MODE_SCALER_PTR {
- USHORT ucFilter0_Offset; /* Pointer to filter format 0 coefficients */
- USHORT usFilter1_Offset; /* Pointer to filter format 0 coefficients */
- UCHAR ucTV_Mode_Num;
-} ATOM_TV_MODE_SCALER_PTR;
-
-typedef struct _ATOM_STANDARD_VESA_TIMING {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_DTD_FORMAT aModeTimings[16]; /* 16 is not the real array number, just for initial allocation */
-} ATOM_STANDARD_VESA_TIMING;
-
-typedef struct _ATOM_STD_FORMAT {
- USHORT usSTD_HDisp;
- USHORT usSTD_VDisp;
- USHORT usSTD_RefreshRate;
- USHORT usReserved;
-} ATOM_STD_FORMAT;
-
-typedef struct _ATOM_VESA_TO_EXTENDED_MODE {
- USHORT usVESA_ModeNumber;
- USHORT usExtendedModeNumber;
-} ATOM_VESA_TO_EXTENDED_MODE;
-
-typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT {
- ATOM_COMMON_TABLE_HEADER sHeader;
- ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
-} ATOM_VESA_TO_INTENAL_MODE_LUT;
+typedef struct _ATOM_OEM_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_OEM_INFO;
+
+typedef struct _ATOM_TV_MODE
+{
+ UCHAR ucVMode_Num; //Video mode number
+ UCHAR ucTV_Mode_Num; //Internal TV mode number
+}ATOM_TV_MODE;
+
+typedef struct _ATOM_BIOS_INT_TVSTD_MODE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usTV_Mode_LUT_Offset; // Pointer to standard to internal number conversion table
+ USHORT usTV_FIFO_Offset; // Pointer to FIFO entry table
+ USHORT usNTSC_Tbl_Offset; // Pointer to SDTV_Mode_NTSC table
+ USHORT usPAL_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
+ USHORT usCV_Tbl_Offset; // Pointer to SDTV_Mode_PAL table
+}ATOM_BIOS_INT_TVSTD_MODE;
+
+
+typedef struct _ATOM_TV_MODE_SCALER_PTR
+{
+ USHORT ucFilter0_Offset; //Pointer to filter format 0 coefficients
+ USHORT usFilter1_Offset; //Pointer to filter format 0 coefficients
+ UCHAR ucTV_Mode_Num;
+}ATOM_TV_MODE_SCALER_PTR;
+
+typedef struct _ATOM_STANDARD_VESA_TIMING
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_DTD_FORMAT aModeTimings[16]; // 16 is not the real array number, just for initial allocation
+}ATOM_STANDARD_VESA_TIMING;
+
+
+typedef struct _ATOM_STD_FORMAT
+{
+ USHORT usSTD_HDisp;
+ USHORT usSTD_VDisp;
+ USHORT usSTD_RefreshRate;
+ USHORT usReserved;
+}ATOM_STD_FORMAT;
+
+typedef struct _ATOM_VESA_TO_EXTENDED_MODE
+{
+ USHORT usVESA_ModeNumber;
+ USHORT usExtendedModeNumber;
+}ATOM_VESA_TO_EXTENDED_MODE;
+
+typedef struct _ATOM_VESA_TO_INTENAL_MODE_LUT
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ ATOM_VESA_TO_EXTENDED_MODE asVESA_ToExtendedModeInfo[76];
+}ATOM_VESA_TO_INTENAL_MODE_LUT;
/*************** ATOM Memory Related Data Structure ***********************/
-typedef struct _ATOM_MEMORY_VENDOR_BLOCK {
- UCHAR ucMemoryType;
- UCHAR ucMemoryVendor;
- UCHAR ucAdjMCId;
- UCHAR ucDynClkId;
- ULONG ulDllResetClkRange;
-} ATOM_MEMORY_VENDOR_BLOCK;
-
-typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG {
+typedef struct _ATOM_MEMORY_VENDOR_BLOCK{
+ UCHAR ucMemoryType;
+ UCHAR ucMemoryVendor;
+ UCHAR ucAdjMCId;
+ UCHAR ucDynClkId;
+ ULONG ulDllResetClkRange;
+}ATOM_MEMORY_VENDOR_BLOCK;
+
+
+typedef struct _ATOM_MEMORY_SETTING_ID_CONFIG{
#if ATOM_BIG_ENDIAN
- ULONG ucMemBlkId:8;
- ULONG ulMemClockRange:24;
+ ULONG ucMemBlkId:8;
+ ULONG ulMemClockRange:24;
#else
- ULONG ulMemClockRange:24;
- ULONG ucMemBlkId:8;
+ ULONG ulMemClockRange:24;
+ ULONG ucMemBlkId:8;
#endif
-} ATOM_MEMORY_SETTING_ID_CONFIG;
-
-typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS {
- ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
- ULONG ulAccess;
-} ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
-
-typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK {
- ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
- ULONG aulMemData[1];
-} ATOM_MEMORY_SETTING_DATA_BLOCK;
-
-typedef struct _ATOM_INIT_REG_INDEX_FORMAT {
- USHORT usRegIndex; /* MC register index */
- UCHAR ucPreRegDataLength; /* offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf */
-} ATOM_INIT_REG_INDEX_FORMAT;
-
-typedef struct _ATOM_INIT_REG_BLOCK {
- USHORT usRegIndexTblSize; /* size of asRegIndexBuf */
- USHORT usRegDataBlkSize; /* size of ATOM_MEMORY_SETTING_DATA_BLOCK */
- ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
- ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
-} ATOM_INIT_REG_BLOCK;
+}ATOM_MEMORY_SETTING_ID_CONFIG;
+
+typedef union _ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS
+{
+ ATOM_MEMORY_SETTING_ID_CONFIG slAccess;
+ ULONG ulAccess;
+}ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS;
+
+
+typedef struct _ATOM_MEMORY_SETTING_DATA_BLOCK{
+ ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS ulMemoryID;
+ ULONG aulMemData[1];
+}ATOM_MEMORY_SETTING_DATA_BLOCK;
+
+
+typedef struct _ATOM_INIT_REG_INDEX_FORMAT{
+ USHORT usRegIndex; // MC register index
+ UCHAR ucPreRegDataLength; // offset in ATOM_INIT_REG_DATA_BLOCK.saRegDataBuf
+}ATOM_INIT_REG_INDEX_FORMAT;
+
+
+typedef struct _ATOM_INIT_REG_BLOCK{
+ USHORT usRegIndexTblSize; //size of asRegIndexBuf
+ USHORT usRegDataBlkSize; //size of ATOM_MEMORY_SETTING_DATA_BLOCK
+ ATOM_INIT_REG_INDEX_FORMAT asRegIndexBuf[1];
+ ATOM_MEMORY_SETTING_DATA_BLOCK asRegDataBuf[1];
+}ATOM_INIT_REG_BLOCK;
#define END_OF_REG_INDEX_BLOCK 0x0ffff
#define END_OF_REG_DATA_BLOCK 0x00000000
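For orientation, a minimal sketch (not part of this patch) of how a driver might consume these tables, assuming the UCHAR/USHORT/ULONG typedefs and structures declared earlier in this header and a little-endian host. The register index table ends at an entry whose usRegIndex equals END_OF_REG_INDEX_BLOCK, and the memory-setting ID packs an 8-bit block ID with a 24-bit clock range:

/* Hypothetical helper: count MC register indices in an init block. */
static int atom_count_reg_indices(const ATOM_INIT_REG_BLOCK *blk)
{
	int max = blk->usRegIndexTblSize / sizeof(ATOM_INIT_REG_INDEX_FORMAT);
	int i;

	for (i = 0; i < max; i++)
		if (blk->asRegIndexBuf[i].usRegIndex == END_OF_REG_INDEX_BLOCK)
			break;		/* terminator entry reached */
	return i;
}

/* Hypothetical helper: split a memory-setting ID into block ID and clock range (10 kHz units). */
static void atom_decode_mem_setting_id(ULONG raw, UCHAR *blk_id, ULONG *clk_range)
{
	ATOM_MEMORY_SETTING_ID_CONFIG_ACCESS acc;

	acc.ulAccess = raw;
	*blk_id = acc.slAccess.ucMemBlkId;
	*clk_range = acc.slAccess.ulMemClockRange;
}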
@@ -3716,16 +4530,19 @@ typedef struct _ATOM_INIT_REG_BLOCK {
#define INDEX_ACCESS_RANGE_END (INDEX_ACCESS_RANGE_BEGIN + 1)
#define VALUE_INDEX_ACCESS_SINGLE (INDEX_ACCESS_RANGE_END + 1)
-typedef struct _ATOM_MC_INIT_PARAM_TABLE {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usAdjustARB_SEQDataOffset;
- USHORT usMCInitMemTypeTblOffset;
- USHORT usMCInitCommonTblOffset;
- USHORT usMCInitPowerDownTblOffset;
- ULONG ulARB_SEQDataBuf[32];
- ATOM_INIT_REG_BLOCK asMCInitMemType;
- ATOM_INIT_REG_BLOCK asMCInitCommon;
-} ATOM_MC_INIT_PARAM_TABLE;
+
+typedef struct _ATOM_MC_INIT_PARAM_TABLE
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usAdjustARB_SEQDataOffset;
+ USHORT usMCInitMemTypeTblOffset;
+ USHORT usMCInitCommonTblOffset;
+ USHORT usMCInitPowerDownTblOffset;
+ ULONG ulARB_SEQDataBuf[32];
+ ATOM_INIT_REG_BLOCK asMCInitMemType;
+ ATOM_INIT_REG_BLOCK asMCInitCommon;
+}ATOM_MC_INIT_PARAM_TABLE;
+
#define _4Mx16 0x2
#define _4Mx32 0x3
@@ -3751,221 +4568,272 @@ typedef struct _ATOM_MC_INIT_PARAM_TABLE {
#define QIMONDA INFINEON
#define PROMOS MOSEL
+#define KRETON INFINEON
-/* ///////////Support for GDDR5 MC uCode to reside in upper 64K of ROM///////////// */
+/////////////Support for GDDR5 MC uCode to reside in upper 64K of ROM/////////////
#define UCODE_ROM_START_ADDRESS 0x1c000
-#define UCODE_SIGNATURE 0x4375434d /* 'MCuC' - MC uCode */
-
-/* uCode block header for reference */
-
-typedef struct _MCuCodeHeader {
- ULONG ulSignature;
- UCHAR ucRevision;
- UCHAR ucChecksum;
- UCHAR ucReserved1;
- UCHAR ucReserved2;
- USHORT usParametersLength;
- USHORT usUCodeLength;
- USHORT usReserved1;
- USHORT usReserved2;
+#define UCODE_SIGNATURE 0x4375434d // 'MCuC' - MC uCode
+
+//uCode block header for reference
+
+typedef struct _MCuCodeHeader
+{
+ ULONG ulSignature;
+ UCHAR ucRevision;
+ UCHAR ucChecksum;
+ UCHAR ucReserved1;
+ UCHAR ucReserved2;
+ USHORT usParametersLength;
+ USHORT usUCodeLength;
+ USHORT usReserved1;
+ USHORT usReserved2;
} MCuCodeHeader;
-/* //////////////////////////////////////////////////////////////////////////////// */
+//////////////////////////////////////////////////////////////////////////////////
#define ATOM_MAX_NUMBER_OF_VRAM_MODULE 16
#define ATOM_VRAM_MODULE_MEMORY_VENDOR_ID_MASK 0xF
-typedef struct _ATOM_VRAM_MODULE_V1 {
- ULONG ulReserved;
- USHORT usEMRSValue;
- USHORT usMRSValue;
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved; */
- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender */
- UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
- UCHAR ucRow; /* Number of Row,in power of 2; */
- UCHAR ucColumn; /* Number of Column,in power of 2; */
- UCHAR ucBank; /* Nunber of Bank; */
- UCHAR ucRank; /* Number of Rank, in power of 2 */
- UCHAR ucChannelNum; /* Number of channel; */
- UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
- UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
- UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
- UCHAR ucReserved[2];
-} ATOM_VRAM_MODULE_V1;
-
-typedef struct _ATOM_VRAM_MODULE_V2 {
- ULONG ulReserved;
- ULONG ulFlags; /* To enable/disable functionalities based on memory type */
- ULONG ulEngineClock; /* Override of default engine clock for particular memory type */
- ULONG ulMemoryClock; /* Override of default memory clock for particular memory type */
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usEMRSValue;
- USHORT usMRSValue;
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */
- UCHAR ucMemoryDeviceCfg; /* [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32... */
- UCHAR ucRow; /* Number of Row,in power of 2; */
- UCHAR ucColumn; /* Number of Column,in power of 2; */
- UCHAR ucBank; /* Nunber of Bank; */
- UCHAR ucRank; /* Number of Rank, in power of 2 */
- UCHAR ucChannelNum; /* Number of channel; */
- UCHAR ucChannelConfig; /* [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2 */
- UCHAR ucDefaultMVDDQ_ID; /* Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data; */
- UCHAR ucDefaultMVDDC_ID; /* Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data; */
- UCHAR ucRefreshRateFactor;
- UCHAR ucReserved[3];
-} ATOM_VRAM_MODULE_V2;
-
-typedef struct _ATOM_MEMORY_TIMING_FORMAT {
- ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
- union {
- USHORT usMRS; /* mode register */
- USHORT usDDR3_MR0;
- };
- union {
- USHORT usEMRS; /* extended mode register */
- USHORT usDDR3_MR1;
- };
- UCHAR ucCL; /* CAS latency */
- UCHAR ucWL; /* WRITE Latency */
- UCHAR uctRAS; /* tRAS */
- UCHAR uctRC; /* tRC */
- UCHAR uctRFC; /* tRFC */
- UCHAR uctRCDR; /* tRCDR */
- UCHAR uctRCDW; /* tRCDW */
- UCHAR uctRP; /* tRP */
- UCHAR uctRRD; /* tRRD */
- UCHAR uctWR; /* tWR */
- UCHAR uctWTR; /* tWTR */
- UCHAR uctPDIX; /* tPDIX */
- UCHAR uctFAW; /* tFAW */
- UCHAR uctAOND; /* tAOND */
- union {
- struct {
- UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
- UCHAR ucReserved;
- };
- USHORT usDDR3_MR2;
- };
-} ATOM_MEMORY_TIMING_FORMAT;
-
-typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1 {
- ULONG ulClkRange; /* memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing */
- USHORT usMRS; /* mode register */
- USHORT usEMRS; /* extended mode register */
- UCHAR ucCL; /* CAS latency */
- UCHAR ucWL; /* WRITE Latency */
- UCHAR uctRAS; /* tRAS */
- UCHAR uctRC; /* tRC */
- UCHAR uctRFC; /* tRFC */
- UCHAR uctRCDR; /* tRCDR */
- UCHAR uctRCDW; /* tRCDW */
- UCHAR uctRP; /* tRP */
- UCHAR uctRRD; /* tRRD */
- UCHAR uctWR; /* tWR */
- UCHAR uctWTR; /* tWTR */
- UCHAR uctPDIX; /* tPDIX */
- UCHAR uctFAW; /* tFAW */
- UCHAR uctAOND; /* tAOND */
- UCHAR ucflag; /* flag to control memory timing calculation. bit0= control EMRS2 Infineon */
-/* ///////////////////////GDDR parameters/////////////////////////////////// */
- UCHAR uctCCDL; /* */
- UCHAR uctCRCRL; /* */
- UCHAR uctCRCWL; /* */
- UCHAR uctCKE; /* */
- UCHAR uctCKRSE; /* */
- UCHAR uctCKRSX; /* */
- UCHAR uctFAW32; /* */
- UCHAR ucReserved1; /* */
- UCHAR ucReserved2; /* */
- UCHAR ucTerminator;
-} ATOM_MEMORY_TIMING_FORMAT_V1;
-
-typedef struct _ATOM_MEMORY_FORMAT {
- ULONG ulDllDisClock; /* memory DLL will be disable when target memory clock is below this clock */
- union {
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_Reserved; /* Not used for DDR3 memory */
- };
- union {
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_MR3; /* Used for DDR3 memory */
- };
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now; */
- UCHAR ucMemoryVenderID; /* Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed */
- UCHAR ucRow; /* Number of Row,in power of 2; */
- UCHAR ucColumn; /* Number of Column,in power of 2; */
- UCHAR ucBank; /* Nunber of Bank; */
- UCHAR ucRank; /* Number of Rank, in power of 2 */
- UCHAR ucBurstSize; /* burst size, 0= burst size=4 1= burst size=8 */
- UCHAR ucDllDisBit; /* position of DLL Enable/Disable bit in EMRS ( Extended Mode Register ) */
- UCHAR ucRefreshRateFactor; /* memory refresh rate in unit of ms */
- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
- UCHAR ucMemAttrib; /* Memory Device Addribute, like RDBI/WDBI etc */
- ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
-} ATOM_MEMORY_FORMAT;
-
-typedef struct _ATOM_VRAM_MODULE_V3 {
- ULONG ulChannelMapCfg; /* board dependent paramenter:Channel combination */
- USHORT usSize; /* size of ATOM_VRAM_MODULE_V3 */
- USHORT usDefaultMVDDQ; /* board dependent parameter:Default Memory Core Voltage */
- USHORT usDefaultMVDDC; /* board dependent parameter:Default Memory IO Voltage */
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucChannelNum; /* board dependent parameter:Number of channel; */
- UCHAR ucChannelSize; /* board dependent parameter:32bit or 64bit */
- UCHAR ucVREFI; /* board dependnt parameter: EXT or INT +160mv to -140mv */
- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
- ATOM_MEMORY_FORMAT asMemory; /* describ all of video memory parameters from memory spec */
-} ATOM_VRAM_MODULE_V3;
-
-/* ATOM_VRAM_MODULE_V3.ucNPL_RT */
+typedef struct _ATOM_VRAM_MODULE_V1
+{
+ ULONG ulReserved;
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] reserved;
+ UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender
+ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+ UCHAR ucRow; // Number of Row,in power of 2;
+ UCHAR ucColumn; // Number of Column,in power of 2;
+ UCHAR ucBank; // Number of Banks;
+ UCHAR ucRank; // Number of Rank, in power of 2
+ UCHAR ucChannelNum; // Number of channel;
+ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
+ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+ UCHAR ucReserved[2];
+}ATOM_VRAM_MODULE_V1;
+
+
+typedef struct _ATOM_VRAM_MODULE_V2
+{
+ ULONG ulReserved;
+ ULONG ulFlags; // To enable/disable functionalities based on memory type
+ ULONG ulEngineClock; // Override of default engine clock for particular memory type
+ ULONG ulMemoryClock; // Override of default memory clock for particular memory type
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRSValue;
+ USHORT usMRSValue;
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+ UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed
+ UCHAR ucMemoryDeviceCfg; // [7:4]=0x0:4M;=0x1:8M;=0x2:16M;0x3:32M....[3:0]=0x0:x4;=0x1:x8;=0x2:x16;=0x3:x32...
+ UCHAR ucRow; // Number of Row,in power of 2;
+ UCHAR ucColumn; // Number of Column,in power of 2;
+ UCHAR ucBank; // Number of Banks;
+ UCHAR ucRank; // Number of Rank, in power of 2
+ UCHAR ucChannelNum; // Number of channel;
+ UCHAR ucChannelConfig; // [3:0]=Indication of what channel combination;[4:7]=Channel bit width, in number of 2
+ UCHAR ucDefaultMVDDQ_ID; // Default MVDDQ setting for this memory block, ID linking to MVDDQ info table to find real set-up data;
+ UCHAR ucDefaultMVDDC_ID; // Default MVDDC setting for this memory block, ID linking to MVDDC info table to find real set-up data;
+ UCHAR ucRefreshRateFactor;
+ UCHAR ucReserved[3];
+}ATOM_VRAM_MODULE_V2;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT
+{
+ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+ union{
+ USHORT usMRS; // mode register
+ USHORT usDDR3_MR0;
+ };
+ union{
+ USHORT usEMRS; // extended mode register
+ USHORT usDDR3_MR1;
+ };
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ union
+ {
+ struct {
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+ UCHAR ucReserved;
+ };
+ USHORT usDDR3_MR2;
+ };
+}ATOM_MEMORY_TIMING_FORMAT;
+
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V1
+{
+ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+ USHORT usMRS; // mode register
+ USHORT usEMRS; // extended mode register
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+ UCHAR uctCCDL; //
+ UCHAR uctCRCRL; //
+ UCHAR uctCRCWL; //
+ UCHAR uctCKE; //
+ UCHAR uctCKRSE; //
+ UCHAR uctCKRSX; //
+ UCHAR uctFAW32; //
+ UCHAR ucMR5lo; //
+ UCHAR ucMR5hi; //
+ UCHAR ucTerminator;
+}ATOM_MEMORY_TIMING_FORMAT_V1;
+
+typedef struct _ATOM_MEMORY_TIMING_FORMAT_V2
+{
+ ULONG ulClkRange; // memory clock in 10kHz unit, when target memory clock is below this clock, use this memory timing
+ USHORT usMRS; // mode register
+ USHORT usEMRS; // extended mode register
+ UCHAR ucCL; // CAS latency
+ UCHAR ucWL; // WRITE Latency
+ UCHAR uctRAS; // tRAS
+ UCHAR uctRC; // tRC
+ UCHAR uctRFC; // tRFC
+ UCHAR uctRCDR; // tRCDR
+ UCHAR uctRCDW; // tRCDW
+ UCHAR uctRP; // tRP
+ UCHAR uctRRD; // tRRD
+ UCHAR uctWR; // tWR
+ UCHAR uctWTR; // tWTR
+ UCHAR uctPDIX; // tPDIX
+ UCHAR uctFAW; // tFAW
+ UCHAR uctAOND; // tAOND
+ UCHAR ucflag; // flag to control memory timing calculation. bit0= control EMRS2 Infineon
+////////////////////////////////////GDDR parameters///////////////////////////////////
+ UCHAR uctCCDL; //
+ UCHAR uctCRCRL; //
+ UCHAR uctCRCWL; //
+ UCHAR uctCKE; //
+ UCHAR uctCKRSE; //
+ UCHAR uctCKRSX; //
+ UCHAR uctFAW32; //
+ UCHAR ucMR4lo; //
+ UCHAR ucMR4hi; //
+ UCHAR ucMR5lo; //
+ UCHAR ucMR5hi; //
+ UCHAR ucTerminator;
+ UCHAR ucReserved;
+}ATOM_MEMORY_TIMING_FORMAT_V2;
+
+typedef struct _ATOM_MEMORY_FORMAT
+{
+ ULONG ulDllDisClock; // memory DLL will be disable when target memory clock is below this clock
+ union{
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_Reserved; // Not used for DDR3 memory
+ };
+ union{
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_MR3; // Used for DDR3 memory
+ };
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4;[3:0] - must not be used for now;
+ UCHAR ucMemoryVenderID; // Predefined,never change across designs or memory type/vender. If not predefined, vendor detection table gets executed
+ UCHAR ucRow; // Number of Row,in power of 2;
+ UCHAR ucColumn; // Number of Column,in power of 2;
+ UCHAR ucBank; // Number of Banks;
+ UCHAR ucRank; // Number of Rank, in power of 2
+ UCHAR ucBurstSize; // burst size, 0= burst size=4 1= burst size=8
+ UCHAR ucDllDisBit; // position of DLL Enable/Disable bit in EMRS ( Extended Mode Register )
+ UCHAR ucRefreshRateFactor; // memory refresh rate in unit of ms
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucPreamble; //[7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemAttrib; // Memory Device Attribute, like RDBI/WDBI etc
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; //Memory Timing block sort from lower clock to higher clock
+}ATOM_MEMORY_FORMAT;
+
+
+typedef struct _ATOM_VRAM_MODULE_V3
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usSize; // size of ATOM_VRAM_MODULE_V3
+ USHORT usDefaultMVDDQ; // board dependent parameter:Default Memory Core Voltage
+ USHORT usDefaultMVDDC; // board dependent parameter:Default Memory IO Voltage
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucChannelNum; // board dependent parameter:Number of channel;
+ UCHAR ucChannelSize; // board dependent parameter:32bit or 64bit
+ UCHAR ucVREFI; // board dependent parameter: EXT or INT +160mv to -140mv
+ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ ATOM_MEMORY_FORMAT asMemory; // describes all video memory parameters from the memory spec
+}ATOM_VRAM_MODULE_V3;
+
+
+//ATOM_VRAM_MODULE_V3.ucNPL_RT
#define NPL_RT_MASK 0x0f
#define BATTERY_ODT_MASK 0xc0
#define ATOM_VRAM_MODULE ATOM_VRAM_MODULE_V3
-typedef struct _ATOM_VRAM_MODULE_V4 {
- ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
- USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
- USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
- UCHAR ucChannelNum; /* Number of channels present in this module config */
- UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
- UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
- UCHAR ucVREFI; /* board dependent parameter */
- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
- UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
- UCHAR ucReserved[3];
-
-/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */
- union {
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_Reserved;
- };
- union {
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usDDR3_MR3; /* Used for DDR3 memory */
- };
- UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
- UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
- UCHAR ucReserved2[2];
- ATOM_MEMORY_TIMING_FORMAT asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
-} ATOM_VRAM_MODULE_V4;
+typedef struct _ATOM_VRAM_MODULE_V4
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level
+ union{
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_Reserved;
+ };
+ union{
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usDDR3_MR3; // Used for DDR3 memory
+ };
+ UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucReserved2[2];
+ ATOM_MEMORY_TIMING_FORMAT asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
+}ATOM_VRAM_MODULE_V4;
#define VRAM_MODULE_V4_MISC_RANK_MASK 0x3
#define VRAM_MODULE_V4_MISC_DUAL_RANK 0x1
@@ -3973,96 +4841,139 @@ typedef struct _ATOM_VRAM_MODULE_V4 {
#define VRAM_MODULE_V4_MISC_BL8 0x4
#define VRAM_MODULE_V4_MISC_DUAL_CS 0x10
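As an illustration only (assuming the bit layout documented in the ucMisc comment above and the VRAM_MODULE_V4_MISC_* masks just defined), the rank and burst-length flags could be decoded like this:

/* Hypothetical decode of ATOM_VRAM_MODULE_V4.ucMisc per its comment: bit0 rank, bit2 burst length. */
static void atom_decode_v4_misc(UCHAR misc, int *dual_rank, int *burst_len)
{
	*dual_rank = (misc & VRAM_MODULE_V4_MISC_RANK_MASK) == VRAM_MODULE_V4_MISC_DUAL_RANK;
	*burst_len = (misc & VRAM_MODULE_V4_MISC_BL8) ? 8 : 4;
}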
-typedef struct _ATOM_VRAM_MODULE_V5 {
- ULONG ulChannelMapCfg; /* board dependent parameter: Channel combination */
- USHORT usModuleSize; /* size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE */
- USHORT usPrivateReserved; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS) */
- USHORT usReserved;
- UCHAR ucExtMemoryID; /* An external indicator (by hardcode, callback or pin) to tell what is the current memory module */
- UCHAR ucMemoryType; /* [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now; */
- UCHAR ucChannelNum; /* Number of channels present in this module config */
- UCHAR ucChannelWidth; /* 0 - 32 bits; 1 - 64 bits */
- UCHAR ucDensity; /* _8Mx32, _16Mx32, _16Mx16, _32Mx16 */
- UCHAR ucFlag; /* To enable/disable functionalities based on memory type */
- UCHAR ucMisc; /* bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8 */
- UCHAR ucVREFI; /* board dependent parameter */
- UCHAR ucNPL_RT; /* board dependent parameter:NPL round trip delay, used for calculate memory timing parameters */
- UCHAR ucPreamble; /* [7:4] Write Preamble, [3:0] Read Preamble */
- UCHAR ucMemorySize; /* BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!! */
- /* Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros */
- UCHAR ucReserved[3];
+typedef struct _ATOM_VRAM_MODULE_V5
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
+}ATOM_VRAM_MODULE_V5;
+
+typedef struct _ATOM_VRAM_MODULE_V6
+{
+ ULONG ulChannelMapCfg; // board dependent parameter: Channel combination
+ USHORT usModuleSize; // size of ATOM_VRAM_MODULE_V4, make it easy for VBIOS to look for next entry of VRAM_MODULE
+ USHORT usPrivateReserved; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // MC_ARB_RAMCFG (includes NOOFBANK,NOOFRANKS,NOOFROWS,NOOFCOLS)
+ USHORT usReserved;
+ UCHAR ucExtMemoryID; // An external indicator (by hardcode, callback or pin) to tell what is the current memory module
+ UCHAR ucMemoryType; // [7:4]=0x1:DDR1;=0x2:DDR2;=0x3:DDR3;=0x4:DDR4; 0x5:DDR5 [3:0] - Must be 0x0 for now;
+ UCHAR ucChannelNum; // Number of channels present in this module config
+ UCHAR ucChannelWidth; // 0 - 32 bits; 1 - 64 bits
+ UCHAR ucDensity; // _8Mx32, _16Mx32, _16Mx16, _32Mx16
+ UCHAR ucFlag; // To enable/disable functionalities based on memory type
+ UCHAR ucMisc; // bit0: 0 - single rank; 1 - dual rank; bit2: 0 - burstlength 4, 1 - burstlength 8
+ UCHAR ucVREFI; // board dependent parameter
+ UCHAR ucNPL_RT; // board dependent parameter:NPL round trip delay, used for calculate memory timing parameters
+ UCHAR ucPreamble; // [7:4] Write Preamble, [3:0] Read Preamble
+ UCHAR ucMemorySize; // BIOS internal reserved space to optimize code size, updated by the compiler, shouldn't be modified manually!!
+ // Total memory size in unit of 16MB for CONFIG_MEMSIZE - bit[23:0] zeros
+ UCHAR ucReserved[3];
+
+//compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level
+ USHORT usEMRS2Value; // EMRS2 Value is used for GDDR2 and GDDR4 memory type
+ USHORT usEMRS3Value; // EMRS3 Value is used for GDDR2 and GDDR4 memory type
+ UCHAR ucMemoryVenderID; // Predefined, If not predefined, vendor detection table gets executed
+ UCHAR ucRefreshRateFactor; // [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms)
+ UCHAR ucFIFODepth; // FIFO depth is supposed to be detected during vendor detection, but if we don't do vendor detection we have to hardcode the FIFO depth
+ UCHAR ucCDR_Bandwidth; // [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth
+ ATOM_MEMORY_TIMING_FORMAT_V2 asMemTiming[5];//Memory Timing block sort from lower clock to higher clock
+}ATOM_VRAM_MODULE_V6;
+
+
+
+typedef struct _ATOM_VRAM_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+}ATOM_VRAM_INFO_V2;
-/* compare with V3, we flat the struct by merging ATOM_MEMORY_FORMAT (as is) into V4 as the same level */
- USHORT usEMRS2Value; /* EMRS2 Value is used for GDDR2 and GDDR4 memory type */
- USHORT usEMRS3Value; /* EMRS3 Value is used for GDDR2 and GDDR4 memory type */
- UCHAR ucMemoryVenderID; /* Predefined, If not predefined, vendor detection table gets executed */
- UCHAR ucRefreshRateFactor; /* [1:0]=RefreshFactor (00=8ms, 01=16ms, 10=32ms,11=64ms) */
- UCHAR ucFIFODepth; /* FIFO depth supposes to be detected during vendor detection, but if we dont do vendor detection we have to hardcode FIFO Depth */
- UCHAR ucCDR_Bandwidth; /* [0:3]=Read CDR bandwidth, [4:7] - Write CDR Bandwidth */
- ATOM_MEMORY_TIMING_FORMAT_V1 asMemTiming[5]; /* Memory Timing block sort from lower clock to higher clock */
-} ATOM_VRAM_MODULE_V5;
-
-typedef struct _ATOM_VRAM_INFO_V2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucNumOfVRAMModule;
- ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
-} ATOM_VRAM_INFO_V2;
-
-typedef struct _ATOM_VRAM_INFO_V3 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
- USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
- USHORT usRerseved;
- UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
- UCHAR ucNumOfVRAMModule;
- ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
- ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
- /* ATOM_INIT_REG_BLOCK aMemAdjust; */
-} ATOM_VRAM_INFO_V3;
+typedef struct _ATOM_VRAM_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usRerseved;
+ UCHAR aVID_PinsShift[9]; // 8 bit strap maximum+terminator
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
+ // ATOM_INIT_REG_BLOCK aMemAdjust;
+}ATOM_VRAM_INFO_V3;
#define ATOM_VRAM_INFO_LAST ATOM_VRAM_INFO_V3
-typedef struct _ATOM_VRAM_INFO_V4 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMemAdjustTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting */
- USHORT usMemClkPatchTblOffset; /* offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting */
- USHORT usRerseved;
- UCHAR ucMemDQ7_0ByteRemap; /* DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3 */
- ULONG ulMemDQ7_0BitRemap; /* each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21] */
- UCHAR ucReservde[4];
- UCHAR ucNumOfVRAMModule;
- ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; /* just for allocation, real number of blocks is in ucNumOfVRAMModule; */
- ATOM_INIT_REG_BLOCK asMemPatch; /* for allocation */
- /* ATOM_INIT_REG_BLOCK aMemAdjust; */
-} ATOM_VRAM_INFO_V4;
-
-typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR aVID_PinsShift[9]; /* 8 bit strap maximum+terminator */
-} ATOM_VRAM_GPIO_DETECTION_INFO;
-
-typedef struct _ATOM_MEMORY_TRAINING_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucTrainingLoop;
- UCHAR ucReserved[3];
- ATOM_INIT_REG_BLOCK asMemTrainingSetting;
-} ATOM_MEMORY_TRAINING_INFO;
-
-typedef struct SW_I2C_CNTL_DATA_PARAMETERS {
- UCHAR ucControl;
- UCHAR ucData;
- UCHAR ucSatus;
- UCHAR ucTemp;
+typedef struct _ATOM_VRAM_INFO_V4
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMemAdjustTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory vendor specific MC adjust setting
+ USHORT usMemClkPatchTblOffset; // offset of ATOM_INIT_REG_BLOCK structure for memory clock specific MC setting
+ USHORT usRerseved;
+ UCHAR ucMemDQ7_0ByteRemap; // DQ line byte remap, =0: Memory Data line BYTE0, =1: BYTE1, =2: BYTE2, =3: BYTE3
+ ULONG ulMemDQ7_0BitRemap; // each DQ line ( 7~0) use 3bits, like: DQ0=Bit[2:0], DQ1:[5:3], ... DQ7:[23:21]
+ UCHAR ucReservde[4];
+ UCHAR ucNumOfVRAMModule;
+ ATOM_VRAM_MODULE_V4 aVramInfo[ATOM_MAX_NUMBER_OF_VRAM_MODULE]; // just for allocation, real number of blocks is in ucNumOfVRAMModule;
+ ATOM_INIT_REG_BLOCK asMemPatch; // for allocation
+ // ATOM_INIT_REG_BLOCK aMemAdjust;
+}ATOM_VRAM_INFO_V4;
+
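Since each ATOM_VRAM_MODULE_V4 entry carries its own usModuleSize, the aVramInfo array is effectively variable-stride. A minimal sketch (an assumption on my part, not code from this patch) of stepping through it:

/* Hypothetical lookup of the idx-th VRAM module; entries are usModuleSize bytes apart. */
static const ATOM_VRAM_MODULE_V4 *atom_vram_module_v4(const ATOM_VRAM_INFO_V4 *info, int idx)
{
	const UCHAR *p = (const UCHAR *)info->aVramInfo;
	int i;

	if (idx >= info->ucNumOfVRAMModule)
		return NULL;
	for (i = 0; i < idx; i++)
		p += ((const ATOM_VRAM_MODULE_V4 *)p)->usModuleSize;
	return (const ATOM_VRAM_MODULE_V4 *)p;
}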
+typedef struct _ATOM_VRAM_GPIO_DETECTION_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR aVID_PinsShift[9]; //8 bit strap maximum+terminator
+}ATOM_VRAM_GPIO_DETECTION_INFO;
+
+
+typedef struct _ATOM_MEMORY_TRAINING_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucTrainingLoop;
+ UCHAR ucReserved[3];
+ ATOM_INIT_REG_BLOCK asMemTrainingSetting;
+}ATOM_MEMORY_TRAINING_INFO;
+
+
+typedef struct SW_I2C_CNTL_DATA_PARAMETERS
+{
+ UCHAR ucControl;
+ UCHAR ucData;
+ UCHAR ucSatus;
+ UCHAR ucTemp;
} SW_I2C_CNTL_DATA_PARAMETERS;
#define SW_I2C_CNTL_DATA_PS_ALLOCATION SW_I2C_CNTL_DATA_PARAMETERS
-typedef struct _SW_I2C_IO_DATA_PARAMETERS {
- USHORT GPIO_Info;
- UCHAR ucAct;
- UCHAR ucData;
-} SW_I2C_IO_DATA_PARAMETERS;
+typedef struct _SW_I2C_IO_DATA_PARAMETERS
+{
+ USHORT GPIO_Info;
+ UCHAR ucAct;
+ UCHAR ucData;
+}SW_I2C_IO_DATA_PARAMETERS;
#define SW_I2C_IO_DATA_PS_ALLOCATION SW_I2C_IO_DATA_PARAMETERS
@@ -4087,127 +4998,136 @@ typedef struct _SW_I2C_IO_DATA_PARAMETERS {
#define SW_I2C_CNTL_CLOSE 5
#define SW_I2C_CNTL_WRITE1BIT 6
-/* ==============================VESA definition Portion=============================== */
+//==============================VESA definition Portion===============================
#define VESA_OEM_PRODUCT_REV '01.00'
-#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB /* refer to VBE spec p.32, no TTY support */
+#define VESA_MODE_ATTRIBUTE_MODE_SUPPORT 0xBB //refer to VBE spec p.32, no TTY support
#define VESA_MODE_WIN_ATTRIBUTE 7
#define VESA_WIN_SIZE 64
-typedef struct _PTR_32_BIT_STRUCTURE {
- USHORT Offset16;
- USHORT Segment16;
+typedef struct _PTR_32_BIT_STRUCTURE
+{
+ USHORT Offset16;
+ USHORT Segment16;
} PTR_32_BIT_STRUCTURE;
-typedef union _PTR_32_BIT_UNION {
- PTR_32_BIT_STRUCTURE SegmentOffset;
- ULONG Ptr32_Bit;
+typedef union _PTR_32_BIT_UNION
+{
+ PTR_32_BIT_STRUCTURE SegmentOffset;
+ ULONG Ptr32_Bit;
} PTR_32_BIT_UNION;
-typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE {
- UCHAR VbeSignature[4];
- USHORT VbeVersion;
- PTR_32_BIT_UNION OemStringPtr;
- UCHAR Capabilities[4];
- PTR_32_BIT_UNION VideoModePtr;
- USHORT TotalMemory;
+typedef struct _VBE_1_2_INFO_BLOCK_UPDATABLE
+{
+ UCHAR VbeSignature[4];
+ USHORT VbeVersion;
+ PTR_32_BIT_UNION OemStringPtr;
+ UCHAR Capabilities[4];
+ PTR_32_BIT_UNION VideoModePtr;
+ USHORT TotalMemory;
} VBE_1_2_INFO_BLOCK_UPDATABLE;
-typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE {
- VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
- USHORT OemSoftRev;
- PTR_32_BIT_UNION OemVendorNamePtr;
- PTR_32_BIT_UNION OemProductNamePtr;
- PTR_32_BIT_UNION OemProductRevPtr;
+
+typedef struct _VBE_2_0_INFO_BLOCK_UPDATABLE
+{
+ VBE_1_2_INFO_BLOCK_UPDATABLE CommonBlock;
+ USHORT OemSoftRev;
+ PTR_32_BIT_UNION OemVendorNamePtr;
+ PTR_32_BIT_UNION OemProductNamePtr;
+ PTR_32_BIT_UNION OemProductRevPtr;
} VBE_2_0_INFO_BLOCK_UPDATABLE;
-typedef union _VBE_VERSION_UNION {
- VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
- VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
+typedef union _VBE_VERSION_UNION
+{
+ VBE_2_0_INFO_BLOCK_UPDATABLE VBE_2_0_InfoBlock;
+ VBE_1_2_INFO_BLOCK_UPDATABLE VBE_1_2_InfoBlock;
} VBE_VERSION_UNION;
-typedef struct _VBE_INFO_BLOCK {
- VBE_VERSION_UNION UpdatableVBE_Info;
- UCHAR Reserved[222];
- UCHAR OemData[256];
+typedef struct _VBE_INFO_BLOCK
+{
+ VBE_VERSION_UNION UpdatableVBE_Info;
+ UCHAR Reserved[222];
+ UCHAR OemData[256];
} VBE_INFO_BLOCK;
-typedef struct _VBE_FP_INFO {
- USHORT HSize;
- USHORT VSize;
- USHORT FPType;
- UCHAR RedBPP;
- UCHAR GreenBPP;
- UCHAR BlueBPP;
- UCHAR ReservedBPP;
- ULONG RsvdOffScrnMemSize;
- ULONG RsvdOffScrnMEmPtr;
- UCHAR Reserved[14];
+typedef struct _VBE_FP_INFO
+{
+ USHORT HSize;
+ USHORT VSize;
+ USHORT FPType;
+ UCHAR RedBPP;
+ UCHAR GreenBPP;
+ UCHAR BlueBPP;
+ UCHAR ReservedBPP;
+ ULONG RsvdOffScrnMemSize;
+ ULONG RsvdOffScrnMEmPtr;
+ UCHAR Reserved[14];
} VBE_FP_INFO;
-typedef struct _VESA_MODE_INFO_BLOCK {
-/* Mandatory information for all VBE revisions */
- USHORT ModeAttributes; /* dw ? ; mode attributes */
- UCHAR WinAAttributes; /* db ? ; window A attributes */
- UCHAR WinBAttributes; /* db ? ; window B attributes */
- USHORT WinGranularity; /* dw ? ; window granularity */
- USHORT WinSize; /* dw ? ; window size */
- USHORT WinASegment; /* dw ? ; window A start segment */
- USHORT WinBSegment; /* dw ? ; window B start segment */
- ULONG WinFuncPtr; /* dd ? ; real mode pointer to window function */
- USHORT BytesPerScanLine; /* dw ? ; bytes per scan line */
-
-/* ; Mandatory information for VBE 1.2 and above */
- USHORT XResolution; /* dw ? ; horizontal resolution in pixels or characters */
- USHORT YResolution; /* dw ? ; vertical resolution in pixels or characters */
- UCHAR XCharSize; /* db ? ; character cell width in pixels */
- UCHAR YCharSize; /* db ? ; character cell height in pixels */
- UCHAR NumberOfPlanes; /* db ? ; number of memory planes */
- UCHAR BitsPerPixel; /* db ? ; bits per pixel */
- UCHAR NumberOfBanks; /* db ? ; number of banks */
- UCHAR MemoryModel; /* db ? ; memory model type */
- UCHAR BankSize; /* db ? ; bank size in KB */
- UCHAR NumberOfImagePages; /* db ? ; number of images */
- UCHAR ReservedForPageFunction; /* db 1 ; reserved for page function */
-
-/* ; Direct Color fields(required for direct/6 and YUV/7 memory models) */
- UCHAR RedMaskSize; /* db ? ; size of direct color red mask in bits */
- UCHAR RedFieldPosition; /* db ? ; bit position of lsb of red mask */
- UCHAR GreenMaskSize; /* db ? ; size of direct color green mask in bits */
- UCHAR GreenFieldPosition; /* db ? ; bit position of lsb of green mask */
- UCHAR BlueMaskSize; /* db ? ; size of direct color blue mask in bits */
- UCHAR BlueFieldPosition; /* db ? ; bit position of lsb of blue mask */
- UCHAR RsvdMaskSize; /* db ? ; size of direct color reserved mask in bits */
- UCHAR RsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask */
- UCHAR DirectColorModeInfo; /* db ? ; direct color mode attributes */
-
-/* ; Mandatory information for VBE 2.0 and above */
- ULONG PhysBasePtr; /* dd ? ; physical address for flat memory frame buffer */
- ULONG Reserved_1; /* dd 0 ; reserved - always set to 0 */
- USHORT Reserved_2; /* dw 0 ; reserved - always set to 0 */
-
-/* ; Mandatory information for VBE 3.0 and above */
- USHORT LinBytesPerScanLine; /* dw ? ; bytes per scan line for linear modes */
- UCHAR BnkNumberOfImagePages; /* db ? ; number of images for banked modes */
- UCHAR LinNumberOfImagPages; /* db ? ; number of images for linear modes */
- UCHAR LinRedMaskSize; /* db ? ; size of direct color red mask(linear modes) */
- UCHAR LinRedFieldPosition; /* db ? ; bit position of lsb of red mask(linear modes) */
- UCHAR LinGreenMaskSize; /* db ? ; size of direct color green mask(linear modes) */
- UCHAR LinGreenFieldPosition; /* db ? ; bit position of lsb of green mask(linear modes) */
- UCHAR LinBlueMaskSize; /* db ? ; size of direct color blue mask(linear modes) */
- UCHAR LinBlueFieldPosition; /* db ? ; bit position of lsb of blue mask(linear modes) */
- UCHAR LinRsvdMaskSize; /* db ? ; size of direct color reserved mask(linear modes) */
- UCHAR LinRsvdFieldPosition; /* db ? ; bit position of lsb of reserved mask(linear modes) */
- ULONG MaxPixelClock; /* dd ? ; maximum pixel clock(in Hz) for graphics mode */
- UCHAR Reserved; /* db 190 dup (0) */
+typedef struct _VESA_MODE_INFO_BLOCK
+{
+// Mandatory information for all VBE revisions
+ USHORT ModeAttributes; // dw ? ; mode attributes
+ UCHAR WinAAttributes; // db ? ; window A attributes
+ UCHAR WinBAttributes; // db ? ; window B attributes
+ USHORT WinGranularity; // dw ? ; window granularity
+ USHORT WinSize; // dw ? ; window size
+ USHORT WinASegment; // dw ? ; window A start segment
+ USHORT WinBSegment; // dw ? ; window B start segment
+ ULONG WinFuncPtr; // dd ? ; real mode pointer to window function
+ USHORT BytesPerScanLine;// dw ? ; bytes per scan line
+
+//; Mandatory information for VBE 1.2 and above
+ USHORT XResolution; // dw ? ; horizontal resolution in pixels or characters
+ USHORT YResolution; // dw ? ; vertical resolution in pixels or characters
+ UCHAR XCharSize; // db ? ; character cell width in pixels
+ UCHAR YCharSize; // db ? ; character cell height in pixels
+ UCHAR NumberOfPlanes; // db ? ; number of memory planes
+ UCHAR BitsPerPixel; // db ? ; bits per pixel
+ UCHAR NumberOfBanks; // db ? ; number of banks
+ UCHAR MemoryModel; // db ? ; memory model type
+ UCHAR BankSize; // db ? ; bank size in KB
+ UCHAR NumberOfImagePages;// db ? ; number of images
+ UCHAR ReservedForPageFunction;//db 1 ; reserved for page function
+
+//; Direct Color fields(required for direct/6 and YUV/7 memory models)
+ UCHAR RedMaskSize; // db ? ; size of direct color red mask in bits
+ UCHAR RedFieldPosition; // db ? ; bit position of lsb of red mask
+ UCHAR GreenMaskSize; // db ? ; size of direct color green mask in bits
+ UCHAR GreenFieldPosition; // db ? ; bit position of lsb of green mask
+ UCHAR BlueMaskSize; // db ? ; size of direct color blue mask in bits
+ UCHAR BlueFieldPosition; // db ? ; bit position of lsb of blue mask
+ UCHAR RsvdMaskSize; // db ? ; size of direct color reserved mask in bits
+ UCHAR RsvdFieldPosition; // db ? ; bit position of lsb of reserved mask
+ UCHAR DirectColorModeInfo;// db ? ; direct color mode attributes
+
+//; Mandatory information for VBE 2.0 and above
+ ULONG PhysBasePtr; // dd ? ; physical address for flat memory frame buffer
+ ULONG Reserved_1; // dd 0 ; reserved - always set to 0
+ USHORT Reserved_2; // dw 0 ; reserved - always set to 0
+
+//; Mandatory information for VBE 3.0 and above
+ USHORT LinBytesPerScanLine; // dw ? ; bytes per scan line for linear modes
+ UCHAR BnkNumberOfImagePages;// db ? ; number of images for banked modes
+ UCHAR LinNumberOfImagPages; // db ? ; number of images for linear modes
+ UCHAR LinRedMaskSize; // db ? ; size of direct color red mask(linear modes)
+ UCHAR LinRedFieldPosition; // db ? ; bit position of lsb of red mask(linear modes)
+ UCHAR LinGreenMaskSize; // db ? ; size of direct color green mask(linear modes)
+ UCHAR LinGreenFieldPosition;// db ? ; bit position of lsb of green mask(linear modes)
+ UCHAR LinBlueMaskSize; // db ? ; size of direct color blue mask(linear modes)
+ UCHAR LinBlueFieldPosition; // db ? ; bit position of lsb of blue mask(linear modes)
+ UCHAR LinRsvdMaskSize; // db ? ; size of direct color reserved mask(linear modes)
+ UCHAR LinRsvdFieldPosition; // db ? ; bit position of lsb of reserved mask(linear modes)
+ ULONG MaxPixelClock; // dd ? ; maximum pixel clock(in Hz) for graphics mode
+ UCHAR Reserved; // db 190 dup (0)
} VESA_MODE_INFO_BLOCK;
-/* BIOS function CALLS */
-#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 /* ATI Extended Function code */
+// BIOS function CALLS
+#define ATOM_BIOS_EXTENDED_FUNCTION_CODE 0xA0 // ATI Extended Function code
#define ATOM_BIOS_FUNCTION_COP_MODE 0x00
#define ATOM_BIOS_FUNCTION_SHORT_QUERY1 0x04
#define ATOM_BIOS_FUNCTION_SHORT_QUERY2 0x05
#define ATOM_BIOS_FUNCTION_SHORT_QUERY3 0x06
-#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
+#define ATOM_BIOS_FUNCTION_GET_DDC 0x0B
#define ATOM_BIOS_FUNCTION_ASIC_DSTATE 0x0E
#define ATOM_BIOS_FUNCTION_DEBUG_PLAY 0x0F
#define ATOM_BIOS_FUNCTION_STV_STD 0x16
@@ -4217,100 +5137,135 @@ typedef struct _VESA_MODE_INFO_BLOCK {
#define ATOM_BIOS_FUNCTION_PANEL_CONTROL 0x82
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_DET 0x83
#define ATOM_BIOS_FUNCTION_OLD_DEVICE_SWITCH 0x84
-#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
+#define ATOM_BIOS_FUNCTION_HW_ICON 0x8A
#define ATOM_BIOS_FUNCTION_SET_CMOS 0x8B
-#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 /* Sub function 80 */
-#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 /* Sub function 80 */
+#define SUB_FUNCTION_UPDATE_DISPLAY_INFO 0x8000 // Sub function 80
+#define SUB_FUNCTION_UPDATE_EXPANSION_INFO 0x8100 // Sub function 81
#define ATOM_BIOS_FUNCTION_DISPLAY_INFO 0x8D
#define ATOM_BIOS_FUNCTION_DEVICE_ON_OFF 0x8E
-#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
-#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 /* Sub function 03 */
-#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 /* Sub function 7 */
-#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 /* Notify caller the current thermal state */
-#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 /* Notify caller the current critical state */
-#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 /* Sub function 85 */
-#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900 /* Sub function 89 */
-#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 /* Notify caller that ADC is supported */
-
-#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 /* Set DPMS */
-#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 /* BL: Sub function 01 */
-#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 /* BL: Sub function 02 */
-#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 /* BH Parameter for DPMS ON. */
-#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 /* BH Parameter for DPMS STANDBY */
-#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 /* BH Parameter for DPMS SUSPEND */
-#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 /* BH Parameter for DPMS OFF */
-#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 /* BH Parameter for DPMS REDUCE ON (NOT SUPPORTED) */
+#define ATOM_BIOS_FUNCTION_VIDEO_STATE 0x8F
+#define ATOM_SUB_FUNCTION_GET_CRITICAL_STATE 0x0300 // Sub function 03
+#define ATOM_SUB_FUNCTION_GET_LIDSTATE 0x0700 // Sub function 7
+#define ATOM_SUB_FUNCTION_THERMAL_STATE_NOTICE 0x1400 // Notify caller the current thermal state
+#define ATOM_SUB_FUNCTION_CRITICAL_STATE_NOTICE 0x8300 // Notify caller the current critical state
+#define ATOM_SUB_FUNCTION_SET_LIDSTATE 0x8500 // Sub function 85
+#define ATOM_SUB_FUNCTION_GET_REQ_DISPLAY_FROM_SBIOS_MODE 0x8900// Sub function 89
+#define ATOM_SUB_FUNCTION_INFORM_ADC_SUPPORT 0x9400 // Notify caller that ADC is supported
+
+
+#define ATOM_BIOS_FUNCTION_VESA_DPMS 0x4F10 // Set DPMS
+#define ATOM_SUB_FUNCTION_SET_DPMS 0x0001 // BL: Sub function 01
+#define ATOM_SUB_FUNCTION_GET_DPMS 0x0002 // BL: Sub function 02
+#define ATOM_PARAMETER_VESA_DPMS_ON 0x0000 // BH Parameter for DPMS ON.
+#define ATOM_PARAMETER_VESA_DPMS_STANDBY 0x0100 // BH Parameter for DPMS STANDBY
+#define ATOM_PARAMETER_VESA_DPMS_SUSPEND 0x0200 // BH Parameter for DPMS SUSPEND
+#define ATOM_PARAMETER_VESA_DPMS_OFF 0x0400 // BH Parameter for DPMS OFF
+#define ATOM_PARAMETER_VESA_DPMS_REDUCE_ON 0x0800 // BH Parameter for DPMS REDUCE ON (NOT SUPPORTED)
#define ATOM_BIOS_RETURN_CODE_MASK 0x0000FF00L
#define ATOM_BIOS_REG_HIGH_MASK 0x0000FF00L
#define ATOM_BIOS_REG_LOW_MASK 0x000000FFL
-/* structure used for VBIOS only */
+// structure used for VBIOS only
-/* DispOutInfoTable */
-typedef struct _ASIC_TRANSMITTER_INFO {
+//DispOutInfoTable
+typedef struct _ASIC_TRANSMITTER_INFO
+{
USHORT usTransmitterObjId;
USHORT usSupportDevice;
- UCHAR ucTransmitterCmdTblId;
- UCHAR ucConfig;
- UCHAR ucEncoderID; /* available 1st encoder ( default ) */
- UCHAR ucOptionEncoderID; /* available 2nd encoder ( optional ) */
- UCHAR uc2ndEncoderID;
- UCHAR ucReserved;
-} ASIC_TRANSMITTER_INFO;
-
-typedef struct _ASIC_ENCODER_INFO {
+ UCHAR ucTransmitterCmdTblId;
+ UCHAR ucConfig;
+ UCHAR ucEncoderID; //available 1st encoder ( default )
+ UCHAR ucOptionEncoderID; //available 2nd encoder ( optional )
+ UCHAR uc2ndEncoderID;
+ UCHAR ucReserved;
+}ASIC_TRANSMITTER_INFO;
+
+typedef struct _ASIC_ENCODER_INFO
+{
UCHAR ucEncoderID;
UCHAR ucEncoderConfig;
- USHORT usEncoderCmdTblId;
-} ASIC_ENCODER_INFO;
+ USHORT usEncoderCmdTblId;
+}ASIC_ENCODER_INFO;
+
+typedef struct _ATOM_DISP_OUT_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT ptrTransmitterInfo;
+ USHORT ptrEncoderInfo;
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO;
-typedef struct _ATOM_DISP_OUT_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
+typedef struct _ATOM_DISP_OUT_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
USHORT ptrTransmitterInfo;
USHORT ptrEncoderInfo;
- ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
- ASIC_ENCODER_INFO asEncoderInfo[1];
-} ATOM_DISP_OUT_INFO;
+ USHORT ptrMainCallParserFar; // direct address of main parser call in VBIOS binary.
+ ASIC_TRANSMITTER_INFO asTransmitterInfo[1];
+ ASIC_ENCODER_INFO asEncoderInfo[1];
+}ATOM_DISP_OUT_INFO_V2;
-/* DispDevicePriorityInfo */
-typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
+// DispDevicePriorityInfo
+typedef struct _ATOM_DISPLAY_DEVICE_PRIORITY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
USHORT asDevicePriority[16];
-} ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
-
-/* ProcessAuxChannelTransactionTable */
-typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS {
- USHORT lpAuxRequest;
- USHORT lpDataOut;
- UCHAR ucChannelID;
- union {
- UCHAR ucReplyStatus;
- UCHAR ucDelay;
+}ATOM_DISPLAY_DEVICE_PRIORITY_INFO;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
+{
+ USHORT lpAuxRequest;
+ USHORT lpDataOut;
+ UCHAR ucChannelID;
+ union
+ {
+ UCHAR ucReplyStatus;
+ UCHAR ucDelay;
+ };
+ UCHAR ucDataOutLen;
+ UCHAR ucReserved;
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+
+//ProcessAuxChannelTransactionTable
+typedef struct _PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2
+{
+ USHORT lpAuxRequest;
+ USHORT lpDataOut;
+ UCHAR ucChannelID;
+ union
+ {
+ UCHAR ucReplyStatus;
+ UCHAR ucDelay;
};
- UCHAR ucDataOutLen;
- UCHAR ucReserved;
-} PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS;
+ UCHAR ucDataOutLen;
+ UCHAR ucHPD_ID; //=0: HPD1, =1: HPD2, =2: HPD3, =3: HPD4, =4: HPD5, =5: HPD6
+}PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2;
#define PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS
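A hedged sketch of how the V2 parameter block above might be filled for an AUX transaction; the buffer offsets and the helper name are hypothetical, and ucDelay/ucReplyStatus share storage as the union shows:

/* Hypothetical request set-up; lpAuxRequest/lpDataOut values are placeholders. */
static void atom_fill_aux_request(PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 *args,
				  UCHAR channel, UCHAR hpd_id, UCHAR out_len)
{
	args->lpAuxRequest = 0;		/* placeholder: request buffer location */
	args->lpDataOut    = 16;	/* placeholder: reply buffer location */
	args->ucChannelID  = channel;
	args->ucDelay      = 0;		/* overwritten with ucReplyStatus on completion */
	args->ucDataOutLen = out_len;
	args->ucHPD_ID     = hpd_id;	/* 0..5 selects HPD1..HPD6 */
}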
-/* GetSinkType */
+//GetSinkType
-typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
+typedef struct _DP_ENCODER_SERVICE_PARAMETERS
+{
USHORT ucLinkClock;
- union {
- UCHAR ucConfig; /* for DP training command */
- UCHAR ucI2cId; /* use for GET_SINK_TYPE command */
+ union
+ {
+ UCHAR ucConfig; // for DP training command
+ UCHAR ucI2cId; // use for GET_SINK_TYPE command
};
UCHAR ucAction;
UCHAR ucStatus;
UCHAR ucLaneNum;
UCHAR ucReserved[2];
-} DP_ENCODER_SERVICE_PARAMETERS;
+}DP_ENCODER_SERVICE_PARAMETERS;
-/* ucAction */
+// ucAction
#define ATOM_DP_ACTION_GET_SINK_TYPE 0x01
+/* obsolete */
#define ATOM_DP_ACTION_TRAINING_START 0x02
#define ATOM_DP_ACTION_TRAINING_COMPLETE 0x03
#define ATOM_DP_ACTION_TRAINING_PATTERN_SEL 0x04
@@ -4318,7 +5273,7 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_ACTION_GET_VSWING_PREEMP 0x06
#define ATOM_DP_ACTION_BLANKING 0x07
-/* ucConfig */
+// ucConfig
#define ATOM_DP_CONFIG_ENCODER_SEL_MASK 0x03
#define ATOM_DP_CONFIG_DIG1_ENCODER 0x00
#define ATOM_DP_CONFIG_DIG2_ENCODER 0x01
@@ -4326,14 +5281,14 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define ATOM_DP_CONFIG_LINK_SEL_MASK 0x04
#define ATOM_DP_CONFIG_LINK_A 0x00
#define ATOM_DP_CONFIG_LINK_B 0x04
-
+/* /obsolete */
#define DP_ENCODER_SERVICE_PS_ALLOCATION WRITE_ONE_BYTE_HW_I2C_DATA_PARAMETERS
-/* DP_TRAINING_TABLE */
-#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
+// DP_TRAINING_TABLE
+#define DPCD_SET_LINKRATE_LANENUM_PATTERN1_TBL_ADDR ATOM_DP_TRAINING_TBL_ADDR
#define DPCD_SET_SS_CNTL_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 8 )
-#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16)
-#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24)
+#define DPCD_SET_LANE_VSWING_PREEMP_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 16 )
+#define DPCD_SET_TRAINING_PATTERN0_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 24 )
#define DPCD_SET_TRAINING_PATTERN2_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 32)
#define DPCD_GET_LINKRATE_LANENUM_SS_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 40)
#define DPCD_GET_LANE_STATUS_ADJUST_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 48)
@@ -4341,183 +5296,241 @@ typedef struct _DP_ENCODER_SERVICE_PARAMETERS {
#define DP_I2C_AUX_DDC_WRITE_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 64)
#define DP_I2C_AUX_DDC_READ_START_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 72)
#define DP_I2C_AUX_DDC_READ_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 76)
-#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
+#define DP_I2C_AUX_DDC_WRITE_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 80)
+#define DP_I2C_AUX_DDC_READ_END_TBL_ADDR (ATOM_DP_TRAINING_TBL_ADDR + 84)
-typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS {
- UCHAR ucI2CSpeed;
- union {
- UCHAR ucRegIndex;
- UCHAR ucStatus;
+typedef struct _PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
+{
+ UCHAR ucI2CSpeed;
+ union
+ {
+ UCHAR ucRegIndex;
+ UCHAR ucStatus;
};
- USHORT lpI2CDataOut;
- UCHAR ucFlag;
- UCHAR ucTransBytes;
- UCHAR ucSlaveAddr;
- UCHAR ucLineNumber;
-} PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
+ USHORT lpI2CDataOut;
+ UCHAR ucFlag;
+ UCHAR ucTransBytes;
+ UCHAR ucSlaveAddr;
+ UCHAR ucLineNumber;
+}PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS;
#define PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION PROCESS_I2C_CHANNEL_TRANSACTION_PARAMETERS
-/* ucFlag */
+//ucFlag
#define HW_I2C_WRITE 1
#define HW_I2C_READ 0
+#define I2C_2BYTE_ADDR 0x02
+typedef struct _SET_HWBLOCK_INSTANCE_PARAMETER_V2
+{
+ UCHAR ucHWBlkInst; // HW block instance, 0, 1, 2, ...
+ UCHAR ucReserved[3];
+}SET_HWBLOCK_INSTANCE_PARAMETER_V2;
+
+#define HWBLKINST_INSTANCE_MASK 0x07
+#define HWBLKINST_HWBLK_MASK 0xF0
+#define HWBLKINST_HWBLK_SHIFT 0x04
+
+//ucHWBlock
+#define SELECT_DISP_ENGINE 0
+#define SELECT_DISP_PLL 1
+#define SELECT_DCIO_UNIPHY_LINK0 2
+#define SELECT_DCIO_UNIPHY_LINK1 3
+#define SELECT_DCIO_IMPCAL 4
+#define SELECT_DCIO_DIG 6
+#define SELECT_CRTC_PIXEL_RATE 7
+
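For illustration (an assumption based on the masks above: block select in bits [7:4], instance in bits [2:0]), ucHWBlkInst could be composed as:

/* Hypothetical packing of ucHWBlkInst from a SELECT_* block ID and an instance number. */
static UCHAR atom_make_hwblk_inst(UCHAR hw_block, UCHAR instance)
{
	return (UCHAR)(((hw_block << HWBLKINST_HWBLK_SHIFT) & HWBLKINST_HWBLK_MASK) |
		       (instance & HWBLKINST_INSTANCE_MASK));
}

For example, atom_make_hwblk_inst(SELECT_CRTC_PIXEL_RATE, 1) would select the pixel-rate block, instance 1.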
+/****************************************************************************/
+//Portion VI: Definitions for VBIOS MC scratch registers used by the driver
/****************************************************************************/
-/* Portion VI: Definitinos being oboselete */
+
+#define MC_MISC0__MEMORY_TYPE_MASK 0xF0000000
+#define MC_MISC0__MEMORY_TYPE__GDDR1 0x10000000
+#define MC_MISC0__MEMORY_TYPE__DDR2 0x20000000
+#define MC_MISC0__MEMORY_TYPE__GDDR3 0x30000000
+#define MC_MISC0__MEMORY_TYPE__GDDR4 0x40000000
+#define MC_MISC0__MEMORY_TYPE__GDDR5 0x50000000
+#define MC_MISC0__MEMORY_TYPE__DDR3 0xB0000000
+
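The memory-type field lives in the top nibble of the MC_MISC0 scratch value; a small decode helper built only from the constants above (a sketch, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define MC_MISC0__MEMORY_TYPE_MASK 0xF0000000u

/* Map the MC_MISC0 memory-type field to a printable name. */
static const char *mem_type_name(uint32_t mc_misc0)
{
    switch (mc_misc0 & MC_MISC0__MEMORY_TYPE_MASK) {
    case 0x10000000u: return "GDDR1";
    case 0x20000000u: return "DDR2";
    case 0x30000000u: return "GDDR3";
    case 0x40000000u: return "GDDR4";
    case 0x50000000u: return "GDDR5";
    case 0xB0000000u: return "DDR3";
    default:          return "unknown";
    }
}

int main(void)
{
    printf("%s\n", mem_type_name(0x50000000u));   /* prints GDDR5 */
    return 0;
}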
+/****************************************************************************/
+//Portion VI: Obsolete definitions
/****************************************************************************/
-/* ========================================================================================== */
-/* Remove the definitions below when driver is ready! */
-typedef struct _ATOM_DAC_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMaxFrequency; /* in 10kHz unit */
- USHORT usReserved;
-} ATOM_DAC_INFO;
-
-typedef struct _COMPASSIONATE_DATA {
- ATOM_COMMON_TABLE_HEADER sHeader;
-
- /* ============================== DAC1 portion */
- UCHAR ucDAC1_BG_Adjustment;
- UCHAR ucDAC1_DAC_Adjustment;
- USHORT usDAC1_FORCE_Data;
- /* ============================== DAC2 portion */
- UCHAR ucDAC2_CRT2_BG_Adjustment;
- UCHAR ucDAC2_CRT2_DAC_Adjustment;
- USHORT usDAC2_CRT2_FORCE_Data;
- USHORT usDAC2_CRT2_MUX_RegisterIndex;
- UCHAR ucDAC2_CRT2_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
- UCHAR ucDAC2_NTSC_BG_Adjustment;
- UCHAR ucDAC2_NTSC_DAC_Adjustment;
- USHORT usDAC2_TV1_FORCE_Data;
- USHORT usDAC2_TV1_MUX_RegisterIndex;
- UCHAR ucDAC2_TV1_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
- UCHAR ucDAC2_CV_BG_Adjustment;
- UCHAR ucDAC2_CV_DAC_Adjustment;
- USHORT usDAC2_CV_FORCE_Data;
- USHORT usDAC2_CV_MUX_RegisterIndex;
- UCHAR ucDAC2_CV_MUX_RegisterInfo; /* Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low */
- UCHAR ucDAC2_PAL_BG_Adjustment;
- UCHAR ucDAC2_PAL_DAC_Adjustment;
- USHORT usDAC2_TV2_FORCE_Data;
-} COMPASSIONATE_DATA;
+//==========================================================================================
+//Remove the definitions below when driver is ready!
+typedef struct _ATOM_DAC_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMaxFrequency; // in 10kHz unit
+ USHORT usReserved;
+}ATOM_DAC_INFO;
+
+
+typedef struct _COMPASSIONATE_DATA
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+
+ //============================== DAC1 portion
+ UCHAR ucDAC1_BG_Adjustment;
+ UCHAR ucDAC1_DAC_Adjustment;
+ USHORT usDAC1_FORCE_Data;
+ //============================== DAC2 portion
+ UCHAR ucDAC2_CRT2_BG_Adjustment;
+ UCHAR ucDAC2_CRT2_DAC_Adjustment;
+ USHORT usDAC2_CRT2_FORCE_Data;
+ USHORT usDAC2_CRT2_MUX_RegisterIndex;
+ UCHAR ucDAC2_CRT2_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_NTSC_BG_Adjustment;
+ UCHAR ucDAC2_NTSC_DAC_Adjustment;
+ USHORT usDAC2_TV1_FORCE_Data;
+ USHORT usDAC2_TV1_MUX_RegisterIndex;
+ UCHAR ucDAC2_TV1_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_CV_BG_Adjustment;
+ UCHAR ucDAC2_CV_DAC_Adjustment;
+ USHORT usDAC2_CV_FORCE_Data;
+ USHORT usDAC2_CV_MUX_RegisterIndex;
+ UCHAR ucDAC2_CV_MUX_RegisterInfo; //Bit[4:0]=Bit position,Bit[7]=1:Active High;=0 Active Low
+ UCHAR ucDAC2_PAL_BG_Adjustment;
+ UCHAR ucDAC2_PAL_DAC_Adjustment;
+ USHORT usDAC2_TV2_FORCE_Data;
+}COMPASSIONATE_DATA;
/****************************Supported Device Info Table Definitions**********************/
-/* ucConnectInfo: */
-/* [7:4] - connector type */
-/* = 1 - VGA connector */
-/* = 2 - DVI-I */
-/* = 3 - DVI-D */
-/* = 4 - DVI-A */
-/* = 5 - SVIDEO */
-/* = 6 - COMPOSITE */
-/* = 7 - LVDS */
-/* = 8 - DIGITAL LINK */
-/* = 9 - SCART */
-/* = 0xA - HDMI_type A */
-/* = 0xB - HDMI_type B */
-/* = 0xE - Special case1 (DVI+DIN) */
-/* Others=TBD */
-/* [3:0] - DAC Associated */
-/* = 0 - no DAC */
-/* = 1 - DACA */
-/* = 2 - DACB */
-/* = 3 - External DAC */
-/* Others=TBD */
-/* */
-
-typedef struct _ATOM_CONNECTOR_INFO {
+// ucConnectInfo:
+// [7:4] - connector type
+// = 1 - VGA connector
+// = 2 - DVI-I
+// = 3 - DVI-D
+// = 4 - DVI-A
+// = 5 - SVIDEO
+// = 6 - COMPOSITE
+// = 7 - LVDS
+// = 8 - DIGITAL LINK
+// = 9 - SCART
+// = 0xA - HDMI_type A
+// = 0xB - HDMI_type B
+// = 0xE - Special case1 (DVI+DIN)
+// Others=TBD
+// [3:0] - DAC Associated
+// = 0 - no DAC
+// = 1 - DACA
+// = 2 - DACB
+// = 3 - External DAC
+// Others=TBD
+//
+
+typedef struct _ATOM_CONNECTOR_INFO
+{
#if ATOM_BIG_ENDIAN
- UCHAR bfConnectorType:4;
- UCHAR bfAssociatedDAC:4;
+ UCHAR bfConnectorType:4;
+ UCHAR bfAssociatedDAC:4;
#else
- UCHAR bfAssociatedDAC:4;
- UCHAR bfConnectorType:4;
+ UCHAR bfAssociatedDAC:4;
+ UCHAR bfConnectorType:4;
#endif
-} ATOM_CONNECTOR_INFO;
+}ATOM_CONNECTOR_INFO;
+
+typedef union _ATOM_CONNECTOR_INFO_ACCESS
+{
+ ATOM_CONNECTOR_INFO sbfAccess;
+ UCHAR ucAccess;
+}ATOM_CONNECTOR_INFO_ACCESS;
-typedef union _ATOM_CONNECTOR_INFO_ACCESS {
- ATOM_CONNECTOR_INFO sbfAccess;
- UCHAR ucAccess;
-} ATOM_CONNECTOR_INFO_ACCESS;
+typedef struct _ATOM_CONNECTOR_INFO_I2C
+{
+ ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
+}ATOM_CONNECTOR_INFO_I2C;
-typedef struct _ATOM_CONNECTOR_INFO_I2C {
- ATOM_CONNECTOR_INFO_ACCESS sucConnectorInfo;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId;
-} ATOM_CONNECTOR_INFO_I2C;
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
-} ATOM_SUPPORTED_DEVICES_INFO;
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO];
+}ATOM_SUPPORTED_DEVICES_INFO;
#define NO_INT_SRC_MAPPED 0xFF
-typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP {
- UCHAR ucIntSrcBitmap;
-} ATOM_CONNECTOR_INC_SRC_BITMAP;
-
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
- ATOM_CONNECTOR_INC_SRC_BITMAP
- asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
-} ATOM_SUPPORTED_DEVICES_INFO_2;
-
-typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usDeviceSupport;
- ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
- ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
-} ATOM_SUPPORTED_DEVICES_INFO_2d1;
+typedef struct _ATOM_CONNECTOR_INC_SRC_BITMAP
+{
+ UCHAR ucIntSrcBitmap;
+}ATOM_CONNECTOR_INC_SRC_BITMAP;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE_INFO_2];
+}ATOM_SUPPORTED_DEVICES_INFO_2;
+
+typedef struct _ATOM_SUPPORTED_DEVICES_INFO_2d1
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usDeviceSupport;
+ ATOM_CONNECTOR_INFO_I2C asConnInfo[ATOM_MAX_SUPPORTED_DEVICE];
+ ATOM_CONNECTOR_INC_SRC_BITMAP asIntSrcInfo[ATOM_MAX_SUPPORTED_DEVICE];
+}ATOM_SUPPORTED_DEVICES_INFO_2d1;
#define ATOM_SUPPORTED_DEVICES_INFO_LAST ATOM_SUPPORTED_DEVICES_INFO_2d1
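The ucConnectInfo comment block above fixes the encoding: connector type in bits [7:4], associated DAC in bits [3:0]. A stand-alone decode sketch using the names from that comment (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Split ucConnectInfo into its two nibbles and print the documented names. */
static void decode_connect_info(uint8_t info)
{
    static const char *conn[16] = {
        [1] = "VGA", [2] = "DVI-I", [3] = "DVI-D", [4] = "DVI-A",
        [5] = "SVIDEO", [6] = "COMPOSITE", [7] = "LVDS",
        [8] = "DIGITAL LINK", [9] = "SCART",
        [0xA] = "HDMI type A", [0xB] = "HDMI type B",
        [0xE] = "DVI+DIN",
    };
    static const char *dac[4] = { "no DAC", "DACA", "DACB", "External DAC" };
    uint8_t type = info >> 4;
    uint8_t assoc = info & 0x0F;

    printf("connector=%s dac=%s\n",
           conn[type] ? conn[type] : "TBD",
           assoc < 4 ? dac[assoc] : "TBD");
}

int main(void)
{
    decode_connect_info(0x21);   /* DVI-I connector wired to DACA */
    return 0;
}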
-typedef struct _ATOM_MISC_CONTROL_INFO {
- USHORT usFrequency;
- UCHAR ucPLL_ChargePump; /* PLL charge-pump gain control */
- UCHAR ucPLL_DutyCycle; /* PLL duty cycle control */
- UCHAR ucPLL_VCO_Gain; /* PLL VCO gain control */
- UCHAR ucPLL_VoltageSwing; /* PLL driver voltage swing control */
-} ATOM_MISC_CONTROL_INFO;
+
+
+typedef struct _ATOM_MISC_CONTROL_INFO
+{
+ USHORT usFrequency;
+ UCHAR ucPLL_ChargePump; // PLL charge-pump gain control
+ UCHAR ucPLL_DutyCycle; // PLL duty cycle control
+ UCHAR ucPLL_VCO_Gain; // PLL VCO gain control
+ UCHAR ucPLL_VoltageSwing; // PLL driver voltage swing control
+}ATOM_MISC_CONTROL_INFO;
+
#define ATOM_MAX_MISC_INFO 4
-typedef struct _ATOM_TMDS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usMaxFrequency; /* in 10Khz */
- ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
-} ATOM_TMDS_INFO;
+typedef struct _ATOM_TMDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usMaxFrequency; // in 10Khz
+ ATOM_MISC_CONTROL_INFO asMiscInfo[ATOM_MAX_MISC_INFO];
+}ATOM_TMDS_INFO;
+
+
+typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE
+{
+ UCHAR ucTVStandard; //Same as TV standards defined above,
+ UCHAR ucPadding[1];
+}ATOM_ENCODER_ANALOG_ATTRIBUTE;
-typedef struct _ATOM_ENCODER_ANALOG_ATTRIBUTE {
- UCHAR ucTVStandard; /* Same as TV standards defined above, */
- UCHAR ucPadding[1];
-} ATOM_ENCODER_ANALOG_ATTRIBUTE;
+typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE
+{
+ UCHAR ucAttribute; //Same as other digital encoder attributes defined above
+ UCHAR ucPadding[1];
+}ATOM_ENCODER_DIGITAL_ATTRIBUTE;
-typedef struct _ATOM_ENCODER_DIGITAL_ATTRIBUTE {
- UCHAR ucAttribute; /* Same as other digital encoder attributes defined above */
- UCHAR ucPadding[1];
-} ATOM_ENCODER_DIGITAL_ATTRIBUTE;
+typedef union _ATOM_ENCODER_ATTRIBUTE
+{
+ ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
+ ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
+}ATOM_ENCODER_ATTRIBUTE;
-typedef union _ATOM_ENCODER_ATTRIBUTE {
- ATOM_ENCODER_ANALOG_ATTRIBUTE sAlgAttrib;
- ATOM_ENCODER_DIGITAL_ATTRIBUTE sDigAttrib;
-} ATOM_ENCODER_ATTRIBUTE;
-typedef struct _DVO_ENCODER_CONTROL_PARAMETERS {
- USHORT usPixelClock;
- USHORT usEncoderID;
- UCHAR ucDeviceType; /* Use ATOM_DEVICE_xxx1_Index to indicate device type only. */
- UCHAR ucAction; /* ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT */
- ATOM_ENCODER_ATTRIBUTE usDevAttr;
-} DVO_ENCODER_CONTROL_PARAMETERS;
+typedef struct _DVO_ENCODER_CONTROL_PARAMETERS
+{
+ USHORT usPixelClock;
+ USHORT usEncoderID;
+ UCHAR ucDeviceType; //Use ATOM_DEVICE_xxx1_Index to indicate device type only.
+ UCHAR ucAction; //ATOM_ENABLE/ATOM_DISABLE/ATOM_HPD_INIT
+ ATOM_ENCODER_ATTRIBUTE usDevAttr;
+}DVO_ENCODER_CONTROL_PARAMETERS;
+
+typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION
+{
+ DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
+ WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; //Caller doesn't need to init this portion
+}DVO_ENCODER_CONTROL_PS_ALLOCATION;
-typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
- DVO_ENCODER_CONTROL_PARAMETERS sDVOEncoder;
- WRITE_ONE_BYTE_HW_I2C_DATA_PS_ALLOCATION sReserved; /* Caller doesn't need to init this portion */
-} DVO_ENCODER_CONTROL_PS_ALLOCATION;
#define ATOM_XTMDS_ASIC_SI164_ID 1
#define ATOM_XTMDS_ASIC_SI178_ID 2
@@ -4526,27 +5539,30 @@ typedef struct _DVO_ENCODER_CONTROL_PS_ALLOCATION {
#define ATOM_XTMDS_SUPPORTED_DUALLINK 0x00000002
#define ATOM_XTMDS_MVPU_FPGA 0x00000004
-typedef struct _ATOM_XTMDS_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- USHORT usSingleLinkMaxFrequency;
- ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; /* Point the ID on which I2C is used to control external chip */
- UCHAR ucXtransimitterID;
- UCHAR ucSupportedLink; /* Bit field, bit0=1, single link supported;bit1=1,dual link supported */
- UCHAR ucSequnceAlterID; /* Even with the same external TMDS asic, it's possible that the program seqence alters */
- /* due to design. This ID is used to alert driver that the sequence is not "standard"! */
- UCHAR ucMasterAddress; /* Address to control Master xTMDS Chip */
- UCHAR ucSlaveAddress; /* Address to control Slave xTMDS Chip */
-} ATOM_XTMDS_INFO;
-
-typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
- UCHAR ucEnable; /* ATOM_ENABLE=On or ATOM_DISABLE=Off */
- UCHAR ucDevice; /* ATOM_DEVICE_DFP1_INDEX.... */
- UCHAR ucPadding[2];
-} DFP_DPMS_STATUS_CHANGE_PARAMETERS;
+
+typedef struct _ATOM_XTMDS_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ USHORT usSingleLinkMaxFrequency;
+ ATOM_I2C_ID_CONFIG_ACCESS sucI2cId; //Point the ID on which I2C is used to control external chip
+ UCHAR ucXtransimitterID;
+ UCHAR ucSupportedLink; // Bit field, bit0=1, single link supported;bit1=1,dual link supported
+ UCHAR ucSequnceAlterID; // Even with the same external TMDS asic, it's possible that the program sequence alters
+ // due to design. This ID is used to alert driver that the sequence is not "standard"!
+ UCHAR ucMasterAddress; // Address to control Master xTMDS Chip
+ UCHAR ucSlaveAddress; // Address to control Slave xTMDS Chip
+}ATOM_XTMDS_INFO;
+
+typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS
+{
+ UCHAR ucEnable; // ATOM_ENABLE=On or ATOM_DISABLE=Off
+ UCHAR ucDevice; // ATOM_DEVICE_DFP1_INDEX....
+ UCHAR ucPadding[2];
+}DFP_DPMS_STATUS_CHANGE_PARAMETERS;
/****************************Legacy Power Play Table Definitions **********************/
-/* Definitions for ulPowerPlayMiscInfo */
+//Definitions for ulPowerPlayMiscInfo
#define ATOM_PM_MISCINFO_SPLIT_CLOCK 0x00000000L
#define ATOM_PM_MISCINFO_USING_MCLK_SRC 0x00000001L
#define ATOM_PM_MISCINFO_USING_SCLK_SRC 0x00000002L
@@ -4558,8 +5574,8 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO_ENGINE_CLOCK_CONTRL_EN 0x00000020L
#define ATOM_PM_MISCINFO_MEMORY_CLOCK_CONTRL_EN 0x00000040L
-#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L /* When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program */
-
+#define ATOM_PM_MISCINFO_PROGRAM_VOLTAGE 0x00000080L //When this bit set, ucVoltageDropIndex is not an index for GPIO pin, but a voltage ID that SW needs program
+
#define ATOM_PM_MISCINFO_ASIC_REDUCED_SPEED_SCLK_EN 0x00000100L
#define ATOM_PM_MISCINFO_ASIC_DYNAMIC_VOLTAGE_EN 0x00000200L
#define ATOM_PM_MISCINFO_ASIC_SLEEP_MODE_EN 0x00000400L
@@ -4569,22 +5585,22 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO_LOW_LCD_REFRESH_RATE 0x00004000L
#define ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE 0x00008000L
-#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
+#define ATOM_PM_MISCINFO_OVER_CLOCK_MODE 0x00010000L
#define ATOM_PM_MISCINFO_OVER_DRIVE_MODE 0x00020000L
#define ATOM_PM_MISCINFO_POWER_SAVING_MODE 0x00040000L
#define ATOM_PM_MISCINFO_THERMAL_DIODE_MODE 0x00080000L
-#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L /* 0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved */
-#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK 0x00300000L //0-FM Disable, 1-2 level FM, 2-4 level FM, 3-Reserved
+#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT 20
#define ATOM_PM_MISCINFO_DYN_CLK_3D_IDLE 0x00400000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_2 0x00800000L
#define ATOM_PM_MISCINFO_DYNAMIC_CLOCK_DIVIDER_BY_4 0x01000000L
-#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L /* When set, Dynamic */
-#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L /* When set, Dynamic */
-#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L /* When set, This mode is for acceleated 3D mode */
+#define ATOM_PM_MISCINFO_DYNAMIC_HDP_BLOCK_EN 0x02000000L //When set, Dynamic
+#define ATOM_PM_MISCINFO_DYNAMIC_MC_HOST_BLOCK_EN 0x04000000L //When set, Dynamic
+#define ATOM_PM_MISCINFO_3D_ACCELERATION_EN 0x08000000L //When set, this mode is for accelerated 3D mode
-#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L /* 1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks) */
+#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK 0x70000000L //1-Optimal Battery Life Group, 2-High Battery, 3-Balanced, 4-High Performance, 5- Optimal Performance (Default state with Default clocks)
#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28
#define ATOM_PM_MISCINFO_ENABLE_BACK_BIAS 0x80000000L
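Most ulPowerPlayMiscInfo bits are single flags, but frame modulation and the powerplay settings group are multi-bit fields; a small extraction sketch using only the masks and shifts defined above:

#include <stdint.h>
#include <stdio.h>

#define ATOM_PM_MISCINFO_FRAME_MODULATION_MASK          0x00300000L
#define ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT         20
#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK  0x70000000L
#define ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT 28

int main(void)
{
    uint32_t misc = 0x30200000u;   /* example ulMiscInfo value */
    unsigned fm  = (misc & ATOM_PM_MISCINFO_FRAME_MODULATION_MASK) >>
                   ATOM_PM_MISCINFO_FRAME_MODULATION_SHIFT;           /* 0=off, 1=2-level, 2=4-level */
    unsigned grp = (misc & ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_MASK) >>
                   ATOM_PM_MISCINFO_POWERPLAY_SETTINGS_GROUP_SHIFT;   /* 1=battery ... 5=optimal performance */

    printf("frame modulation %u, settings group %u\n", fm, grp);
    return 0;
}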
@@ -4594,55 +5610,59 @@ typedef struct _DFP_DPMS_STATUS_CHANGE_PARAMETERS {
#define ATOM_PM_MISCINFO2_FS3D_OVERDRIVE_INFO 0x00000008L
#define ATOM_PM_MISCINFO2_FORCEDLOWPWR_MODE 0x00000010L
#define ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN 0x00000020L
-#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L /* If this bit is set in multi-pp mode, then driver will pack up one with the minior power consumption. */
- /* If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback */
+#define ATOM_PM_MISCINFO2_VIDEO_PLAYBACK_CAPABLE 0x00000040L //If this bit is set in multi-pp mode, then the driver will pick the one with the lowest power consumption.
+ //If it's not set in any pp mode, driver will use its default logic to pick a pp mode in video playback
#define ATOM_PM_MISCINFO2_NOT_VALID_ON_DC 0x00000080L
#define ATOM_PM_MISCINFO2_STUTTER_MODE_EN 0x00000100L
-#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
-
-/* ucTableFormatRevision=1 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_POWERMODE_INFO {
- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
- ULONG ulReserved1; /* must set to 0 */
- ULONG ulReserved2; /* must set to 0 */
- USHORT usEngineClock;
- USHORT usMemoryClock;
- UCHAR ucVoltageDropIndex; /* index to GPIO table */
- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucNumPciELanes; /* number of PCIE lanes */
-} ATOM_POWERMODE_INFO;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=1 */
-typedef struct _ATOM_POWERMODE_INFO_V2 {
- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
- ULONG ulMiscInfo2;
- ULONG ulEngineClock;
- ULONG ulMemoryClock;
- UCHAR ucVoltageDropIndex; /* index to GPIO table */
- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucNumPciELanes; /* number of PCIE lanes */
-} ATOM_POWERMODE_INFO_V2;
-
-/* ucTableFormatRevision=2 */
-/* ucTableContentRevision=2 */
-typedef struct _ATOM_POWERMODE_INFO_V3 {
- ULONG ulMiscInfo; /* The power level should be arranged in ascending order */
- ULONG ulMiscInfo2;
- ULONG ulEngineClock;
- ULONG ulMemoryClock;
- UCHAR ucVoltageDropIndex; /* index to Core (VDDC) votage table */
- UCHAR ucSelectedPanel_RefreshRate; /* panel refresh rate */
- UCHAR ucMinTemperature;
- UCHAR ucMaxTemperature;
- UCHAR ucNumPciELanes; /* number of PCIE lanes */
- UCHAR ucVDDCI_VoltageDropIndex; /* index to VDDCI votage table */
-} ATOM_POWERMODE_INFO_V3;
+#define ATOM_PM_MISCINFO2_UVD_SUPPORT_MODE 0x00000200L
+
+//ucTableFormatRevision=1
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulReserved1; // must set to 0
+ ULONG ulReserved2; // must set to 0
+ USHORT usEngineClock;
+ USHORT usMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to GPIO table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+}ATOM_POWERMODE_INFO;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=1
+typedef struct _ATOM_POWERMODE_INFO_V2
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to GPIO table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+}ATOM_POWERMODE_INFO_V2;
+
+//ucTableFormatRevision=2
+//ucTableContentRevision=2
+typedef struct _ATOM_POWERMODE_INFO_V3
+{
+ ULONG ulMiscInfo; //The power level should be arranged in ascending order
+ ULONG ulMiscInfo2;
+ ULONG ulEngineClock;
+ ULONG ulMemoryClock;
+ UCHAR ucVoltageDropIndex; // index to Core (VDDC) voltage table
+ UCHAR ucSelectedPanel_RefreshRate;// panel refresh rate
+ UCHAR ucMinTemperature;
+ UCHAR ucMaxTemperature;
+ UCHAR ucNumPciELanes; // number of PCIE lanes
+ UCHAR ucVDDCI_VoltageDropIndex; // index to VDDCI voltage table
+}ATOM_POWERMODE_INFO_V3;
+
#define ATOM_MAX_NUMBEROF_POWER_BLOCK 8
@@ -4655,40 +5675,44 @@ typedef struct _ATOM_POWERMODE_INFO_V3 {
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_MUA6649 0x04
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_LM64 0x05
#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_F75375 0x06
-#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 /* Andigilog */
-
-typedef struct _ATOM_POWERPLAY_INFO {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucOverdriveThermalController;
- UCHAR ucOverdriveI2cLine;
- UCHAR ucOverdriveIntBitmap;
- UCHAR ucOverdriveControllerAddress;
- UCHAR ucSizeOfPowerModeEntry;
- UCHAR ucNumOfPowerModeEntries;
- ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO;
-
-typedef struct _ATOM_POWERPLAY_INFO_V2 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucOverdriveThermalController;
- UCHAR ucOverdriveI2cLine;
- UCHAR ucOverdriveIntBitmap;
- UCHAR ucOverdriveControllerAddress;
- UCHAR ucSizeOfPowerModeEntry;
- UCHAR ucNumOfPowerModeEntries;
- ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO_V2;
-
-typedef struct _ATOM_POWERPLAY_INFO_V3 {
- ATOM_COMMON_TABLE_HEADER sHeader;
- UCHAR ucOverdriveThermalController;
- UCHAR ucOverdriveI2cLine;
- UCHAR ucOverdriveIntBitmap;
- UCHAR ucOverdriveControllerAddress;
- UCHAR ucSizeOfPowerModeEntry;
- UCHAR ucNumOfPowerModeEntries;
- ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
-} ATOM_POWERPLAY_INFO_V3;
+#define ATOM_PP_OVERDRIVE_THERMALCONTROLLER_ASC7512 0x07 // Andigilog
+
+
+typedef struct _ATOM_POWERPLAY_INFO
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO;
+
+typedef struct _ATOM_POWERPLAY_INFO_V2
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V2 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V2;
+
+typedef struct _ATOM_POWERPLAY_INFO_V3
+{
+ ATOM_COMMON_TABLE_HEADER sHeader;
+ UCHAR ucOverdriveThermalController;
+ UCHAR ucOverdriveI2cLine;
+ UCHAR ucOverdriveIntBitmap;
+ UCHAR ucOverdriveControllerAddress;
+ UCHAR ucSizeOfPowerModeEntry;
+ UCHAR ucNumOfPowerModeEntries;
+ ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
+}ATOM_POWERPLAY_INFO_V3;
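The powerplay tables carry ucSizeOfPowerModeEntry and ucNumOfPowerModeEntries precisely because the per-mode entry grew across content revisions; a hedged sketch of walking the entries generically (hypothetical helper, not taken from the driver):

#include <stdint.h>
#include <stddef.h>

/* Step through 'num' entries of 'size' bytes each, starting at 'base'
 * (the first asPowerPlayInfo entry); the callback interprets the layout
 * according to the table's content revision. */
static void for_each_power_mode(const uint8_t *base, uint8_t num, uint8_t size,
                                void (*fn)(const void *entry))
{
    for (uint8_t i = 0; i < num; i++)
        fn(base + (size_t)i * size);
}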
/* New PPlib */
/**************************************************************************/
@@ -4873,40 +5897,42 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum down stream width could be bigger as display BW requriement.
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
- ULONG ulFlags;
+ ULONG ulFlags;
} ATOM_PPLIB_RS780_CLOCK_INFO;
-#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
-#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
-#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
-#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
+#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
+#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
+#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
+#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
-#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
-#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
-#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
+#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
+#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
+#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
/**************************************************************************/
-/* Following definitions are for compatiblity issue in different SW components. */
+
+// Following definitions are for compatibility issues in different SW components.
#define ATOM_MASTER_DATA_TABLE_REVISION 0x01
-#define Object_Info Object_Header
+#define Object_Info Object_Header
#define AdjustARB_SEQ MC_InitParameter
#define VRAM_GPIO_DetectionInfo VoltageObjectInfo
-#define ASIC_VDDCI_Info ASIC_ProfilingInfo
+#define ASIC_VDDCI_Info ASIC_ProfilingInfo
#define ASIC_MVDDQ_Info MemoryTrainingInfo
-#define SS_Info PPLL_SS_Info
+#define SS_Info PPLL_SS_Info
#define ASIC_MVDDC_Info ASIC_InternalSS_Info
#define DispDevicePriorityInfo SaveRestoreInfo
#define DispOutInfo TV_VideoMode
+
#define ATOM_ENCODER_OBJECT_TABLE ATOM_OBJECT_TABLE
#define ATOM_CONNECTOR_OBJECT_TABLE ATOM_OBJECT_TABLE
-/* New device naming, remove them when both DAL/VBIOS is ready */
+//New device naming, remove them when both DAL/VBIOS is ready
#define DFP2I_OUTPUT_CONTROL_PARAMETERS CRT1_OUTPUT_CONTROL_PARAMETERS
#define DFP2I_OUTPUT_CONTROL_PS_ALLOCATION DFP2I_OUTPUT_CONTROL_PARAMETERS
@@ -4921,7 +5947,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_DEVICE_DFP1I_INDEX ATOM_DEVICE_DFP1_INDEX
#define ATOM_DEVICE_DFP1X_INDEX ATOM_DEVICE_DFP2_INDEX
-
+
#define ATOM_DEVICE_DFP2I_INDEX 0x00000009
#define ATOM_DEVICE_DFP2I_SUPPORT (0x1L << ATOM_DEVICE_DFP2I_INDEX)
@@ -4939,7 +5965,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_S3_DFP2I_ACTIVEb1 0x02
-#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
+#define ATOM_S3_DFP1I_ACTIVE ATOM_S3_DFP1_ACTIVE
#define ATOM_S3_DFP1X_ACTIVE ATOM_S3_DFP2_ACTIVE
#define ATOM_S3_DFP2I_ACTIVE 0x00000200L
@@ -4958,14 +5984,14 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define ATOM_S6_ACC_REQ_DFP2Ib3 0x02
#define ATOM_S6_ACC_REQ_DFP2I 0x02000000L
-#define TMDS1XEncoderControl DVOEncoderControl
+#define TMDS1XEncoderControl DVOEncoderControl
#define DFP1XOutputControl DVOOutputControl
#define ExternalDFPOutputControl DFP1XOutputControl
#define EnableExternalTMDS_Encoder TMDS1XEncoderControl
#define DFP1IOutputControl TMDSAOutputControl
-#define DFP2IOutputControl LVTMAOutputControl
+#define DFP2IOutputControl LVTMAOutputControl
#define DAC1_ENCODER_CONTROL_PARAMETERS DAC_ENCODER_CONTROL_PARAMETERS
#define DAC1_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
@@ -4974,7 +6000,7 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define DAC2_ENCODER_CONTROL_PS_ALLOCATION DAC_ENCODER_CONTROL_PS_ALLOCATION
#define ucDac1Standard ucDacStandard
-#define ucDac2Standard ucDacStandard
+#define ucDac2Standard ucDacStandard
#define TMDS1EncoderControl TMDSAEncoderControl
#define TMDS2EncoderControl LVTMAEncoderControl
@@ -4984,12 +6010,56 @@ typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
#define CRT1OutputControl DAC1OutputControl
#define CRT2OutputControl DAC2OutputControl
-/* These two lines will be removed for sure in a few days, will follow up with Michael V. */
+//These two lines will be removed for sure in a few days, will follow up with Michael V.
#define EnableLVDS_SS EnableSpreadSpectrumOnPPLL
-#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
+#define ENABLE_LVDS_SS_PARAMETERS_V3 ENABLE_SPREAD_SPECTRUM_ON_PPLL
+
+//#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+//#define ATOM_S2_LCD1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_TV1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_DFP1_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+//#define ATOM_S2_CRT2_DPMS_STATE ATOM_S2_CRT1_DPMS_STATE
+
+#define ATOM_S6_ACC_REQ_TV2 0x00400000L
+#define ATOM_DEVICE_TV2_INDEX 0x00000006
+#define ATOM_DEVICE_TV2_SUPPORT (0x1L << ATOM_DEVICE_TV2_INDEX)
+#define ATOM_S0_TV2 0x00100000L
+#define ATOM_S3_TV2_ACTIVE ATOM_S3_DFP6_ACTIVE
+#define ATOM_S3_TV2_CRTC_ACTIVE ATOM_S3_DFP6_CRTC_ACTIVE
+
+//
+#define ATOM_S2_CRT1_DPMS_STATE 0x00010000L
+#define ATOM_S2_LCD1_DPMS_STATE 0x00020000L
+#define ATOM_S2_TV1_DPMS_STATE 0x00040000L
+#define ATOM_S2_DFP1_DPMS_STATE 0x00080000L
+#define ATOM_S2_CRT2_DPMS_STATE 0x00100000L
+#define ATOM_S2_LCD2_DPMS_STATE 0x00200000L
+#define ATOM_S2_TV2_DPMS_STATE 0x00400000L
+#define ATOM_S2_DFP2_DPMS_STATE 0x00800000L
+#define ATOM_S2_CV_DPMS_STATE 0x01000000L
+#define ATOM_S2_DFP3_DPMS_STATE 0x02000000L
+#define ATOM_S2_DFP4_DPMS_STATE 0x04000000L
+#define ATOM_S2_DFP5_DPMS_STATE 0x08000000L
+
+#define ATOM_S2_CRT1_DPMS_STATEb2 0x01
+#define ATOM_S2_LCD1_DPMS_STATEb2 0x02
+#define ATOM_S2_TV1_DPMS_STATEb2 0x04
+#define ATOM_S2_DFP1_DPMS_STATEb2 0x08
+#define ATOM_S2_CRT2_DPMS_STATEb2 0x10
+#define ATOM_S2_LCD2_DPMS_STATEb2 0x20
+#define ATOM_S2_TV2_DPMS_STATEb2 0x40
+#define ATOM_S2_DFP2_DPMS_STATEb2 0x80
+#define ATOM_S2_CV_DPMS_STATEb3 0x01
+#define ATOM_S2_DFP3_DPMS_STATEb3 0x02
+#define ATOM_S2_DFP4_DPMS_STATEb3 0x04
+#define ATOM_S2_DFP5_DPMS_STATEb3 0x08
+
+#define ATOM_S3_ASIC_GUI_ENGINE_HUNGb3 0x20
+#define ATOM_S3_ALLOW_FAST_PWR_SWITCHb3 0x40
+#define ATOM_S3_RQST_GPU_USE_MIN_PWRb3 0x80
/*********************************************************************************/
-#pragma pack() /* BIOS data must use byte aligment */
+#pragma pack() // BIOS data must use byte alignment
#endif /* _ATOMBIOS_H */
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index af464e351fb..dd9fdf56061 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -245,21 +245,25 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_enable_crtc(crtc, 1);
+ atombios_enable_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
- atombios_enable_crtc_memreq(crtc, 1);
- atombios_blank_crtc(crtc, 0);
- drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+ atombios_enable_crtc_memreq(crtc, ATOM_ENABLE);
+ atombios_blank_crtc(crtc, ATOM_DISABLE);
+ /* XXX re-enable when interrupt support is added */
+ if (!ASIC_IS_DCE4(rdev))
+ drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
radeon_crtc_load_lut(crtc);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
- atombios_blank_crtc(crtc, 1);
+ /* XXX re-enable when interrupt support is added */
+ if (!ASIC_IS_DCE4(rdev))
+ drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
+ atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
- atombios_enable_crtc_memreq(crtc, 0);
- atombios_enable_crtc(crtc, 0);
+ atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
+ atombios_enable_crtc(crtc, ATOM_DISABLE);
break;
}
}
@@ -349,6 +353,11 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+union atom_enable_ss {
+ ENABLE_LVDS_SS_PARAMETERS legacy;
+ ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
+};
+
static void atombios_set_ss(struct drm_crtc *crtc, int enable)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -358,11 +367,14 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
struct radeon_encoder *radeon_encoder = NULL;
struct radeon_encoder_atom_dig *dig = NULL;
int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
- ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION args;
- ENABLE_LVDS_SS_PARAMETERS legacy_args;
+ union atom_enable_ss args;
uint16_t percentage = 0;
uint8_t type = 0, step = 0, delay = 0, range = 0;
+ /* XXX add ss support for DCE4 */
+ if (ASIC_IS_DCE4(rdev))
+ return;
+
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
@@ -386,29 +398,28 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
if (!radeon_encoder)
return;
+ memset(&args, 0, sizeof(args));
if (ASIC_IS_AVIVO(rdev)) {
- memset(&args, 0, sizeof(args));
- args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- args.ucSpreadSpectrumType = type;
- args.ucSpreadSpectrumStep = step;
- args.ucSpreadSpectrumDelay = delay;
- args.ucSpreadSpectrumRange = range;
- args.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.ucEnable = enable;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ args.v1.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ args.v1.ucSpreadSpectrumType = type;
+ args.v1.ucSpreadSpectrumStep = step;
+ args.v1.ucSpreadSpectrumDelay = delay;
+ args.v1.ucSpreadSpectrumRange = range;
+ args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v1.ucEnable = enable;
} else {
- memset(&legacy_args, 0, sizeof(legacy_args));
- legacy_args.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
- legacy_args.ucSpreadSpectrumType = type;
- legacy_args.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
- legacy_args.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
- legacy_args.ucEnable = enable;
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&legacy_args);
+ args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
+ args.legacy.ucSpreadSpectrumType = type;
+ args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
+ args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
+ args.legacy.ucEnable = enable;
}
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
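The rework above replaces two separate argument structs with a single union (atom_enable_ss) so one memset and one atom_execute_table call serve both parameter layouts. The same pattern in isolation, with hypothetical struct names standing in for the ATOM parameter blocks:

#include <stdint.h>
#include <string.h>

struct ss_args_legacy { uint16_t percentage; uint8_t type; uint8_t enable; };
struct ss_args_v1     { uint16_t percentage; uint8_t type; uint8_t step; uint8_t ppll; uint8_t enable; };

union ss_args {
    struct ss_args_legacy legacy;
    struct ss_args_v1     v1;
};

/* Zero the largest member once, then fill only the layout the hardware
 * generation expects; the caller submits &args regardless of version. */
static void fill_ss_args(union ss_args *args, int avivo, uint16_t pct)
{
    memset(args, 0, sizeof(*args));
    if (avivo) {
        args->v1.percentage = pct;
        args->v1.enable = 1;
    } else {
        args->legacy.percentage = pct;
        args->legacy.enable = 1;
    }
}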
union adjust_pixel_clock {
ADJUST_DISPLAY_PLL_PS_ALLOCATION v1;
+ ADJUST_DISPLAY_PLL_PS_ALLOCATION_V3 v3;
};
static u32 atombios_adjust_pll(struct drm_crtc *crtc,
@@ -420,10 +431,24 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
struct drm_encoder *encoder = NULL;
struct radeon_encoder *radeon_encoder = NULL;
u32 adjusted_clock = mode->clock;
+ int encoder_mode = 0;
/* reset the pll flags */
pll->flags = 0;
+ /* select the PLL algo */
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_new_pll == 0)
+ pll->algo = PLL_ALGO_LEGACY;
+ else
+ pll->algo = PLL_ALGO_NEW;
+ } else {
+ if (radeon_new_pll == 1)
+ pll->algo = PLL_ALGO_NEW;
+ else
+ pll->algo = PLL_ALGO_LEGACY;
+ }
+
if (ASIC_IS_AVIVO(rdev)) {
if ((rdev->family == CHIP_RS600) ||
(rdev->family == CHIP_RS690) ||
@@ -448,10 +473,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
+ /* LVDS PLL quirks */
+ if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ pll->algo = dig->pll_algo;
+ }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -468,14 +499,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
*/
if (ASIC_IS_DCE3(rdev)) {
union adjust_pixel_clock args;
- struct radeon_encoder_atom_dig *dig;
u8 frev, crev;
int index;
- if (!radeon_encoder->enc_priv)
- return adjusted_clock;
- dig = radeon_encoder->enc_priv;
-
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
&crev);
@@ -489,12 +515,51 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
case 2:
args.v1.usPixelClock = cpu_to_le16(mode->clock / 10);
args.v1.ucTransmitterID = radeon_encoder->encoder_id;
- args.v1.ucEncodeMode = atombios_get_encoder_mode(encoder);
+ args.v1.ucEncodeMode = encoder_mode;
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
adjusted_clock = le16_to_cpu(args.v1.usPixelClock) * 10;
break;
+ case 3:
+ args.v3.sInput.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v3.sInput.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v3.sInput.ucEncodeMode = encoder_mode;
+ args.v3.sInput.ucDispPllConfig = 0;
+ if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+ if (encoder_mode == ATOM_ENCODER_MODE_DP)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ else {
+ if (dig->coherent_mode)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_COHERENT_MODE;
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ } else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
+ /* may want to enable SS on DP/eDP eventually */
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
+ atom_execute_table(rdev->mode_info.atom_context,
+ index, (uint32_t *)&args);
+ adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10;
+ if (args.v3.sOutput.ucRefDiv) {
+ pll->flags |= RADEON_PLL_USE_REF_DIV;
+ pll->reference_div = args.v3.sOutput.ucRefDiv;
+ }
+ if (args.v3.sOutput.ucPostDiv) {
+ pll->flags |= RADEON_PLL_USE_POST_DIV;
+ pll->post_div = args.v3.sOutput.ucPostDiv;
+ }
+ break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
return adjusted_clock;
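AdjustDisplayPll (and SetPixelClock below) exchange clocks in 10 kHz units, which is why the driver divides mode->clock by 10 on the way into the table and multiplies the returned value by 10 on the way out. Trivial helpers making that convention explicit (a sketch, not driver code):

#include <stdint.h>

/* The driver's mode clocks are in kHz; ATOM table clock fields are in 10 kHz. */
static inline uint16_t khz_to_atom_units(uint32_t khz)   { return (uint16_t)(khz / 10); }
static inline uint32_t atom_units_to_khz(uint32_t units) { return units * 10; }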
@@ -513,9 +578,47 @@ union set_pixel_clock {
PIXEL_CLOCK_PARAMETERS v1;
PIXEL_CLOCK_PARAMETERS_V2 v2;
PIXEL_CLOCK_PARAMETERS_V3 v3;
+ PIXEL_CLOCK_PARAMETERS_V5 v5;
};
-void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
+static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u8 frev, crev;
+ int index;
+ union set_pixel_clock args;
+
+ memset(&args, 0, sizeof(args));
+
+ index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
+ atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev);
+
+ switch (frev) {
+ case 1:
+ switch (crev) {
+ case 5:
+ /* if the default dcpll clock is specified,
+ * SetPixelClock provides the dividers
+ */
+ args.v5.ucCRTC = ATOM_CRTC_INVALID;
+ args.v5.usPixelClock = rdev->clock.default_dispclk;
+ args.v5.ucPpll = ATOM_DCPLL;
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ break;
+ default:
+ DRM_ERROR("Unknown table version %d %d\n", frev, crev);
+ return;
+ }
+ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+}
+
+static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -529,12 +632,14 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
u32 ref_div = 0, fb_div = 0, frac_fb_div = 0, post_div = 0;
struct radeon_pll *pll;
u32 adjusted_clock;
+ int encoder_mode = 0;
memset(&args, 0, sizeof(args));
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc == crtc) {
radeon_encoder = to_radeon_encoder(encoder);
+ encoder_mode = atombios_get_encoder_mode(encoder);
break;
}
}
@@ -542,26 +647,24 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
if (!radeon_encoder)
return;
- if (radeon_crtc->crtc_id == 0)
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
pll = &rdev->clock.p1pll;
- else
+ break;
+ case ATOM_PPLL2:
pll = &rdev->clock.p2pll;
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ pll = &rdev->clock.dcpll;
+ break;
+ }
/* adjust pixel clock as needed */
adjusted_clock = atombios_adjust_pll(crtc, mode, pll);
- if (ASIC_IS_AVIVO(rdev)) {
- if (radeon_new_pll)
- radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
- &fb_div, &frac_fb_div,
- &ref_div, &post_div);
- else
- radeon_compute_pll(pll, adjusted_clock, &pll_clock,
- &fb_div, &frac_fb_div,
- &ref_div, &post_div);
- } else
- radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
- &ref_div, &post_div);
+ radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+ &ref_div, &post_div);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -576,8 +679,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v1.usFbDiv = cpu_to_le16(fb_div);
args.v1.ucFracFbDiv = frac_fb_div;
args.v1.ucPostDiv = post_div;
- args.v1.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v1.ucPpll = radeon_crtc->pll_id;
args.v1.ucCRTC = radeon_crtc->crtc_id;
args.v1.ucRefDivSrc = 1;
break;
@@ -587,8 +689,7 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v2.usFbDiv = cpu_to_le16(fb_div);
args.v2.ucFracFbDiv = frac_fb_div;
args.v2.ucPostDiv = post_div;
- args.v2.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
+ args.v2.ucPpll = radeon_crtc->pll_id;
args.v2.ucCRTC = radeon_crtc->crtc_id;
args.v2.ucRefDivSrc = 1;
break;
@@ -598,12 +699,22 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
args.v3.usFbDiv = cpu_to_le16(fb_div);
args.v3.ucFracFbDiv = frac_fb_div;
args.v3.ucPostDiv = post_div;
- args.v3.ucPpll =
- radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.v3.ucMiscInfo = (radeon_crtc->crtc_id << 2);
+ args.v3.ucPpll = radeon_crtc->pll_id;
+ args.v3.ucMiscInfo = (radeon_crtc->pll_id << 2);
args.v3.ucTransmitterId = radeon_encoder->encoder_id;
- args.v3.ucEncoderMode =
- atombios_get_encoder_mode(encoder);
+ args.v3.ucEncoderMode = encoder_mode;
+ break;
+ case 5:
+ args.v5.ucCRTC = radeon_crtc->crtc_id;
+ args.v5.usPixelClock = cpu_to_le16(mode->clock / 10);
+ args.v5.ucRefDiv = ref_div;
+ args.v5.usFbDiv = cpu_to_le16(fb_div);
+ args.v5.ulFbDivDecFrac = cpu_to_le32(frac_fb_div * 100000);
+ args.v5.ucPostDiv = post_div;
+ args.v5.ucMiscInfo = 0; /* HDMI depth, etc. */
+ args.v5.ucTransmitterID = radeon_encoder->encoder_id;
+ args.v5.ucEncoderMode = encoder_mode;
+ args.v5.ucPpll = radeon_crtc->pll_id;
break;
default:
DRM_ERROR("Unknown table version %d %d\n", frev, crev);
@@ -618,6 +729,140 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+static int evergreen_crtc_set_base(struct drm_crtc *crtc, int x, int y,
+ struct drm_framebuffer *old_fb)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_framebuffer *radeon_fb;
+ struct drm_gem_object *obj;
+ struct radeon_bo *rbo;
+ uint64_t fb_location;
+ uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+ int r;
+
+ /* no fb bound */
+ if (!crtc->fb) {
+ DRM_DEBUG("No FB bound\n");
+ return 0;
+ }
+
+ radeon_fb = to_radeon_framebuffer(crtc->fb);
+
+ /* Pin framebuffer & get tilling informations */
+ obj = radeon_fb->obj;
+ rbo = obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
+ return -EINVAL;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+ radeon_bo_unreserve(rbo);
+
+ switch (crtc->fb->bits_per_pixel) {
+ case 8:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
+ break;
+ case 15:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+ break;
+ case 16:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
+ break;
+ case 24:
+ case 32:
+ fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+ EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
+ break;
+ default:
+ DRM_ERROR("Unsupported screen depth %d\n",
+ crtc->fb->bits_per_pixel);
+ return -EINVAL;
+ }
+
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ WREG32(AVIVO_D1VGA_CONTROL, 0);
+ break;
+ case 1:
+ WREG32(AVIVO_D2VGA_CONTROL, 0);
+ break;
+ case 2:
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ break;
+ case 3:
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ break;
+ case 4:
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ break;
+ case 5:
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+ break;
+ default:
+ break;
+ }
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset,
+ upper_32_bits(fb_location));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32)fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + radeon_crtc->crtc_offset,
+ (u32) fb_location & EVERGREEN_GRPH_SURFACE_ADDRESS_MASK);
+ WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
+
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_Y_START + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_GRPH_X_END + radeon_crtc->crtc_offset, crtc->fb->width);
+ WREG32(EVERGREEN_GRPH_Y_END + radeon_crtc->crtc_offset, crtc->fb->height);
+
+ fb_pitch_pixels = crtc->fb->pitch / (crtc->fb->bits_per_pixel / 8);
+ WREG32(EVERGREEN_GRPH_PITCH + radeon_crtc->crtc_offset, fb_pitch_pixels);
+ WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
+
+ WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
+ crtc->mode.vdisplay);
+ x &= ~3;
+ y &= ~1;
+ WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
+ (x << 16) | y);
+ WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
+ (crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
+
+ if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE)
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset,
+ EVERGREEN_INTERLEAVE_EN);
+ else
+ WREG32(EVERGREEN_DATA_FORMAT + radeon_crtc->crtc_offset, 0);
+
+ if (old_fb && old_fb != crtc->fb) {
+ radeon_fb = to_radeon_framebuffer(old_fb);
+ rbo = radeon_fb->obj->driver_private;
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+ radeon_bo_unpin(rbo);
+ radeon_bo_unreserve(rbo);
+ }
+
+ /* Bytes per pixel may have changed */
+ radeon_bandwidth_update(rdev);
+
+ return 0;
+}
+
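evergreen_crtc_set_base() programs EVERGREEN_GRPH_PITCH in pixels while the DRM framebuffer stores its pitch in bytes, which is what the divide by bytes-per-pixel above computes. The conversion on its own, as a sketch:

#include <stdint.h>

/* Convert a byte pitch to a pixel pitch for the GRPH_PITCH register. */
static inline uint32_t pitch_bytes_to_pixels(uint32_t pitch_bytes, uint32_t bits_per_pixel)
{
    return pitch_bytes / (bits_per_pixel / 8);
}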
static int avivo_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb)
{
@@ -755,7 +1000,9 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_device *dev = crtc->dev;
struct radeon_device *rdev = dev->dev_private;
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_DCE4(rdev))
+ return evergreen_crtc_set_base(crtc, x, y, old_fb);
+ else if (ASIC_IS_AVIVO(rdev))
return avivo_crtc_set_base(crtc, x, y, old_fb);
else
return radeon_crtc_set_base(crtc, x, y, old_fb);
@@ -785,6 +1032,46 @@ static void radeon_legacy_atom_fixup(struct drm_crtc *crtc)
}
}
+static int radeon_atom_pick_pll(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *test_encoder;
+ struct drm_crtc *test_crtc;
+ uint32_t pll_in_use = 0;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ /* if crtc is driving DP and we have an ext clock, use that */
+ list_for_each_entry(test_encoder, &dev->mode_config.encoder_list, head) {
+ if (test_encoder->crtc && (test_encoder->crtc == crtc)) {
+ if (atombios_get_encoder_mode(test_encoder) == ATOM_ENCODER_MODE_DP) {
+ if (rdev->clock.dp_extclk)
+ return ATOM_PPLL_INVALID;
+ }
+ }
+ }
+
+ /* otherwise, pick one of the plls */
+ list_for_each_entry(test_crtc, &dev->mode_config.crtc_list, head) {
+ struct radeon_crtc *radeon_test_crtc;
+
+ if (crtc == test_crtc)
+ continue;
+
+ radeon_test_crtc = to_radeon_crtc(test_crtc);
+ if ((radeon_test_crtc->pll_id >= ATOM_PPLL1) &&
+ (radeon_test_crtc->pll_id <= ATOM_PPLL2))
+ pll_in_use |= (1 << radeon_test_crtc->pll_id);
+ }
+ if (!(pll_in_use & 1))
+ return ATOM_PPLL1;
+ return ATOM_PPLL2;
+ } else
+ return radeon_crtc->crtc_id;
+
+}
+
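radeon_atom_pick_pll() builds a bitmask of PLLs already claimed by other CRTCs and hands out the first free one; a DP output with an external reference clock gets ATOM_PPLL_INVALID instead. The allocation step reduced to a sketch:

#include <stdint.h>

enum { PPLL1 = 0, PPLL2 = 1 };

/* Return the first PLL whose bit is clear in the in-use mask; with only
 * two PPLLs checked above, anything else falls back to PPLL2. */
static int pick_free_pll(uint32_t pll_in_use)
{
    if (!(pll_in_use & (1u << PPLL1)))
        return PPLL1;
    return PPLL2;
}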
int atombios_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -796,19 +1083,27 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
/* TODO color tiling */
+ /* pick pll */
+ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+
atombios_set_ss(crtc, 0);
+ /* always set DCPLL */
+ if (ASIC_IS_DCE4(rdev))
+ atombios_crtc_set_dcpll(crtc);
atombios_crtc_set_pll(crtc, adjusted_mode);
atombios_set_ss(crtc, 1);
- atombios_crtc_set_timing(crtc, adjusted_mode);
- if (ASIC_IS_AVIVO(rdev))
- atombios_crtc_set_base(crtc, x, y, old_fb);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
+ else if (ASIC_IS_AVIVO(rdev))
+ atombios_crtc_set_timing(crtc, adjusted_mode);
else {
+ atombios_crtc_set_timing(crtc, adjusted_mode);
if (radeon_crtc->crtc_id == 0)
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
- atombios_crtc_set_base(crtc, x, y, old_fb);
radeon_legacy_atom_fixup(crtc);
}
+ atombios_crtc_set_base(crtc, x, y, old_fb);
atombios_overscan_setup(crtc, mode, adjusted_mode);
atombios_scaler_setup(crtc);
return 0;
@@ -825,14 +1120,14 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
- atombios_lock_crtc(crtc, 1);
+ atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
static void atombios_crtc_commit(struct drm_crtc *crtc)
{
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
- atombios_lock_crtc(crtc, 0);
+ atombios_lock_crtc(crtc, ATOM_DISABLE);
}
static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
@@ -848,8 +1143,37 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc)
{
- if (radeon_crtc->crtc_id == 1)
- radeon_crtc->crtc_offset =
- AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ struct radeon_device *rdev = dev->dev_private;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ switch (radeon_crtc->crtc_id) {
+ case 0:
+ default:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC0_REGISTER_OFFSET;
+ break;
+ case 1:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC1_REGISTER_OFFSET;
+ break;
+ case 2:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC2_REGISTER_OFFSET;
+ break;
+ case 3:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC3_REGISTER_OFFSET;
+ break;
+ case 4:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC4_REGISTER_OFFSET;
+ break;
+ case 5:
+ radeon_crtc->crtc_offset = EVERGREEN_CRTC5_REGISTER_OFFSET;
+ break;
+ }
+ } else {
+ if (radeon_crtc->crtc_id == 1)
+ radeon_crtc->crtc_offset =
+ AVIVO_D2CRTC_H_TOTAL - AVIVO_D1CRTC_H_TOTAL;
+ else
+ radeon_crtc->crtc_offset = 0;
+ }
+ radeon_crtc->pll_id = -1;
drm_crtc_helper_add(&radeon_crtc->base, &atombios_helper_funcs);
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 99915a682d5..8a133bda00a 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -321,6 +321,10 @@ static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
train_set[lane] = v | p;
}
+union aux_channel_transaction {
+ PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1;
+ PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2;
+};
/* radeon aux chan functions */
bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
@@ -329,7 +333,7 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
{
struct drm_device *dev = chan->dev;
struct radeon_device *rdev = dev->dev_private;
- PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+ union aux_channel_transaction args;
int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
unsigned char *base;
int retry_count = 0;
@@ -341,31 +345,33 @@ bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
retry:
memcpy(base, req_bytes, num_bytes);
- args.lpAuxRequest = 0;
- args.lpDataOut = 16;
- args.ucDataOutLen = 0;
- args.ucChannelID = chan->rec.i2c_id;
- args.ucDelay = delay / 10;
+ args.v1.lpAuxRequest = 0;
+ args.v1.lpDataOut = 16;
+ args.v1.ucDataOutLen = 0;
+ args.v1.ucChannelID = chan->rec.i2c_id;
+ args.v1.ucDelay = delay / 10;
+ if (ASIC_IS_DCE4(rdev))
+ args.v2.ucHPD_ID = chan->rec.hpd_id;
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- if (args.ucReplyStatus && !args.ucDataOutLen) {
- if (args.ucReplyStatus == 0x20 && retry_count++ < 10)
+ if (args.v1.ucReplyStatus && !args.v1.ucDataOutLen) {
+ if (args.v1.ucReplyStatus == 0x20 && retry_count++ < 10)
goto retry;
DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x after %d retries\n",
req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
- chan->rec.i2c_id, args.ucReplyStatus, retry_count);
+ chan->rec.i2c_id, args.v1.ucReplyStatus, retry_count);
return false;
}
- if (args.ucDataOutLen && read_byte && read_buf_len) {
- if (read_buf_len < args.ucDataOutLen) {
+ if (args.v1.ucDataOutLen && read_byte && read_buf_len) {
+ if (read_buf_len < args.v1.ucDataOutLen) {
DRM_ERROR("Buffer to small for return answer %d %d\n",
- read_buf_len, args.ucDataOutLen);
+ read_buf_len, args.v1.ucDataOutLen);
return false;
}
{
- int len = min(read_buf_len, args.ucDataOutLen);
+ int len = min(read_buf_len, args.v1.ucDataOutLen);
memcpy(read_byte, base + 16, len);
}
}
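The AUX path above retries only when the sink reports reply status 0x20 (treated as a defer) and gives up after ten attempts; the retry shape in isolation, with a hypothetical callback type rather than the driver's API:

#include <stdbool.h>

#define AUX_REPLY_DEFER 0x20   /* the only status the code above treats as retryable */

/* Run one transaction attempt via 'once'; retry while the sink defers,
 * bounded at ten tries like the goto-retry loop above. */
static bool aux_transact(bool (*once)(unsigned char *status))
{
    unsigned char status = 0;

    for (int retry = 0; retry < 10; retry++) {
        if (once(&status))
            return true;
        if (status != AUX_REPLY_DEFER)
            break;
    }
    return false;
}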
@@ -626,12 +632,19 @@ void dp_link_train(struct drm_encoder *encoder,
dp_set_link_bw_lanes(radeon_connector, link_configuration);
/* disable downspread on the sink */
dp_set_downspread(radeon_connector, 0);
- /* start training on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
- dig_connector->dp_clock, enc_id, 0);
- /* set training pattern 1 on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
- dig_connector->dp_clock, enc_id, 0);
+ if (ASIC_IS_DCE4(rdev)) {
+ /* start training on the source */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_START);
+ /* set training pattern 1 on the source */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN1);
+ } else {
+ /* start training on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+ dig_connector->dp_clock, enc_id, 0);
+ /* set training pattern 1 on the source */
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 0);
+ }
/* set initial vs/emph */
memset(train_set, 0, 4);
@@ -691,8 +704,11 @@ void dp_link_train(struct drm_encoder *encoder,
/* set training pattern 2 on the sink */
dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
/* set training pattern 2 on the source */
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
- dig_connector->dp_clock, enc_id, 1);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_PATTERN2);
+ else
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+ dig_connector->dp_clock, enc_id, 1);
/* channel equalization loop */
tries = 0;
@@ -729,7 +745,11 @@ void dp_link_train(struct drm_encoder *encoder,
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* disable the training pattern on the sink */
- dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
+ else
+ radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+ dig_connector->dp_clock, enc_id, 0);
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
index d4e6e6e4a93..3c391e7e9fd 100644
--- a/drivers/gpu/drm/radeon/avivod.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -30,11 +30,13 @@
#define D1CRTC_CONTROL 0x6080
#define CRTC_EN (1 << 0)
+#define D1CRTC_STATUS 0x609c
#define D1CRTC_UPDATE_LOCK 0x60E8
#define D1GRPH_PRIMARY_SURFACE_ADDRESS 0x6110
#define D1GRPH_SECONDARY_SURFACE_ADDRESS 0x6118
#define D2CRTC_CONTROL 0x6880
+#define D2CRTC_STATUS 0x689c
#define D2CRTC_UPDATE_LOCK 0x68E8
#define D2GRPH_PRIMARY_SURFACE_ADDRESS 0x6910
#define D2GRPH_SECONDARY_SURFACE_ADDRESS 0x6918
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
new file mode 100644
index 00000000000..bd2e7aa85c1
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -0,0 +1,767 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#include <linux/firmware.h>
+#include <linux/platform_device.h>
+#include "drmP.h"
+#include "radeon.h"
+#include "radeon_drm.h"
+#include "rv770d.h"
+#include "atom.h"
+#include "avivod.h"
+#include "evergreen_reg.h"
+
+static void evergreen_gpu_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+ bool connected = false;
+ /* XXX */
+ return connected;
+}
+
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd)
+{
+ /* XXX */
+}
+
+void evergreen_hpd_init(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+
+void evergreen_bandwidth_update(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+void evergreen_hpd_fini(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+static int evergreen_mc_wait_for_idle(struct radeon_device *rdev)
+{
+ unsigned i;
+ u32 tmp;
+
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ /* read MC_STATUS */
+ tmp = RREG32(SRBM_STATUS) & 0x1F00;
+ if (!tmp)
+ return 0;
+ udelay(1);
+ }
+ return -1;
+}
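evergreen_mc_wait_for_idle() above is a bounded poll: it samples the busy bits in SRBM_STATUS once per iteration, up to rdev->usec_timeout times, and reports success the moment the 0x1F00 mask reads back clear. The host-side sketch below reproduces just that control flow; fake_srbm_status(), the timeout value and main() are made-up stand-ins for the real MMIO access and are not part of the patch.

    #include <stdint.h>
    #include <stdio.h>

    #define MC_BUSY_MASK 0x1f00u          /* same busy mask the patch tests */

    /* Fake "register": pretends the MC is busy for the first few reads. */
    static uint32_t fake_srbm_status(void)
    {
        static int reads;
        return (reads++ < 3) ? MC_BUSY_MASK : 0;
    }

    /* Bounded poll: return 0 once the busy bits clear, -1 on timeout.
     * Mirrors the structure of evergreen_mc_wait_for_idle(). */
    static int wait_for_mc_idle(unsigned timeout_us)
    {
        unsigned i;

        for (i = 0; i < timeout_us; i++) {
            if (!(fake_srbm_status() & MC_BUSY_MASK))
                return 0;
            /* udelay(1) in the kernel; omitted in this host-side sketch */
        }
        return -1;
    }

    int main(void)
    {
        printf("mc idle: %d\n", wait_for_mc_idle(100));
        return 0;
    }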
+
+/*
+ * GART
+ */
+int evergreen_pcie_gart_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int r, i;
+
+ if (rdev->gart.table.vram.robj == NULL) {
+ dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
+ return -EINVAL;
+ }
+ r = radeon_gart_table_vram_pin(rdev);
+ if (r)
+ return r;
+ radeon_gart_restore(rdev);
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
+ WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
+ WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
+ RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
+ WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
+ (u32)(rdev->dummy_page.addr >> 12));
+ for (i = 1; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ r600_pcie_gart_tlb_flush(rdev);
+ rdev->gart.ready = true;
+ return 0;
+}
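The VM_CONTEXT0 page-table registers written above take 4 KiB page-frame numbers rather than byte addresses, which is why gtt_start, gtt_end and the table address are all shifted right by 12 before being programmed. A small sketch of that conversion, using entirely hypothetical addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define GPU_PAGE_SHIFT 12   /* the VM context registers take 4 KiB page numbers */

    int main(void)
    {
        /* hypothetical aperture placement, not taken from real hardware */
        uint64_t gtt_start  = 0x0000000040000000ull;   /* 1 GiB */
        uint64_t gtt_end    = 0x000000005fffffffull;   /* +512 MiB, inclusive end */
        uint64_t table_addr = 0x0000000000100000ull;   /* page table in VRAM */

        printf("PAGE_TABLE_START_ADDR = 0x%08llx\n",
               (unsigned long long)(gtt_start >> GPU_PAGE_SHIFT));
        printf("PAGE_TABLE_END_ADDR   = 0x%08llx\n",
               (unsigned long long)(gtt_end >> GPU_PAGE_SHIFT));
        printf("PAGE_TABLE_BASE_ADDR  = 0x%08llx\n",
               (unsigned long long)(table_addr >> GPU_PAGE_SHIFT));
        return 0;
    }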
+
+void evergreen_pcie_gart_disable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i, r;
+
+ /* Disable all tables */
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ if (rdev->gart.table.vram.robj) {
+ r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+ if (likely(r == 0)) {
+ radeon_bo_kunmap(rdev->gart.table.vram.robj);
+ radeon_bo_unpin(rdev->gart.table.vram.robj);
+ radeon_bo_unreserve(rdev->gart.table.vram.robj);
+ }
+ }
+}
+
+void evergreen_pcie_gart_fini(struct radeon_device *rdev)
+{
+ evergreen_pcie_gart_disable(rdev);
+ radeon_gart_table_vram_free(rdev);
+ radeon_gart_fini(rdev);
+}
+
+
+void evergreen_agp_enable(struct radeon_device *rdev)
+{
+ u32 tmp;
+ int i;
+
+ /* Setup L2 cache */
+ WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
+ ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
+ EFFECTIVE_L2_QUEUE_SIZE(7));
+ WREG32(VM_L2_CNTL2, 0);
+ WREG32(VM_L2_CNTL3, BANK_SELECT(0) | CACHE_UPDATE_MODE(2));
+ /* Setup TLB control */
+ tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
+ SYSTEM_ACCESS_MODE_NOT_IN_SYS |
+ SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
+ EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
+ WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
+ WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
+ for (i = 0; i < 7; i++)
+ WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
+}
+
+static void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+ save->vga_control[0] = RREG32(D1VGA_CONTROL);
+ save->vga_control[1] = RREG32(D2VGA_CONTROL);
+ save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
+ save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
+ save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
+ save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
+ save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
+ save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
+ save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
+ save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
+ save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
+ save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
+ save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
+ save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+
+ /* Stop all video */
+ WREG32(VGA_RENDER_CONTROL, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+
+ WREG32(D1VGA_CONTROL, 0);
+ WREG32(D2VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D3VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D4VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D5VGA_CONTROL, 0);
+ WREG32(EVERGREEN_D6VGA_CONTROL, 0);
+}
+
+static void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save)
+{
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC0_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC1_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC2_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC3_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC4_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+ WREG32(EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS + EVERGREEN_CRTC5_REGISTER_OFFSET,
+ (u32)rdev->mc.vram_start);
+
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(rdev->mc.vram_start));
+ WREG32(EVERGREEN_VGA_MEMORY_BASE_ADDRESS, (u32)rdev->mc.vram_start);
+ /* Unlock host access */
+ WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
+ mdelay(1);
+ /* Restore video state */
+ WREG32(D1VGA_CONTROL, save->vga_control[0]);
+ WREG32(D2VGA_CONTROL, save->vga_control[1]);
+ WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
+ WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
+ WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
+ WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
+ WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
+ WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
+ WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
+}
+
+static void evergreen_mc_program(struct radeon_device *rdev)
+{
+ struct evergreen_mc_save save;
+ u32 tmp;
+ int i, j;
+
+ /* Initialize HDP */
+ for (i = 0, j = 0; i < 32; i++, j += 0x18) {
+ WREG32((0x2c14 + j), 0x00000000);
+ WREG32((0x2c18 + j), 0x00000000);
+ WREG32((0x2c1c + j), 0x00000000);
+ WREG32((0x2c20 + j), 0x00000000);
+ WREG32((0x2c24 + j), 0x00000000);
+ }
+ WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);
+
+ evergreen_mc_stop(rdev, &save);
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ /* Lockout access through VGA aperture*/
+ WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
+ /* Update configuration */
+ if (rdev->flags & RADEON_IS_AGP) {
+ if (rdev->mc.vram_start < rdev->mc.gtt_start) {
+ /* VRAM before AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.gtt_end >> 12);
+ } else {
+ /* VRAM after AGP */
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.gtt_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ } else {
+ WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ rdev->mc.vram_start >> 12);
+ WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ rdev->mc.vram_end >> 12);
+ }
+ WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
+ tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
+ tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
+ WREG32(MC_VM_FB_LOCATION, tmp);
+ WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
+ WREG32(HDP_NONSURFACE_INFO, (2 << 7));
+ WREG32(HDP_NONSURFACE_SIZE, (rdev->mc.mc_vram_size - 1) | 0x3FF);
+ if (rdev->flags & RADEON_IS_AGP) {
+ WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 16);
+ WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 16);
+ WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
+ } else {
+ WREG32(MC_VM_AGP_BASE, 0);
+ WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
+ WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
+ }
+ if (evergreen_mc_wait_for_idle(rdev)) {
+ dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
+ }
+ evergreen_mc_resume(rdev, &save);
+ /* we need to own VRAM, so turn off the VGA renderer here
+ * to stop it overwriting our objects */
+ rv515_vga_render_disable(rdev);
+}
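evergreen_mc_program() packs the visible VRAM range into a single 32-bit value: both the start and the (inclusive) end address are reduced to 16 MiB units by the >> 24, with the start in the low half-word and the end in the high half-word. The sketch below shows the same packing on made-up addresses; the register name is kept only for illustration.

    #include <stdint.h>
    #include <stdio.h>

    /* Pack the VRAM range into the FB_LOCATION layout used by the patch:
     * bits 15:0  = start of VRAM in 16 MiB units,
     * bits 31:16 = end of VRAM in 16 MiB units. */
    static uint32_t pack_fb_location(uint64_t vram_start, uint64_t vram_end)
    {
        uint32_t tmp = ((uint32_t)(vram_end >> 24) & 0xFFFF) << 16;

        tmp |= (uint32_t)(vram_start >> 24) & 0xFFFF;
        return tmp;
    }

    int main(void)
    {
        /* hypothetical 512 MiB of VRAM mapped at 0 in the GPU address space */
        uint64_t start = 0x00000000ull;
        uint64_t end   = 0x1fffffffull;

        printf("MC_VM_FB_LOCATION = 0x%08x\n", pack_fb_location(start, end));
        return 0;
    }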
+
+#if 0
+/*
+ * CP.
+ */
+static void evergreen_cp_stop(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+
+static int evergreen_cp_load_microcode(struct radeon_device *rdev)
+{
+ /* XXX */
+
+ return 0;
+}
+
+
+/*
+ * Core functions
+ */
+static u32 evergreen_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+ u32 num_backends,
+ u32 backend_disable_mask)
+{
+ u32 backend_map = 0;
+
+ return backend_map;
+}
+#endif
+
+static void evergreen_gpu_init(struct radeon_device *rdev)
+{
+ /* XXX */
+}
+
+int evergreen_mc_init(struct radeon_device *rdev)
+{
+ fixed20_12 a;
+ u32 tmp;
+ int chansize, numchan;
+
+ /* Get VRAM information */
+ rdev->mc.vram_is_ddr = true;
+ tmp = RREG32(MC_ARB_RAMCFG);
+ if (tmp & CHANSIZE_OVERRIDE) {
+ chansize = 16;
+ } else if (tmp & CHANSIZE_MASK) {
+ chansize = 64;
+ } else {
+ chansize = 32;
+ }
+ tmp = RREG32(MC_SHARED_CHMAP);
+ switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
+ case 0:
+ default:
+ numchan = 1;
+ break;
+ case 1:
+ numchan = 2;
+ break;
+ case 2:
+ numchan = 4;
+ break;
+ case 3:
+ numchan = 8;
+ break;
+ }
+ rdev->mc.vram_width = numchan * chansize;
+ /* Could the aperture size report 0? */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ /* Setup GPU memory space */
+ /* size in MB on evergreen */
+ rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ /* FIXME remove this once we support unmappable VRAM */
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
+ rdev->mc.mc_vram_size = rdev->mc.aper_size;
+ rdev->mc.real_vram_size = rdev->mc.aper_size;
+ }
+ r600_vram_gtt_location(rdev, &rdev->mc);
+ /* FIXME: we should enforce default clock in case GPU is not in
+ * default setup
+ */
+ a.full = rfixed_const(100);
+ rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
+ rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ return 0;
+}
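The memory-width computation in evergreen_mc_init() is simply channels x channel size: MC_ARB_RAMCFG selects a 16-, 32- or 64-bit channel, and the 2-bit NOOFCHAN field of MC_SHARED_CHMAP maps to 1, 2, 4 or 8 channels. A user-space sketch of that decode follows; the sample field values are invented.

    #include <stdio.h>

    /* Map the 2-bit NOOFCHAN field (0..3) to a channel count, as in the patch. */
    static int decode_numchan(unsigned noofchan_field)
    {
        switch (noofchan_field) {
        case 1:  return 2;
        case 2:  return 4;
        case 3:  return 8;
        default: return 1;
        }
    }

    int main(void)
    {
        /* hypothetical board: 32-bit channels, NOOFCHAN field = 2 (four channels) */
        int chansize = 32;
        int numchan  = decode_numchan(2);

        printf("vram_width = %d bits\n", numchan * chansize);   /* prints 128 */
        return 0;
    }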
+
+int evergreen_gpu_reset(struct radeon_device *rdev)
+{
+ /* FIXME: implement for evergreen */
+ return 0;
+}
+
+static int evergreen_startup(struct radeon_device *rdev)
+{
+#if 0
+ int r;
+
+ if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+ r = r600_init_microcode(rdev);
+ if (r) {
+ DRM_ERROR("Failed to load firmware!\n");
+ return r;
+ }
+ }
+#endif
+ evergreen_mc_program(rdev);
+#if 0
+ if (rdev->flags & RADEON_IS_AGP) {
+ evergreen_agp_enable(rdev);
+ } else {
+ r = evergreen_pcie_gart_enable(rdev);
+ if (r)
+ return r;
+ }
+#endif
+ evergreen_gpu_init(rdev);
+#if 0
+ if (!rdev->r600_blit.shader_obj) {
+ r = r600_blit_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed blitter (%d).\n", r);
+ return r;
+ }
+ }
+
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (unlikely(r != 0))
+ return r;
+ r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+ &rdev->r600_blit.shader_gpu_addr);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ if (r) {
+ DRM_ERROR("failed to pin blit object %d\n", r);
+ return r;
+ }
+
+ /* Enable IRQ */
+ r = r600_irq_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: IH init failed (%d).\n", r);
+ radeon_irq_kms_fini(rdev);
+ return r;
+ }
+ r600_irq_set(rdev);
+
+ r = radeon_ring_init(rdev, rdev->cp.ring_size);
+ if (r)
+ return r;
+ r = evergreen_cp_load_microcode(rdev);
+ if (r)
+ return r;
+ r = r600_cp_resume(rdev);
+ if (r)
+ return r;
+ /* the write back buffer is not vital so don't worry about failure */
+ r600_wb_enable(rdev);
+#endif
+ return 0;
+}
+
+int evergreen_resume(struct radeon_device *rdev)
+{
+ int r;
+
+ /* Do not reset the GPU before posting; unlike on r500 hw, on rv770 hw
+ * posting will perform the necessary tasks to bring the GPU back into
+ * good shape.
+ */
+ /* post card */
+ atom_asic_init(rdev->mode_info.atom_context);
+ /* Initialize clocks */
+ r = radeon_clocks_init(rdev);
+ if (r) {
+ return r;
+ }
+
+ r = evergreen_startup(rdev);
+ if (r) {
+ DRM_ERROR("r600 startup failed on resume\n");
+ return r;
+ }
+#if 0
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+ return r;
+ }
+#endif
+ return r;
+
+}
+
+int evergreen_suspend(struct radeon_device *rdev)
+{
+#if 0
+ int r;
+
+ /* FIXME: we should wait for ring to be empty */
+ r700_cp_stop(rdev);
+ rdev->cp.ready = false;
+ r600_wb_disable(rdev);
+ evergreen_pcie_gart_disable(rdev);
+ /* unpin shaders bo */
+ r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+ if (likely(r == 0)) {
+ radeon_bo_unpin(rdev->r600_blit.shader_obj);
+ radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+ }
+#endif
+ return 0;
+}
+
+static bool evergreen_card_posted(struct radeon_device *rdev)
+{
+ u32 reg;
+
+ /* first check CRTCs */
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+
+ /* then check MEM_SIZE, in case the crtcs are off */
+ if (RREG32(CONFIG_MEMSIZE))
+ return true;
+
+ return false;
+}
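evergreen_card_posted() treats the card as posted if any of the six CRTCs has its master-enable bit set, which it tests by OR-ing all six control registers together before falling back to the MEM_SIZE check. The sketch below mirrors that test on an array of made-up register values.

    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CRTC_MASTER_EN (1u << 0)   /* same bit evergreen_card_posted() tests */

    /* OR the control registers of all six CRTCs and test the enable bit,
     * exactly as the patch does (the values fed in here are invented). */
    static bool card_posted(const uint32_t crtc_control[6])
    {
        uint32_t reg = 0;
        int i;

        for (i = 0; i < 6; i++)
            reg |= crtc_control[i];
        return (reg & CRTC_MASTER_EN) != 0;
    }

    int main(void)
    {
        uint32_t ctl[6] = { 0, 0, CRTC_MASTER_EN, 0, 0, 0 };

        printf("posted: %s\n", card_posted(ctl) ? "yes" : "no");
        return 0;
    }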
+
+/* The plan is to move initialization into this function and use
+ * helper functions so that radeon_device_init does little more than
+ * call asic-specific functions. This should also allow us to remove
+ * a bunch of callback functions like vram_info.
+ */
+int evergreen_init(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_dummy_page_init(rdev);
+ if (r)
+ return r;
+ /* This doesn't do much */
+ r = radeon_gem_init(rdev);
+ if (r)
+ return r;
+ /* Read BIOS */
+ if (!radeon_get_bios(rdev)) {
+ if (ASIC_IS_AVIVO(rdev))
+ return -EINVAL;
+ }
+ /* Must be an ATOMBIOS */
+ if (!rdev->is_atom_bios) {
+ dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
+ return -EINVAL;
+ }
+ r = radeon_atombios_init(rdev);
+ if (r)
+ return r;
+ /* Post card if necessary */
+ if (!evergreen_card_posted(rdev)) {
+ if (!rdev->bios) {
+ dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+ return -EINVAL;
+ }
+ DRM_INFO("GPU not posted. posting now...\n");
+ atom_asic_init(rdev->mode_info.atom_context);
+ }
+ /* Initialize scratch registers */
+ r600_scratch_init(rdev);
+ /* Initialize surface registers */
+ radeon_surface_init(rdev);
+ /* Initialize clocks */
+ radeon_get_clock_info(rdev->ddev);
+ r = radeon_clocks_init(rdev);
+ if (r)
+ return r;
+ /* Initialize power management */
+ radeon_pm_init(rdev);
+ /* Fence driver */
+ r = radeon_fence_driver_init(rdev);
+ if (r)
+ return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r)
+ radeon_agp_disable(rdev);
+ }
+ /* initialize memory controller */
+ r = evergreen_mc_init(rdev);
+ if (r)
+ return r;
+ /* Memory manager */
+ r = radeon_bo_init(rdev);
+ if (r)
+ return r;
+#if 0
+ r = radeon_irq_kms_init(rdev);
+ if (r)
+ return r;
+
+ rdev->cp.ring_obj = NULL;
+ r600_ring_init(rdev, 1024 * 1024);
+
+ rdev->ih.ring_obj = NULL;
+ r600_ih_ring_init(rdev, 64 * 1024);
+
+ r = r600_pcie_gart_init(rdev);
+ if (r)
+ return r;
+#endif
+ rdev->accel_working = false;
+ r = evergreen_startup(rdev);
+ if (r) {
+ evergreen_suspend(rdev);
+ /*r600_wb_fini(rdev);*/
+ /*radeon_ring_fini(rdev);*/
+ /*evergreen_pcie_gart_fini(rdev);*/
+ rdev->accel_working = false;
+ }
+ if (rdev->accel_working) {
+ r = radeon_ib_pool_init(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ r = r600_ib_test(rdev);
+ if (r) {
+ DRM_ERROR("radeon: failed testing IB (%d).\n", r);
+ rdev->accel_working = false;
+ }
+ }
+ return 0;
+}
+
+void evergreen_fini(struct radeon_device *rdev)
+{
+ evergreen_suspend(rdev);
+#if 0
+ r600_blit_fini(rdev);
+ r600_irq_fini(rdev);
+ radeon_irq_kms_fini(rdev);
+ radeon_ring_fini(rdev);
+ r600_wb_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
+#endif
+ radeon_gem_fini(rdev);
+ radeon_fence_driver_fini(rdev);
+ radeon_clocks_fini(rdev);
+ radeon_agp_fini(rdev);
+ radeon_bo_fini(rdev);
+ radeon_atombios_fini(rdev);
+ kfree(rdev->bios);
+ rdev->bios = NULL;
+ radeon_dummy_page_fini(rdev);
+}
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
new file mode 100644
index 00000000000..f7c7c964343
--- /dev/null
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright 2010 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Alex Deucher
+ */
+#ifndef __EVERGREEN_REG_H__
+#define __EVERGREEN_REG_H__
+
+/* evergreen */
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS 0x310
+#define EVERGREEN_VGA_MEMORY_BASE_ADDRESS_HIGH 0x324
+#define EVERGREEN_D3VGA_CONTROL 0x3e0
+#define EVERGREEN_D4VGA_CONTROL 0x3e4
+#define EVERGREEN_D5VGA_CONTROL 0x3e8
+#define EVERGREEN_D6VGA_CONTROL 0x3ec
+
+#define EVERGREEN_P1PLL_SS_CNTL 0x414
+#define EVERGREEN_P2PLL_SS_CNTL 0x454
+# define EVERGREEN_PxPLL_SS_EN (1 << 12)
+/* GRPH blocks at 0x6800, 0x7400, 0x10000, 0x10c00, 0x11800, 0x12400 */
+#define EVERGREEN_GRPH_ENABLE 0x6800
+#define EVERGREEN_GRPH_CONTROL 0x6804
+# define EVERGREEN_GRPH_DEPTH(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_DEPTH_8BPP 0
+# define EVERGREEN_GRPH_DEPTH_16BPP 1
+# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
+/* 8 BPP */
+# define EVERGREEN_GRPH_FORMAT_INDEXED 0
+/* 16 BPP */
+# define EVERGREEN_GRPH_FORMAT_ARGB1555 0
+# define EVERGREEN_GRPH_FORMAT_ARGB565 1
+# define EVERGREEN_GRPH_FORMAT_ARGB4444 2
+# define EVERGREEN_GRPH_FORMAT_AI88 3
+# define EVERGREEN_GRPH_FORMAT_MONO16 4
+# define EVERGREEN_GRPH_FORMAT_BGRA5551 5
+/* 32 BPP */
+# define EVERGREEN_GRPH_FORMAT_ARGB8888 0
+# define EVERGREEN_GRPH_FORMAT_ARGB2101010 1
+# define EVERGREEN_GRPH_FORMAT_32BPP_DIG 2
+# define EVERGREEN_GRPH_FORMAT_8B_ARGB2101010 3
+# define EVERGREEN_GRPH_FORMAT_BGRA1010102 4
+# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
+# define EVERGREEN_GRPH_FORMAT_RGB111110 6
+# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+#define EVERGREEN_GRPH_SWAP_CONTROL 0x680c
+# define EVERGREEN_GRPH_ENDIAN_SWAP(x) (((x) & 0x3) << 0)
+# define EVERGREEN_GRPH_ENDIAN_NONE 0
+# define EVERGREEN_GRPH_ENDIAN_8IN16 1
+# define EVERGREEN_GRPH_ENDIAN_8IN32 2
+# define EVERGREEN_GRPH_ENDIAN_8IN64 3
+# define EVERGREEN_GRPH_RED_CROSSBAR(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_RED_SEL_R 0
+# define EVERGREEN_GRPH_RED_SEL_G 1
+# define EVERGREEN_GRPH_RED_SEL_B 2
+# define EVERGREEN_GRPH_RED_SEL_A 3
+# define EVERGREEN_GRPH_GREEN_CROSSBAR(x) (((x) & 0x3) << 6)
+# define EVERGREEN_GRPH_GREEN_SEL_G 0
+# define EVERGREEN_GRPH_GREEN_SEL_B 1
+# define EVERGREEN_GRPH_GREEN_SEL_A 2
+# define EVERGREEN_GRPH_GREEN_SEL_R 3
+# define EVERGREEN_GRPH_BLUE_CROSSBAR(x) (((x) & 0x3) << 8)
+# define EVERGREEN_GRPH_BLUE_SEL_B 0
+# define EVERGREEN_GRPH_BLUE_SEL_A 1
+# define EVERGREEN_GRPH_BLUE_SEL_R 2
+# define EVERGREEN_GRPH_BLUE_SEL_G 3
+# define EVERGREEN_GRPH_ALPHA_CROSSBAR(x) (((x) & 0x3) << 10)
+# define EVERGREEN_GRPH_ALPHA_SEL_A 0
+# define EVERGREEN_GRPH_ALPHA_SEL_R 1
+# define EVERGREEN_GRPH_ALPHA_SEL_G 2
+# define EVERGREEN_GRPH_ALPHA_SEL_B 3
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS 0x6810
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS 0x6814
+# define EVERGREEN_GRPH_DFQ_ENABLE (1 << 0)
+# define EVERGREEN_GRPH_SURFACE_ADDRESS_MASK 0xffffff00
+#define EVERGREEN_GRPH_PITCH 0x6818
+#define EVERGREEN_GRPH_PRIMARY_SURFACE_ADDRESS_HIGH 0x681c
+#define EVERGREEN_GRPH_SECONDARY_SURFACE_ADDRESS_HIGH 0x6820
+#define EVERGREEN_GRPH_SURFACE_OFFSET_X 0x6824
+#define EVERGREEN_GRPH_SURFACE_OFFSET_Y 0x6828
+#define EVERGREEN_GRPH_X_START 0x682c
+#define EVERGREEN_GRPH_Y_START 0x6830
+#define EVERGREEN_GRPH_X_END 0x6834
+#define EVERGREEN_GRPH_Y_END 0x6838
+
+/* CUR blocks at 0x6998, 0x7598, 0x10198, 0x10d98, 0x11998, 0x12598 */
+#define EVERGREEN_CUR_CONTROL 0x6998
+# define EVERGREEN_CURSOR_EN (1 << 0)
+# define EVERGREEN_CURSOR_MODE(x) (((x) & 0x3) << 8)
+# define EVERGREEN_CURSOR_MONO 0
+# define EVERGREEN_CURSOR_24_1 1
+# define EVERGREEN_CURSOR_24_8_PRE_MULT 2
+# define EVERGREEN_CURSOR_24_8_UNPRE_MULT 3
+# define EVERGREEN_CURSOR_2X_MAGNIFY (1 << 16)
+# define EVERGREEN_CURSOR_FORCE_MC_ON (1 << 20)
+# define EVERGREEN_CURSOR_URGENT_CONTROL(x) (((x) & 0x7) << 24)
+# define EVERGREEN_CURSOR_URGENT_ALWAYS 0
+# define EVERGREEN_CURSOR_URGENT_1_8 1
+# define EVERGREEN_CURSOR_URGENT_1_4 2
+# define EVERGREEN_CURSOR_URGENT_3_8 3
+# define EVERGREEN_CURSOR_URGENT_1_2 4
+#define EVERGREEN_CUR_SURFACE_ADDRESS 0x699c
+# define EVERGREEN_CUR_SURFACE_ADDRESS_MASK 0xfffff000
+#define EVERGREEN_CUR_SIZE 0x69a0
+#define EVERGREEN_CUR_SURFACE_ADDRESS_HIGH 0x69a4
+#define EVERGREEN_CUR_POSITION 0x69a8
+#define EVERGREEN_CUR_HOT_SPOT 0x69ac
+#define EVERGREEN_CUR_COLOR1 0x69b0
+#define EVERGREEN_CUR_COLOR2 0x69b4
+#define EVERGREEN_CUR_UPDATE 0x69b8
+# define EVERGREEN_CURSOR_UPDATE_PENDING (1 << 0)
+# define EVERGREEN_CURSOR_UPDATE_TAKEN (1 << 1)
+# define EVERGREEN_CURSOR_UPDATE_LOCK (1 << 16)
+# define EVERGREEN_CURSOR_DISABLE_MULTIPLE_UPDATE (1 << 24)
+
+/* LUT blocks at 0x69e0, 0x75e0, 0x101e0, 0x10de0, 0x119e0, 0x125e0 */
+#define EVERGREEN_DC_LUT_RW_MODE 0x69e0
+#define EVERGREEN_DC_LUT_RW_INDEX 0x69e4
+#define EVERGREEN_DC_LUT_SEQ_COLOR 0x69e8
+#define EVERGREEN_DC_LUT_PWL_DATA 0x69ec
+#define EVERGREEN_DC_LUT_30_COLOR 0x69f0
+#define EVERGREEN_DC_LUT_VGA_ACCESS_ENABLE 0x69f4
+#define EVERGREEN_DC_LUT_WRITE_EN_MASK 0x69f8
+#define EVERGREEN_DC_LUT_AUTOFILL 0x69fc
+#define EVERGREEN_DC_LUT_CONTROL 0x6a00
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE 0x6a04
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN 0x6a08
+#define EVERGREEN_DC_LUT_BLACK_OFFSET_RED 0x6a0c
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE 0x6a10
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN 0x6a14
+#define EVERGREEN_DC_LUT_WHITE_OFFSET_RED 0x6a18
+
+#define EVERGREEN_DATA_FORMAT 0x6b00
+# define EVERGREEN_INTERLEAVE_EN (1 << 0)
+#define EVERGREEN_DESKTOP_HEIGHT 0x6b04
+
+#define EVERGREEN_VIEWPORT_START 0x6d70
+#define EVERGREEN_VIEWPORT_SIZE 0x6d74
+
+/* display controller offsets used for crtc/cur/lut/grph/viewport/etc. */
+#define EVERGREEN_CRTC0_REGISTER_OFFSET (0x6df0 - 0x6df0)
+#define EVERGREEN_CRTC1_REGISTER_OFFSET (0x79f0 - 0x6df0)
+#define EVERGREEN_CRTC2_REGISTER_OFFSET (0x105f0 - 0x6df0)
+#define EVERGREEN_CRTC3_REGISTER_OFFSET (0x111f0 - 0x6df0)
+#define EVERGREEN_CRTC4_REGISTER_OFFSET (0x11df0 - 0x6df0)
+#define EVERGREEN_CRTC5_REGISTER_OFFSET (0x129f0 - 0x6df0)
+
+/* CRTC blocks at 0x6df0, 0x79f0, 0x105f0, 0x111f0, 0x11df0, 0x129f0 */
+#define EVERGREEN_CRTC_CONTROL 0x6e70
+# define EVERGREEN_CRTC_MASTER_EN (1 << 0)
+#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
+
+#define EVERGREEN_DC_GPIO_HPD_MASK 0x64b0
+#define EVERGREEN_DC_GPIO_HPD_A 0x64b4
+#define EVERGREEN_DC_GPIO_HPD_EN 0x64b8
+#define EVERGREEN_DC_GPIO_HPD_Y 0x64bc
+
+#endif
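All of the per-display registers above are given as CRTC0-relative addresses, and the EVERGREEN_CRTCn_REGISTER_OFFSET constants express each CRTC block as a delta from the 0x6df0 base, so driver code reaches CRTC n by adding the offset to the CRTC0 register (as evergreen_mc_stop() does). A quick sketch that prints the resulting absolute addresses:

    #include <stdint.h>
    #include <stdio.h>

    /* CRTC block bases from evergreen_reg.h, expressed relative to CRTC0 */
    static const uint32_t crtc_offsets[6] = {
        0x6df0  - 0x6df0,
        0x79f0  - 0x6df0,
        0x105f0 - 0x6df0,
        0x111f0 - 0x6df0,
        0x11df0 - 0x6df0,
        0x129f0 - 0x6df0,
    };

    #define EVERGREEN_CRTC_CONTROL 0x6e70

    int main(void)
    {
        int i;

        /* absolute register address of CRTC_CONTROL for each display controller */
        for (i = 0; i < 6; i++)
            printf("CRTC%d_CONTROL at 0x%05x\n", i,
                   (unsigned)(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]));
        return 0;
    }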
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index c0d4650cdb7..91eb762eb3f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -197,13 +197,13 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
{
uint32_t tmp;
+ radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32(RADEON_AIC_CNTL, tmp);
/* set address range for PCI address translate */
- WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- WREG32(RADEON_AIC_HI_ADDR, tmp);
+ WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_start);
+ WREG32(RADEON_AIC_HI_ADDR, rdev->mc.gtt_end);
/* set PCI GART page-table base address */
WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -312,9 +312,11 @@ int r100_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_FP_DETECT_STAT) {
queue_hotplug = true;
@@ -366,8 +368,8 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, PACKET0(RADEON_RB3D_ZCACHE_CTLSTAT, 0));
radeon_ring_write(rdev, RADEON_RB3D_ZC_FLUSH_ALL);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(0x1720, 0));
- radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, RADEON_WAIT_2D_IDLECLEAN | RADEON_WAIT_3D_IDLECLEAN);
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -1701,7 +1703,7 @@ int r100_gui_wait_for_idle(struct radeon_device *rdev)
}
for (i = 0; i < rdev->usec_timeout; i++) {
tmp = RREG32(RADEON_RBBM_STATUS);
- if (!(tmp & (1 << 31))) {
+ if (!(tmp & RADEON_RBBM_ACTIVE)) {
return 0;
}
DRM_UDELAY(1);
@@ -1716,8 +1718,8 @@ int r100_mc_wait_for_idle(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
- tmp = RREG32(0x0150);
- if (tmp & (1 << 2)) {
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & RADEON_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
@@ -1790,7 +1792,7 @@ int r100_gpu_reset(struct radeon_device *rdev)
}
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 31)) {
+ if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
}
@@ -1800,6 +1802,9 @@ int r100_gpu_reset(struct radeon_device *rdev)
void r100_set_common_regs(struct radeon_device *rdev)
{
+ struct drm_device *dev = rdev->ddev;
+ bool force_dac2 = false;
+
/* set these so they don't interfere with anything */
WREG32(RADEON_OV0_SCALE_CNTL, 0);
WREG32(RADEON_SUBPIC_CNTL, 0);
@@ -1808,6 +1813,68 @@ void r100_set_common_regs(struct radeon_device *rdev)
WREG32(RADEON_DVI_I2C_CNTL_1, 0);
WREG32(RADEON_CAP0_TRIG_CNTL, 0);
WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+
+ /* always set up dac2 on rn50 and some rv100 as lots
+ * of servers seem to wire it up to a VGA port but
+ * don't report it in the bios connector
+ * table.
+ */
+ switch (dev->pdev->device) {
+ /* RN50 */
+ case 0x515e:
+ case 0x5969:
+ force_dac2 = true;
+ break;
+ /* RV100*/
+ case 0x5159:
+ case 0x515a:
+ /* DELL triple head servers */
+ if ((dev->pdev->subsystem_vendor == 0x1028 /* DELL */) &&
+ ((dev->pdev->subsystem_device == 0x016c) ||
+ (dev->pdev->subsystem_device == 0x016d) ||
+ (dev->pdev->subsystem_device == 0x016e) ||
+ (dev->pdev->subsystem_device == 0x016f) ||
+ (dev->pdev->subsystem_device == 0x0170) ||
+ (dev->pdev->subsystem_device == 0x017d) ||
+ (dev->pdev->subsystem_device == 0x017e) ||
+ (dev->pdev->subsystem_device == 0x0183) ||
+ (dev->pdev->subsystem_device == 0x018a) ||
+ (dev->pdev->subsystem_device == 0x019a)))
+ force_dac2 = true;
+ break;
+ }
+
+ if (force_dac2) {
+ u32 disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
+ u32 tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
+ u32 dac2_cntl = RREG32(RADEON_DAC_CNTL2);
+
+ /* For CRT on DAC2, don't turn it on if the BIOS didn't
+ * enable it, even if it's detected.
+ */
+
+ /* force it to crtc0 */
+ dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
+ dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
+ disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
+
+ /* set up the TV DAC */
+ tv_dac_cntl &= ~(RADEON_TV_DAC_PEDESTAL |
+ RADEON_TV_DAC_STD_MASK |
+ RADEON_TV_DAC_RDACPD |
+ RADEON_TV_DAC_GDACPD |
+ RADEON_TV_DAC_BDACPD |
+ RADEON_TV_DAC_BGADJ_MASK |
+ RADEON_TV_DAC_DACADJ_MASK);
+ tv_dac_cntl |= (RADEON_TV_DAC_NBLANK |
+ RADEON_TV_DAC_NHOLD |
+ RADEON_TV_DAC_STD_PS2 |
+ (0x58 << 16));
+
+ WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
+ WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
+ WREG32(RADEON_DAC_CNTL2, dac2_cntl);
+ }
}
/*
@@ -1889,17 +1956,20 @@ static u32 r100_get_accessible_vram(struct radeon_device *rdev)
void r100_vram_init_sizes(struct radeon_device *rdev)
{
u64 config_aper_size;
- u32 accessible;
+ /* work out accessible VRAM */
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+ rdev->mc.visible_vram_size = r100_get_accessible_vram(rdev);
+ /* FIXME we don't use the second aperture yet when we could use it */
+ if (rdev->mc.visible_vram_size > rdev->mc.aper_size)
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE);
-
if (rdev->flags & RADEON_IS_IGP) {
uint32_t tom;
/* read NB_TOM to get the amount of ram stolen for the GPU */
tom = RREG32(RADEON_NB_TOM);
rdev->mc.real_vram_size = (((tom >> 16) - (tom & 0xffff) + 1) << 16);
- /* for IGPs we need to keep VRAM where it was put by the BIOS */
- rdev->mc.vram_location = (tom & 0xffff) << 16;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
} else {
@@ -1911,30 +1981,19 @@ void r100_vram_init_sizes(struct radeon_device *rdev)
rdev->mc.real_vram_size = 8192 * 1024;
WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
}
- /* let driver place VRAM */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
- * Novell bug 204882 + along with lots of ubuntu ones */
+ /* Fix for RN50, M6, M7 with 8/16/32(??) MBs of VRAM -
+ * Novell bug 204882 + along with lots of ubuntu ones
+ */
if (config_aper_size > rdev->mc.real_vram_size)
rdev->mc.mc_vram_size = config_aper_size;
else
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
}
-
- /* work out accessible VRAM */
- accessible = r100_get_accessible_vram(rdev);
-
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (accessible > rdev->mc.aper_size)
- accessible = rdev->mc.aper_size;
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ /* FIXME remove this once we support unmappable VRAM */
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
+ }
}
void r100_vga_set_state(struct radeon_device *rdev, bool state)
@@ -1951,11 +2010,18 @@ void r100_vga_set_state(struct radeon_device *rdev, bool state)
WREG32(RADEON_CONFIG_CNTL, temp);
}
-void r100_vram_info(struct radeon_device *rdev)
+void r100_mc_init(struct radeon_device *rdev)
{
- r100_vram_get_type(rdev);
+ u64 base;
+ r100_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
+ base = rdev->mc.aper_base;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
}
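On IGP parts the new r100_mc_init() keeps VRAM where the BIOS put it by deriving the base from RADEON_NB_TOM, whose low and high half-words give the first and last 64 KiB block of the stolen region; r100_vram_init_sizes() uses the same register to compute the size. A sketch of that decode with an invented register value:

    #include <stdint.h>
    #include <stdio.h>

    /* NB_TOM packs the stolen-memory range in 64 KiB units:
     * bits 15:0 = first block, bits 31:16 = last block. */
    static void decode_nb_tom(uint32_t tom, uint64_t *base, uint64_t *size)
    {
        *base = (uint64_t)(tom & 0xffff) << 16;
        *size = (uint64_t)((tom >> 16) - (tom & 0xffff) + 1) << 16;
    }

    int main(void)
    {
        /* hypothetical value: 64 MiB of stolen RAM starting at 448 MiB */
        uint32_t tom = (0x1fff << 16) | 0x1c00;
        uint64_t base, size;

        decode_nb_tom(tom, &base, &size);
        printf("base = 0x%llx, size = %llu MiB\n",
               (unsigned long long)base, (unsigned long long)(size >> 20));
        return 0;
    }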
@@ -3226,10 +3292,9 @@ void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save)
void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save)
{
/* Update base address for crtc */
- WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_location);
+ WREG32(R_00023C_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
if (!(rdev->flags & RADEON_SINGLE_CRTC)) {
- WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR,
- rdev->mc.vram_location);
+ WREG32(R_00033C_CRTC2_DISPLAY_BASE_ADDR, rdev->mc.vram_start);
}
/* Restore CRTC registers */
WREG8(R_0003C2_GENMO_WT, save->GENMO_WT);
@@ -3390,32 +3455,6 @@ void r100_fini(struct radeon_device *rdev)
rdev->bios = NULL;
}
-int r100_mc_init(struct radeon_device *rdev)
-{
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- if (rdev->flags & RADEON_IS_IGP) {
- tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
- rdev->mc.vram_location = tmp << 16;
- }
- if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r) {
- radeon_agp_disable(rdev);
- } else {
- rdev->mc.gtt_location = rdev->mc.agp_base;
- }
- }
- r = radeon_mc_setup(rdev);
- if (r)
- return r;
- return 0;
-}
-
int r100_init(struct radeon_device *rdev)
{
int r;
@@ -3458,12 +3497,15 @@ int r100_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- r100_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r100_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize VRAM */
+ r100_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index ff1e0cd608b..1146c9909c2 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -31,6 +31,7 @@
#include "radeon_reg.h"
#include "radeon.h"
+#include "r100d.h"
#include "r200_reg_safe.h"
#include "r100_track.h"
@@ -79,6 +80,51 @@ static int r200_get_vtx_size_0(uint32_t vtx_fmt_0)
return vtx_size;
}
+int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence)
+{
+ uint32_t size;
+ uint32_t cur_size;
+ int i, num_loops;
+ int r = 0;
+
+ /* radeon pitch is /64 */
+ size = num_pages << PAGE_SHIFT;
+ num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
+ r = radeon_ring_lock(rdev, num_loops * 4 + 64);
+ if (r) {
+ DRM_ERROR("radeon: moving bo (%d).\n", r);
+ return r;
+ }
+ /* Must wait for 2D idle & clean before DMA or hangs might happen */
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, (1 << 16));
+ for (i = 0; i < num_loops; i++) {
+ cur_size = size;
+ if (cur_size > 0x1FFFFF) {
+ cur_size = 0x1FFFFF;
+ }
+ size -= cur_size;
+ radeon_ring_write(rdev, PACKET0(0x720, 2));
+ radeon_ring_write(rdev, src_offset);
+ radeon_ring_write(rdev, dst_offset);
+ radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
+ src_offset += cur_size;
+ dst_offset += cur_size;
+ }
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
+ if (fence) {
+ r = radeon_fence_emit(rdev, fence);
+ }
+ radeon_ring_unlock_commit(rdev);
+ return r;
+}
+
+
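r200_copy_dma(), moved here from r300.c, splits the copy into packets of at most 0x1FFFFF bytes: it precomputes the loop count with DIV_ROUND_UP and clamps the final chunk. The stand-alone sketch below walks the same chunking arithmetic for a hypothetical 8 MiB copy without touching the ring.

    #include <stdint.h>
    #include <stdio.h>

    #define MAX_DMA_CHUNK 0x1FFFFF   /* per-packet size limit used by the patch */

    int main(void)
    {
        /* hypothetical 8 MiB copy, split the same way r200_copy_dma splits it */
        uint64_t src = 0x00000000, dst = 0x10000000;
        uint32_t size = 8 * 1024 * 1024;
        int loops = (size + MAX_DMA_CHUNK - 1) / MAX_DMA_CHUNK;  /* DIV_ROUND_UP */
        int i;

        for (i = 0; i < loops; i++) {
            uint32_t cur = size > MAX_DMA_CHUNK ? MAX_DMA_CHUNK : size;

            printf("chunk %d: src=0x%llx dst=0x%llx len=0x%x\n", i,
                   (unsigned long long)src, (unsigned long long)dst, cur);
            size -= cur;
            src  += cur;
            dst  += cur;
        }
        return 0;
    }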
static int r200_get_vtx_size_1(uint32_t vtx_fmt_1)
{
int vtx_size, i, tex_size;
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 43b55a030b4..4cef90cd74e 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -117,18 +117,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* discard memory request outside of configured range */
tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
- WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - RADEON_GPU_PAGE_SIZE;
+ WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start);
+ tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK;
WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
table_addr = rdev->gart.table_addr;
WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
/* FIXME: setup default page */
- WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
+ WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start);
WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
/* Clear error */
WREG32_PCIE(0x18, 0);
@@ -174,18 +175,20 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
/* Who ever call radeon_fence_emit should call ring_lock and ask
* for enough space (today caller are ib schedule and buffer move) */
/* Write SC register so SC & US assert idle */
- radeon_ring_write(rdev, PACKET0(0x43E0, 0));
+ radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_TL, 0));
radeon_ring_write(rdev, 0);
- radeon_ring_write(rdev, PACKET0(0x43E4, 0));
+ radeon_ring_write(rdev, PACKET0(R300_RE_SCISSORS_BR, 0));
radeon_ring_write(rdev, 0);
/* Flush 3D cache */
- radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
- radeon_ring_write(rdev, (2 << 0));
- radeon_ring_write(rdev, PACKET0(0x4F18, 0));
- radeon_ring_write(rdev, (1 << 0));
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_RB3D_DC_FLUSH);
+ radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
+ radeon_ring_write(rdev, R300_ZC_FLUSH);
/* Wait until IDLE & CLEAN */
- radeon_ring_write(rdev, PACKET0(0x1720, 0));
- radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
+ radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
+ radeon_ring_write(rdev, (RADEON_WAIT_3D_IDLECLEAN |
+ RADEON_WAIT_2D_IDLECLEAN |
+ RADEON_WAIT_DMA_GUI_IDLE));
radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
RADEON_HDP_READ_BUFFER_INVALIDATE);
@@ -198,50 +201,6 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}
-int r300_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence)
-{
- uint32_t size;
- uint32_t cur_size;
- int i, num_loops;
- int r = 0;
-
- /* radeon pitch is /64 */
- size = num_pages << PAGE_SHIFT;
- num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
- r = radeon_ring_lock(rdev, num_loops * 4 + 64);
- if (r) {
- DRM_ERROR("radeon: moving bo (%d).\n", r);
- return r;
- }
- /* Must wait for 2D idle & clean before DMA or hangs might happen */
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0 ));
- radeon_ring_write(rdev, (1 << 16));
- for (i = 0; i < num_loops; i++) {
- cur_size = size;
- if (cur_size > 0x1FFFFF) {
- cur_size = 0x1FFFFF;
- }
- size -= cur_size;
- radeon_ring_write(rdev, PACKET0(0x720, 2));
- radeon_ring_write(rdev, src_offset);
- radeon_ring_write(rdev, dst_offset);
- radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
- src_offset += cur_size;
- dst_offset += cur_size;
- }
- radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
- radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
- if (fence) {
- r = radeon_fence_emit(rdev, fence);
- }
- radeon_ring_unlock_commit(rdev);
- return r;
-}
-
void r300_ring_start(struct radeon_device *rdev)
{
unsigned gb_tile_config;
@@ -281,8 +240,8 @@ void r300_ring_start(struct radeon_device *rdev)
radeon_ring_write(rdev,
RADEON_WAIT_2D_IDLECLEAN |
RADEON_WAIT_3D_IDLECLEAN);
- radeon_ring_write(rdev, PACKET0(0x170C, 0));
- radeon_ring_write(rdev, 1 << 31);
+ radeon_ring_write(rdev, PACKET0(R300_DST_PIPE_CONFIG, 0));
+ radeon_ring_write(rdev, R300_PIPE_AUTO_CONFIG);
radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
radeon_ring_write(rdev, 0);
radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
@@ -349,8 +308,8 @@ int r300_mc_wait_for_idle(struct radeon_device *rdev)
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
- tmp = RREG32(0x0150);
- if (tmp & (1 << 4)) {
+ tmp = RREG32(RADEON_MC_STATUS);
+ if (tmp & R300_MC_IDLE) {
return 0;
}
DRM_UDELAY(1);
@@ -395,8 +354,8 @@ void r300_gpu_init(struct radeon_device *rdev)
"programming pipes. Bad things might happen.\n");
}
- tmp = RREG32(0x170C);
- WREG32(0x170C, tmp | (1 << 31));
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
WREG32(R300_RB2D_DSTCACHE_MODE,
R300_DC_AUTOFLUSH_ENABLE |
@@ -437,8 +396,8 @@ int r300_ga_reset(struct radeon_device *rdev)
/* GA still busy soft reset it */
WREG32(0x429C, 0x200);
WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
- WREG32(0x43E0, 0);
- WREG32(0x43E4, 0);
+ WREG32(R300_RE_SCISSORS_TL, 0);
+ WREG32(R300_RE_SCISSORS_BR, 0);
WREG32(0x24AC, 0);
}
/* Wait to prevent race in RBBM_STATUS */
@@ -488,7 +447,7 @@ int r300_gpu_reset(struct radeon_device *rdev)
}
/* Check if GPU is idle */
status = RREG32(RADEON_RBBM_STATUS);
- if (status & (1 << 31)) {
+ if (status & RADEON_RBBM_ACTIVE) {
DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
return -1;
}
@@ -500,13 +459,13 @@ int r300_gpu_reset(struct radeon_device *rdev)
/*
* r300,r350,rv350,rv380 VRAM info
*/
-void r300_vram_info(struct radeon_device *rdev)
+void r300_mc_init(struct radeon_device *rdev)
{
- uint32_t tmp;
+ u64 base;
+ u32 tmp;
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
-
tmp = RREG32(RADEON_MEM_CNTL);
tmp &= R300_MEM_NUM_CHANNELS_MASK;
switch (tmp) {
@@ -515,8 +474,13 @@ void r300_vram_info(struct radeon_device *rdev)
case 2: rdev->mc.vram_width = 256; break;
default: rdev->mc.vram_width = 128; break;
}
-
r100_vram_init_sizes(rdev);
+ base = rdev->mc.aper_base;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
}
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -578,6 +542,40 @@ void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
}
+int rv370_get_pcie_lanes(struct radeon_device *rdev)
+{
+ u32 link_width_cntl;
+
+ if (rdev->flags & RADEON_IS_IGP)
+ return 0;
+
+ if (!(rdev->flags & RADEON_IS_PCIE))
+ return 0;
+
+ /* FIXME wait for idle */
+
+ if (rdev->family < CHIP_R600)
+ link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+ else
+ link_width_cntl = RREG32_PCIE_P(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
+
+ switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) {
+ case RADEON_PCIE_LC_LINK_WIDTH_X0:
+ return 0;
+ case RADEON_PCIE_LC_LINK_WIDTH_X1:
+ return 1;
+ case RADEON_PCIE_LC_LINK_WIDTH_X2:
+ return 2;
+ case RADEON_PCIE_LC_LINK_WIDTH_X4:
+ return 4;
+ case RADEON_PCIE_LC_LINK_WIDTH_X8:
+ return 8;
+ case RADEON_PCIE_LC_LINK_WIDTH_X16:
+ default:
+ return 16;
+ }
+}
+
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
@@ -707,6 +705,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tile_flags |= R300_TXO_MACRO_TILE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_TXO_MICRO_TILE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
tmp |= tile_flags;
@@ -757,6 +757,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
tile_flags |= R300_COLOR_TILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
@@ -828,7 +830,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
tile_flags |= R300_DEPTHMACROTILE_ENABLE;
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- tile_flags |= R300_DEPTHMICROTILE_TILED;;
+ tile_flags |= R300_DEPTHMICROTILE_TILED;
+ else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+ tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
@@ -1387,12 +1391,15 @@ int r300_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- r300_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ r300_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c
index 34bffa0e4b7..ea46d558e8f 100644
--- a/drivers/gpu/drm/radeon/r300_cmdbuf.c
+++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c
@@ -33,6 +33,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_buffer.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
#include "r300_reg.h"
@@ -299,46 +300,42 @@ static __inline__ int r300_emit_carefully_checked_packet0(drm_radeon_private_t *
int reg;
int sz;
int i;
- int values[64];
+ u32 *value;
RING_LOCALS;
sz = header.packet0.count;
reg = (header.packet0.reghi << 8) | header.packet0.reglo;
if ((sz > 64) || (sz < 0)) {
- DRM_ERROR
- ("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
- reg, sz);
+ DRM_ERROR("Cannot emit more than 64 values at a time (reg=%04x sz=%d)\n",
+ reg, sz);
return -EINVAL;
}
+
for (i = 0; i < sz; i++) {
- values[i] = ((int *)cmdbuf->buf)[i];
switch (r300_reg_flags[(reg >> 2) + i]) {
case MARK_SAFE:
break;
case MARK_CHECK_OFFSET:
- if (!radeon_check_offset(dev_priv, (u32) values[i])) {
- DRM_ERROR
- ("Offset failed range check (reg=%04x sz=%d)\n",
- reg, sz);
+ value = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *value)) {
+ DRM_ERROR("Offset failed range check (reg=%04x sz=%d)\n",
+ reg, sz);
return -EINVAL;
}
break;
default:
DRM_ERROR("Register %04x failed check as flag=%02x\n",
- reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
+ reg + i * 4, r300_reg_flags[(reg >> 2) + i]);
return -EINVAL;
}
}
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
- OUT_RING_TABLE(values, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * 4;
- cmdbuf->bufsz -= sz * 4;
-
return 0;
}
@@ -362,7 +359,7 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
- if (sz * 4 > cmdbuf->bufsz)
+ if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
if (reg + sz * 4 >= 0x10000) {
@@ -380,12 +377,9 @@ static __inline__ int r300_emit_packet0(drm_radeon_private_t *dev_priv,
BEGIN_RING(1 + sz);
OUT_RING(CP_PACKET0(reg, sz - 1));
- OUT_RING_TABLE((int *)cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * 4;
- cmdbuf->bufsz -= sz * 4;
-
return 0;
}
@@ -407,7 +401,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
- if (sz * 16 > cmdbuf->bufsz)
+ if (sz * 16 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
/* VAP is very sensitive so we purge cache before we program it
@@ -426,7 +420,7 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
BEGIN_RING(3 + sz * 4);
OUT_RING_REG(R300_VAP_PVS_UPLOAD_ADDRESS, addr);
OUT_RING(CP_PACKET0_TABLE(R300_VAP_PVS_UPLOAD_DATA, sz * 4 - 1));
- OUT_RING_TABLE((int *)cmdbuf->buf, sz * 4);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * 4);
ADVANCE_RING();
BEGIN_RING(2);
@@ -434,9 +428,6 @@ static __inline__ int r300_emit_vpu(drm_radeon_private_t *dev_priv,
OUT_RING(0);
ADVANCE_RING();
- cmdbuf->buf += sz * 16;
- cmdbuf->bufsz -= sz * 16;
-
return 0;
}
@@ -449,14 +440,14 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
{
RING_LOCALS;
- if (8 * 4 > cmdbuf->bufsz)
+ if (8 * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(10);
OUT_RING(CP_PACKET3(R200_3D_DRAW_IMMD_2, 8));
OUT_RING(R300_PRIM_TYPE_POINT | R300_PRIM_WALK_RING |
(1 << R300_PRIM_NUM_VERTICES_SHIFT));
- OUT_RING_TABLE((int *)cmdbuf->buf, 8);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, 8);
ADVANCE_RING();
BEGIN_RING(4);
@@ -468,9 +459,6 @@ static __inline__ int r300_emit_clear(drm_radeon_private_t *dev_priv,
/* set flush flag */
dev_priv->track_flush |= RADEON_FLUSH_EMITED;
- cmdbuf->buf += 8 * 4;
- cmdbuf->bufsz -= 8 * 4;
-
return 0;
}
@@ -480,28 +468,29 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
{
int count, i, k;
#define MAX_ARRAY_PACKET 64
- u32 payload[MAX_ARRAY_PACKET];
+ u32 *data;
u32 narrays;
RING_LOCALS;
- count = (header >> 16) & 0x3fff;
+ count = (header & RADEON_CP_PACKET_COUNT_MASK) >> 16;
if ((count + 1) > MAX_ARRAY_PACKET) {
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
count);
return -EINVAL;
}
- memset(payload, 0, MAX_ARRAY_PACKET * 4);
- memcpy(payload, cmdbuf->buf + 4, (count + 1) * 4);
-
/* carefully check packet contents */
- narrays = payload[0];
+ /* We have already read the header so advance the buffer. */
+ drm_buffer_advance(cmdbuf->buffer, 4);
+
+ narrays = *(u32 *)drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
k = 0;
i = 1;
while ((k < narrays) && (i < (count + 1))) {
i++; /* skip attribute field */
- if (!radeon_check_offset(dev_priv, payload[i])) {
+ data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *data)) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -512,7 +501,8 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
if (k == narrays)
break;
/* have one more to process, they come in pairs */
- if (!radeon_check_offset(dev_priv, payload[i])) {
+ data = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+ if (!radeon_check_offset(dev_priv, *data)) {
DRM_ERROR
("Offset failed range check (k=%d i=%d) while processing 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -533,30 +523,30 @@ static __inline__ int r300_emit_3d_load_vbpntr(drm_radeon_private_t *dev_priv,
BEGIN_RING(count + 2);
OUT_RING(header);
- OUT_RING_TABLE(payload, count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 1);
ADVANCE_RING();
- cmdbuf->buf += (count + 2) * 4;
- cmdbuf->bufsz -= (count + 2) * 4;
-
return 0;
}
static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 *cmd = (u32 *) cmdbuf->buf;
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
int count, ret;
RING_LOCALS;
- count=(cmd[0]>>16) & 0x3fff;
- if (cmd[0] & 0x8000) {
- u32 offset;
+ count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
- if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ if (*cmd & 0x8000) {
+ u32 offset;
+ u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (*cmd1 & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[2] << 10;
+
+ u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ offset = *cmd2 << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt first offset is %08X\n", offset);
@@ -564,9 +554,10 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
}
}
- if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
- (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[3] << 10;
+ if ((*cmd1 & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (*cmd1 & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+ offset = *cmd3 << 10;
ret = !radeon_check_offset(dev_priv, offset);
if (ret) {
DRM_ERROR("Invalid bitblt second offset is %08X\n", offset);
@@ -577,28 +568,25 @@ static __inline__ int r300_emit_bitblt_multi(drm_radeon_private_t *dev_priv,
}
BEGIN_RING(count+2);
- OUT_RING(cmd[0]);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
- cmdbuf->buf += (count+2)*4;
- cmdbuf->bufsz -= (count+2)*4;
-
return 0;
}
static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 *cmd;
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ u32 *cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
int count;
int expected_count;
RING_LOCALS;
- cmd = (u32 *) cmdbuf->buf;
- count = (cmd[0]>>16) & 0x3fff;
- expected_count = cmd[1] >> 16;
- if (!(cmd[1] & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
+ count = (*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16;
+
+ expected_count = *cmd1 >> 16;
+ if (!(*cmd1 & R300_VAP_VF_CNTL__INDEX_SIZE_32bit))
expected_count = (expected_count+1)/2;
if (count && count != expected_count) {
@@ -608,55 +596,53 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
}
BEGIN_RING(count+2);
- OUT_RING(cmd[0]);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
- cmdbuf->buf += (count+2)*4;
- cmdbuf->bufsz -= (count+2)*4;
-
if (!count) {
- drm_r300_cmd_header_t header;
+ drm_r300_cmd_header_t stack_header, *header;
+ u32 *cmd1, *cmd2, *cmd3;
- if (cmdbuf->bufsz < 4*4 + sizeof(header)) {
+ if (drm_buffer_unprocessed(cmdbuf->buffer)
+ < 4*4 + sizeof(stack_header)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER, but stream is too short.\n");
return -EINVAL;
}
- header.u = *(unsigned int *)cmdbuf->buf;
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
- cmdbuf->buf += sizeof(header);
- cmdbuf->bufsz -= sizeof(header);
- cmd = (u32 *) cmdbuf->buf;
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ cmd1 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
- if (header.header.cmd_type != R300_CMD_PACKET3 ||
- header.packet3.packet != R300_CMD_PACKET3_RAW ||
- cmd[0] != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
+ if (header->header.cmd_type != R300_CMD_PACKET3 ||
+ header->packet3.packet != R300_CMD_PACKET3_RAW ||
+ *cmd != CP_PACKET3(RADEON_CP_INDX_BUFFER, 2)) {
DRM_ERROR("3D_DRAW_INDX_2: expect subsequent INDX_BUFFER.\n");
return -EINVAL;
}
- if ((cmd[1] & 0x8000ffff) != 0x80000810) {
- DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+ if ((*cmd1 & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n",
+ *cmd1);
return -EINVAL;
}
- if (!radeon_check_offset(dev_priv, cmd[2])) {
- DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ if (!radeon_check_offset(dev_priv, *cmd2)) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n",
+ *cmd2);
return -EINVAL;
}
- if (cmd[3] != expected_count) {
+ if (*cmd3 != expected_count) {
DRM_ERROR("INDX_BUFFER: buffer size %i, expected %i\n",
- cmd[3], expected_count);
+ *cmd3, expected_count);
return -EINVAL;
}
BEGIN_RING(4);
- OUT_RING(cmd[0]);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), 3);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, 4);
ADVANCE_RING();
-
- cmdbuf->buf += 4*4;
- cmdbuf->bufsz -= 4*4;
}
return 0;
@@ -665,39 +651,39 @@ static __inline__ int r300_emit_draw_indx_2(drm_radeon_private_t *dev_priv,
static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
drm_radeon_kcmd_buffer_t *cmdbuf)
{
- u32 header;
+ u32 *header;
int count;
RING_LOCALS;
- if (4 > cmdbuf->bufsz)
+ if (4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
/* Fixme !! This simply emits a packet without much checking.
We need to be smarter. */
/* obtain first word - actual packet3 header */
- header = *(u32 *) cmdbuf->buf;
+ header = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
/* Is it packet 3 ? */
- if ((header >> 30) != 0x3) {
- DRM_ERROR("Not a packet3 header (0x%08x)\n", header);
+ if ((*header >> 30) != 0x3) {
+ DRM_ERROR("Not a packet3 header (0x%08x)\n", *header);
return -EINVAL;
}
- count = (header >> 16) & 0x3fff;
+ count = (*header >> 16) & 0x3fff;
/* Check again now that we know how much data to expect */
- if ((count + 2) * 4 > cmdbuf->bufsz) {
+ if ((count + 2) * 4 > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR
("Expected packet3 of length %d but have only %d bytes left\n",
- (count + 2) * 4, cmdbuf->bufsz);
+ (count + 2) * 4, drm_buffer_unprocessed(cmdbuf->buffer));
return -EINVAL;
}
/* Is it a packet type we know about ? */
- switch (header & 0xff00) {
+ switch (*header & 0xff00) {
case RADEON_3D_LOAD_VBPNTR: /* load vertex array pointers */
- return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, header);
+ return r300_emit_3d_load_vbpntr(dev_priv, cmdbuf, *header);
case RADEON_CNTL_BITBLT_MULTI:
return r300_emit_bitblt_multi(dev_priv, cmdbuf);
@@ -723,18 +709,14 @@ static __inline__ int r300_emit_raw_packet3(drm_radeon_private_t *dev_priv,
/* these packets are safe */
break;
default:
- DRM_ERROR("Unknown packet3 header (0x%08x)\n", header);
+ DRM_ERROR("Unknown packet3 header (0x%08x)\n", *header);
return -EINVAL;
}
BEGIN_RING(count + 2);
- OUT_RING(header);
- OUT_RING_TABLE((int *)(cmdbuf->buf + 4), count + 1);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, count + 2);
ADVANCE_RING();
- cmdbuf->buf += (count + 2) * 4;
- cmdbuf->bufsz -= (count + 2) * 4;
-
return 0;
}
@@ -748,8 +730,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
{
int n;
int ret;
- char *orig_buf = cmdbuf->buf;
- int orig_bufsz = cmdbuf->bufsz;
+ int orig_iter = cmdbuf->buffer->iterator;
/* This is a do-while-loop so that we run the interior at least once,
* even if cmdbuf->nbox is 0. Compare r300_emit_cliprects for rationale.
@@ -761,8 +742,7 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
if (ret)
return ret;
- cmdbuf->buf = orig_buf;
- cmdbuf->bufsz = orig_bufsz;
+ cmdbuf->buffer->iterator = orig_iter;
}
switch (header.packet3.packet) {
@@ -785,9 +765,9 @@ static __inline__ int r300_emit_packet3(drm_radeon_private_t *dev_priv,
break;
default:
- DRM_ERROR("bad packet3 type %i at %p\n",
+ DRM_ERROR("bad packet3 type %i at byte %d\n",
header.packet3.packet,
- cmdbuf->buf - sizeof(header));
+ cmdbuf->buffer->iterator - (int)sizeof(header));
return -EINVAL;
}
@@ -923,12 +903,13 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
drm_r300_cmd_header_t header)
{
u32 *ref_age_base;
- u32 i, buf_idx, h_pending;
- u64 ptr_addr;
+ u32 i, *buf_idx, h_pending;
+ u64 *ptr_addr;
+ u64 stack_ptr_addr;
RING_LOCALS;
- if (cmdbuf->bufsz <
- (sizeof(u64) + header.scratch.n_bufs * sizeof(buf_idx))) {
+ if (drm_buffer_unprocessed(cmdbuf->buffer) <
+ (sizeof(u64) + header.scratch.n_bufs * sizeof(*buf_idx))) {
return -EINVAL;
}
@@ -938,36 +919,35 @@ static int r300_scratch(drm_radeon_private_t *dev_priv,
dev_priv->scratch_ages[header.scratch.reg]++;
- ptr_addr = get_unaligned((u64 *)cmdbuf->buf);
- ref_age_base = (u32 *)(unsigned long)ptr_addr;
-
- cmdbuf->buf += sizeof(u64);
- cmdbuf->bufsz -= sizeof(u64);
+ ptr_addr = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_ptr_addr), &stack_ptr_addr);
+ ref_age_base = (u32 *)(unsigned long)*ptr_addr;
for (i=0; i < header.scratch.n_bufs; i++) {
- buf_idx = *(u32 *)cmdbuf->buf;
- buf_idx *= 2; /* 8 bytes per buf */
+ buf_idx = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
+ *buf_idx *= 2; /* 8 bytes per buf */
- if (DRM_COPY_TO_USER(ref_age_base + buf_idx, &dev_priv->scratch_ages[header.scratch.reg], sizeof(u32))) {
+ if (DRM_COPY_TO_USER(ref_age_base + *buf_idx,
+ &dev_priv->scratch_ages[header.scratch.reg],
+ sizeof(u32)))
return -EINVAL;
- }
- if (DRM_COPY_FROM_USER(&h_pending, ref_age_base + buf_idx + 1, sizeof(u32))) {
+ if (DRM_COPY_FROM_USER(&h_pending,
+ ref_age_base + *buf_idx + 1,
+ sizeof(u32)))
return -EINVAL;
- }
- if (h_pending == 0) {
+ if (h_pending == 0)
return -EINVAL;
- }
h_pending--;
- if (DRM_COPY_TO_USER(ref_age_base + buf_idx + 1, &h_pending, sizeof(u32))) {
+ if (DRM_COPY_TO_USER(ref_age_base + *buf_idx + 1,
+ &h_pending,
+ sizeof(u32)))
return -EINVAL;
- }
- cmdbuf->buf += sizeof(buf_idx);
- cmdbuf->bufsz -= sizeof(buf_idx);
+ drm_buffer_advance(cmdbuf->buffer, sizeof(*buf_idx));
}
BEGIN_RING(2);
@@ -1009,19 +989,16 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
DRM_DEBUG("r500fp %d %d type: %d\n", sz, addr, type);
if (!sz)
return 0;
- if (sz * stride * 4 > cmdbuf->bufsz)
+ if (sz * stride * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(3 + sz * stride);
OUT_RING_REG(R500_GA_US_VECTOR_INDEX, addr);
OUT_RING(CP_PACKET0_TABLE(R500_GA_US_VECTOR_DATA, sz * stride - 1));
- OUT_RING_TABLE((int *)cmdbuf->buf, sz * stride);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz * stride);
ADVANCE_RING();
- cmdbuf->buf += sz * stride * 4;
- cmdbuf->bufsz -= sz * stride * 4;
-
return 0;
}
@@ -1053,19 +1030,18 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
- while (cmdbuf->bufsz >= sizeof(drm_r300_cmd_header_t)) {
+ while (drm_buffer_unprocessed(cmdbuf->buffer)
+ >= sizeof(drm_r300_cmd_header_t)) {
int idx;
- drm_r300_cmd_header_t header;
-
- header.u = *(unsigned int *)cmdbuf->buf;
+ drm_r300_cmd_header_t *header, stack_header;
- cmdbuf->buf += sizeof(header);
- cmdbuf->bufsz -= sizeof(header);
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
- switch (header.header.cmd_type) {
+ switch (header->header.cmd_type) {
case R300_CMD_PACKET0:
DRM_DEBUG("R300_CMD_PACKET0\n");
- ret = r300_emit_packet0(dev_priv, cmdbuf, header);
+ ret = r300_emit_packet0(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_packet0 failed\n");
goto cleanup;
@@ -1074,7 +1050,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_VPU:
DRM_DEBUG("R300_CMD_VPU\n");
- ret = r300_emit_vpu(dev_priv, cmdbuf, header);
+ ret = r300_emit_vpu(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_vpu failed\n");
goto cleanup;
@@ -1083,7 +1059,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_PACKET3:
DRM_DEBUG("R300_CMD_PACKET3\n");
- ret = r300_emit_packet3(dev_priv, cmdbuf, header);
+ ret = r300_emit_packet3(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_packet3 failed\n");
goto cleanup;
@@ -1117,8 +1093,8 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
int i;
RING_LOCALS;
- BEGIN_RING(header.delay.count);
- for (i = 0; i < header.delay.count; i++)
+ BEGIN_RING(header->delay.count);
+ for (i = 0; i < header->delay.count; i++)
OUT_RING(RADEON_CP_PACKET2);
ADVANCE_RING();
}
@@ -1126,7 +1102,7 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
- idx = header.dma.buf_idx;
+ idx = header->dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
@@ -1149,12 +1125,12 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
case R300_CMD_WAIT:
DRM_DEBUG("R300_CMD_WAIT\n");
- r300_cmd_wait(dev_priv, header);
+ r300_cmd_wait(dev_priv, *header);
break;
case R300_CMD_SCRATCH:
DRM_DEBUG("R300_CMD_SCRATCH\n");
- ret = r300_scratch(dev_priv, cmdbuf, header);
+ ret = r300_scratch(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_scratch failed\n");
goto cleanup;
@@ -1168,16 +1144,16 @@ int r300_do_cp_cmdbuf(struct drm_device *dev,
goto cleanup;
}
DRM_DEBUG("R300_CMD_R500FP\n");
- ret = r300_emit_r500fp(dev_priv, cmdbuf, header);
+ ret = r300_emit_r500fp(dev_priv, cmdbuf, *header);
if (ret) {
DRM_ERROR("r300_emit_r500fp failed\n");
goto cleanup;
}
break;
default:
- DRM_ERROR("bad cmd_type %i at %p\n",
- header.header.cmd_type,
- cmdbuf->buf - sizeof(header));
+ DRM_ERROR("bad cmd_type %i at byte %d\n",
+ header->header.cmd_type,
+ cmdbuf->buffer->iterator - (int)sizeof(*header));
ret = -EINVAL;
goto cleanup;
}
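
All of the r300_cmdbuf.c hunks above apply the same conversion: the manual cmdbuf->buf / cmdbuf->bufsz pointer arithmetic is replaced by the drm_buffer iterator helpers used in the new code (drm_buffer_unprocessed(), drm_buffer_pointer_to_dword(), drm_buffer_advance(), drm_buffer_read_object(), OUT_RING_DRM_BUFFER()). A condensed before/after illustration of that pattern, not an additional change:

	/* before: explicit bookkeeping on the copied command stream */
	if (sz * 4 > cmdbuf->bufsz)
		return -EINVAL;
	OUT_RING_TABLE((int *)cmdbuf->buf, sz);
	cmdbuf->buf += sz * 4;
	cmdbuf->bufsz -= sz * 4;

	/* after: the drm_buffer iterator tracks the read position itself,
	 * so the explicit advance/shrink above is no longer needed */
	if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
		return -EINVAL;
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);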
diff --git a/drivers/gpu/drm/radeon/r300_reg.h b/drivers/gpu/drm/radeon/r300_reg.h
index 1735a2b6958..1a0d5362cd7 100644
--- a/drivers/gpu/drm/radeon/r300_reg.h
+++ b/drivers/gpu/drm/radeon/r300_reg.h
@@ -952,6 +952,7 @@
# define R300_TXO_ENDIAN_HALFDW_SWAP (3 << 0)
# define R300_TXO_MACRO_TILE (1 << 2)
# define R300_TXO_MICRO_TILE (1 << 3)
+# define R300_TXO_MICRO_TILE_SQUARE (2 << 3)
# define R300_TXO_OFFSET_MASK 0xffffffe0
# define R300_TXO_OFFSET_SHIFT 5
/* END: Guess from R200 */
@@ -1360,6 +1361,7 @@
# define R300_COLORPITCH_MASK 0x00001FF8 /* GUESS */
# define R300_COLOR_TILE_ENABLE (1 << 16) /* GUESS */
# define R300_COLOR_MICROTILE_ENABLE (1 << 17) /* GUESS */
+# define R300_COLOR_MICROTILE_SQUARE_ENABLE (2 << 17)
# define R300_COLOR_ENDIAN_NO_SWAP (0 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_WORD_SWAP (1 << 18) /* GUESS */
# define R300_COLOR_ENDIAN_DWORD_SWAP (2 << 18) /* GUESS */
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index d9373246c97..c7593b8f58e 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -40,28 +40,6 @@ static void r420_set_reg_safe(struct radeon_device *rdev)
rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
}
-int r420_mc_init(struct radeon_device *rdev)
-{
- int r;
-
- /* Setup GPU memory space */
- rdev->mc.vram_location = 0xFFFFFFFFUL;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- if (rdev->flags & RADEON_IS_AGP) {
- r = radeon_agp_init(rdev);
- if (r) {
- radeon_agp_disable(rdev);
- } else {
- rdev->mc.gtt_location = rdev->mc.agp_base;
- }
- }
- r = radeon_mc_setup(rdev);
- if (r) {
- return r;
- }
- return 0;
-}
-
void r420_pipes_init(struct radeon_device *rdev)
{
unsigned tmp;
@@ -69,7 +47,8 @@ void r420_pipes_init(struct radeon_device *rdev)
unsigned num_pipes;
/* GA_ENHANCE workaround TCL deadlock issue */
- WREG32(0x4274, (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3));
+ WREG32(R300_GA_ENHANCE, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL |
+ (1 << 2) | (1 << 3));
/* add idle wait as per freedesktop.org bug 24041 */
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
@@ -97,17 +76,17 @@ void r420_pipes_init(struct radeon_device *rdev)
tmp = (7 << 1);
break;
}
- WREG32(0x42C8, (1 << num_pipes) - 1);
+ WREG32(R500_SU_REG_DEST, (1 << num_pipes) - 1);
/* Sub pixel 1/12 so we can have 4K rendering according to doc */
- tmp |= (1 << 4) | (1 << 0);
- WREG32(0x4018, tmp);
+ tmp |= R300_TILE_SIZE_16 | R300_ENABLE_TILING;
+ WREG32(R300_GB_TILE_CONFIG, tmp);
if (r100_gui_wait_for_idle(rdev)) {
printk(KERN_WARNING "Failed to wait GUI idle while "
"programming pipes. Bad things might happen.\n");
}
- tmp = RREG32(0x170C);
- WREG32(0x170C, tmp | (1 << 31));
+ tmp = RREG32(R300_DST_PIPE_CONFIG);
+ WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG);
WREG32(R300_RB2D_DSTCACHE_MODE,
RREG32(R300_RB2D_DSTCACHE_MODE) |
@@ -348,13 +327,15 @@ int r420_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- r300_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r) {
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
}
+ /* initialize memory controller */
+ r300_mc_init(rdev);
r420_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index 74ad89bdf2b..0cf2ad2a558 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -717,54 +717,62 @@
#define AVIVO_DVOA_BIT_DEPTH_CONTROL 0x7988
#define AVIVO_DC_GPIO_HPD_A 0x7e94
-
-#define AVIVO_GPIO_0 0x7e30
-#define AVIVO_GPIO_1 0x7e40
-#define AVIVO_GPIO_2 0x7e50
-#define AVIVO_GPIO_3 0x7e60
-
#define AVIVO_DC_GPIO_HPD_Y 0x7e9c
-#define AVIVO_I2C_STATUS 0x7d30
-# define AVIVO_I2C_STATUS_DONE (1 << 0)
-# define AVIVO_I2C_STATUS_NACK (1 << 1)
-# define AVIVO_I2C_STATUS_HALT (1 << 2)
-# define AVIVO_I2C_STATUS_GO (1 << 3)
-# define AVIVO_I2C_STATUS_MASK 0x7
-/* If radeon_mm_i2c is to be believed, this is HALT, NACK, and maybe
- * DONE? */
-# define AVIVO_I2C_STATUS_CMD_RESET 0x7
-# define AVIVO_I2C_STATUS_CMD_WAIT (1 << 3)
-#define AVIVO_I2C_STOP 0x7d34
-#define AVIVO_I2C_START_CNTL 0x7d38
-# define AVIVO_I2C_START (1 << 8)
-# define AVIVO_I2C_CONNECTOR0 (0 << 16)
-# define AVIVO_I2C_CONNECTOR1 (1 << 16)
-#define R520_I2C_START (1<<0)
-#define R520_I2C_STOP (1<<1)
-#define R520_I2C_RX (1<<2)
-#define R520_I2C_EN (1<<8)
-#define R520_I2C_DDC1 (0<<16)
-#define R520_I2C_DDC2 (1<<16)
-#define R520_I2C_DDC3 (2<<16)
-#define R520_I2C_DDC_MASK (3<<16)
-#define AVIVO_I2C_CONTROL2 0x7d3c
-# define AVIVO_I2C_7D3C_SIZE_SHIFT 8
-# define AVIVO_I2C_7D3C_SIZE_MASK (0xf << 8)
-#define AVIVO_I2C_CONTROL3 0x7d40
-/* Reading is done 4 bytes at a time: read the bottom 8 bits from
- * 7d44, four times in a row.
- * Writing is a little more complex. First write DATA with
- * 0xnnnnnnzz, then 0xnnnnnnyy, where nnnnnn is some non-deterministic
- * magic number, zz is, I think, the slave address, and yy is the byte
- * you want to write. */
-#define AVIVO_I2C_DATA 0x7d44
-#define R520_I2C_ADDR_COUNT_MASK (0x7)
-#define R520_I2C_DATA_COUNT_SHIFT (8)
-#define R520_I2C_DATA_COUNT_MASK (0xF00)
-#define AVIVO_I2C_CNTL 0x7d50
-# define AVIVO_I2C_EN (1 << 0)
-# define AVIVO_I2C_RESET (1 << 8)
+#define AVIVO_DC_I2C_STATUS1 0x7d30
+# define AVIVO_DC_I2C_DONE (1 << 0)
+# define AVIVO_DC_I2C_NACK (1 << 1)
+# define AVIVO_DC_I2C_HALT (1 << 2)
+# define AVIVO_DC_I2C_GO (1 << 3)
+#define AVIVO_DC_I2C_RESET 0x7d34
+# define AVIVO_DC_I2C_SOFT_RESET (1 << 0)
+# define AVIVO_DC_I2C_ABORT (1 << 8)
+#define AVIVO_DC_I2C_CONTROL1 0x7d38
+# define AVIVO_DC_I2C_START (1 << 0)
+# define AVIVO_DC_I2C_STOP (1 << 1)
+# define AVIVO_DC_I2C_RECEIVE (1 << 2)
+# define AVIVO_DC_I2C_EN (1 << 8)
+# define AVIVO_DC_I2C_PIN_SELECT(x) ((x) << 16)
+# define AVIVO_SEL_DDC1 0
+# define AVIVO_SEL_DDC2 1
+# define AVIVO_SEL_DDC3 2
+#define AVIVO_DC_I2C_CONTROL2 0x7d3c
+# define AVIVO_DC_I2C_ADDR_COUNT(x) ((x) << 0)
+# define AVIVO_DC_I2C_DATA_COUNT(x) ((x) << 8)
+#define AVIVO_DC_I2C_CONTROL3 0x7d40
+# define AVIVO_DC_I2C_DATA_DRIVE_EN (1 << 0)
+# define AVIVO_DC_I2C_DATA_DRIVE_SEL (1 << 1)
+# define AVIVO_DC_I2C_CLK_DRIVE_EN (1 << 7)
+# define AVIVO_DC_I2C_RD_INTRA_BYTE_DELAY(x) ((x) << 8)
+# define AVIVO_DC_I2C_WR_INTRA_BYTE_DELAY(x) ((x) << 16)
+# define AVIVO_DC_I2C_TIME_LIMIT(x) ((x) << 24)
+#define AVIVO_DC_I2C_DATA 0x7d44
+#define AVIVO_DC_I2C_INTERRUPT_CONTROL 0x7d48
+# define AVIVO_DC_I2C_INTERRUPT_STATUS (1 << 0)
+# define AVIVO_DC_I2C_INTERRUPT_AK (1 << 8)
+# define AVIVO_DC_I2C_INTERRUPT_ENABLE (1 << 16)
+#define AVIVO_DC_I2C_ARBITRATION 0x7d50
+# define AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C (1 << 0)
+# define AVIVO_DC_I2C_SW_CAN_USE_I2C (1 << 1)
+# define AVIVO_DC_I2C_SW_DONE_USING_I2C (1 << 8)
+# define AVIVO_DC_I2C_HW_NEEDS_I2C (1 << 9)
+# define AVIVO_DC_I2C_ABORT_HDCP_I2C (1 << 16)
+# define AVIVO_DC_I2C_HW_USING_I2C (1 << 17)
+
+#define AVIVO_DC_GPIO_DDC1_MASK 0x7e40
+#define AVIVO_DC_GPIO_DDC1_A 0x7e44
+#define AVIVO_DC_GPIO_DDC1_EN 0x7e48
+#define AVIVO_DC_GPIO_DDC1_Y 0x7e4c
+
+#define AVIVO_DC_GPIO_DDC2_MASK 0x7e50
+#define AVIVO_DC_GPIO_DDC2_A 0x7e54
+#define AVIVO_DC_GPIO_DDC2_EN 0x7e58
+#define AVIVO_DC_GPIO_DDC2_Y 0x7e5c
+
+#define AVIVO_DC_GPIO_DDC3_MASK 0x7e60
+#define AVIVO_DC_GPIO_DDC3_A 0x7e64
+#define AVIVO_DC_GPIO_DDC3_EN 0x7e68
+#define AVIVO_DC_GPIO_DDC3_Y 0x7e6c
#define AVIVO_DISP_INTERRUPT_STATUS 0x7edc
# define AVIVO_D1_VBLANK_INTERRUPT (1 << 4)
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index ddf5731eba0..2b8a5dd1351 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -119,13 +119,15 @@ static void r520_vram_get_type(struct radeon_device *rdev)
rdev->mc.vram_width *= 2;
}
-void r520_vram_info(struct radeon_device *rdev)
+void r520_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
r520_vram_get_type(rdev);
-
r100_vram_init_sizes(rdev);
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
@@ -267,12 +269,15 @@ int r520_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- r520_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ r520_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 2ffcf5a0355..c5229019729 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -353,23 +353,14 @@ void r600_hpd_fini(struct radeon_device *rdev)
/*
* R600 PCIE GART
*/
-int r600_gart_clear_page(struct radeon_device *rdev, int i)
-{
- void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
- u64 pte;
-
- if (i < 0 || i > rdev->gart.num_gpu_pages)
- return -EINVAL;
- pte = 0;
- writeq(pte, ((void __iomem *)ptr) + (i * 8));
- return 0;
-}
-
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
unsigned i;
u32 tmp;
+ /* flush hdp cache so updates hit vram */
+ WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
+
WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
@@ -416,6 +407,7 @@ int r600_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
@@ -619,6 +611,68 @@ static void r600_mc_program(struct radeon_device *rdev)
rv515_vga_render_disable(rdev);
}
+/**
+ * r600_vram_gtt_location - try to find VRAM & GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * Try to place VRAM at the same address it has in the CPU (PCI) address
+ * space, as some GPUs seem to have issues when it is reprogrammed to a
+ * different address.
+ *
+ * If there is not enough space to fit the non-visible VRAM after the
+ * aperture, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP, then place VRAM adjacent to the AGP aperture; the
+ * GPU needs them to be contiguous from its point of view so that we can
+ * program it to catch accesses outside both (weird GPU policy, see ??).
+ *
+ * This function never fails; the worst case is limiting VRAM or GTT.
+ *
+ * Note: GTT start, end, and size should be initialized before calling this
+ * function on AGP platforms.
+ */
+void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
+{
+ u64 size_bf, size_af;
+
+ if (mc->mc_vram_size > 0xE0000000) {
+ /* leave room for at least 512M GTT */
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = 0xE0000000;
+ mc->mc_vram_size = 0xE0000000;
+ }
+ if (rdev->flags & RADEON_IS_AGP) {
+ size_bf = mc->gtt_start;
+ size_af = 0xFFFFFFFF - mc->gtt_end + 1;
+ if (size_bf > size_af) {
+ if (mc->mc_vram_size > size_bf) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_bf;
+ mc->mc_vram_size = size_bf;
+ }
+ mc->vram_start = mc->gtt_start - mc->mc_vram_size;
+ } else {
+ if (mc->mc_vram_size > size_af) {
+ dev_warn(rdev->dev, "limiting VRAM\n");
+ mc->real_vram_size = size_af;
+ mc->mc_vram_size = size_af;
+ }
+ mc->vram_start = mc->gtt_end;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+ } else {
+ u64 base = 0;
+ if (rdev->flags & RADEON_IS_IGP)
+ base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ radeon_gtt_location(rdev, mc);
+ }
+}
+
int r600_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
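
A worked example of the AGP branch of r600_vram_gtt_location() above (the numbers are illustrative only): with the AGP aperture at gtt_start = 0xE0000000 and gtt_end = 0xFFFFFFFF, size_bf = 0xE0000000 (3.5 GB free below the aperture) while size_af is essentially zero, so VRAM is placed before the aperture. For a 512 MB card this gives vram_start = gtt_start - mc_vram_size = 0xC0000000 and vram_end = 0xDFFFFFFF, leaving VRAM and the GTT contiguous in the GPU's address space as the comment above requires.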
@@ -658,75 +712,21 @@ int r600_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ /* FIXME remove this once we support unmappable VRAM */
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
-
- if (rdev->flags & RADEON_IS_AGP) {
- /* gtt_size is setup by radeon_agp_init */
- rdev->mc.gtt_location = rdev->mc.agp_base;
- tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
- /* Try to put vram before or after AGP because we
- * we want SYSTEM_APERTURE to cover both VRAM and
- * AGP so that GPU can catch out of VRAM/AGP access
- */
- if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- /* Enough place before */
- rdev->mc.vram_location = rdev->mc.gtt_location -
- rdev->mc.mc_vram_size;
- } else if (tmp > rdev->mc.mc_vram_size) {
- /* Enough place after */
- rdev->mc.vram_location = rdev->mc.gtt_location +
- rdev->mc.gtt_size;
- } else {
- /* Try to setup VRAM then AGP might not
- * not work on some card
- */
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- }
- } else {
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
- rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
- 0xFFFF) << 24;
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- /* Enough place after vram */
- rdev->mc.gtt_location = tmp;
- } else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
- /* Enough place before vram */
- rdev->mc.gtt_location = 0;
- } else {
- /* Not enough place after or before shrink
- * gart size
- */
- if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
- rdev->mc.gtt_location = 0;
- rdev->mc.gtt_size = rdev->mc.vram_location;
- } else {
- rdev->mc.gtt_location = tmp;
- rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
- }
- }
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
}
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
a.full = rfixed_const(100);
rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
-
if (rdev->flags & RADEON_IS_IGP)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
-
return 0;
}
@@ -981,6 +981,9 @@ void r600_gpu_init(struct radeon_device *rdev)
{
u32 tiling_config;
u32 ramcfg;
+ u32 backend_map;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 tmp;
int i, j;
u32 sq_config;
@@ -1090,8 +1093,11 @@ void r600_gpu_init(struct radeon_device *rdev)
default:
break;
}
+ rdev->config.r600.tiling_npipes = rdev->config.r600.max_tile_pipes;
+ rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
tiling_config |= GROUP_SIZE(0);
+ rdev->config.r600.tiling_group_size = 256;
tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
if (tmp > 3) {
tiling_config |= ROW_TILING(3);
@@ -1101,24 +1107,33 @@ void r600_gpu_init(struct radeon_device *rdev)
tiling_config |= SAMPLE_SPLIT(tmp);
}
tiling_config |= BANK_SWAPS(1);
- tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
- rdev->config.r600.max_backends,
- (0xff << rdev->config.r600.max_backends) & 0xff);
- tiling_config |= BACKEND_MAP(tmp);
+
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
+
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
+ INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
+ cc_gc_shader_pipe_config |=
+ INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
+
+ backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
+ (R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R6XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+
+ tiling_config |= BACKEND_MAP(backend_map);
WREG32(GB_TILING_CONFIG, tiling_config);
WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
- tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
- WREG32(CC_RB_BACKEND_DISABLE, tmp);
-
/* Setup pipes */
- tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
- tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
- WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);
+ WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
+ WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
+ tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -1783,12 +1798,17 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence)
{
/* Also consider EVENT_WRITE_EOP. it handles the interrupts + timestamps + events */
+
+ radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
+ radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
+ /* wait for 3D idle clean */
+ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+ radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+ radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
/* Emit fence sequence & fire IRQ */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
radeon_ring_write(rdev, fence->seq);
- radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
- radeon_ring_write(rdev, 1);
/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
radeon_ring_write(rdev, RB_INT_STAT);
@@ -2745,6 +2765,7 @@ restart_ih:
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
+ wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
}
@@ -2765,6 +2786,7 @@ restart_ih:
case 0: /* D2 vblank */
if (disp_int & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 0dcb6904c4f..db928016d03 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -35,7 +35,7 @@
*/
static int r600_audio_chipset_supported(struct radeon_device *rdev)
{
- return (rdev->family >= CHIP_R600 && rdev->family < CHIP_RV710)
+ return rdev->family >= CHIP_R600
|| rdev->family == CHIP_RS600
|| rdev->family == CHIP_RS690
|| rdev->family == CHIP_RS740;
@@ -147,15 +147,23 @@ static void r600_audio_update_hdmi(unsigned long param)
}
/*
+ * turn on/off audio engine
+ */
+static void r600_audio_engine_enable(struct radeon_device *rdev, bool enable)
+{
+ DRM_INFO("%s audio support", enable ? "Enabling" : "Disabling");
+ WREG32_P(R600_AUDIO_ENABLE, enable ? 0x81000000 : 0x0, ~0x81000000);
+}
+
+/*
* initialize the audio vars and register the update timer
*/
int r600_audio_init(struct radeon_device *rdev)
{
- if (!r600_audio_chipset_supported(rdev))
+ if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return 0;
- DRM_INFO("%s audio support", radeon_audio ? "Enabling" : "Disabling");
- WREG32_P(R600_AUDIO_ENABLE, radeon_audio ? 0x81000000 : 0x0, ~0x81000000);
+ r600_audio_engine_enable(rdev, true);
rdev->audio_channels = -1;
rdev->audio_rate = -1;
@@ -258,9 +266,10 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
*/
void r600_audio_fini(struct radeon_device *rdev)
{
- if (!r600_audio_chipset_supported(rdev))
+ if (!radeon_audio || !r600_audio_chipset_supported(rdev))
return;
del_timer(&rdev->audio_timer);
- WREG32_P(R600_AUDIO_ENABLE, 0x0, ~0x81000000);
+
+ r600_audio_engine_enable(rdev, false);
}
diff --git a/drivers/gpu/drm/radeon/r600_blit.c b/drivers/gpu/drm/radeon/r600_blit.c
index 5ea43234758..f4fb88ece2b 100644
--- a/drivers/gpu/drm/radeon/r600_blit.c
+++ b/drivers/gpu/drm/radeon/r600_blit.c
@@ -49,7 +49,7 @@ set_render_target(drm_radeon_private_t *dev_priv, int format, int w, int h, u64
RING_LOCALS;
DRM_DEBUG("\n");
- h = (h + 7) & ~7;
+ h = ALIGN(h, 8);
if (h < 8)
h = 8;
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c
index 446b765ac72..f6c6c77db7e 100644
--- a/drivers/gpu/drm/radeon/r600_blit_kms.c
+++ b/drivers/gpu/drm/radeon/r600_blit_kms.c
@@ -25,7 +25,7 @@ set_render_target(struct radeon_device *rdev, int format,
u32 cb_color_info;
int pitch, slice;
- h = (h + 7) & ~7;
+ h = ALIGN(h, 8);
if (h < 8)
h = 8;
@@ -396,15 +396,13 @@ set_default_state(struct radeon_device *rdev)
NUM_ES_STACK_ENTRIES(num_es_stack_entries));
/* emit an IB pointing at default state */
- dwords = (rdev->r600_blit.state_len + 0xf) & ~0xf;
+ dwords = ALIGN(rdev->r600_blit.state_len, 0x10);
gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset;
radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC);
radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF);
radeon_ring_write(rdev, dwords);
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
/* SQ config */
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6));
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
@@ -578,9 +576,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
ring_size = num_loops * dwords_per_loop;
/* set default + shaders */
ring_size += 40; /* shaders + def state */
- ring_size += 7; /* fence emit for VB IB */
+ ring_size += 10; /* fence emit for VB IB */
ring_size += 5; /* done copy */
- ring_size += 7; /* fence emit for done copy */
+ ring_size += 10; /* fence emit for done copy */
r = radeon_ring_lock(rdev, ring_size);
if (r)
return r;
@@ -594,13 +592,6 @@ void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
int r;
- radeon_ring_write(rdev, PACKET3(PACKET3_EVENT_WRITE, 0));
- radeon_ring_write(rdev, CACHE_FLUSH_AND_INV_EVENT);
- /* wait for 3D idle clean */
- radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
- radeon_ring_write(rdev, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
- radeon_ring_write(rdev, WAIT_3D_IDLE_bit | WAIT_3D_IDLECLEAN_bit);
-
if (rdev->r600_blit.vb_ib)
r600_vb_ib_put(rdev);
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index d745e815c2e..a112c59f9d8 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -9,11 +9,6 @@ const u32 r6xx_default_state[] =
0xc0012800,
0x80000000,
0x80000000,
- 0xc0004600,
- 0x00000016,
- 0xc0016800,
- 0x00000010,
- 0x00028000,
0xc0016800,
0x00000010,
0x00008000,
@@ -531,11 +526,6 @@ const u32 r7xx_default_state[] =
0xc0012800,
0x80000000,
0x80000000,
- 0xc0004600,
- 0x00000016,
- 0xc0016800,
- 0x00000010,
- 0x00028000,
0xc0016800,
0x00000010,
0x00008000,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 75bcf35a093..40416c068d9 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -734,8 +734,8 @@ static void r600_gfx_init(struct drm_device *dev,
u32 hdp_host_path_cntl;
u32 backend_map;
u32 gb_tiling_config = 0;
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config = 0;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 ramcfg;
/* setup chip specs */
@@ -857,29 +857,44 @@ static void r600_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
- backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
- dev_priv->r600_max_backends,
- (0xff << dev_priv->r600_max_backends) & 0xff);
- gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+ cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
- cc_gc_shader_pipe_config =
+ cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
R600_INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R6XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
R600_INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R6XX_MAX_SIMDS_MASK);
- cc_rb_backend_disable =
- R600_BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R6XX_MAX_BACKENDS_MASK);
+ backend_map = r600_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
+ (R6XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R6XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= R600_BACKEND_MAP(backend_map);
RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ if (gb_tiling_config & 0xc0) {
+ dev_priv->r600_group_size = 512;
+ } else {
+ dev_priv->r600_group_size = 256;
+ }
+ dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+ if (gb_tiling_config & 0x30) {
+ dev_priv->r600_nbanks = 8;
+ } else {
+ dev_priv->r600_nbanks = 4;
+ }
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
num_qd_pipes =
- R6XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK);
+ R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
@@ -1151,7 +1166,8 @@ static void r600_gfx_init(struct drm_device *dev,
}
-static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
+static u32 r700_get_tile_pipe_to_backend_map(drm_radeon_private_t *dev_priv,
+ u32 num_tile_pipes,
u32 num_backends,
u32 backend_disable_mask)
{
@@ -1162,6 +1178,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
+ bool force_no_swizzle;
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
@@ -1191,6 +1208,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
@@ -1201,49 +1230,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[1] = 1;
break;
case 3:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ }
break;
case 4:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 3;
- swizzle_pipe[3] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 3;
+ swizzle_pipe[3] = 1;
+ }
break;
case 5:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ }
break;
case 6:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 5;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ }
break;
case 7:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 5;
+ }
break;
case 8:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 7;
- swizzle_pipe[7] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 7;
+ swizzle_pipe[7] = 5;
+ }
break;
}
@@ -1264,8 +1344,10 @@ static void r700_gfx_init(struct drm_device *dev,
drm_radeon_private_t *dev_priv)
{
int i, j, num_qd_pipes;
+ u32 ta_aux_cntl;
u32 sx_debug_1;
u32 smx_dc_ctl0;
+ u32 db_debug3;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
@@ -1276,8 +1358,8 @@ static void r700_gfx_init(struct drm_device *dev,
u32 sq_dyn_gpr_size_simd_ab_0;
u32 backend_map;
u32 gb_tiling_config = 0;
- u32 cc_rb_backend_disable = 0;
- u32 cc_gc_shader_pipe_config = 0;
+ u32 cc_rb_backend_disable;
+ u32 cc_gc_shader_pipe_config;
u32 mc_arb_ramcfg;
u32 db_debug4;
@@ -1428,38 +1510,51 @@ static void r700_gfx_init(struct drm_device *dev,
gb_tiling_config |= R600_BANK_SWAPS(1);
- if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
- backend_map = 0x28;
- else
- backend_map = r700_get_tile_pipe_to_backend_map(dev_priv->r600_max_tile_pipes,
- dev_priv->r600_max_backends,
- (0xff << dev_priv->r600_max_backends) & 0xff);
- gb_tiling_config |= R600_BACKEND_MAP(backend_map);
+ cc_rb_backend_disable = RADEON_READ(R600_CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
- cc_gc_shader_pipe_config =
+ cc_gc_shader_pipe_config = RADEON_READ(R600_CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
R600_INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << dev_priv->r600_max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
R600_INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << dev_priv->r600_max_simds) & R7XX_MAX_SIMDS_MASK);
- cc_rb_backend_disable =
- R600_BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << dev_priv->r600_max_backends) & R7XX_MAX_BACKENDS_MASK);
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(dev_priv,
+ dev_priv->r600_max_tile_pipes,
+ (R7XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R7XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= R600_BACKEND_MAP(backend_map);
RADEON_WRITE(R600_GB_TILING_CONFIG, gb_tiling_config);
RADEON_WRITE(R600_DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
RADEON_WRITE(R600_HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
+ if (gb_tiling_config & 0xc0) {
+ dev_priv->r600_group_size = 512;
+ } else {
+ dev_priv->r600_group_size = 256;
+ }
+ dev_priv->r600_npipes = 1 << ((gb_tiling_config >> 1) & 0x7);
+ if (gb_tiling_config & 0x30) {
+ dev_priv->r600_nbanks = 8;
+ } else {
+ dev_priv->r600_nbanks = 4;
+ }
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
- RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
- RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
- R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK);
+ R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
RADEON_WRITE(R600_VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & R600_DEALLOC_DIST_MASK);
RADEON_WRITE(R600_VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & R600_VTX_REUSE_DEPTH_MASK);
@@ -1469,10 +1564,8 @@ static void r700_gfx_init(struct drm_device *dev,
RADEON_WRITE(R600_CP_MEQ_THRESHOLDS, R700_STQ_SPLIT(0x30));
- RADEON_WRITE(R600_TA_CNTL_AUX, (R600_DISABLE_CUBE_ANISO |
- R600_SYNC_GRADIENT |
- R600_SYNC_WALKER |
- R600_SYNC_ALIGNER));
+ ta_aux_cntl = RADEON_READ(R600_TA_CNTL_AUX);
+ RADEON_WRITE(R600_TA_CNTL_AUX, ta_aux_cntl | R600_DISABLE_CUBE_ANISO);
sx_debug_1 = RADEON_READ(R700_SX_DEBUG_1);
sx_debug_1 |= R700_ENABLE_NEW_SMX_ADDRESS;
@@ -1483,14 +1576,28 @@ static void r700_gfx_init(struct drm_device *dev,
smx_dc_ctl0 |= R700_CACHE_DEPTH((dev_priv->r700_sx_num_of_sets * 64) - 1);
RADEON_WRITE(R600_SMX_DC_CTL0, smx_dc_ctl0);
- RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
- R700_GS_FLUSH_CTL(4) |
- R700_ACK_FLUSH_CTL(3) |
- R700_SYNC_FLUSH_CTL));
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV740)
+ RADEON_WRITE(R700_SMX_EVENT_CTL, (R700_ES_FLUSH_CTL(4) |
+ R700_GS_FLUSH_CTL(4) |
+ R700_ACK_FLUSH_CTL(3) |
+ R700_SYNC_FLUSH_CTL));
- if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)
- RADEON_WRITE(R700_DB_DEBUG3, R700_DB_CLK_OFF_DELAY(0x1f));
- else {
+ db_debug3 = RADEON_READ(R700_DB_DEBUG3);
+ db_debug3 &= ~R700_DB_CLK_OFF_DELAY(0x1f);
+ switch (dev_priv->flags & RADEON_FAMILY_MASK) {
+ case CHIP_RV770:
+ case CHIP_RV740:
+ db_debug3 |= R700_DB_CLK_OFF_DELAY(0x1f);
+ break;
+ case CHIP_RV710:
+ case CHIP_RV730:
+ default:
+ db_debug3 |= R700_DB_CLK_OFF_DELAY(2);
+ break;
+ }
+ RADEON_WRITE(R700_DB_DEBUG3, db_debug3);
+
+ if ((dev_priv->flags & RADEON_FAMILY_MASK) != CHIP_RV770) {
db_debug4 = RADEON_READ(RV700_DB_DEBUG4);
db_debug4 |= RV700_DISABLE_TILE_COVERED_FOR_PS_ITER;
RADEON_WRITE(RV700_DB_DEBUG4, db_debug4);
@@ -1519,10 +1626,10 @@ static void r700_gfx_init(struct drm_device *dev,
R600_ALU_UPDATE_FIFO_HIWATER(0x8));
switch (dev_priv->flags & RADEON_FAMILY_MASK) {
case CHIP_RV770:
- sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
- break;
case CHIP_RV730:
case CHIP_RV710:
+ sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x1);
+ break;
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
@@ -2529,3 +2636,12 @@ out:
mutex_unlock(&dev_priv->cs_mutex);
return r;
}
+
+void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size)
+{
+ struct drm_radeon_private *dev_priv = dev->dev_private;
+
+ *npipes = dev_priv->r600_npipes;
+ *nbanks = dev_priv->r600_nbanks;
+ *group_size = dev_priv->r600_group_size;
+}
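
The helper above exports the tiling configuration (npipes, nbanks, group_size) that the command-stream checker needs on the legacy (UMS) path; the r600_cs.c hunks that follow add matching group_size/nbanks/npipes fields to struct r600_cs_track. The actual call sites are outside the hunks shown here, so the routine below is only a hedged sketch of how a checker might seed those fields; the function name is hypothetical:

	/* Hypothetical seeding helper (illustrative only). */
	static void r600_cs_track_set_tiling(struct r600_cs_track *track,
					     struct radeon_device *rdev,
					     struct drm_device *ddev)
	{
		if (rdev) {
			/* KMS path: values computed in r600_gpu_init() */
			track->npipes     = rdev->config.r600.tiling_npipes;
			track->nbanks     = rdev->config.r600.tiling_nbanks;
			track->group_size = rdev->config.r600.tiling_group_size;
		} else {
			/* legacy path: values exported by r600_cp.c above */
			r600_cs_legacy_get_tiling_conf(ddev, &track->npipes,
						       &track->nbanks,
						       &track->group_size);
		}
	}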
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index e4c45ec1650..cd2c63bce50 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,6 +28,7 @@
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"
+#include "r600_reg_safe.h"
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
@@ -35,11 +36,313 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
+extern void r600_cs_legacy_get_tiling_conf(struct drm_device *dev, u32 *npipes, u32 *nbanks, u32 *group_size);
+
struct r600_cs_track {
- u32 cb_color0_base_last;
+	/* configuration we mirror so that we can use the same code between kms/ums */
+ u32 group_size;
+ u32 nbanks;
+ u32 npipes;
+ /* value we track */
+ u32 nsamples;
+ u32 cb_color_base_last[8];
+ struct radeon_bo *cb_color_bo[8];
+ u32 cb_color_bo_offset[8];
+ struct radeon_bo *cb_color_frag_bo[8];
+ struct radeon_bo *cb_color_tile_bo[8];
+ u32 cb_color_info[8];
+ u32 cb_color_size_idx[8];
+ u32 cb_target_mask;
+ u32 cb_shader_mask;
+ u32 cb_color_size[8];
+ u32 vgt_strmout_en;
+ u32 vgt_strmout_buffer_en;
+ u32 db_depth_control;
+ u32 db_depth_info;
+ u32 db_depth_size_idx;
+ u32 db_depth_view;
+ u32 db_depth_size;
+ u32 db_offset;
+ struct radeon_bo *db_bo;
};
+static inline int r600_bpe_from_format(u32 *bpe, u32 format)
+{
+ switch (format) {
+ case V_038004_COLOR_8:
+ case V_038004_COLOR_4_4:
+ case V_038004_COLOR_3_3_2:
+ case V_038004_FMT_1:
+ *bpe = 1;
+ break;
+ case V_038004_COLOR_16:
+ case V_038004_COLOR_16_FLOAT:
+ case V_038004_COLOR_8_8:
+ case V_038004_COLOR_5_6_5:
+ case V_038004_COLOR_6_5_5:
+ case V_038004_COLOR_1_5_5_5:
+ case V_038004_COLOR_4_4_4_4:
+ case V_038004_COLOR_5_5_5_1:
+ *bpe = 2;
+ break;
+ case V_038004_FMT_8_8_8:
+ *bpe = 3;
+ break;
+ case V_038004_COLOR_32:
+ case V_038004_COLOR_32_FLOAT:
+ case V_038004_COLOR_16_16:
+ case V_038004_COLOR_16_16_FLOAT:
+ case V_038004_COLOR_8_24:
+ case V_038004_COLOR_8_24_FLOAT:
+ case V_038004_COLOR_24_8:
+ case V_038004_COLOR_24_8_FLOAT:
+ case V_038004_COLOR_10_11_11:
+ case V_038004_COLOR_10_11_11_FLOAT:
+ case V_038004_COLOR_11_11_10:
+ case V_038004_COLOR_11_11_10_FLOAT:
+ case V_038004_COLOR_2_10_10_10:
+ case V_038004_COLOR_8_8_8_8:
+ case V_038004_COLOR_10_10_10_2:
+ case V_038004_FMT_5_9_9_9_SHAREDEXP:
+ case V_038004_FMT_32_AS_8:
+ case V_038004_FMT_32_AS_8_8:
+ *bpe = 4;
+ break;
+ case V_038004_COLOR_X24_8_32_FLOAT:
+ case V_038004_COLOR_32_32:
+ case V_038004_COLOR_32_32_FLOAT:
+ case V_038004_COLOR_16_16_16_16:
+ case V_038004_COLOR_16_16_16_16_FLOAT:
+ *bpe = 8;
+ break;
+ case V_038004_FMT_16_16_16:
+ case V_038004_FMT_16_16_16_FLOAT:
+ *bpe = 6;
+ break;
+ case V_038004_FMT_32_32_32:
+ case V_038004_FMT_32_32_32_FLOAT:
+ *bpe = 12;
+ break;
+ case V_038004_COLOR_32_32_32_32:
+ case V_038004_COLOR_32_32_32_32_FLOAT:
+ *bpe = 16;
+ break;
+ case V_038004_FMT_GB_GR:
+ case V_038004_FMT_BG_RG:
+ case V_038004_COLOR_INVALID:
+ *bpe = 16;
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void r600_cs_track_init(struct r600_cs_track *track)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ track->cb_color_base_last[i] = 0;
+ track->cb_color_size[i] = 0;
+ track->cb_color_size_idx[i] = 0;
+ track->cb_color_info[i] = 0;
+ track->cb_color_bo[i] = NULL;
+ track->cb_color_bo_offset[i] = 0xFFFFFFFF;
+ }
+ track->cb_target_mask = 0xFFFFFFFF;
+ track->cb_shader_mask = 0xFFFFFFFF;
+ track->db_bo = NULL;
+ /* assume the biggest format and that htile is enabled */
+ track->db_depth_info = 7 | (1 << 25);
+ track->db_depth_view = 0xFFFFC000;
+ track->db_depth_size = 0xFFFFFFFF;
+ track->db_depth_size_idx = 0;
+ track->db_depth_control = 0xFFFFFFFF;
+}
+
+static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
+{
+ struct r600_cs_track *track = p->track;
+ u32 bpe = 0, pitch, slice_tile_max, size, tmp, height;
+ volatile u32 *ib = p->ib->ptr;
+
+ if (G_0280A0_TILE_MODE(track->cb_color_info[i])) {
+ dev_warn(p->dev, "FMASK or CMASK buffer are not supported by this kernel\n");
+ return -EINVAL;
+ }
+ size = radeon_bo_size(track->cb_color_bo[i]);
+ if (r600_bpe_from_format(&bpe, G_0280A0_FORMAT(track->cb_color_info[i]))) {
+ dev_warn(p->dev, "%s:%d cb invalid format %d for %d (0x%08X)\n",
+ __func__, __LINE__, G_0280A0_FORMAT(track->cb_color_info[i]),
+ i, track->cb_color_info[i]);
+ return -EINVAL;
+ }
+ pitch = (G_028060_PITCH_TILE_MAX(track->cb_color_size[i]) + 1) << 3;
+ slice_tile_max = G_028060_SLICE_TILE_MAX(track->cb_color_size[i]) + 1;
+ if (!pitch) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) for %d invalid (0x%08X)\n",
+ __func__, __LINE__, pitch, i, track->cb_color_size[i]);
+ return -EINVAL;
+ }
+ height = size / (pitch * bpe);
+ if (height > 8192)
+ height = 8192;
+ switch (G_0280A0_ARRAY_MODE(track->cb_color_info[i])) {
+ case V_0280A0_ARRAY_LINEAR_GENERAL:
+ case V_0280A0_ARRAY_LINEAR_ALIGNED:
+ if (pitch & 0x3f) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d x %d = %d) invalid\n",
+ __func__, __LINE__, pitch, bpe, pitch * bpe);
+ return -EINVAL;
+ }
+ if ((pitch * bpe) & (track->group_size - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ break;
+ case V_0280A0_ARRAY_1D_TILED_THIN1:
+ if ((pitch * 8 * bpe * track->nsamples) & (track->group_size - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ height &= ~0x7;
+ if (!height)
+ height = 8;
+ break;
+ case V_0280A0_ARRAY_2D_TILED_THIN1:
+ if (pitch & ((8 * track->nbanks) - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ tmp = pitch * 8 * bpe * track->nsamples;
+ tmp = tmp / track->nbanks;
+ if (tmp & (track->group_size - 1)) {
+ dev_warn(p->dev, "%s:%d cb pitch (%d) invalid\n",
+ __func__, __LINE__, pitch);
+ return -EINVAL;
+ }
+ height &= ~((16 * track->npipes) - 1);
+ if (!height)
+ height = 16 * track->npipes;
+ break;
+ default:
+ dev_warn(p->dev, "%s invalid tiling %d for %d (0x%08X)\n", __func__,
+ G_0280A0_ARRAY_MODE(track->cb_color_info[i]), i,
+ track->cb_color_info[i]);
+ return -EINVAL;
+ }
+ /* check offset */
+ tmp = height * pitch;
+ if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
+ dev_warn(p->dev, "%s offset[%d] %d to big\n", __func__, i, track->cb_color_bo_offset[i]);
+ return -EINVAL;
+ }
+ /* limit max tile */
+ tmp = (height * pitch) >> 6;
+ if (tmp < slice_tile_max)
+ slice_tile_max = tmp;
+ tmp = S_028060_PITCH_TILE_MAX((pitch >> 3) - 1) |
+ S_028060_SLICE_TILE_MAX(slice_tile_max - 1);
+ ib[track->cb_color_size_idx[i]] = tmp;
+ return 0;
+}
+
+static int r600_cs_track_check(struct radeon_cs_parser *p)
+{
+ struct r600_cs_track *track = p->track;
+ u32 tmp;
+ int r, i;
+ volatile u32 *ib = p->ib->ptr;
+
+ /* on legacy kernels we don't perform advanced checks */
+ if (p->rdev == NULL)
+ return 0;
+ /* we don't support out buffer yet */
+ if (track->vgt_strmout_en || track->vgt_strmout_buffer_en) {
+ dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
+ return -EINVAL;
+ }
+ /* check that we have a cb for each enabled target; we don't check
+ * shader_mask because it seems mesa isn't always setting it :(
+ * (see the standalone sketch of this mask walk after this function)
+ */
+ tmp = track->cb_target_mask;
+ for (i = 0; i < 8; i++) {
+ if ((tmp >> (i * 4)) & 0xF) {
+ /* at least one component is enabled */
+ if (track->cb_color_bo[i] == NULL) {
+ dev_warn(p->dev, "%s:%d mask 0x%08X | 0x%08X no cb for %d\n",
+ __func__, __LINE__, track->cb_target_mask, track->cb_shader_mask, i);
+ return -EINVAL;
+ }
+ /* perform rewrite of CB_COLOR[0-7]_SIZE */
+ r = r600_cs_track_validate_cb(p, i);
+ if (r)
+ return r;
+ }
+ }
+ /* Check depth buffer */
+ if (G_028800_STENCIL_ENABLE(track->db_depth_control) ||
+ G_028800_Z_ENABLE(track->db_depth_control)) {
+ u32 nviews, bpe, ntiles;
+ if (track->db_bo == NULL) {
+ dev_warn(p->dev, "z/stencil with no depth buffer\n");
+ return -EINVAL;
+ }
+ if (G_028010_TILE_SURFACE_ENABLE(track->db_depth_info)) {
+ dev_warn(p->dev, "this kernel doesn't support z/stencil htile\n");
+ return -EINVAL;
+ }
+ switch (G_028010_FORMAT(track->db_depth_info)) {
+ case V_028010_DEPTH_16:
+ bpe = 2;
+ break;
+ case V_028010_DEPTH_X8_24:
+ case V_028010_DEPTH_8_24:
+ case V_028010_DEPTH_X8_24_FLOAT:
+ case V_028010_DEPTH_8_24_FLOAT:
+ case V_028010_DEPTH_32_FLOAT:
+ bpe = 4;
+ break;
+ case V_028010_DEPTH_X24_8_32_FLOAT:
+ bpe = 8;
+ break;
+ default:
+ dev_warn(p->dev, "z/stencil with invalid format %d\n", G_028010_FORMAT(track->db_depth_info));
+ return -EINVAL;
+ }
+ if ((track->db_depth_size & 0xFFFFFC00) == 0xFFFFFC00) {
+ if (!track->db_depth_size_idx) {
+ dev_warn(p->dev, "z/stencil buffer size not set\n");
+ return -EINVAL;
+ }
+ printk_once(KERN_WARNING "You have old & broken userspace please consider updating mesa\n");
+ tmp = radeon_bo_size(track->db_bo) - track->db_offset;
+ tmp = (tmp / bpe) >> 6;
+ if (!tmp) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %ld)\n",
+ track->db_depth_size, bpe, track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ ib[track->db_depth_size_idx] = S_028000_SLICE_TILE_MAX(tmp - 1) | (track->db_depth_size & 0x3FF);
+ } else {
+ ntiles = G_028000_SLICE_TILE_MAX(track->db_depth_size) + 1;
+ nviews = G_028004_SLICE_MAX(track->db_depth_view) + 1;
+ tmp = ntiles * bpe * 64 * nviews;
+ if ((tmp + track->db_offset) > radeon_bo_size(track->db_bo)) {
+ dev_warn(p->dev, "z/stencil buffer too small (0x%08X %d %d %d -> %d have %ld)\n",
+ track->db_depth_size, ntiles, nviews, bpe, tmp + track->db_offset,
+ radeon_bo_size(track->db_bo));
+ return -EINVAL;
+ }
+ }
+ }
+ return 0;
+}
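For reference, a minimal, self-contained sketch of the cb_target_mask walk performed by r600_cs_track_check() above. Each of the 8 color buffers owns a 4-bit channel write mask inside the 32-bit register; the example mask value and the helper program are illustrative only, not part of the driver:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t cb_target_mask = 0x0000F00F;	/* hypothetical: targets 0 and 3 enabled */
	int i;

	for (i = 0; i < 8; i++) {
		uint32_t channels = (cb_target_mask >> (i * 4)) & 0xF;

		if (channels)	/* at least one component enabled: a cb bo must be bound */
			printf("color buffer %d enabled, channel mask 0x%X\n", i, channels);
	}
	return 0;
}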
+
/**
* r600_cs_packet_parse() - parse cp packet and point ib index to next packet
* @parser: parser structure holding parsing context.
@@ -359,6 +662,334 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
return 0;
}
+/**
+ * r600_cs_check_reg() - check if register is authorized or not
+ * @p: parser structure holding parsing context
+ * @reg: register we are testing
+ * @idx: index into the cs buffer
+ *
+ * This function will test against r600_reg_safe_bm and return 0
+ * if the register is safe. If the register is not flagged as safe, this
+ * function will test it against a list of registers needing special
+ * handling (a standalone sketch of the bitmap lookup follows the function).
+ */
+static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
+{
+ struct r600_cs_track *track = (struct r600_cs_track *)p->track;
+ struct radeon_cs_reloc *reloc;
+ u32 last_reg = ARRAY_SIZE(r600_reg_safe_bm);
+ u32 m, i, tmp, *ib;
+ int r;
+
+ i = (reg >> 7);
+ if (i >= last_reg) {
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ m = 1 << ((reg >> 2) & 31);
+ if (!(r600_reg_safe_bm[i] & m))
+ return 0;
+ ib = p->ib->ptr;
+ switch (reg) {
+ /* force the following regs to 0 in an attempt to disable the out buffer;
+ * we need to better understand how it works before we can perform
+ * security checks on it (Jerome)
+ */
+ case R_0288A8_SQ_ESGS_RING_ITEMSIZE:
+ case R_008C44_SQ_ESGS_RING_SIZE:
+ case R_0288B0_SQ_ESTMP_RING_ITEMSIZE:
+ case R_008C54_SQ_ESTMP_RING_SIZE:
+ case R_0288C0_SQ_FBUF_RING_ITEMSIZE:
+ case R_008C74_SQ_FBUF_RING_SIZE:
+ case R_0288B4_SQ_GSTMP_RING_ITEMSIZE:
+ case R_008C5C_SQ_GSTMP_RING_SIZE:
+ case R_0288AC_SQ_GSVS_RING_ITEMSIZE:
+ case R_008C4C_SQ_GSVS_RING_SIZE:
+ case R_0288BC_SQ_PSTMP_RING_ITEMSIZE:
+ case R_008C6C_SQ_PSTMP_RING_SIZE:
+ case R_0288C4_SQ_REDUC_RING_ITEMSIZE:
+ case R_008C7C_SQ_REDUC_RING_SIZE:
+ case R_0288B8_SQ_VSTMP_RING_ITEMSIZE:
+ case R_008C64_SQ_VSTMP_RING_SIZE:
+ case R_0288C8_SQ_GS_VERT_ITEMSIZE:
+ /* get value to populate the IB don't remove */
+ tmp = radeon_get_ib_value(p, idx);
+ ib[idx] = 0;
+ break;
+ case R_028800_DB_DEPTH_CONTROL:
+ track->db_depth_control = radeon_get_ib_value(p, idx);
+ break;
+ case R_028010_DB_DEPTH_INFO:
+ track->db_depth_info = radeon_get_ib_value(p, idx);
+ break;
+ case R_028004_DB_DEPTH_VIEW:
+ track->db_depth_view = radeon_get_ib_value(p, idx);
+ break;
+ case R_028000_DB_DEPTH_SIZE:
+ track->db_depth_size = radeon_get_ib_value(p, idx);
+ track->db_depth_size_idx = idx;
+ break;
+ case R_028AB0_VGT_STRMOUT_EN:
+ track->vgt_strmout_en = radeon_get_ib_value(p, idx);
+ break;
+ case R_028B20_VGT_STRMOUT_BUFFER_EN:
+ track->vgt_strmout_buffer_en = radeon_get_ib_value(p, idx);
+ break;
+ case R_028238_CB_TARGET_MASK:
+ track->cb_target_mask = radeon_get_ib_value(p, idx);
+ break;
+ case R_02823C_CB_SHADER_MASK:
+ track->cb_shader_mask = radeon_get_ib_value(p, idx);
+ break;
+ case R_028C04_PA_SC_AA_CONFIG:
+ tmp = G_028C04_MSAA_NUM_SAMPLES(radeon_get_ib_value(p, idx));
+ track->nsamples = 1 << tmp;
+ break;
+ case R_0280A0_CB_COLOR0_INFO:
+ case R_0280A4_CB_COLOR1_INFO:
+ case R_0280A8_CB_COLOR2_INFO:
+ case R_0280AC_CB_COLOR3_INFO:
+ case R_0280B0_CB_COLOR4_INFO:
+ case R_0280B4_CB_COLOR5_INFO:
+ case R_0280B8_CB_COLOR6_INFO:
+ case R_0280BC_CB_COLOR7_INFO:
+ tmp = (reg - R_0280A0_CB_COLOR0_INFO) / 4;
+ track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
+ break;
+ case R_028060_CB_COLOR0_SIZE:
+ case R_028064_CB_COLOR1_SIZE:
+ case R_028068_CB_COLOR2_SIZE:
+ case R_02806C_CB_COLOR3_SIZE:
+ case R_028070_CB_COLOR4_SIZE:
+ case R_028074_CB_COLOR5_SIZE:
+ case R_028078_CB_COLOR6_SIZE:
+ case R_02807C_CB_COLOR7_SIZE:
+ tmp = (reg - R_028060_CB_COLOR0_SIZE) / 4;
+ track->cb_color_size[tmp] = radeon_get_ib_value(p, idx);
+ track->cb_color_size_idx[tmp] = idx;
+ break;
+ /* These registers were added late; there is userspace
+ * which does provide a relocation for them but sets a
+ * 0 offset. In order to avoid breaking old userspace
+ * we detect this and set the address to point to the last
+ * CB_COLOR0_BASE; note that if userspace doesn't set
+ * CB_COLOR0_BASE before these registers we will report an
+ * error. Old userspace always sets CB_COLOR0_BASE
+ * before any of this.
+ */
+ case R_0280E0_CB_COLOR0_FRAG:
+ case R_0280E4_CB_COLOR1_FRAG:
+ case R_0280E8_CB_COLOR2_FRAG:
+ case R_0280EC_CB_COLOR3_FRAG:
+ case R_0280F0_CB_COLOR4_FRAG:
+ case R_0280F4_CB_COLOR5_FRAG:
+ case R_0280F8_CB_COLOR6_FRAG:
+ case R_0280FC_CB_COLOR7_FRAG:
+ tmp = (reg - R_0280E0_CB_COLOR0_FRAG) / 4;
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color_base_last[tmp]) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] = track->cb_color_base_last[tmp];
+ printk_once(KERN_WARNING "You have old & broken userspace "
+ "please consider updating mesa & xf86-video-ati\n");
+ track->cb_color_frag_bo[tmp] = track->cb_color_bo[tmp];
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_frag_bo[tmp] = reloc->robj;
+ }
+ break;
+ case R_0280C0_CB_COLOR0_TILE:
+ case R_0280C4_CB_COLOR1_TILE:
+ case R_0280C8_CB_COLOR2_TILE:
+ case R_0280CC_CB_COLOR3_TILE:
+ case R_0280D0_CB_COLOR4_TILE:
+ case R_0280D4_CB_COLOR5_TILE:
+ case R_0280D8_CB_COLOR6_TILE:
+ case R_0280DC_CB_COLOR7_TILE:
+ tmp = (reg - R_0280C0_CB_COLOR0_TILE) / 4;
+ if (!r600_cs_packet_next_is_pkt3_nop(p)) {
+ if (!track->cb_color_base_last[tmp]) {
+ dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] = track->cb_color_base_last[tmp];
+ printk_once(KERN_WARNING "You have old & broken userspace "
+ "please consider updating mesa & xf86-video-ati\n");
+ track->cb_color_tile_bo[tmp] = track->cb_color_bo[tmp];
+ } else {
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_tile_bo[tmp] = reloc->robj;
+ }
+ break;
+ case CB_COLOR0_BASE:
+ case CB_COLOR1_BASE:
+ case CB_COLOR2_BASE:
+ case CB_COLOR3_BASE:
+ case CB_COLOR4_BASE:
+ case CB_COLOR5_BASE:
+ case CB_COLOR6_BASE:
+ case CB_COLOR7_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ tmp = (reg - CB_COLOR0_BASE) / 4;
+ track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->cb_color_base_last[tmp] = ib[idx];
+ track->cb_color_bo[tmp] = reloc->robj;
+ break;
+ case DB_DEPTH_BASE:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ track->db_offset = radeon_get_ib_value(p, idx);
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ track->db_bo = reloc->robj;
+ break;
+ case DB_HTILE_DATA_BASE:
+ case SQ_PGM_START_FS:
+ case SQ_PGM_START_ES:
+ case SQ_PGM_START_VS:
+ case SQ_PGM_START_GS:
+ case SQ_PGM_START_PS:
+ r = r600_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ break;
+ default:
+ dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
+ return -EINVAL;
+ }
+ return 0;
+}
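As noted in the kernel-doc above, r600_cs_check_reg() first consults the r600_reg_safe_bm bitmap: each 32-bit word covers 32 consecutive dword registers (128 bytes of register space), so the word index is reg >> 7 and the bit index is (reg >> 2) & 31. The following standalone sketch mirrors that lookup; the table name, its size, and the helper are hypothetical stand-ins, not the driver's generated table:

#include <stdbool.h>
#include <stdint.h>

#define SAFE_BM_WORDS 1024			/* hypothetical size, for illustration */
static uint32_t reg_safe_bm[SAFE_BM_WORDS];	/* stand-in for r600_reg_safe_bm */

/*
 * Mirror of the test in r600_cs_check_reg(): a clear bit means the
 * register write is accepted as-is, a set bit means it needs the
 * special handling done in the big switch statement.
 */
static bool reg_needs_special_handling(uint32_t reg)
{
	uint32_t word = reg >> 7;			/* 128 bytes of registers per word */
	uint32_t mask = 1u << ((reg >> 2) & 31);	/* dword register index within that word */

	if (word >= SAFE_BM_WORDS)
		return true;				/* out of range: treat as forbidden */
	return (reg_safe_bm[word] & mask) != 0;
}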
+
+static inline unsigned minify(unsigned size, unsigned levels)
+{
+ size = size >> levels;
+ if (size < 1)
+ size = 1;
+ return size;
+}
+
+static void r600_texture_size(unsigned nfaces, unsigned blevel, unsigned nlevels,
+ unsigned w0, unsigned h0, unsigned d0, unsigned bpe,
+ unsigned *l0_size, unsigned *mipmap_size)
+{
+ unsigned offset, i, level, face;
+ unsigned width, height, depth, rowstride, size;
+
+ w0 = minify(w0, 0);
+ h0 = minify(h0, 0);
+ d0 = minify(d0, 0);
+ for(i = 0, offset = 0, level = blevel; i < nlevels; i++, level++) {
+ width = minify(w0, i);
+ height = minify(h0, i);
+ depth = minify(d0, i);
+ for(face = 0; face < nfaces; face++) {
+ rowstride = ((width * bpe) + 255) & ~255;
+ size = height * rowstride * depth;
+ offset += size;
+ offset = (offset + 0x1f) & ~0x1f;
+ }
+ }
+ *l0_size = (((w0 * bpe) + 255) & ~255) * h0 * d0;
+ *mipmap_size = offset;
+ if (!blevel)
+ *mipmap_size -= *l0_size;
+ if (!nlevels)
+ *mipmap_size = *l0_size;
+}
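To make the mip-chain arithmetic in r600_texture_size() concrete, here is a standalone sketch of the same computation for a simple 2D texture with one face and no base-level offset; the 300x200, 4 bytes-per-texel example values and the helper names are illustrative only. Each level's row stride is padded to 256 bytes and the running offset is padded to 32 bytes, as in the loop above:

#include <stdio.h>

/* Illustrative copy of the minify() helper: shift per level, clamp to 1. */
static unsigned minify_level(unsigned size, unsigned level)
{
	size >>= level;
	return size ? size : 1;
}

int main(void)
{
	unsigned w0 = 300, h0 = 200, bpe = 4;	/* hypothetical 300x200 texture, 4 bytes/texel */
	unsigned nlevels = 3, offset = 0, i;

	for (i = 0; i < nlevels; i++) {
		unsigned width = minify_level(w0, i);
		unsigned height = minify_level(h0, i);
		unsigned rowstride = ((width * bpe) + 255) & ~255u;	/* 256-byte pitch alignment */
		unsigned size = height * rowstride;			/* depth is 1 for plain 2D */

		offset += size;
		offset = (offset + 0x1f) & ~0x1fu;			/* 32-byte per-level alignment */
		printf("level %u: %ux%u, rowstride %u, running size %u\n",
		       i, width, height, rowstride, offset);
	}
	return 0;
}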
+
+/**
+ * r600_check_texture_resource() - check texture resource validity
+ * @p: parser structure holding parsing context
+ * @idx: index into the cs buffer
+ * @texture: texture's bo structure
+ * @mipmap: mipmap's bo structure
+ *
+ * This function will check that the resource has valid fields and that
+ * the texture and mipmap bo objects are big enough to cover this resource.
+ */
+static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
+ struct radeon_bo *texture,
+ struct radeon_bo *mipmap)
+{
+ u32 nfaces, nlevels, blevel, w0, h0, d0, bpe = 0;
+ u32 word0, word1, l0_size, mipmap_size;
+
+ /* on legacy kernels we don't perform advanced checks */
+ if (p->rdev == NULL)
+ return 0;
+ word0 = radeon_get_ib_value(p, idx + 0);
+ word1 = radeon_get_ib_value(p, idx + 1);
+ w0 = G_038000_TEX_WIDTH(word0) + 1;
+ h0 = G_038004_TEX_HEIGHT(word1) + 1;
+ d0 = G_038004_TEX_DEPTH(word1);
+ nfaces = 1;
+ switch (G_038000_DIM(word0)) {
+ case V_038000_SQ_TEX_DIM_1D:
+ case V_038000_SQ_TEX_DIM_2D:
+ case V_038000_SQ_TEX_DIM_3D:
+ break;
+ case V_038000_SQ_TEX_DIM_CUBEMAP:
+ nfaces = 6;
+ break;
+ case V_038000_SQ_TEX_DIM_1D_ARRAY:
+ case V_038000_SQ_TEX_DIM_2D_ARRAY:
+ case V_038000_SQ_TEX_DIM_2D_MSAA:
+ case V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA:
+ default:
+ dev_warn(p->dev, "this kernel doesn't support %d texture dim\n", G_038000_DIM(word0));
+ return -EINVAL;
+ }
+ if (r600_bpe_from_format(&bpe, G_038004_DATA_FORMAT(word1))) {
+ dev_warn(p->dev, "%s:%d texture invalid format %d\n",
+ __func__, __LINE__, G_038004_DATA_FORMAT(word1));
+ return -EINVAL;
+ }
+ word0 = radeon_get_ib_value(p, idx + 4);
+ word1 = radeon_get_ib_value(p, idx + 5);
+ blevel = G_038010_BASE_LEVEL(word0);
+ nlevels = G_038014_LAST_LEVEL(word1);
+ r600_texture_size(nfaces, blevel, nlevels, w0, h0, d0, bpe, &l0_size, &mipmap_size);
+ /* using get ib will give us the offset into the texture bo */
+ word0 = radeon_get_ib_value(p, idx + 2);
+ if ((l0_size + word0) > radeon_bo_size(texture)) {
+ dev_warn(p->dev, "texture bo too small (%d %d %d %d -> %d have %ld)\n",
+ w0, h0, bpe, word0, l0_size, radeon_bo_size(texture));
+ return -EINVAL;
+ }
+ /* using get ib will give us the offset into the mipmap bo */
+ word0 = radeon_get_ib_value(p, idx + 3);
+ if ((mipmap_size + word0) > radeon_bo_size(mipmap)) {
+ dev_warn(p->dev, "mipmap bo too small (%d %d %d %d %d %d -> %d have %ld)\n",
+ w0, h0, bpe, blevel, nlevels, word0, mipmap_size, radeon_bo_size(mipmap));
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int r600_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
@@ -408,12 +1039,22 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
break;
case PACKET3_DRAW_INDEX_AUTO:
if (pkt->count != 1) {
DRM_ERROR("bad DRAW_INDEX_AUTO\n");
return -EINVAL;
}
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
+ return r;
+ }
break;
case PACKET3_DRAW_INDEX_IMMD_BE:
case PACKET3_DRAW_INDEX_IMMD:
@@ -421,6 +1062,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad DRAW_INDEX_IMMD\n");
return -EINVAL;
}
+ r = r600_cs_track_check(p);
+ if (r) {
+ dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
+ return r;
+ }
break;
case PACKET3_WAIT_REG_MEM:
if (pkt->count != 5) {
@@ -493,30 +1139,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
- switch (reg) {
- case SQ_ESGS_RING_BASE:
- case SQ_GSVS_RING_BASE:
- case SQ_ESTMP_RING_BASE:
- case SQ_GSTMP_RING_BASE:
- case SQ_VSTMP_RING_BASE:
- case SQ_PSTMP_RING_BASE:
- case SQ_FBUF_RING_BASE:
- case SQ_REDUC_RING_BASE:
- case SX_MEMORY_EXPORT_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_CONFIG_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- break;
- case CP_COHER_BASE:
- /* use PACKET3_SURFACE_SYNC */
- return -EINVAL;
- default:
- break;
- }
+ r = r600_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
}
break;
case PACKET3_SET_CONTEXT_REG:
@@ -530,106 +1155,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
for (i = 0; i < pkt->count; i++) {
reg = start_reg + (4 * i);
- switch (reg) {
- /* This register were added late, there is userspace
- * which does provide relocation for those but set
- * 0 offset. In order to avoid breaking old userspace
- * we detect this and set address to point to last
- * CB_COLOR0_BASE, note that if userspace doesn't set
- * CB_COLOR0_BASE before this register we will report
- * error. Old userspace always set CB_COLOR0_BASE
- * before any of this.
- */
- case R_0280E0_CB_COLOR0_FRAG:
- case R_0280E4_CB_COLOR1_FRAG:
- case R_0280E8_CB_COLOR2_FRAG:
- case R_0280EC_CB_COLOR3_FRAG:
- case R_0280F0_CB_COLOR4_FRAG:
- case R_0280F4_CB_COLOR5_FRAG:
- case R_0280F8_CB_COLOR6_FRAG:
- case R_0280FC_CB_COLOR7_FRAG:
- case R_0280C0_CB_COLOR0_TILE:
- case R_0280C4_CB_COLOR1_TILE:
- case R_0280C8_CB_COLOR2_TILE:
- case R_0280CC_CB_COLOR3_TILE:
- case R_0280D0_CB_COLOR4_TILE:
- case R_0280D4_CB_COLOR5_TILE:
- case R_0280D8_CB_COLOR6_TILE:
- case R_0280DC_CB_COLOR7_TILE:
- if (!r600_cs_packet_next_is_pkt3_nop(p)) {
- if (!track->cb_color0_base_last) {
- dev_err(p->dev, "Broken old userspace ? no cb_color0_base supplied before trying to write 0x%08X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] = track->cb_color0_base_last;
- printk_once(KERN_WARNING "radeon: You have old & broken userspace "
- "please consider updating mesa & xf86-video-ati\n");
- } else {
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- }
- break;
- case DB_DEPTH_BASE:
- case DB_HTILE_DATA_BASE:
- case CB_COLOR0_BASE:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- track->cb_color0_base_last = ib[idx+1+i];
- break;
- case CB_COLOR1_BASE:
- case CB_COLOR2_BASE:
- case CB_COLOR3_BASE:
- case CB_COLOR4_BASE:
- case CB_COLOR5_BASE:
- case CB_COLOR6_BASE:
- case CB_COLOR7_BASE:
- case SQ_PGM_START_FS:
- case SQ_PGM_START_ES:
- case SQ_PGM_START_VS:
- case SQ_PGM_START_GS:
- case SQ_PGM_START_PS:
- r = r600_cs_packet_next_reloc(p, &reloc);
- if (r) {
- DRM_ERROR("bad SET_CONTEXT_REG "
- "0x%04X\n", reg);
- return -EINVAL;
- }
- ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
- break;
- case VGT_DMA_BASE:
- case VGT_DMA_BASE_HI:
- /* These should be handled by DRAW_INDEX packet 3 */
- case VGT_STRMOUT_BASE_OFFSET_0:
- case VGT_STRMOUT_BASE_OFFSET_1:
- case VGT_STRMOUT_BASE_OFFSET_2:
- case VGT_STRMOUT_BASE_OFFSET_3:
- case VGT_STRMOUT_BASE_OFFSET_HI_0:
- case VGT_STRMOUT_BASE_OFFSET_HI_1:
- case VGT_STRMOUT_BASE_OFFSET_HI_2:
- case VGT_STRMOUT_BASE_OFFSET_HI_3:
- case VGT_STRMOUT_BUFFER_BASE_0:
- case VGT_STRMOUT_BUFFER_BASE_1:
- case VGT_STRMOUT_BUFFER_BASE_2:
- case VGT_STRMOUT_BUFFER_BASE_3:
- case VGT_STRMOUT_BUFFER_OFFSET_0:
- case VGT_STRMOUT_BUFFER_OFFSET_1:
- case VGT_STRMOUT_BUFFER_OFFSET_2:
- case VGT_STRMOUT_BUFFER_OFFSET_3:
- /* These should be handled by STRMOUT_BUFFER packet 3 */
- DRM_ERROR("bad context reg: 0x%08x\n", reg);
- return -EINVAL;
- default:
- break;
- }
+ r = r600_cs_check_reg(p, reg, idx+1+i);
+ if (r)
+ return r;
}
break;
case PACKET3_SET_RESOURCE:
@@ -646,6 +1174,9 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
for (i = 0; i < (pkt->count / 7); i++) {
+ struct radeon_bo *texture, *mipmap;
+ u32 size, offset;
+
switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
case SQ_TEX_VTX_VALID_TEXTURE:
/* tex base */
@@ -655,6 +1186,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ texture = reloc->robj;
/* tex mip base */
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
@@ -662,6 +1194,11 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
return -EINVAL;
}
ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
+ mipmap = reloc->robj;
+ r = r600_check_texture_resource(p, idx+(i*7)+1,
+ texture, mipmap);
+ if (r)
+ return r;
break;
case SQ_TEX_VTX_VALID_BUFFER:
/* vtx base */
@@ -670,6 +1207,13 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
DRM_ERROR("bad SET_RESOURCE\n");
return -EINVAL;
}
+ offset = radeon_get_ib_value(p, idx+1+(i*7)+0);
+ size = radeon_get_ib_value(p, idx+1+(i*7)+1);
+ if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
+ /* force size to size of the buffer */
+ dev_warn(p->dev, "vbo resource seems too big for the bo\n");
+ ib[idx+1+(i*7)+1] = radeon_bo_size(reloc->robj);
+ }
ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
break;
@@ -760,11 +1304,28 @@ int r600_cs_parse(struct radeon_cs_parser *p)
struct r600_cs_track *track;
int r;
- track = kzalloc(sizeof(*track), GFP_KERNEL);
- p->track = track;
+ if (p->track == NULL) {
+ /* initialize tracker, we are in kms */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ r600_cs_track_init(track);
+ if (p->rdev->family < CHIP_RV770) {
+ track->npipes = p->rdev->config.r600.tiling_npipes;
+ track->nbanks = p->rdev->config.r600.tiling_nbanks;
+ track->group_size = p->rdev->config.r600.tiling_group_size;
+ } else if (p->rdev->family <= CHIP_RV740) {
+ track->npipes = p->rdev->config.rv770.tiling_npipes;
+ track->nbanks = p->rdev->config.rv770.tiling_nbanks;
+ track->group_size = p->rdev->config.rv770.tiling_group_size;
+ }
+ p->track = track;
+ }
do {
r = r600_cs_packet_parse(p, &pkt, p->idx);
if (r) {
+ kfree(p->track);
+ p->track = NULL;
return r;
}
p->idx += pkt.count + 2;
@@ -779,9 +1340,13 @@ int r600_cs_parse(struct radeon_cs_parser *p)
break;
default:
DRM_ERROR("Unknown packet type %d !\n", pkt.type);
+ kfree(p->track);
+ p->track = NULL;
return -EINVAL;
}
if (r) {
+ kfree(p->track);
+ p->track = NULL;
return r;
}
} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
@@ -791,6 +1356,8 @@ int r600_cs_parse(struct radeon_cs_parser *p)
mdelay(1);
}
#endif
+ kfree(p->track);
+ p->track = NULL;
return 0;
}
@@ -833,9 +1400,16 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
{
struct radeon_cs_parser parser;
struct radeon_cs_chunk *ib_chunk;
- struct radeon_ib fake_ib;
+ struct radeon_ib fake_ib;
+ struct r600_cs_track *track;
int r;
+ /* initialize tracker */
+ track = kzalloc(sizeof(*track), GFP_KERNEL);
+ if (track == NULL)
+ return -ENOMEM;
+ r600_cs_track_init(track);
+ r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
/* initialize parser */
memset(&parser, 0, sizeof(struct radeon_cs_parser));
parser.filp = filp;
@@ -843,6 +1417,7 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
parser.rdev = NULL;
parser.family = family;
parser.ib = &fake_ib;
+ parser.track = track;
fake_ib.ptr = ib;
r = radeon_cs_parser_init(&parser, data);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 30480881aed..5b2e4d44282 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -883,6 +883,16 @@
#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
+#define R_028C04_PA_SC_AA_CONFIG 0x028C04
+#define S_028C04_MSAA_NUM_SAMPLES(x) (((x) & 0x3) << 0)
+#define G_028C04_MSAA_NUM_SAMPLES(x) (((x) >> 0) & 0x3)
+#define C_028C04_MSAA_NUM_SAMPLES 0xFFFFFFFC
+#define S_028C04_AA_MASK_CENTROID_DTMN(x) (((x) & 0x1) << 4)
+#define G_028C04_AA_MASK_CENTROID_DTMN(x) (((x) >> 4) & 0x1)
+#define C_028C04_AA_MASK_CENTROID_DTMN 0xFFFFFFEF
+#define S_028C04_MAX_SAMPLE_DIST(x) (((x) & 0xF) << 13)
+#define G_028C04_MAX_SAMPLE_DIST(x) (((x) >> 13) & 0xF)
+#define C_028C04_MAX_SAMPLE_DIST 0xFFFE1FFF
#define R_0280E0_CB_COLOR0_FRAG 0x0280E0
#define S_0280E0_BASE_256B(x) (((x) & 0xFFFFFFFF) << 0)
#define G_0280E0_BASE_256B(x) (((x) >> 0) & 0xFFFFFFFF)
@@ -905,6 +915,461 @@
#define R_0280D4_CB_COLOR5_TILE 0x0280D4
#define R_0280D8_CB_COLOR6_TILE 0x0280D8
#define R_0280DC_CB_COLOR7_TILE 0x0280DC
-
+#define R_0280A0_CB_COLOR0_INFO 0x0280A0
+#define S_0280A0_ENDIAN(x) (((x) & 0x3) << 0)
+#define G_0280A0_ENDIAN(x) (((x) >> 0) & 0x3)
+#define C_0280A0_ENDIAN 0xFFFFFFFC
+#define S_0280A0_FORMAT(x) (((x) & 0x3F) << 2)
+#define G_0280A0_FORMAT(x) (((x) >> 2) & 0x3F)
+#define C_0280A0_FORMAT 0xFFFFFF03
+#define V_0280A0_COLOR_INVALID 0x00000000
+#define V_0280A0_COLOR_8 0x00000001
+#define V_0280A0_COLOR_4_4 0x00000002
+#define V_0280A0_COLOR_3_3_2 0x00000003
+#define V_0280A0_COLOR_16 0x00000005
+#define V_0280A0_COLOR_16_FLOAT 0x00000006
+#define V_0280A0_COLOR_8_8 0x00000007
+#define V_0280A0_COLOR_5_6_5 0x00000008
+#define V_0280A0_COLOR_6_5_5 0x00000009
+#define V_0280A0_COLOR_1_5_5_5 0x0000000A
+#define V_0280A0_COLOR_4_4_4_4 0x0000000B
+#define V_0280A0_COLOR_5_5_5_1 0x0000000C
+#define V_0280A0_COLOR_32 0x0000000D
+#define V_0280A0_COLOR_32_FLOAT 0x0000000E
+#define V_0280A0_COLOR_16_16 0x0000000F
+#define V_0280A0_COLOR_16_16_FLOAT 0x00000010
+#define V_0280A0_COLOR_8_24 0x00000011
+#define V_0280A0_COLOR_8_24_FLOAT 0x00000012
+#define V_0280A0_COLOR_24_8 0x00000013
+#define V_0280A0_COLOR_24_8_FLOAT 0x00000014
+#define V_0280A0_COLOR_10_11_11 0x00000015
+#define V_0280A0_COLOR_10_11_11_FLOAT 0x00000016
+#define V_0280A0_COLOR_11_11_10 0x00000017
+#define V_0280A0_COLOR_11_11_10_FLOAT 0x00000018
+#define V_0280A0_COLOR_2_10_10_10 0x00000019
+#define V_0280A0_COLOR_8_8_8_8 0x0000001A
+#define V_0280A0_COLOR_10_10_10_2 0x0000001B
+#define V_0280A0_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_0280A0_COLOR_32_32 0x0000001D
+#define V_0280A0_COLOR_32_32_FLOAT 0x0000001E
+#define V_0280A0_COLOR_16_16_16_16 0x0000001F
+#define V_0280A0_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_0280A0_COLOR_32_32_32_32 0x00000022
+#define V_0280A0_COLOR_32_32_32_32_FLOAT 0x00000023
+#define S_0280A0_ARRAY_MODE(x) (((x) & 0xF) << 8)
+#define G_0280A0_ARRAY_MODE(x) (((x) >> 8) & 0xF)
+#define C_0280A0_ARRAY_MODE 0xFFFFF0FF
+#define V_0280A0_ARRAY_LINEAR_GENERAL 0x00000000
+#define V_0280A0_ARRAY_LINEAR_ALIGNED 0x00000001
+#define V_0280A0_ARRAY_1D_TILED_THIN1 0x00000002
+#define V_0280A0_ARRAY_2D_TILED_THIN1 0x00000004
+#define S_0280A0_NUMBER_TYPE(x) (((x) & 0x7) << 12)
+#define G_0280A0_NUMBER_TYPE(x) (((x) >> 12) & 0x7)
+#define C_0280A0_NUMBER_TYPE 0xFFFF8FFF
+#define S_0280A0_READ_SIZE(x) (((x) & 0x1) << 15)
+#define G_0280A0_READ_SIZE(x) (((x) >> 15) & 0x1)
+#define C_0280A0_READ_SIZE 0xFFFF7FFF
+#define S_0280A0_COMP_SWAP(x) (((x) & 0x3) << 16)
+#define G_0280A0_COMP_SWAP(x) (((x) >> 16) & 0x3)
+#define C_0280A0_COMP_SWAP 0xFFFCFFFF
+#define S_0280A0_TILE_MODE(x) (((x) & 0x3) << 18)
+#define G_0280A0_TILE_MODE(x) (((x) >> 18) & 0x3)
+#define C_0280A0_TILE_MODE 0xFFF3FFFF
+#define S_0280A0_BLEND_CLAMP(x) (((x) & 0x1) << 20)
+#define G_0280A0_BLEND_CLAMP(x) (((x) >> 20) & 0x1)
+#define C_0280A0_BLEND_CLAMP 0xFFEFFFFF
+#define S_0280A0_CLEAR_COLOR(x) (((x) & 0x1) << 21)
+#define G_0280A0_CLEAR_COLOR(x) (((x) >> 21) & 0x1)
+#define C_0280A0_CLEAR_COLOR 0xFFDFFFFF
+#define S_0280A0_BLEND_BYPASS(x) (((x) & 0x1) << 22)
+#define G_0280A0_BLEND_BYPASS(x) (((x) >> 22) & 0x1)
+#define C_0280A0_BLEND_BYPASS 0xFFBFFFFF
+#define S_0280A0_BLEND_FLOAT32(x) (((x) & 0x1) << 23)
+#define G_0280A0_BLEND_FLOAT32(x) (((x) >> 23) & 0x1)
+#define C_0280A0_BLEND_FLOAT32 0xFF7FFFFF
+#define S_0280A0_SIMPLE_FLOAT(x) (((x) & 0x1) << 24)
+#define G_0280A0_SIMPLE_FLOAT(x) (((x) >> 24) & 0x1)
+#define C_0280A0_SIMPLE_FLOAT 0xFEFFFFFF
+#define S_0280A0_ROUND_MODE(x) (((x) & 0x1) << 25)
+#define G_0280A0_ROUND_MODE(x) (((x) >> 25) & 0x1)
+#define C_0280A0_ROUND_MODE 0xFDFFFFFF
+#define S_0280A0_TILE_COMPACT(x) (((x) & 0x1) << 26)
+#define G_0280A0_TILE_COMPACT(x) (((x) >> 26) & 0x1)
+#define C_0280A0_TILE_COMPACT 0xFBFFFFFF
+#define S_0280A0_SOURCE_FORMAT(x) (((x) & 0x1) << 27)
+#define G_0280A0_SOURCE_FORMAT(x) (((x) >> 27) & 0x1)
+#define C_0280A0_SOURCE_FORMAT 0xF7FFFFFF
+#define R_0280A4_CB_COLOR1_INFO 0x0280A4
+#define R_0280A8_CB_COLOR2_INFO 0x0280A8
+#define R_0280AC_CB_COLOR3_INFO 0x0280AC
+#define R_0280B0_CB_COLOR4_INFO 0x0280B0
+#define R_0280B4_CB_COLOR5_INFO 0x0280B4
+#define R_0280B8_CB_COLOR6_INFO 0x0280B8
+#define R_0280BC_CB_COLOR7_INFO 0x0280BC
+#define R_028060_CB_COLOR0_SIZE 0x028060
+#define S_028060_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
+#define G_028060_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
+#define C_028060_PITCH_TILE_MAX 0xFFFFFC00
+#define S_028060_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
+#define G_028060_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
+#define C_028060_SLICE_TILE_MAX 0xC00003FF
+#define R_028064_CB_COLOR1_SIZE 0x028064
+#define R_028068_CB_COLOR2_SIZE 0x028068
+#define R_02806C_CB_COLOR3_SIZE 0x02806C
+#define R_028070_CB_COLOR4_SIZE 0x028070
+#define R_028074_CB_COLOR5_SIZE 0x028074
+#define R_028078_CB_COLOR6_SIZE 0x028078
+#define R_02807C_CB_COLOR7_SIZE 0x02807C
+#define R_028238_CB_TARGET_MASK 0x028238
+#define S_028238_TARGET0_ENABLE(x) (((x) & 0xF) << 0)
+#define G_028238_TARGET0_ENABLE(x) (((x) >> 0) & 0xF)
+#define C_028238_TARGET0_ENABLE 0xFFFFFFF0
+#define S_028238_TARGET1_ENABLE(x) (((x) & 0xF) << 4)
+#define G_028238_TARGET1_ENABLE(x) (((x) >> 4) & 0xF)
+#define C_028238_TARGET1_ENABLE 0xFFFFFF0F
+#define S_028238_TARGET2_ENABLE(x) (((x) & 0xF) << 8)
+#define G_028238_TARGET2_ENABLE(x) (((x) >> 8) & 0xF)
+#define C_028238_TARGET2_ENABLE 0xFFFFF0FF
+#define S_028238_TARGET3_ENABLE(x) (((x) & 0xF) << 12)
+#define G_028238_TARGET3_ENABLE(x) (((x) >> 12) & 0xF)
+#define C_028238_TARGET3_ENABLE 0xFFFF0FFF
+#define S_028238_TARGET4_ENABLE(x) (((x) & 0xF) << 16)
+#define G_028238_TARGET4_ENABLE(x) (((x) >> 16) & 0xF)
+#define C_028238_TARGET4_ENABLE 0xFFF0FFFF
+#define S_028238_TARGET5_ENABLE(x) (((x) & 0xF) << 20)
+#define G_028238_TARGET5_ENABLE(x) (((x) >> 20) & 0xF)
+#define C_028238_TARGET5_ENABLE 0xFF0FFFFF
+#define S_028238_TARGET6_ENABLE(x) (((x) & 0xF) << 24)
+#define G_028238_TARGET6_ENABLE(x) (((x) >> 24) & 0xF)
+#define C_028238_TARGET6_ENABLE 0xF0FFFFFF
+#define S_028238_TARGET7_ENABLE(x) (((x) & 0xF) << 28)
+#define G_028238_TARGET7_ENABLE(x) (((x) >> 28) & 0xF)
+#define C_028238_TARGET7_ENABLE 0x0FFFFFFF
+#define R_02823C_CB_SHADER_MASK 0x02823C
+#define S_02823C_OUTPUT0_ENABLE(x) (((x) & 0xF) << 0)
+#define G_02823C_OUTPUT0_ENABLE(x) (((x) >> 0) & 0xF)
+#define C_02823C_OUTPUT0_ENABLE 0xFFFFFFF0
+#define S_02823C_OUTPUT1_ENABLE(x) (((x) & 0xF) << 4)
+#define G_02823C_OUTPUT1_ENABLE(x) (((x) >> 4) & 0xF)
+#define C_02823C_OUTPUT1_ENABLE 0xFFFFFF0F
+#define S_02823C_OUTPUT2_ENABLE(x) (((x) & 0xF) << 8)
+#define G_02823C_OUTPUT2_ENABLE(x) (((x) >> 8) & 0xF)
+#define C_02823C_OUTPUT2_ENABLE 0xFFFFF0FF
+#define S_02823C_OUTPUT3_ENABLE(x) (((x) & 0xF) << 12)
+#define G_02823C_OUTPUT3_ENABLE(x) (((x) >> 12) & 0xF)
+#define C_02823C_OUTPUT3_ENABLE 0xFFFF0FFF
+#define S_02823C_OUTPUT4_ENABLE(x) (((x) & 0xF) << 16)
+#define G_02823C_OUTPUT4_ENABLE(x) (((x) >> 16) & 0xF)
+#define C_02823C_OUTPUT4_ENABLE 0xFFF0FFFF
+#define S_02823C_OUTPUT5_ENABLE(x) (((x) & 0xF) << 20)
+#define G_02823C_OUTPUT5_ENABLE(x) (((x) >> 20) & 0xF)
+#define C_02823C_OUTPUT5_ENABLE 0xFF0FFFFF
+#define S_02823C_OUTPUT6_ENABLE(x) (((x) & 0xF) << 24)
+#define G_02823C_OUTPUT6_ENABLE(x) (((x) >> 24) & 0xF)
+#define C_02823C_OUTPUT6_ENABLE 0xF0FFFFFF
+#define S_02823C_OUTPUT7_ENABLE(x) (((x) & 0xF) << 28)
+#define G_02823C_OUTPUT7_ENABLE(x) (((x) >> 28) & 0xF)
+#define C_02823C_OUTPUT7_ENABLE 0x0FFFFFFF
+#define R_028AB0_VGT_STRMOUT_EN 0x028AB0
+#define S_028AB0_STREAMOUT(x) (((x) & 0x1) << 0)
+#define G_028AB0_STREAMOUT(x) (((x) >> 0) & 0x1)
+#define C_028AB0_STREAMOUT 0xFFFFFFFE
+#define R_028B20_VGT_STRMOUT_BUFFER_EN 0x028B20
+#define S_028B20_BUFFER_0_EN(x) (((x) & 0x1) << 0)
+#define G_028B20_BUFFER_0_EN(x) (((x) >> 0) & 0x1)
+#define C_028B20_BUFFER_0_EN 0xFFFFFFFE
+#define S_028B20_BUFFER_1_EN(x) (((x) & 0x1) << 1)
+#define G_028B20_BUFFER_1_EN(x) (((x) >> 1) & 0x1)
+#define C_028B20_BUFFER_1_EN 0xFFFFFFFD
+#define S_028B20_BUFFER_2_EN(x) (((x) & 0x1) << 2)
+#define G_028B20_BUFFER_2_EN(x) (((x) >> 2) & 0x1)
+#define C_028B20_BUFFER_2_EN 0xFFFFFFFB
+#define S_028B20_BUFFER_3_EN(x) (((x) & 0x1) << 3)
+#define G_028B20_BUFFER_3_EN(x) (((x) >> 3) & 0x1)
+#define C_028B20_BUFFER_3_EN 0xFFFFFFF7
+#define S_028B20_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_028B20_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_028B20_SIZE 0x00000000
+#define R_038000_SQ_TEX_RESOURCE_WORD0_0 0x038000
+#define S_038000_DIM(x) (((x) & 0x7) << 0)
+#define G_038000_DIM(x) (((x) >> 0) & 0x7)
+#define C_038000_DIM 0xFFFFFFF8
+#define V_038000_SQ_TEX_DIM_1D 0x00000000
+#define V_038000_SQ_TEX_DIM_2D 0x00000001
+#define V_038000_SQ_TEX_DIM_3D 0x00000002
+#define V_038000_SQ_TEX_DIM_CUBEMAP 0x00000003
+#define V_038000_SQ_TEX_DIM_1D_ARRAY 0x00000004
+#define V_038000_SQ_TEX_DIM_2D_ARRAY 0x00000005
+#define V_038000_SQ_TEX_DIM_2D_MSAA 0x00000006
+#define V_038000_SQ_TEX_DIM_2D_ARRAY_MSAA 0x00000007
+#define S_038000_TILE_MODE(x) (((x) & 0xF) << 3)
+#define G_038000_TILE_MODE(x) (((x) >> 3) & 0xF)
+#define C_038000_TILE_MODE 0xFFFFFF87
+#define S_038000_TILE_TYPE(x) (((x) & 0x1) << 7)
+#define G_038000_TILE_TYPE(x) (((x) >> 7) & 0x1)
+#define C_038000_TILE_TYPE 0xFFFFFF7F
+#define S_038000_PITCH(x) (((x) & 0x7FF) << 8)
+#define G_038000_PITCH(x) (((x) >> 8) & 0x7FF)
+#define C_038000_PITCH 0xFFF800FF
+#define S_038000_TEX_WIDTH(x) (((x) & 0x1FFF) << 19)
+#define G_038000_TEX_WIDTH(x) (((x) >> 19) & 0x1FFF)
+#define C_038000_TEX_WIDTH 0x0007FFFF
+#define R_038004_SQ_TEX_RESOURCE_WORD1_0 0x038004
+#define S_038004_TEX_HEIGHT(x) (((x) & 0x1FFF) << 0)
+#define G_038004_TEX_HEIGHT(x) (((x) >> 0) & 0x1FFF)
+#define C_038004_TEX_HEIGHT 0xFFFFE000
+#define S_038004_TEX_DEPTH(x) (((x) & 0x1FFF) << 13)
+#define G_038004_TEX_DEPTH(x) (((x) >> 13) & 0x1FFF)
+#define C_038004_TEX_DEPTH 0xFC001FFF
+#define S_038004_DATA_FORMAT(x) (((x) & 0x3F) << 26)
+#define G_038004_DATA_FORMAT(x) (((x) >> 26) & 0x3F)
+#define C_038004_DATA_FORMAT 0x03FFFFFF
+#define V_038004_COLOR_INVALID 0x00000000
+#define V_038004_COLOR_8 0x00000001
+#define V_038004_COLOR_4_4 0x00000002
+#define V_038004_COLOR_3_3_2 0x00000003
+#define V_038004_COLOR_16 0x00000005
+#define V_038004_COLOR_16_FLOAT 0x00000006
+#define V_038004_COLOR_8_8 0x00000007
+#define V_038004_COLOR_5_6_5 0x00000008
+#define V_038004_COLOR_6_5_5 0x00000009
+#define V_038004_COLOR_1_5_5_5 0x0000000A
+#define V_038004_COLOR_4_4_4_4 0x0000000B
+#define V_038004_COLOR_5_5_5_1 0x0000000C
+#define V_038004_COLOR_32 0x0000000D
+#define V_038004_COLOR_32_FLOAT 0x0000000E
+#define V_038004_COLOR_16_16 0x0000000F
+#define V_038004_COLOR_16_16_FLOAT 0x00000010
+#define V_038004_COLOR_8_24 0x00000011
+#define V_038004_COLOR_8_24_FLOAT 0x00000012
+#define V_038004_COLOR_24_8 0x00000013
+#define V_038004_COLOR_24_8_FLOAT 0x00000014
+#define V_038004_COLOR_10_11_11 0x00000015
+#define V_038004_COLOR_10_11_11_FLOAT 0x00000016
+#define V_038004_COLOR_11_11_10 0x00000017
+#define V_038004_COLOR_11_11_10_FLOAT 0x00000018
+#define V_038004_COLOR_2_10_10_10 0x00000019
+#define V_038004_COLOR_8_8_8_8 0x0000001A
+#define V_038004_COLOR_10_10_10_2 0x0000001B
+#define V_038004_COLOR_X24_8_32_FLOAT 0x0000001C
+#define V_038004_COLOR_32_32 0x0000001D
+#define V_038004_COLOR_32_32_FLOAT 0x0000001E
+#define V_038004_COLOR_16_16_16_16 0x0000001F
+#define V_038004_COLOR_16_16_16_16_FLOAT 0x00000020
+#define V_038004_COLOR_32_32_32_32 0x00000022
+#define V_038004_COLOR_32_32_32_32_FLOAT 0x00000023
+#define V_038004_FMT_1 0x00000025
+#define V_038004_FMT_GB_GR 0x00000027
+#define V_038004_FMT_BG_RG 0x00000028
+#define V_038004_FMT_32_AS_8 0x00000029
+#define V_038004_FMT_32_AS_8_8 0x0000002A
+#define V_038004_FMT_5_9_9_9_SHAREDEXP 0x0000002B
+#define V_038004_FMT_8_8_8 0x0000002C
+#define V_038004_FMT_16_16_16 0x0000002D
+#define V_038004_FMT_16_16_16_FLOAT 0x0000002E
+#define V_038004_FMT_32_32_32 0x0000002F
+#define V_038004_FMT_32_32_32_FLOAT 0x00000030
+#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
+#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
+#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
+#define C_038010_FORMAT_COMP_X 0xFFFFFFFC
+#define S_038010_FORMAT_COMP_Y(x) (((x) & 0x3) << 2)
+#define G_038010_FORMAT_COMP_Y(x) (((x) >> 2) & 0x3)
+#define C_038010_FORMAT_COMP_Y 0xFFFFFFF3
+#define S_038010_FORMAT_COMP_Z(x) (((x) & 0x3) << 4)
+#define G_038010_FORMAT_COMP_Z(x) (((x) >> 4) & 0x3)
+#define C_038010_FORMAT_COMP_Z 0xFFFFFFCF
+#define S_038010_FORMAT_COMP_W(x) (((x) & 0x3) << 6)
+#define G_038010_FORMAT_COMP_W(x) (((x) >> 6) & 0x3)
+#define C_038010_FORMAT_COMP_W 0xFFFFFF3F
+#define S_038010_NUM_FORMAT_ALL(x) (((x) & 0x3) << 8)
+#define G_038010_NUM_FORMAT_ALL(x) (((x) >> 8) & 0x3)
+#define C_038010_NUM_FORMAT_ALL 0xFFFFFCFF
+#define S_038010_SRF_MODE_ALL(x) (((x) & 0x1) << 10)
+#define G_038010_SRF_MODE_ALL(x) (((x) >> 10) & 0x1)
+#define C_038010_SRF_MODE_ALL 0xFFFFFBFF
+#define S_038010_FORCE_DEGAMMA(x) (((x) & 0x1) << 11)
+#define G_038010_FORCE_DEGAMMA(x) (((x) >> 11) & 0x1)
+#define C_038010_FORCE_DEGAMMA 0xFFFFF7FF
+#define S_038010_ENDIAN_SWAP(x) (((x) & 0x3) << 12)
+#define G_038010_ENDIAN_SWAP(x) (((x) >> 12) & 0x3)
+#define C_038010_ENDIAN_SWAP 0xFFFFCFFF
+#define S_038010_REQUEST_SIZE(x) (((x) & 0x3) << 14)
+#define G_038010_REQUEST_SIZE(x) (((x) >> 14) & 0x3)
+#define C_038010_REQUEST_SIZE 0xFFFF3FFF
+#define S_038010_DST_SEL_X(x) (((x) & 0x7) << 16)
+#define G_038010_DST_SEL_X(x) (((x) >> 16) & 0x7)
+#define C_038010_DST_SEL_X 0xFFF8FFFF
+#define S_038010_DST_SEL_Y(x) (((x) & 0x7) << 19)
+#define G_038010_DST_SEL_Y(x) (((x) >> 19) & 0x7)
+#define C_038010_DST_SEL_Y 0xFFC7FFFF
+#define S_038010_DST_SEL_Z(x) (((x) & 0x7) << 22)
+#define G_038010_DST_SEL_Z(x) (((x) >> 22) & 0x7)
+#define C_038010_DST_SEL_Z 0xFE3FFFFF
+#define S_038010_DST_SEL_W(x) (((x) & 0x7) << 25)
+#define G_038010_DST_SEL_W(x) (((x) >> 25) & 0x7)
+#define C_038010_DST_SEL_W 0xF1FFFFFF
+#define S_038010_BASE_LEVEL(x) (((x) & 0xF) << 28)
+#define G_038010_BASE_LEVEL(x) (((x) >> 28) & 0xF)
+#define C_038010_BASE_LEVEL 0x0FFFFFFF
+#define R_038014_SQ_TEX_RESOURCE_WORD5_0 0x038014
+#define S_038014_LAST_LEVEL(x) (((x) & 0xF) << 0)
+#define G_038014_LAST_LEVEL(x) (((x) >> 0) & 0xF)
+#define C_038014_LAST_LEVEL 0xFFFFFFF0
+#define S_038014_BASE_ARRAY(x) (((x) & 0x1FFF) << 4)
+#define G_038014_BASE_ARRAY(x) (((x) >> 4) & 0x1FFF)
+#define C_038014_BASE_ARRAY 0xFFFE000F
+#define S_038014_LAST_ARRAY(x) (((x) & 0x1FFF) << 17)
+#define G_038014_LAST_ARRAY(x) (((x) >> 17) & 0x1FFF)
+#define C_038014_LAST_ARRAY 0xC001FFFF
+#define R_0288A8_SQ_ESGS_RING_ITEMSIZE 0x0288A8
+#define S_0288A8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288A8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288A8_ITEMSIZE 0xFFFF8000
+#define R_008C44_SQ_ESGS_RING_SIZE 0x008C44
+#define S_008C44_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C44_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C44_MEM_SIZE 0x00000000
+#define R_0288B0_SQ_ESTMP_RING_ITEMSIZE 0x0288B0
+#define S_0288B0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B0_ITEMSIZE 0xFFFF8000
+#define R_008C54_SQ_ESTMP_RING_SIZE 0x008C54
+#define S_008C54_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C54_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C54_MEM_SIZE 0x00000000
+#define R_0288C0_SQ_FBUF_RING_ITEMSIZE 0x0288C0
+#define S_0288C0_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C0_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C0_ITEMSIZE 0xFFFF8000
+#define R_008C74_SQ_FBUF_RING_SIZE 0x008C74
+#define S_008C74_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C74_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C74_MEM_SIZE 0x00000000
+#define R_0288B4_SQ_GSTMP_RING_ITEMSIZE 0x0288B4
+#define S_0288B4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B4_ITEMSIZE 0xFFFF8000
+#define R_008C5C_SQ_GSTMP_RING_SIZE 0x008C5C
+#define S_008C5C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C5C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C5C_MEM_SIZE 0x00000000
+#define R_0288AC_SQ_GSVS_RING_ITEMSIZE 0x0288AC
+#define S_0288AC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288AC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288AC_ITEMSIZE 0xFFFF8000
+#define R_008C4C_SQ_GSVS_RING_SIZE 0x008C4C
+#define S_008C4C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C4C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C4C_MEM_SIZE 0x00000000
+#define R_0288BC_SQ_PSTMP_RING_ITEMSIZE 0x0288BC
+#define S_0288BC_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288BC_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288BC_ITEMSIZE 0xFFFF8000
+#define R_008C6C_SQ_PSTMP_RING_SIZE 0x008C6C
+#define S_008C6C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C6C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C6C_MEM_SIZE 0x00000000
+#define R_0288C4_SQ_REDUC_RING_ITEMSIZE 0x0288C4
+#define S_0288C4_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C4_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C4_ITEMSIZE 0xFFFF8000
+#define R_008C7C_SQ_REDUC_RING_SIZE 0x008C7C
+#define S_008C7C_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C7C_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C7C_MEM_SIZE 0x00000000
+#define R_0288B8_SQ_VSTMP_RING_ITEMSIZE 0x0288B8
+#define S_0288B8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288B8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288B8_ITEMSIZE 0xFFFF8000
+#define R_008C64_SQ_VSTMP_RING_SIZE 0x008C64
+#define S_008C64_MEM_SIZE(x) (((x) & 0xFFFFFFFF) << 0)
+#define G_008C64_MEM_SIZE(x) (((x) >> 0) & 0xFFFFFFFF)
+#define C_008C64_MEM_SIZE 0x00000000
+#define R_0288C8_SQ_GS_VERT_ITEMSIZE 0x0288C8
+#define S_0288C8_ITEMSIZE(x) (((x) & 0x7FFF) << 0)
+#define G_0288C8_ITEMSIZE(x) (((x) >> 0) & 0x7FFF)
+#define C_0288C8_ITEMSIZE 0xFFFF8000
+#define R_028010_DB_DEPTH_INFO 0x028010
+#define S_028010_FORMAT(x) (((x) & 0x7) << 0)
+#define G_028010_FORMAT(x) (((x) >> 0) & 0x7)
+#define C_028010_FORMAT 0xFFFFFFF8
+#define V_028010_DEPTH_INVALID 0x00000000
+#define V_028010_DEPTH_16 0x00000001
+#define V_028010_DEPTH_X8_24 0x00000002
+#define V_028010_DEPTH_8_24 0x00000003
+#define V_028010_DEPTH_X8_24_FLOAT 0x00000004
+#define V_028010_DEPTH_8_24_FLOAT 0x00000005
+#define V_028010_DEPTH_32_FLOAT 0x00000006
+#define V_028010_DEPTH_X24_8_32_FLOAT 0x00000007
+#define S_028010_READ_SIZE(x) (((x) & 0x1) << 3)
+#define G_028010_READ_SIZE(x) (((x) >> 3) & 0x1)
+#define C_028010_READ_SIZE 0xFFFFFFF7
+#define S_028010_ARRAY_MODE(x) (((x) & 0xF) << 15)
+#define G_028010_ARRAY_MODE(x) (((x) >> 15) & 0xF)
+#define C_028010_ARRAY_MODE 0xFFF87FFF
+#define S_028010_TILE_SURFACE_ENABLE(x) (((x) & 0x1) << 25)
+#define G_028010_TILE_SURFACE_ENABLE(x) (((x) >> 25) & 0x1)
+#define C_028010_TILE_SURFACE_ENABLE 0xFDFFFFFF
+#define S_028010_TILE_COMPACT(x) (((x) & 0x1) << 26)
+#define G_028010_TILE_COMPACT(x) (((x) >> 26) & 0x1)
+#define C_028010_TILE_COMPACT 0xFBFFFFFF
+#define S_028010_ZRANGE_PRECISION(x) (((x) & 0x1) << 31)
+#define G_028010_ZRANGE_PRECISION(x) (((x) >> 31) & 0x1)
+#define C_028010_ZRANGE_PRECISION 0x7FFFFFFF
+#define R_028000_DB_DEPTH_SIZE 0x028000
+#define S_028000_PITCH_TILE_MAX(x) (((x) & 0x3FF) << 0)
+#define G_028000_PITCH_TILE_MAX(x) (((x) >> 0) & 0x3FF)
+#define C_028000_PITCH_TILE_MAX 0xFFFFFC00
+#define S_028000_SLICE_TILE_MAX(x) (((x) & 0xFFFFF) << 10)
+#define G_028000_SLICE_TILE_MAX(x) (((x) >> 10) & 0xFFFFF)
+#define C_028000_SLICE_TILE_MAX 0xC00003FF
+#define R_028004_DB_DEPTH_VIEW 0x028004
+#define S_028004_SLICE_START(x) (((x) & 0x7FF) << 0)
+#define G_028004_SLICE_START(x) (((x) >> 0) & 0x7FF)
+#define C_028004_SLICE_START 0xFFFFF800
+#define S_028004_SLICE_MAX(x) (((x) & 0x7FF) << 13)
+#define G_028004_SLICE_MAX(x) (((x) >> 13) & 0x7FF)
+#define C_028004_SLICE_MAX 0xFF001FFF
+#define R_028800_DB_DEPTH_CONTROL 0x028800
+#define S_028800_STENCIL_ENABLE(x) (((x) & 0x1) << 0)
+#define G_028800_STENCIL_ENABLE(x) (((x) >> 0) & 0x1)
+#define C_028800_STENCIL_ENABLE 0xFFFFFFFE
+#define S_028800_Z_ENABLE(x) (((x) & 0x1) << 1)
+#define G_028800_Z_ENABLE(x) (((x) >> 1) & 0x1)
+#define C_028800_Z_ENABLE 0xFFFFFFFD
+#define S_028800_Z_WRITE_ENABLE(x) (((x) & 0x1) << 2)
+#define G_028800_Z_WRITE_ENABLE(x) (((x) >> 2) & 0x1)
+#define C_028800_Z_WRITE_ENABLE 0xFFFFFFFB
+#define S_028800_ZFUNC(x) (((x) & 0x7) << 4)
+#define G_028800_ZFUNC(x) (((x) >> 4) & 0x7)
+#define C_028800_ZFUNC 0xFFFFFF8F
+#define S_028800_BACKFACE_ENABLE(x) (((x) & 0x1) << 7)
+#define G_028800_BACKFACE_ENABLE(x) (((x) >> 7) & 0x1)
+#define C_028800_BACKFACE_ENABLE 0xFFFFFF7F
+#define S_028800_STENCILFUNC(x) (((x) & 0x7) << 8)
+#define G_028800_STENCILFUNC(x) (((x) >> 8) & 0x7)
+#define C_028800_STENCILFUNC 0xFFFFF8FF
+#define S_028800_STENCILFAIL(x) (((x) & 0x7) << 11)
+#define G_028800_STENCILFAIL(x) (((x) >> 11) & 0x7)
+#define C_028800_STENCILFAIL 0xFFFFC7FF
+#define S_028800_STENCILZPASS(x) (((x) & 0x7) << 14)
+#define G_028800_STENCILZPASS(x) (((x) >> 14) & 0x7)
+#define C_028800_STENCILZPASS 0xFFFE3FFF
+#define S_028800_STENCILZFAIL(x) (((x) & 0x7) << 17)
+#define G_028800_STENCILZFAIL(x) (((x) >> 17) & 0x7)
+#define C_028800_STENCILZFAIL 0xFFF1FFFF
+#define S_028800_STENCILFUNC_BF(x) (((x) & 0x7) << 20)
+#define G_028800_STENCILFUNC_BF(x) (((x) >> 20) & 0x7)
+#define C_028800_STENCILFUNC_BF 0xFF8FFFFF
+#define S_028800_STENCILFAIL_BF(x) (((x) & 0x7) << 23)
+#define G_028800_STENCILFAIL_BF(x) (((x) >> 23) & 0x7)
+#define C_028800_STENCILFAIL_BF 0xFC7FFFFF
+#define S_028800_STENCILZPASS_BF(x) (((x) & 0x7) << 26)
+#define G_028800_STENCILZPASS_BF(x) (((x) >> 26) & 0x7)
+#define C_028800_STENCILZPASS_BF 0xE3FFFFFF
+#define S_028800_STENCILZFAIL_BF(x) (((x) & 0x7) << 29)
+#define G_028800_STENCILZFAIL_BF(x) (((x) >> 29) & 0x7)
+#define C_028800_STENCILZFAIL_BF 0x1FFFFFFF
#endif
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index c0356bb193e..829e26e8a4b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -89,6 +89,7 @@ extern int radeon_testing;
extern int radeon_connector_table;
extern int radeon_tv;
extern int radeon_new_pll;
+extern int radeon_dynpm;
extern int radeon_audio;
/*
@@ -118,6 +119,21 @@ struct radeon_device;
/*
* BIOS.
*/
+#define ATRM_BIOS_PAGE 4096
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_atrm_supported(struct pci_dev *pdev);
+int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len);
+#else
+static inline bool radeon_atrm_supported(struct pci_dev *pdev)
+{
+ return false;
+}
+
+static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return -EINVAL;
+}
+#endif
bool radeon_get_bios(struct radeon_device *rdev);
@@ -138,17 +154,23 @@ void radeon_dummy_page_fini(struct radeon_device *rdev);
struct radeon_clock {
struct radeon_pll p1pll;
struct radeon_pll p2pll;
+ struct radeon_pll dcpll;
struct radeon_pll spll;
struct radeon_pll mpll;
/* 10 Khz units */
uint32_t default_mclk;
uint32_t default_sclk;
+ uint32_t default_dispclk;
+ uint32_t dp_extclk;
};
/*
* Power management
*/
int radeon_pm_init(struct radeon_device *rdev);
+void radeon_pm_compute_clocks(struct radeon_device *rdev);
+void radeon_combios_get_power_modes(struct radeon_device *rdev);
+void radeon_atombios_get_power_modes(struct radeon_device *rdev);
/*
* Fences.
@@ -275,6 +297,7 @@ union radeon_gart_table {
};
#define RADEON_GPU_PAGE_SIZE 4096
+#define RADEON_GPU_PAGE_MASK (RADEON_GPU_PAGE_SIZE - 1)
struct radeon_gart {
dma_addr_t table_addr;
@@ -309,21 +332,19 @@ struct radeon_mc {
/* for some chips with <= 32MB we need to lie
* about vram size near mc fb location */
u64 mc_vram_size;
- u64 gtt_location;
+ u64 visible_vram_size;
u64 gtt_size;
u64 gtt_start;
u64 gtt_end;
- u64 vram_location;
u64 vram_start;
u64 vram_end;
unsigned vram_width;
u64 real_vram_size;
int vram_mtrr;
bool vram_is_ddr;
- bool igp_sideport_enabled;
+ bool igp_sideport_enabled;
};
-int radeon_mc_setup(struct radeon_device *rdev);
bool radeon_combios_sideport_present(struct radeon_device *rdev);
bool radeon_atombios_sideport_present(struct radeon_device *rdev);
@@ -348,6 +369,7 @@ struct radeon_irq {
bool sw_int;
/* FIXME: use a define max crtc rather than hardcode it */
bool crtc_vblank_int[2];
+ wait_queue_head_t vblank_queue;
/* FIXME: use defines for max hpd/dacs */
bool hpd[6];
spinlock_t sw_lock;
@@ -379,6 +401,7 @@ struct radeon_ib {
struct radeon_ib_pool {
struct mutex mutex;
struct radeon_bo *robj;
+ struct list_head bogus_ib;
struct radeon_ib ibs[RADEON_IB_POOL_SIZE];
bool ready;
unsigned head_id;
@@ -433,6 +456,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib);
int radeon_ib_pool_init(struct radeon_device *rdev);
void radeon_ib_pool_fini(struct radeon_device *rdev);
int radeon_ib_test(struct radeon_device *rdev);
+extern void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib);
/* Ring access between begin & end cannot sleep */
void radeon_ring_free_size(struct radeon_device *rdev);
int radeon_ring_lock(struct radeon_device *rdev, unsigned ndw);
@@ -570,7 +594,99 @@ struct radeon_wb {
* Equation between gpu/memory clock and available bandwidth is hw dependent
* (type of memory, bus size, efficiency, ...)
*/
+enum radeon_pm_state {
+ PM_STATE_DISABLED,
+ PM_STATE_MINIMUM,
+ PM_STATE_PAUSED,
+ PM_STATE_ACTIVE
+};
+enum radeon_pm_action {
+ PM_ACTION_NONE,
+ PM_ACTION_MINIMUM,
+ PM_ACTION_DOWNCLOCK,
+ PM_ACTION_UPCLOCK
+};
+
+enum radeon_voltage_type {
+ VOLTAGE_NONE = 0,
+ VOLTAGE_GPIO,
+ VOLTAGE_VDDC,
+ VOLTAGE_SW
+};
+
+enum radeon_pm_state_type {
+ POWER_STATE_TYPE_DEFAULT,
+ POWER_STATE_TYPE_POWERSAVE,
+ POWER_STATE_TYPE_BATTERY,
+ POWER_STATE_TYPE_BALANCED,
+ POWER_STATE_TYPE_PERFORMANCE,
+};
+
+enum radeon_pm_clock_mode_type {
+ POWER_MODE_TYPE_DEFAULT,
+ POWER_MODE_TYPE_LOW,
+ POWER_MODE_TYPE_MID,
+ POWER_MODE_TYPE_HIGH,
+};
+
+struct radeon_voltage {
+ enum radeon_voltage_type type;
+ /* gpio voltage */
+ struct radeon_gpio_rec gpio;
+ u32 delay; /* delay in usec from voltage drop to sclk change */
+ bool active_high; /* voltage drop is active when bit is high */
+ /* VDDC voltage */
+ u8 vddc_id; /* index into vddc voltage table */
+ u8 vddci_id; /* index into vddci voltage table */
+ bool vddci_enabled;
+ /* r6xx+ sw */
+ u32 voltage;
+};
+
+struct radeon_pm_non_clock_info {
+ /* pcie lanes */
+ int pcie_lanes;
+ /* standardized non-clock flags */
+ u32 flags;
+};
+
+struct radeon_pm_clock_info {
+ /* memory clock */
+ u32 mclk;
+ /* engine clock */
+ u32 sclk;
+ /* voltage info */
+ struct radeon_voltage voltage;
+ /* standardized clock flags - not sure we'll need these */
+ u32 flags;
+};
+
+struct radeon_power_state {
+ enum radeon_pm_state_type type;
+ /* XXX: use a define for num clock modes */
+ struct radeon_pm_clock_info clock_info[8];
+ /* number of valid clock modes in this power state */
+ int num_clock_modes;
+ struct radeon_pm_clock_info *default_clock_mode;
+ /* non clock info about this state */
+ struct radeon_pm_non_clock_info non_clock_info;
+ bool voltage_drop_active;
+};
+
+/*
+ * Some modes are overclocked by a very low value; accept them
+ */
+#define RADEON_MODE_OVERCLOCK_MARGIN 500 /* 5 MHz */
+
struct radeon_pm {
+ struct mutex mutex;
+ struct delayed_work idle_work;
+ enum radeon_pm_state state;
+ enum radeon_pm_action planned_action;
+ unsigned long action_timeout;
+ bool downclocked;
+ int active_crtcs;
+ int req_vblank;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -582,6 +698,15 @@ struct radeon_pm {
fixed20_12 core_bandwidth;
fixed20_12 sclk;
fixed20_12 needed_bandwidth;
+ /* XXX: use a define for num power modes */
+ struct radeon_power_state power_state[8];
+ /* number of valid power states */
+ int num_power_states;
+ struct radeon_power_state *current_power_state;
+ struct radeon_pm_clock_info *current_clock_mode;
+ struct radeon_power_state *requested_power_state;
+ struct radeon_pm_clock_info *requested_clock_mode;
+ struct radeon_power_state *default_power_state;
};
@@ -651,6 +776,7 @@ struct radeon_asic {
void (*set_engine_clock)(struct radeon_device *rdev, uint32_t eng_clock);
uint32_t (*get_memory_clock)(struct radeon_device *rdev);
void (*set_memory_clock)(struct radeon_device *rdev, uint32_t mem_clock);
+ int (*get_pcie_lanes)(struct radeon_device *rdev);
void (*set_pcie_lanes)(struct radeon_device *rdev, int lanes);
void (*set_clock_gating)(struct radeon_device *rdev, int enable);
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
@@ -701,6 +827,9 @@ struct r600_asic {
unsigned sx_max_export_pos_size;
unsigned sx_max_export_smx_size;
unsigned sq_num_cf_insts;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
};
struct rv770_asic {
@@ -721,6 +850,9 @@ struct rv770_asic {
unsigned sc_prim_fifo_size;
unsigned sc_hiz_tile_fifo_size;
unsigned sc_earlyz_tile_fifo_fize;
+ unsigned tiling_nbanks;
+ unsigned tiling_npipes;
+ unsigned tiling_group_size;
};
union radeon_asic_config {
@@ -830,6 +962,8 @@ struct radeon_device {
struct r600_ih ih; /* r6/700 interrupt ring */
struct workqueue_struct *wq;
struct work_struct hotplug_work;
+ int num_crtc; /* number of crtcs */
+ struct mutex dc_hw_i2c_mutex; /* display controller hw i2c mutex */
/* audio stuff */
struct timer_list audio_timer;
@@ -838,6 +972,8 @@ struct radeon_device {
int audio_bits_per_sample;
uint8_t audio_status_bits;
uint8_t audio_category_code;
+
+ bool powered_down;
};
int radeon_device_init(struct radeon_device *rdev,
@@ -895,6 +1031,8 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
#define WREG32_MC(reg, v) rdev->mc_wreg(rdev, (reg), (v))
#define RREG32_PCIE(reg) rv370_pcie_rreg(rdev, (reg))
#define WREG32_PCIE(reg, v) rv370_pcie_wreg(rdev, (reg), (v))
+#define RREG32_PCIE_P(reg) rdev->pciep_rreg(rdev, (reg))
+#define WREG32_PCIE_P(reg, v) rdev->pciep_wreg(rdev, (reg), (v))
#define WREG32_P(reg, val, mask) \
do { \
uint32_t tmp_ = RREG32(reg); \
@@ -956,7 +1094,7 @@ void r100_pll_errata_after_index(struct radeon_device *rdev);
#define ASIC_IS_AVIVO(rdev) ((rdev->family >= CHIP_RS600))
#define ASIC_IS_DCE3(rdev) ((rdev->family >= CHIP_RV620))
#define ASIC_IS_DCE32(rdev) ((rdev->family >= CHIP_RV730))
-
+#define ASIC_IS_DCE4(rdev) ((rdev->family >= CHIP_CEDAR))
/*
* BIOS helpers.
@@ -1015,6 +1153,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
#define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
#define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
+#define radeon_get_pcie_lanes(rdev) (rdev)->asic->get_pcie_lanes((rdev))
#define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
#define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
#define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
@@ -1029,6 +1168,7 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
/* AGP */
extern void radeon_agp_disable(struct radeon_device *rdev);
extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
+extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
@@ -1042,6 +1182,10 @@ extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enabl
extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
extern bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo);
+extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
+extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
+extern int radeon_resume_kms(struct drm_device *dev);
+extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
struct r100_mc_save {
@@ -1096,7 +1240,7 @@ extern void r200_set_safe_registers(struct radeon_device *rdev);
/* r300,r350,rv350,rv370,rv380 */
extern void r300_set_reg_safe(struct radeon_device *rdev);
extern void r300_mc_program(struct radeon_device *rdev);
-extern void r300_vram_info(struct radeon_device *rdev);
+extern void r300_mc_init(struct radeon_device *rdev);
extern void r300_clock_startup(struct radeon_device *rdev);
extern int r300_mc_wait_for_idle(struct radeon_device *rdev);
extern int rv370_pcie_gart_init(struct radeon_device *rdev);
@@ -1105,7 +1249,6 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
/* r420,r423,rv410 */
-extern int r420_mc_init(struct radeon_device *rdev);
extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
@@ -1147,13 +1290,13 @@ extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
struct drm_display_mode *mode2);
/* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */
+extern void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern bool r600_card_posted(struct radeon_device *rdev);
extern void r600_cp_stop(struct radeon_device *rdev);
extern void r600_ring_init(struct radeon_device *rdev, unsigned ring_size);
extern int r600_cp_resume(struct radeon_device *rdev);
extern void r600_cp_fini(struct radeon_device *rdev);
extern int r600_count_pipe_bits(uint32_t val);
-extern int r600_gart_clear_page(struct radeon_device *rdev, int i);
extern int r600_mc_wait_for_idle(struct radeon_device *rdev);
extern int r600_pcie_gart_init(struct radeon_device *rdev);
extern void r600_pcie_gart_tlb_flush(struct radeon_device *rdev);
@@ -1189,6 +1332,14 @@ extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
uint8_t status_bits,
uint8_t category_code);
+/* evergreen */
+struct evergreen_mc_save {
+ u32 vga_control[6];
+ u32 vga_render_control;
+ u32 vga_hdp_control;
+ u32 crtc_control[6];
+};
+
#include "radeon_object.h"
#endif
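
The get_pcie_lanes hook added above is optional: several of the ASIC tables in this patch leave it NULL, and the callers introduced later in the same series (the default power-state setup in radeon_atombios.c and radeon_combios.c) test the pointer before going through the radeon_get_pcie_lanes() macro. A minimal sketch of that guard, using a deliberately simplified stand-in for the callback table (the real struct radeon_asic carries far more hooks):

    /* Sketch only: simplified stand-in for the per-ASIC callback table. */
    struct asic_hooks {
            int (*get_pcie_lanes)(void *rdev);   /* NULL on ASICs without the hook */
    };

    static int current_pcie_lanes(const struct asic_hooks *asic, void *rdev)
    {
            /* Same fallback the patch uses for the default power state:
             * ask the hardware when a hook exists, otherwise assume a x16 link.
             */
            if (asic->get_pcie_lanes)
                    return asic->get_pcie_lanes(rdev);
            return 16;
    }
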
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index c0681a5556d..c4457791dff 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -237,6 +237,10 @@ int radeon_agp_init(struct radeon_device *rdev)
rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
+ rdev->mc.gtt_start = rdev->mc.agp_base;
+ rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
+ dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ rdev->mc.gtt_size >> 20, rdev->mc.gtt_start, rdev->mc.gtt_end);
/* workaround some hw issues */
if (rdev->family < CHIP_R200) {
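
The two added assignments derive the GTT window straight from the AGP aperture: aper_size is reported in megabytes (hence the << 20 above), and gtt_end is stored as an inclusive last-byte address, which is why 1 is subtracted. A small standalone example, assuming a hypothetical 256 MB aperture at 0xd0000000, reproduces the dev_info layout:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t agp_base  = 0xd0000000ULL;     /* hypothetical aperture base */
            uint64_t gtt_size  = 256ULL << 20;      /* 256 MB aperture, in bytes */
            uint64_t gtt_start = agp_base;
            uint64_t gtt_end   = gtt_start + gtt_size - 1;  /* inclusive end */

            /* prints: GTT: 256M 0xD0000000 - 0xDFFFFFFF */
            printf("GTT: %lluM 0x%08llX - 0x%08llX\n",
                   (unsigned long long)(gtt_size >> 20),
                   (unsigned long long)gtt_start,
                   (unsigned long long)gtt_end);
            return 0;
    }
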
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 05ee1aeac3f..d3a157b2bcb 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -43,7 +43,7 @@ void radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock
void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
/*
- * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
+ * r100,rv100,rs100,rv200,rs200
*/
extern int r100_init(struct radeon_device *rdev);
extern void r100_fini(struct radeon_device *rdev);
@@ -108,6 +108,52 @@ static struct radeon_asic r100_asic = {
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+/*
+ * r200,rv250,rs300,rv280
+ */
+extern int r200_copy_dma(struct radeon_device *rdev,
+ uint64_t src_offset,
+ uint64_t dst_offset,
+ unsigned num_pages,
+ struct radeon_fence *fence);
+static struct radeon_asic r200_asic = {
+ .init = &r100_init,
+ .fini = &r100_fini,
+ .suspend = &r100_suspend,
+ .resume = &r100_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r100_gpu_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r100_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -138,11 +184,8 @@ extern int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t
extern uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
-extern int r300_copy_dma(struct radeon_device *rdev,
- uint64_t src_offset,
- uint64_t dst_offset,
- unsigned num_pages,
- struct radeon_fence *fence);
+extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
+
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -162,7 +205,46 @@ static struct radeon_asic r300_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+
+static struct radeon_asic r300_asic_pcie = {
+ .init = &r300_init,
+ .fini = &r300_fini,
+ .suspend = &r300_suspend,
+ .resume = &r300_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r300_gpu_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
@@ -206,12 +288,13 @@ static struct radeon_asic r420_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -255,12 +338,13 @@ static struct radeon_asic rs400_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_legacy_get_engine_clock,
.set_engine_clock = &radeon_legacy_set_engine_clock,
.get_memory_clock = &radeon_legacy_get_memory_clock,
.set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_legacy_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -314,14 +398,17 @@ static struct radeon_asic rs600_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
.bandwidth_update = &rs600_bandwidth_update,
.hpd_init = &rs600_hpd_init,
.hpd_fini = &rs600_hpd_fini,
@@ -360,12 +447,13 @@ static struct radeon_asic rs690_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
- .copy = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r200_copy_dma,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -412,12 +500,13 @@ static struct radeon_asic rv515_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -455,12 +544,13 @@ static struct radeon_asic r520_asic = {
.fence_ring_emit = &r300_fence_ring_emit,
.cs_parse = &r300_cs_parse,
.copy_blit = &r100_copy_blit,
- .copy_dma = &r300_copy_dma,
+ .copy_dma = &r200_copy_dma,
.copy = &r100_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = &rv370_set_pcie_lanes,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r100_set_surface_reg,
@@ -538,8 +628,9 @@ static struct radeon_asic r600_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_clock_gating = NULL,
.set_surface_reg = r600_set_surface_reg,
.clear_surface_reg = r600_clear_surface_reg,
.bandwidth_update = &rv515_bandwidth_update,
@@ -583,6 +674,7 @@ static struct radeon_asic rv770_asic = {
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,
.set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
.set_pcie_lanes = NULL,
.set_clock_gating = &radeon_atom_set_clock_gating,
.set_surface_reg = r600_set_surface_reg,
@@ -595,4 +687,54 @@ static struct radeon_asic rv770_asic = {
.ioctl_wait_idle = r600_ioctl_wait_idle,
};
+/*
+ * evergreen
+ */
+int evergreen_init(struct radeon_device *rdev);
+void evergreen_fini(struct radeon_device *rdev);
+int evergreen_suspend(struct radeon_device *rdev);
+int evergreen_resume(struct radeon_device *rdev);
+int evergreen_gpu_reset(struct radeon_device *rdev);
+void evergreen_bandwidth_update(struct radeon_device *rdev);
+void evergreen_hpd_init(struct radeon_device *rdev);
+void evergreen_hpd_fini(struct radeon_device *rdev);
+bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void evergreen_hpd_set_polarity(struct radeon_device *rdev,
+ enum radeon_hpd_id hpd);
+
+static struct radeon_asic evergreen_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = NULL,
+ .gpu_reset = &evergreen_gpu_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = NULL,
+ .ring_ib_execute = NULL,
+ .irq_set = NULL,
+ .irq_process = NULL,
+ .get_vblank_counter = NULL,
+ .fence_ring_emit = NULL,
+ .cs_parse = NULL,
+ .copy_blit = NULL,
+ .copy_dma = NULL,
+ .copy = NULL,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+};
+
#endif
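
evergreen_asic above intentionally leaves most acceleration hooks (cp_commit, ring_test, copy_blit, copy_dma, copy, ...) as NULL stubs, so anything dispatching through the table has to cope with missing entries. A hedged sketch of one way such a dispatch can degrade gracefully; the helper and its fallback order are illustrative, not the driver's actual logic:

    #include <stdint.h>

    /* Sketch: prefer the DMA copy hook, fall back to blit, else report failure. */
    typedef int (*copy_fn)(void *rdev, uint64_t src, uint64_t dst,
                           unsigned num_pages, void *fence);

    struct copy_hooks {
            copy_fn copy_dma;    /* NULL on evergreen for now */
            copy_fn copy_blit;   /* NULL on evergreen for now */
    };

    static int do_copy(const struct copy_hooks *h, void *rdev,
                       uint64_t src, uint64_t dst, unsigned pages, void *fence)
    {
            if (h->copy_dma)
                    return h->copy_dma(rdev, src, dst, pages, fence);
            if (h->copy_blit)
                    return h->copy_blit(rdev, src, dst, pages, fence);
            return -1;   /* no copy acceleration available */
    }
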
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 4d8831548a5..93783b15c81 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -159,8 +159,15 @@ static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device
struct radeon_gpio_rec *gpio)
{
struct radeon_hpd hpd;
+ u32 reg;
+
+ if (ASIC_IS_DCE4(rdev))
+ reg = EVERGREEN_DC_GPIO_HPD_A;
+ else
+ reg = AVIVO_DC_GPIO_HPD_A;
+
hpd.gpio = *gpio;
- if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+ if (gpio->reg == reg) {
switch(gpio->mask) {
case (1 << 0):
hpd.hpd = RADEON_HPD_1;
@@ -574,6 +581,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
ddc_bus.valid = false;
}
+ /* needed for aux chan transactions */
+ ddc_bus.hpd_id = hpd.hpd ? (hpd.hpd - 1) : 0;
+
conn_id = le16_to_cpu(path->usConnObjectId);
if (!radeon_atom_apply_quirks
@@ -838,6 +848,7 @@ union firmware_info {
ATOM_FIRMWARE_INFO_V1_2 info_12;
ATOM_FIRMWARE_INFO_V1_3 info_13;
ATOM_FIRMWARE_INFO_V1_4 info_14;
+ ATOM_FIRMWARE_INFO_V2_1 info_21;
};
bool radeon_atom_get_clock_info(struct drm_device *dev)
@@ -849,6 +860,7 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
uint8_t frev, crev;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *dcpll = &rdev->clock.dcpll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
uint16_t data_offset;
@@ -951,8 +963,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
rdev->clock.default_mclk =
le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);
+ if (ASIC_IS_DCE4(rdev)) {
+ rdev->clock.default_dispclk =
+ le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
+ if (rdev->clock.default_dispclk == 0)
+ rdev->clock.default_dispclk = 60000; /* 600 MHz */
+ rdev->clock.dp_extclk =
+ le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
+ }
+ *dcpll = *p1pll;
+
return true;
}
+
return false;
}
@@ -1091,6 +1114,30 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
return ss;
}
+static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
+ struct radeon_encoder_atom_dig *lvds)
+{
+
+ /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1179) &&
+ (dev->pdev->subsystem_device == 0xff50)) {
+ if ((lvds->native_mode.hdisplay == 1280) &&
+ (lvds->native_mode.vdisplay == 800))
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ }
+
+ /* Dell Studio 15 laptop panel doesn't like new pll divider algo */
+ if ((dev->pdev->device == 0x95c4) &&
+ (dev->pdev->subsystem_vendor == 0x1028) &&
+ (dev->pdev->subsystem_device == 0x029f)) {
+ if ((lvds->native_mode.hdisplay == 1280) &&
+ (lvds->native_mode.vdisplay == 800))
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ }
+
+}
+
union lvds_info {
struct _ATOM_LVDS_INFO info;
struct _ATOM_LVDS_INFO_V12 info_12;
@@ -1161,6 +1208,21 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->ss = radeon_atombios_get_ss_info(encoder, lvds_info->info.ucSS_Id);
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (radeon_new_pll == 0)
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ else
+ lvds->pll_algo = PLL_ALGO_NEW;
+ } else {
+ if (radeon_new_pll == 1)
+ lvds->pll_algo = PLL_ALGO_NEW;
+ else
+ lvds->pll_algo = PLL_ALGO_LEGACY;
+ }
+
+ /* LVDS quirks */
+ radeon_atom_apply_lvds_quirks(dev, lvds);
+
encoder->native_mode = lvds->native_mode;
}
return lvds;
@@ -1385,20 +1447,375 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
return tv_dac;
}
-void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE info_4;
+};
+
+void radeon_atombios_get_power_modes(struct radeon_device *rdev)
{
- DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
+ struct radeon_mode_info *mode_info = &rdev->mode_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ u32 misc, misc2 = 0, sclk, mclk;
+ union power_info *power_info;
+ struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
+ struct _ATOM_PPLIB_STATE *power_state;
+ int num_modes = 0, i, j;
+ int state_index = 0, mode_index = 0;
- args.ucEnable = enable;
+ atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
- atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ rdev->pm.default_power_state = NULL;
+
+ if (power_info) {
+ if (frev < 4) {
+ num_modes = power_info->info.ucNumOfPowerModeEntries;
+ if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
+ num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
+ for (i = 0; i < num_modes; i++) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ switch (frev) {
+ case 1:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ continue;
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
+ }
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ }
+ state_index++;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ continue;
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
+ }
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ }
+ state_index++;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
+ rdev->pm.power_state[state_index].clock_info[0].sclk =
+ le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ continue;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ continue;
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
+ misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
+ misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_GPIO;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
+ radeon_lookup_gpio(rdev,
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
+ if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ } else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type =
+ VOLTAGE_VDDC;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
+ if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
+ true;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
+ power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
+ }
+ }
+ /* order matters! */
+ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_POWERSAVE;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE)
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ }
+ state_index++;
+ break;
+ }
+ }
+ } else if (frev == 4) {
+ for (i = 0; i < power_info->info_4.ucNumStates; i++) {
+ mode_index = 0;
+ power_state = (struct _ATOM_PPLIB_STATE *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usStateArrayOffset) +
+ i * power_info->info_4.ucStateEntrySize);
+ non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usNonClockInfoArrayOffset) +
+ (power_state->ucNonClockStateIndex *
+ power_info->info_4.ucNonClockSize));
+ for (j = 0; j < (power_info->info_4.ucStateEntrySize - 1); j++) {
+ if (rdev->flags & RADEON_IS_IGP) {
+ struct _ATOM_PPLIB_RS780_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_RS780_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usLowEngineClockLow);
+ sclk |= clock_info->ucLowEngineClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
+ continue;
+ /* skip overclock modes for now */
+ if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN)
+ continue;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->usVDDC;
+ mode_index++;
+ } else {
+ struct _ATOM_PPLIB_R600_CLOCK_INFO *clock_info =
+ (struct _ATOM_PPLIB_R600_CLOCK_INFO *)
+ (mode_info->atom_context->bios +
+ data_offset +
+ le16_to_cpu(power_info->info_4.usClockInfoArrayOffset) +
+ (power_state->ucClockStateIndices[j] *
+ power_info->info_4.ucClockInfoSize));
+ sclk = le16_to_cpu(clock_info->usEngineClockLow);
+ sclk |= clock_info->ucEngineClockHigh << 16;
+ mclk = le16_to_cpu(clock_info->usMemoryClockLow);
+ mclk |= clock_info->ucMemoryClockHigh << 16;
+ rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
+ rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
+ /* skip invalid modes */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
+ continue;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[mode_index].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ continue;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
+ VOLTAGE_SW;
+ rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
+ clock_info->usVDDC;
+ mode_index++;
+ }
+ }
+ rdev->pm.power_state[state_index].num_clock_modes = mode_index;
+ if (mode_index) {
+ misc = le32_to_cpu(non_clock_info->ulCapsAndSettings);
+ misc2 = le16_to_cpu(non_clock_info->usClassification);
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
+ ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
+ switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BALANCED;
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_PERFORMANCE;
+ break;
+ }
+ if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) {
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
+ }
+ state_index++;
+ }
+ }
+ }
+ } else {
+ /* XXX figure out some good default low power mode for cards w/out power tables */
+ }
+
+ if (rdev->pm.default_power_state == NULL) {
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode =
+ &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if (rdev->asic->get_pcie_lanes)
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
+ else
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ state_index++;
+ }
+ rdev->pm.num_power_states = state_index;
+
+ rdev->pm.current_power_state = rdev->pm.default_power_state;
+ rdev->pm.current_clock_mode =
+ rdev->pm.default_power_state->default_clock_mode;
}
-void radeon_atom_static_pwrmgt_setup(struct radeon_device *rdev, int enable)
+void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable)
{
- ENABLE_ASIC_STATIC_PWR_MGT_PS_ALLOCATION args;
- int index = GetIndexIntoMasterTable(COMMAND, EnableASIC_StaticPwrMgt);
+ DYNAMIC_CLOCK_GATING_PS_ALLOCATION args;
+ int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating);
args.ucEnable = enable;
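
In the frev 4 (PPLIB) branch of radeon_atombios_get_power_modes() above, each clock is stored as a 16-bit low word plus an 8-bit high byte, in units of 10 kHz. A standalone example of that reconstruction, using made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    /* Rebuild a 24-bit clock value from the split PPLIB encoding (10 kHz units). */
    static uint32_t pplib_clock(uint16_t low, uint8_t high)
    {
            return (uint32_t)low | ((uint32_t)high << 16);
    }

    int main(void)
    {
            /* e.g. low = 0x3a98, high = 0x01 -> 80536 x 10 kHz = 805.36 MHz */
            uint32_t sclk = pplib_clock(0x3a98, 0x01);

            printf("sclk = %u (x10 kHz) = %u.%02u MHz\n",
                   sclk, sclk / 100, sclk % 100);
            return 0;
    }
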
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
new file mode 100644
index 00000000000..3f557c4151e
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ * Licensed under GPLv2
+ *
+ * ATPX support for both Intel/ATI
+ */
+#include <linux/vga_switcheroo.h>
+#include <acpi/acpi.h>
+#include <acpi/acpi_bus.h>
+#include <linux/pci.h>
+
+#define ATPX_VERSION 0
+#define ATPX_GPU_PWR 2
+#define ATPX_MUX_SELECT 3
+
+#define ATPX_INTEGRATED 0
+#define ATPX_DISCRETE 1
+
+#define ATPX_MUX_IGD 0
+#define ATPX_MUX_DISCRETE 1
+
+static struct radeon_atpx_priv {
+ bool atpx_detected;
+ /* handle for device - and atpx */
+ acpi_handle dhandle;
+ acpi_handle atpx_handle;
+ acpi_handle atrm_handle;
+} radeon_atpx_priv;
+
+/* retrieve the ROM in 4k blocks */
+static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
+ int offset, int len)
+{
+ acpi_status status;
+ union acpi_object atrm_arg_elements[2], *obj;
+ struct acpi_object_list atrm_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL};
+
+ atrm_arg.count = 2;
+ atrm_arg.pointer = &atrm_arg_elements[0];
+
+ atrm_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[0].integer.value = offset;
+
+ atrm_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atrm_arg_elements[1].integer.value = len;
+
+ status = acpi_evaluate_object(atrm_handle, NULL, &atrm_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk("failed to evaluate ATRM got %s\n", acpi_format_exception(status));
+ return -ENODEV;
+ }
+
+ obj = (union acpi_object *)buffer.pointer;
+ memcpy(bios+offset, obj->buffer.pointer, len);
+ kfree(buffer.pointer);
+ return len;
+}
+
+bool radeon_atrm_supported(struct pci_dev *pdev)
+{
+ /* get the discrete ROM only via ATRM */
+ if (!radeon_atpx_priv.atpx_detected)
+ return false;
+
+ if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return false;
+ return true;
+}
+
+
+int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
+{
+ return radeon_atrm_call(radeon_atpx_priv.atrm_handle, bios, offset, len);
+}
+
+static int radeon_atpx_get_version(acpi_handle handle)
+{
+ acpi_status status;
+ union acpi_object atpx_arg_elements[2], *obj;
+ struct acpi_object_list atpx_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+
+ atpx_arg.count = 2;
+ atpx_arg.pointer = &atpx_arg_elements[0];
+
+ atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[0].integer.value = ATPX_VERSION;
+
+ atpx_arg_elements[1].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[1].integer.value = ATPX_VERSION;
+
+ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
+ return -ENOSYS;
+ }
+ obj = (union acpi_object *)buffer.pointer;
+ if (obj && (obj->type == ACPI_TYPE_BUFFER))
+ printk(KERN_INFO "radeon atpx: version is %d\n", *((u8 *)(obj->buffer.pointer) + 2));
+ kfree(buffer.pointer);
+ return 0;
+}
+
+static int radeon_atpx_execute(acpi_handle handle, int cmd_id, u16 value)
+{
+ acpi_status status;
+ union acpi_object atpx_arg_elements[2];
+ struct acpi_object_list atpx_arg;
+ struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+ uint8_t buf[4] = {0};
+
+ if (!handle)
+ return -EINVAL;
+
+ atpx_arg.count = 2;
+ atpx_arg.pointer = &atpx_arg_elements[0];
+
+ atpx_arg_elements[0].type = ACPI_TYPE_INTEGER;
+ atpx_arg_elements[0].integer.value = cmd_id;
+
+ buf[2] = value & 0xff;
+ buf[3] = (value >> 8) & 0xff;
+
+ atpx_arg_elements[1].type = ACPI_TYPE_BUFFER;
+ atpx_arg_elements[1].buffer.length = 4;
+ atpx_arg_elements[1].buffer.pointer = buf;
+
+ status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
+ if (ACPI_FAILURE(status)) {
+ printk("%s: failed to call ATPX: %s\n", __func__, acpi_format_exception(status));
+ return -ENOSYS;
+ }
+ kfree(buffer.pointer);
+
+ return 0;
+}
+
+static int radeon_atpx_set_discrete_state(acpi_handle handle, int state)
+{
+ return radeon_atpx_execute(handle, ATPX_GPU_PWR, state);
+}
+
+static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
+{
+ return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
+}
+
+
+static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
+{
+ if (id == VGA_SWITCHEROO_IGD)
+ radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0);
+ else
+ radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1);
+ return 0;
+}
+
+static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
+ enum vga_switcheroo_state state)
+{
+ /* on w500 ACPI can't change intel gpu state */
+ if (id == VGA_SWITCHEROO_IGD)
+ return 0;
+
+ radeon_atpx_set_discrete_state(radeon_atpx_priv.atpx_handle, state);
+ return 0;
+}
+
+static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
+{
+ acpi_handle dhandle, atpx_handle, atrm_handle;
+ acpi_status status;
+
+ dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+ if (!dhandle)
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ status = acpi_get_handle(dhandle, "ATRM", &atrm_handle);
+ if (ACPI_FAILURE(status))
+ return false;
+
+ radeon_atpx_priv.dhandle = dhandle;
+ radeon_atpx_priv.atpx_handle = atpx_handle;
+ radeon_atpx_priv.atrm_handle = atrm_handle;
+ return true;
+}
+
+static int radeon_atpx_init(void)
+{
+ /* set up the ATPX handle */
+
+ radeon_atpx_get_version(radeon_atpx_priv.atpx_handle);
+ return 0;
+}
+
+static int radeon_atpx_get_client_id(struct pci_dev *pdev)
+{
+ if (radeon_atpx_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+ return VGA_SWITCHEROO_IGD;
+ else
+ return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler radeon_atpx_handler = {
+ .switchto = radeon_atpx_switchto,
+ .power_state = radeon_atpx_power_state,
+ .init = radeon_atpx_init,
+ .get_client_id = radeon_atpx_get_client_id,
+};
+
+static bool radeon_atpx_detect(void)
+{
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+ struct pci_dev *pdev = NULL;
+ bool has_atpx = false;
+ int vga_count = 0;
+
+ while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+ vga_count++;
+
+ has_atpx |= (radeon_atpx_pci_probe_handle(pdev) == true);
+ }
+
+ if (has_atpx && vga_count == 2) {
+ acpi_get_name(radeon_atpx_priv.atpx_handle, ACPI_FULL_PATHNAME, &buffer);
+ printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
+ acpi_method_name);
+ radeon_atpx_priv.atpx_detected = true;
+ return true;
+ }
+ return false;
+}
+
+void radeon_register_atpx_handler(void)
+{
+ bool r;
+
+ /* detect if we have any ATPX + 2 VGA in the system */
+ r = radeon_atpx_detect();
+ if (!r)
+ return;
+
+ vga_switcheroo_register_handler(&radeon_atpx_handler);
+}
+
+void radeon_unregister_atpx_handler(void)
+{
+ vga_switcheroo_unregister_handler();
+}
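
radeon_atpx_execute() above hands ATPX its function number as an ACPI integer and the 16-bit payload inside a 4-byte ACPI buffer, with the value placed little-endian in bytes 2 and 3. A tiny user-space sketch of just that packing step (no ACPI involved) makes the byte layout explicit:

    #include <stdint.h>
    #include <stdio.h>

    /* Pack a 16-bit ATPX argument the way radeon_atpx_execute() does:
     * bytes 0-1 stay zero, bytes 2-3 carry the value little-endian.
     */
    static void atpx_pack(uint8_t buf[4], uint16_t value)
    {
            buf[0] = 0;
            buf[1] = 0;
            buf[2] = value & 0xff;
            buf[3] = (value >> 8) & 0xff;
    }

    int main(void)
    {
            uint8_t buf[4];

            atpx_pack(buf, 0x0102);
            printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
            /* prints: 00 00 02 01 */
            return 0;
    }
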
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 906921740c6..55724046052 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -30,6 +30,7 @@
#include "radeon.h"
#include "atom.h"
+#include <linux/vga_switcheroo.h>
/*
* BIOS.
*/
@@ -62,7 +63,7 @@ static bool igp_read_bios_from_vram(struct radeon_device *rdev)
iounmap(bios);
return false;
}
- memcpy(rdev->bios, bios, size);
+ memcpy_fromio(rdev->bios, bios, size);
iounmap(bios);
return true;
}
@@ -93,6 +94,38 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return true;
}
+/* ATRM is used to get the BIOS on the discrete cards in
+ * dual-gpu systems.
+ */
+static bool radeon_atrm_get_bios(struct radeon_device *rdev)
+{
+ int ret;
+ int size = 64 * 1024;
+ int i;
+
+ if (!radeon_atrm_supported(rdev->pdev))
+ return false;
+
+ rdev->bios = kmalloc(size, GFP_KERNEL);
+ if (!rdev->bios) {
+ DRM_ERROR("Unable to allocate bios\n");
+ return false;
+ }
+
+ for (i = 0; i < size / ATRM_BIOS_PAGE; i++) {
+ ret = radeon_atrm_get_bios_chunk(rdev->bios,
+ (i * ATRM_BIOS_PAGE),
+ ATRM_BIOS_PAGE);
+ if (ret <= 0)
+ break;
+ }
+
+ if (i == 0 || rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+ kfree(rdev->bios);
+ return false;
+ }
+ return true;
+}
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -388,16 +421,16 @@ static bool radeon_read_disabled_bios(struct radeon_device *rdev)
return legacy_read_disabled_bios(rdev);
}
+
bool radeon_get_bios(struct radeon_device *rdev)
{
bool r;
uint16_t tmp;
- if (rdev->flags & RADEON_IS_IGP) {
+ r = radeon_atrm_get_bios(rdev);
+ if (r == false)
r = igp_read_bios_from_vram(rdev);
- if (r == false)
- r = radeon_read_bios(rdev);
- } else
+ if (r == false)
r = radeon_read_bios(rdev);
if (r == false) {
r = radeon_read_disabled_bios(rdev);
@@ -408,6 +441,13 @@ bool radeon_get_bios(struct radeon_device *rdev)
return false;
}
if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa) {
+ printk("BIOS signature incorrect %x %x\n", rdev->bios[0], rdev->bios[1]);
+ goto free_bios;
+ }
+
+ tmp = RBIOS16(0x18);
+ if (RBIOS8(tmp + 0x14) != 0x0) {
+ DRM_INFO("Not an x86 BIOS ROM, not using.\n");
goto free_bios;
}
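
radeon_atrm_get_bios() reads the ROM page by page (ATRM_BIOS_PAGE is assumed here to be 4 KB, matching the "4k blocks" comment in the new ATPX file), and the hunk above then rejects images that lack the 0x55 0xAA option-ROM signature or whose PCI data structure does not declare x86 code. A hedged sketch of those validity checks on a raw buffer:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Sketch: validate a PCI option-ROM image the way the patch does. */
    static bool rom_looks_valid(const uint8_t *bios, size_t size)
    {
            uint16_t pcir;

            if (size < 0x1a || bios[0] != 0x55 || bios[1] != 0xaa)
                    return false;                   /* bad option-ROM signature */

            pcir = bios[0x18] | (bios[0x19] << 8);  /* PCI data structure offset */
            if (pcir + 0x14 >= size)
                    return false;
            return bios[pcir + 0x14] == 0x0;        /* 0 == x86 code image */
    }
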
diff --git a/drivers/gpu/drm/radeon/radeon_clocks.c b/drivers/gpu/drm/radeon/radeon_clocks.c
index 73c4405bf42..f64936cc4dd 100644
--- a/drivers/gpu/drm/radeon/radeon_clocks.c
+++ b/drivers/gpu/drm/radeon/radeon_clocks.c
@@ -96,6 +96,7 @@ void radeon_get_clock_info(struct drm_device *dev)
struct radeon_device *rdev = dev->dev_private;
struct radeon_pll *p1pll = &rdev->clock.p1pll;
struct radeon_pll *p2pll = &rdev->clock.p2pll;
+ struct radeon_pll *dcpll = &rdev->clock.dcpll;
struct radeon_pll *spll = &rdev->clock.spll;
struct radeon_pll *mpll = &rdev->clock.mpll;
int ret;
@@ -204,6 +205,17 @@ void radeon_get_clock_info(struct drm_device *dev)
p2pll->max_frac_feedback_div = 0;
}
+ /* dcpll is DCE4 only */
+ dcpll->min_post_div = 2;
+ dcpll->max_post_div = 0x7f;
+ dcpll->min_frac_feedback_div = 0;
+ dcpll->max_frac_feedback_div = 9;
+ dcpll->min_ref_div = 2;
+ dcpll->max_ref_div = 0x3ff;
+ dcpll->min_feedback_div = 4;
+ dcpll->max_feedback_div = 0xfff;
+ dcpll->best_vco = 0;
+
p1pll->min_ref_div = 2;
p1pll->max_ref_div = 0x3ff;
p1pll->min_feedback_div = 4;
@@ -846,8 +858,10 @@ int radeon_static_clocks_init(struct drm_device *dev)
/* XXX make sure engine is idle */
if (radeon_dynclks != -1) {
- if (radeon_dynclks)
- radeon_set_clock_gating(rdev, 1);
+ if (radeon_dynclks) {
+ if (rdev->asic->set_clock_gating)
+ radeon_set_clock_gating(rdev, 1);
+ }
}
radeon_apply_clock_quirks(rdev);
return 0;
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 22d476160d5..e9ea38ece37 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -150,6 +150,9 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
int rev;
uint16_t offset = 0, check_offset;
+ if (!rdev->bios)
+ return 0;
+
switch (table) {
/* absolute offset tables */
case COMBIOS_ASIC_INIT_1_TABLE:
@@ -443,6 +446,39 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
}
+bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
+{
+ int edid_info;
+ struct edid *edid;
+ edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
+ if (!edid_info)
+ return false;
+
+ edid = kmalloc(EDID_LENGTH * (DRM_MAX_EDID_EXT_NUM + 1),
+ GFP_KERNEL);
+ if (edid == NULL)
+ return false;
+
+ memcpy((unsigned char *)edid,
+ (unsigned char *)(rdev->bios + edid_info), EDID_LENGTH);
+
+ if (!drm_edid_is_valid(edid)) {
+ kfree(edid);
+ return false;
+ }
+
+ rdev->mode_info.bios_hardcoded_edid = edid;
+ return true;
+}
+
+struct edid *
+radeon_combios_get_hardcoded_edid(struct radeon_device *rdev)
+{
+ if (rdev->mode_info.bios_hardcoded_edid)
+ return rdev->mode_info.bios_hardcoded_edid;
+ return NULL;
+}
+
static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rdev,
int ddc_line)
{
@@ -486,9 +522,65 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.y_data_reg = ddc_line;
}
- if (rdev->family < CHIP_R200)
- i2c.hw_capable = false;
- else {
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_RS300:
+ switch (ddc_line) {
+ case RADEON_GPIO_DVI_DDC:
+ /* in theory this should be hw capable,
+ * but it doesn't seem to work
+ */
+ i2c.hw_capable = false;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_R200:
+ switch (ddc_line) {
+ case RADEON_GPIO_DVI_DDC:
+ case RADEON_GPIO_MONID:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_RV250:
+ case CHIP_RV280:
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ case RADEON_GPIO_CRT2_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ switch (ddc_line) {
+ case RADEON_GPIO_VGA_DDC:
+ case RADEON_GPIO_DVI_DDC:
+ i2c.hw_capable = true;
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
+ }
+ break;
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
switch (ddc_line) {
case RADEON_GPIO_VGA_DDC:
case RADEON_GPIO_DVI_DDC:
@@ -504,9 +596,14 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
i2c.hw_capable = false;
break;
}
+ break;
+ default:
+ i2c.hw_capable = false;
+ break;
}
i2c.mm_i2c = false;
i2c.i2c_id = 0;
+ i2c.hpd_id = 0;
if (ddc_line)
i2c.valid = true;
@@ -527,9 +624,6 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
int8_t rev;
uint16_t sclk, mclk;
- if (rdev->bios == NULL)
- return false;
-
pll_info = combios_get_table_offset(dev, COMBIOS_PLL_INFO_TABLE);
if (pll_info) {
rev = RBIOS8(pll_info);
@@ -654,9 +748,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
if (!p_dac)
return NULL;
- if (rdev->bios == NULL)
- goto out;
-
/* check CRT table */
dac_info = combios_get_table_offset(dev, COMBIOS_CRT_INFO_TABLE);
if (dac_info) {
@@ -673,7 +764,6 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
found = 1;
}
-out:
if (!found) /* fallback to defaults */
radeon_legacy_get_primary_dac_info_from_table(rdev, p_dac);
@@ -687,9 +777,6 @@ radeon_combios_get_tv_info(struct radeon_device *rdev)
uint16_t tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
- if (rdev->bios == NULL)
- return tv_std;
-
tv_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (tv_info) {
if (RBIOS8(tv_info + 6) == 'T') {
@@ -793,9 +880,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
if (!tv_dac)
return NULL;
- if (rdev->bios == NULL)
- goto out;
-
/* first check TV table */
dac_info = combios_get_table_offset(dev, COMBIOS_TV_INFO_TABLE);
if (dac_info) {
@@ -857,7 +941,6 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
}
}
-out:
if (!found) /* fallback to defaults */
radeon_legacy_get_tv_dac_info_from_table(rdev, tv_dac);
@@ -945,11 +1028,6 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
int tmp, i;
struct radeon_encoder_lvds *lvds = NULL;
- if (rdev->bios == NULL) {
- lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
- goto out;
- }
-
lcd_info = combios_get_table_offset(dev, COMBIOS_LCD_INFO_TABLE);
if (lcd_info) {
@@ -1050,7 +1128,7 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
DRM_INFO("No panel info found in BIOS\n");
lvds = radeon_legacy_get_lvds_info_from_regs(rdev);
}
-out:
+
if (lvds)
encoder->native_mode = lvds->native_mode;
return lvds;
@@ -1102,9 +1180,6 @@ bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
int i, n;
uint8_t ver;
- if (rdev->bios == NULL)
- return false;
-
tmds_info = combios_get_table_offset(dev, COMBIOS_DFP_INFO_TABLE);
if (tmds_info) {
@@ -1184,9 +1259,6 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
enum radeon_combios_ddc gpio;
struct radeon_i2c_bus_rec i2c_bus;
- if (rdev->bios == NULL)
- return false;
-
tmds->i2c_bus = NULL;
if (rdev->flags & RADEON_IS_IGP) {
offset = combios_get_table_offset(dev, COMBIOS_I2C_INFO_TABLE);
@@ -1253,7 +1325,10 @@ bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder
tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
case DDC_LCD: /* MM i2c */
- DRM_ERROR("MM i2c requires hw i2c engine\n");
+ i2c_bus.valid = true;
+ i2c_bus.hw_capable = true;
+ i2c_bus.mm_i2c = true;
+ tmds->i2c_bus = radeon_i2c_create(dev, &i2c_bus, "DVO");
break;
default:
DRM_ERROR("Unsupported gpio %d\n", gpio);
@@ -1909,9 +1984,6 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
struct radeon_i2c_bus_rec ddc_i2c;
struct radeon_hpd hpd;
- if (rdev->bios == NULL)
- return false;
-
conn_info = combios_get_table_offset(dev, COMBIOS_CONNECTOR_INFO_TABLE);
if (conn_info) {
for (i = 0; i < 4; i++) {
@@ -2278,6 +2350,115 @@ bool radeon_get_legacy_connector_info_from_bios(struct drm_device *dev)
return true;
}
+void radeon_combios_get_power_modes(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ u16 offset, misc, misc2 = 0;
+ u8 rev, blocks, tmp;
+ int state_index = 0;
+
+ rdev->pm.default_power_state = NULL;
+
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ offset = combios_get_table_offset(dev, COMBIOS_POWERPLAY_INFO_TABLE);
+ if (offset) {
+ rev = RBIOS8(offset);
+ blocks = RBIOS8(offset + 0x2);
+ /* power mode 0 tends to be the only valid one */
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = RBIOS32(offset + 0x5 + 0x2);
+ rdev->pm.power_state[state_index].clock_info[0].sclk = RBIOS32(offset + 0x5 + 0x6);
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
+ goto default_mode;
+ /* skip overclock modes for now */
+ if ((rdev->pm.power_state[state_index].clock_info[0].mclk >
+ rdev->clock.default_mclk + RADEON_MODE_OVERCLOCK_MARGIN) ||
+ (rdev->pm.power_state[state_index].clock_info[0].sclk >
+ rdev->clock.default_sclk + RADEON_MODE_OVERCLOCK_MARGIN))
+ goto default_mode;
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_BATTERY;
+ misc = RBIOS16(offset + 0x5 + 0x0);
+ if (rev > 4)
+ misc2 = RBIOS16(offset + 0x5 + 0xe);
+ if (misc & 0x4) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_GPIO;
+ if (misc & 0x8)
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ true;
+ else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
+ false;
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = true;
+ if (rev < 6) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+ RBIOS16(offset + 0x5 + 0xb) * 4;
+ tmp = RBIOS8(offset + 0x5 + 0xd);
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+ } else {
+ u8 entries = RBIOS8(offset + 0x5 + 0xb);
+ u16 voltage_table_offset = RBIOS16(offset + 0x5 + 0xc);
+ if (entries && voltage_table_offset) {
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.reg =
+ RBIOS16(voltage_table_offset) * 4;
+ tmp = RBIOS8(voltage_table_offset + 0x2);
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.mask = (1 << tmp);
+ } else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.gpio.valid = false;
+ }
+ switch ((misc2 & 0x700) >> 8) {
+ case 0:
+ default:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 0;
+ break;
+ case 1:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 33;
+ break;
+ case 2:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 66;
+ break;
+ case 3:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 99;
+ break;
+ case 4:
+ rdev->pm.power_state[state_index].clock_info[0].voltage.delay = 132;
+ break;
+ }
+ } else
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if (rev > 6)
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes =
+ RBIOS8(offset + 0x5 + 0x10);
+ state_index++;
+ } else {
+ /* XXX figure out some good default low power mode for mobility cards w/out power tables */
+ }
+ } else {
+ /* XXX figure out some good default low power mode for desktop cards */
+ }
+
+default_mode:
+ /* add the default mode */
+ rdev->pm.power_state[state_index].type =
+ POWER_STATE_TYPE_DEFAULT;
+ rdev->pm.power_state[state_index].num_clock_modes = 1;
+ rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+ rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+ rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0];
+ rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+ if (rdev->asic->get_pcie_lanes)
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = radeon_get_pcie_lanes(rdev);
+ else
+ rdev->pm.power_state[state_index].non_clock_info.pcie_lanes = 16;
+ rdev->pm.default_power_state = &rdev->pm.power_state[state_index];
+ rdev->pm.num_power_states = state_index + 1;
+
+ rdev->pm.current_power_state = rdev->pm.default_power_state;
+ rdev->pm.current_clock_mode =
+ rdev->pm.default_power_state->default_clock_mode;
+}
+
void radeon_external_tmds_setup(struct drm_encoder *encoder)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
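
In radeon_combios_get_power_modes() above, the misc word from the COMBIOS power table selects GPIO-controlled voltage drop (bit 0x4) and its active level (bit 0x8), while bits 8-10 of misc2 index the delay table 0/33/66/99/132. A hedged sketch of that decoding with an illustrative result struct (the driver writes the fields straight into rdev->pm instead):

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: decode the COMBIOS power-table misc flags as the patch does.
     * The struct is illustrative, not a driver type.
     */
    struct voltage_cfg {
            bool gpio_controlled;   /* misc bit 0x4 */
            bool active_high;       /* misc bit 0x8 */
            int  delay;             /* from bits 8-10 of misc2 */
    };

    static struct voltage_cfg decode_combios_voltage(uint16_t misc, uint16_t misc2)
    {
            static const int delays[] = { 0, 33, 66, 99, 132 };
            struct voltage_cfg v = { 0 };
            unsigned idx = (misc2 & 0x700) >> 8;

            v.gpio_controlled = misc & 0x4;
            v.active_high = misc & 0x8;
            v.delay = (idx < 5) ? delays[idx] : 0;  /* out-of-range falls back to 0 */
            return v;
    }
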
@@ -2289,23 +2470,21 @@ void radeon_external_tmds_setup(struct drm_encoder *encoder)
switch (tmds->dvo_chip) {
case DVO_SIL164:
/* sil 164 */
- radeon_i2c_do_lock(tmds->i2c_bus, 1);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- 0x08, 0x30);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x08, 0x30);
+ radeon_i2c_put_byte(tmds->i2c_bus,
tmds->slave_addr,
0x09, 0x00);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- 0x0a, 0x90);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- 0x0c, 0x89);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0a, 0x90);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ 0x0c, 0x89);
+ radeon_i2c_put_byte(tmds->i2c_bus,
tmds->slave_addr,
0x08, 0x3b);
- radeon_i2c_do_lock(tmds->i2c_bus, 0);
break;
case DVO_SIL1178:
/* sil 1178 - untested */
@@ -2338,9 +2517,6 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
uint32_t reg, val, and_mask, or_mask;
struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
- if (rdev->bios == NULL)
- return false;
-
if (!tmds)
return false;
@@ -2390,11 +2566,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
index++;
val = RBIOS8(index);
index++;
- radeon_i2c_do_lock(tmds->i2c_bus, 1);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- slave_addr,
- reg, val);
- radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ slave_addr,
+ reg, val);
break;
default:
DRM_ERROR("Unknown id %d\n", id >> 13);
@@ -2447,11 +2621,9 @@ bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder)
reg = id & 0x1fff;
val = RBIOS8(index);
index += 1;
- radeon_i2c_do_lock(tmds->i2c_bus, 1);
- radeon_i2c_sw_put_byte(tmds->i2c_bus,
- tmds->slave_addr,
- reg, val);
- radeon_i2c_do_lock(tmds->i2c_bus, 0);
+ radeon_i2c_put_byte(tmds->i2c_bus,
+ tmds->slave_addr,
+ reg, val);
break;
default:
DRM_ERROR("Unknown id %d\n", id >> 13);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 65f81942f39..ee0083f982d 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -479,10 +479,8 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
ret = connector_status_connected;
else {
if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base,
&radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (radeon_connector->edid)
ret = connector_status_connected;
}
@@ -587,19 +585,14 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
if (!encoder)
ret = connector_status_disconnected;
- if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
- }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -744,19 +737,14 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
enum drm_connector_status ret = connector_status_disconnected;
bool dret = false;
- if (radeon_connector->ddc_bus) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+ if (radeon_connector->ddc_bus)
dret = radeon_ddc_probe(radeon_connector);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
- }
if (dret) {
if (radeon_connector->edid) {
kfree(radeon_connector->edid);
radeon_connector->edid = NULL;
}
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (!radeon_connector->edid) {
DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -952,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
- radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+ radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
@@ -988,12 +976,10 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
ret = connector_status_connected;
}
} else {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
if (radeon_ddc_probe(radeon_connector)) {
radeon_dig_connector->dp_sink_type = sink_type;
ret = connector_status_connected;
}
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
return ret;
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 06123ba31d3..dc6eba6b96d 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -1644,6 +1644,7 @@ static int radeon_do_resume_cp(struct drm_device *dev, struct drm_file *file_pri
radeon_cp_load_microcode(dev_priv);
radeon_cp_init_ring_buffer(dev, dev_priv, file_priv);
+ dev_priv->have_z_offset = 0;
radeon_do_engine_reset(dev);
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index e9d085021c1..70ba02ed772 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -194,11 +194,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
}
radeon_bo_list_unreserve(&parser->validated);
for (i = 0; i < parser->nrelocs; i++) {
- if (parser->relocs[i].gobj) {
- mutex_lock(&parser->rdev->ddev->struct_mutex);
- drm_gem_object_unreference(parser->relocs[i].gobj);
- mutex_unlock(&parser->rdev->ddev->struct_mutex);
- }
+ if (parser->relocs[i].gobj)
+ drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
}
kfree(parser->track);
kfree(parser->relocs);
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 28772a37009..b7023fff89e 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -36,7 +36,14 @@ static void radeon_lock_cursor(struct drm_crtc *crtc, bool lock)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
uint32_t cur_lock;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ cur_lock = RREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset);
+ if (lock)
+ cur_lock |= EVERGREEN_CURSOR_UPDATE_LOCK;
+ else
+ cur_lock &= ~EVERGREEN_CURSOR_UPDATE_LOCK;
+ WREG32(EVERGREEN_CUR_UPDATE + radeon_crtc->crtc_offset, cur_lock);
+ } else if (ASIC_IS_AVIVO(rdev)) {
cur_lock = RREG32(AVIVO_D1CUR_UPDATE + radeon_crtc->crtc_offset);
if (lock)
cur_lock |= AVIVO_D1CURSOR_UPDATE_LOCK;
@@ -58,7 +65,10 @@ static void radeon_hide_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ } else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
@@ -81,10 +91,14 @@ static void radeon_show_cursor(struct drm_crtc *crtc)
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(RADEON_MM_INDEX, EVERGREEN_CUR_CONTROL + radeon_crtc->crtc_offset);
+ WREG32(RADEON_MM_DATA, EVERGREEN_CURSOR_EN |
+ EVERGREEN_CURSOR_MODE(EVERGREEN_CURSOR_24_8_PRE_MULT));
+ } else if (ASIC_IS_AVIVO(rdev)) {
WREG32(RADEON_MM_INDEX, AVIVO_D1CUR_CONTROL + radeon_crtc->crtc_offset);
WREG32(RADEON_MM_DATA, AVIVO_D1CURSOR_EN |
- (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
+ (AVIVO_D1CURSOR_MODE_24BPP << AVIVO_D1CURSOR_MODE_SHIFT));
} else {
switch (radeon_crtc->crtc_id) {
case 0:
@@ -109,7 +123,10 @@ static void radeon_set_cursor(struct drm_crtc *crtc, struct drm_gem_object *obj,
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct radeon_device *rdev = crtc->dev->dev_private;
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS_HIGH + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_CUR_SURFACE_ADDRESS + radeon_crtc->crtc_offset, gpu_addr);
+ } else if (ASIC_IS_AVIVO(rdev)) {
if (rdev->family >= CHIP_RV770) {
if (radeon_crtc->crtc_id)
WREG32(R700_D2CUR_SURFACE_ADDRESS_HIGH, 0);
@@ -169,17 +186,13 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
unpin:
if (radeon_crtc->cursor_bo) {
radeon_gem_object_unpin(radeon_crtc->cursor_bo);
- mutex_lock(&crtc->dev->struct_mutex);
- drm_gem_object_unreference(radeon_crtc->cursor_bo);
- mutex_unlock(&crtc->dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(radeon_crtc->cursor_bo);
}
radeon_crtc->cursor_bo = obj;
return 0;
fail:
- mutex_lock(&crtc->dev->struct_mutex);
- drm_gem_object_unreference(obj);
- mutex_unlock(&crtc->dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(obj);
return 0;
}
@@ -201,7 +214,20 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
yorigin = CURSOR_HEIGHT - 1;
radeon_lock_cursor(crtc, true);
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ /* cursors are offset into the total surface */
+ x += crtc->x;
+ y += crtc->y;
+ DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y);
+
+ /* XXX: check if evergreen has the same issues as avivo chips */
+ WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset,
+ ((xorigin ? 0 : x) << 16) |
+ (yorigin ? 0 : y));
+ WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset,
+ ((radeon_crtc->cursor_width - 1) << 16) | (radeon_crtc->cursor_height - 1));
+ } else if (ASIC_IS_AVIVO(rdev)) {
int w = radeon_crtc->cursor_width;
int i = 0;
struct drm_crtc *crtc_p;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 768b1509fa0..e28e4ed5f72 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc_helper.h>
#include <drm/radeon_drm.h>
#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_asic.h"
@@ -100,80 +101,103 @@ void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
}
}
-/*
- * MC common functions
+/**
+ * radeon_vram_location - try to find VRAM location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ * @base: base address at which to put VRAM
+ *
+ * This function tries to place VRAM at the base address provided
+ * as parameter (which is so far either the PCI aperture address or,
+ * for IGP, the TOM base address).
+ *
+ * If there is not enough space to fit the invisible VRAM in the 32-bit
+ * address space, then we limit the VRAM size to the aperture.
+ *
+ * If we are using AGP and the AGP aperture doesn't allow us to have
+ * room for all the VRAM, then we restrict the VRAM to the PCI aperture
+ * size and print a warning.
+ *
+ * This function never fails; the worst case is limiting VRAM.
+ *
+ * Note: GTT start, end, size should be initialized before calling this
+ * function on AGP platform.
+ *
+ * Note: We don't explicitly enforce the VRAM start to be aligned on the
+ * VRAM size; this shouldn't be a problem as we are using the PCI aperture
+ * as a reference.  Otherwise this would be needed for rv280, all r3xx,
+ * and all r4xx, but not IGP.
+ *
+ * Note: we use mc_vram_size because on some boards we need to program
+ * the mc to cover the whole aperture even if the VRAM size is smaller
+ * than the aperture size (Novell bug 204882, along with lots of Ubuntu ones).
+ *
+ * Note: when limiting VRAM it's safe to overwrite real_vram_size because
+ * we are not in the case where real_vram_size is smaller than mc_vram_size
+ * (i.e. not affected by the bogus hw of Novell bug 204882, along with lots
+ * of Ubuntu ones).
+ *
+ * Note: the IGP TOM addr should be the same as the aperture addr; we don't
+ * explicitly check for that, though.
+ *
+ * FIXME: when reducing VRAM size, align the new size on a power of 2.
+ */
+void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
+{
+ mc->vram_start = base;
+ if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_end <= mc->gtt_end) {
+ dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
+ mc->real_vram_size = mc->aper_size;
+ mc->mc_vram_size = mc->aper_size;
+ }
+ mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
+ dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ mc->mc_vram_size >> 20, mc->vram_start,
+ mc->vram_end, mc->real_vram_size >> 20);
+}
+
+/**
+ * radeon_gtt_location - try to find GTT location
+ * @rdev: radeon device structure holding all necessary information
+ * @mc: memory controller structure holding memory information
+ *
+ * This function tries to place the GTT before or after VRAM.
+ *
+ * If the GTT size is bigger than the space left, then we adjust the GTT size.
+ * Thus this function never fails.
+ *
+ * FIXME: when reducing GTT size align new size on power of 2.
*/
-int radeon_mc_setup(struct radeon_device *rdev)
+void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
- uint32_t tmp;
+ u64 size_af, size_bf;
- /* Some chips have an "issue" with the memory controller, the
- * location must be aligned to the size. We just align it down,
- * too bad if we walk over the top of system memory, we don't
- * use DMA without a remapped anyway.
- * Affected chips are rv280, all r3xx, and all r4xx, but not IGP
- */
- /* FGLRX seems to setup like this, VRAM a 0, then GART.
- */
- /*
- * Note: from R6xx the address space is 40bits but here we only
- * use 32bits (still have to see a card which would exhaust 4G
- * address space).
- */
- if (rdev->mc.vram_location != 0xFFFFFFFFUL) {
- /* vram location was already setup try to put gtt after
- * if it fits */
- tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
- tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
- rdev->mc.gtt_location = tmp;
- } else {
- if (rdev->mc.gtt_size >= rdev->mc.vram_location) {
- printk(KERN_ERR "[drm] GTT too big to fit "
- "before or after vram location.\n");
- return -EINVAL;
- }
- rdev->mc.gtt_location = 0;
- }
- } else if (rdev->mc.gtt_location != 0xFFFFFFFFUL) {
- /* gtt location was already setup try to put vram before
- * if it fits */
- if (rdev->mc.mc_vram_size < rdev->mc.gtt_location) {
- rdev->mc.vram_location = 0;
- } else {
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size;
- tmp += (rdev->mc.mc_vram_size - 1);
- tmp &= ~(rdev->mc.mc_vram_size - 1);
- if ((0xFFFFFFFFUL - tmp) >= rdev->mc.mc_vram_size) {
- rdev->mc.vram_location = tmp;
- } else {
- printk(KERN_ERR "[drm] vram too big to fit "
- "before or after GTT location.\n");
- return -EINVAL;
- }
+ size_af = 0xFFFFFFFF - mc->vram_end;
+ size_bf = mc->vram_start;
+ if (size_bf > size_af) {
+ if (mc->gtt_size > size_bf) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_bf;
}
+ mc->gtt_start = mc->vram_start - mc->gtt_size;
} else {
- rdev->mc.vram_location = 0;
- tmp = rdev->mc.mc_vram_size;
- tmp = (tmp + rdev->mc.gtt_size - 1) & ~(rdev->mc.gtt_size - 1);
- rdev->mc.gtt_location = tmp;
- }
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- DRM_INFO("radeon: VRAM %uM\n", (unsigned)(rdev->mc.mc_vram_size >> 20));
- DRM_INFO("radeon: VRAM from 0x%08X to 0x%08X\n",
- (unsigned)rdev->mc.vram_location,
- (unsigned)(rdev->mc.vram_location + rdev->mc.mc_vram_size - 1));
- DRM_INFO("radeon: GTT %uM\n", (unsigned)(rdev->mc.gtt_size >> 20));
- DRM_INFO("radeon: GTT from 0x%08X to 0x%08X\n",
- (unsigned)rdev->mc.gtt_location,
- (unsigned)(rdev->mc.gtt_location + rdev->mc.gtt_size - 1));
- return 0;
+ if (mc->gtt_size > size_af) {
+ dev_warn(rdev->dev, "limiting GTT\n");
+ mc->gtt_size = size_af;
+ }
+ mc->gtt_start = mc->vram_end + 1;
+ }
+ mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
+ dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
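
The two placement helpers above replace the old radeon_mc_setup(): a per-ASIC MC init is expected to anchor VRAM at some base (PCI aperture or IGP TOM) first and then let the GTT fall on whichever side of VRAM has more room. A minimal, hypothetical sketch (example_mc_init is not part of this patch; it assumes real_vram_size, aper_base and gtt_size were already filled in by the ASIC code):

static void example_mc_init(struct radeon_device *rdev)
{
	struct radeon_mc *mc = &rdev->mc;

	/* assume no bogus-size quirk: programmed size == usable size */
	mc->mc_vram_size = mc->real_vram_size;
	/* anchor VRAM at the PCI aperture base, clamping if it can't fit */
	radeon_vram_location(rdev, mc, mc->aper_base);
	/* GTT goes before or after VRAM, whichever gap is larger */
	radeon_gtt_location(rdev, mc);
}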
-
/*
* GPU helpers function.
*/
@@ -182,7 +206,16 @@ bool radeon_card_posted(struct radeon_device *rdev)
uint32_t reg;
/* first check CRTCs */
- if (ASIC_IS_AVIVO(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
+ RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
+ if (reg & EVERGREEN_CRTC_MASTER_EN)
+ return true;
+ } else if (ASIC_IS_AVIVO(rdev)) {
reg = RREG32(AVIVO_D1CRTC_CONTROL) |
RREG32(AVIVO_D2CRTC_CONTROL);
if (reg & AVIVO_CRTC_EN) {
@@ -229,6 +262,8 @@ bool radeon_boot_test_post_card(struct radeon_device *rdev)
int radeon_dummy_page_init(struct radeon_device *rdev)
{
+ if (rdev->dummy_page.page)
+ return 0;
rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
if (rdev->dummy_page.page == NULL)
return -ENOMEM;
@@ -310,7 +345,7 @@ void radeon_register_accessor_init(struct radeon_device *rdev)
rdev->mc_rreg = &rs600_mc_rreg;
rdev->mc_wreg = &rs600_mc_wreg;
}
- if (rdev->family >= CHIP_R600) {
+ if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
rdev->pciep_rreg = &r600_pciep_rreg;
rdev->pciep_wreg = &r600_pciep_wreg;
}
@@ -329,21 +364,22 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_RS100:
case CHIP_RV200:
case CHIP_RS200:
+ rdev->asic = &r100_asic;
+ break;
case CHIP_R200:
case CHIP_RV250:
case CHIP_RS300:
case CHIP_RV280:
- rdev->asic = &r100_asic;
+ rdev->asic = &r200_asic;
break;
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
case CHIP_RV380:
- rdev->asic = &r300_asic;
- if (rdev->flags & RADEON_IS_PCIE) {
- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
- }
+ if (rdev->flags & RADEON_IS_PCIE)
+ rdev->asic = &r300_asic_pcie;
+ else
+ rdev->asic = &r300_asic;
break;
case CHIP_R420:
case CHIP_R423:
@@ -387,6 +423,13 @@ int radeon_asic_init(struct radeon_device *rdev)
case CHIP_RV740:
rdev->asic = &rv770_asic;
break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->asic = &evergreen_asic;
+ break;
default:
/* FIXME: not supported yet */
return -EINVAL;
@@ -613,6 +656,36 @@ void radeon_check_arguments(struct radeon_device *rdev)
}
}
+static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ struct radeon_device *rdev = dev->dev_private;
+ pm_message_t pmm = { .event = PM_EVENT_SUSPEND };
+ if (state == VGA_SWITCHEROO_ON) {
+ printk(KERN_INFO "radeon: switched on\n");
+ /* don't suspend or resume card normally */
+ rdev->powered_down = false;
+ radeon_resume_kms(dev);
+ } else {
+ printk(KERN_INFO "radeon: switched off\n");
+ radeon_suspend_kms(dev, pmm);
+ /* don't suspend or resume card normally */
+ rdev->powered_down = true;
+ }
+}
+
+static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
+{
+ struct drm_device *dev = pci_get_drvdata(pdev);
+ bool can_switch;
+
+ spin_lock(&dev->count_lock);
+ can_switch = (dev->open_count == 0);
+ spin_unlock(&dev->count_lock);
+ return can_switch;
+}
+
+
int radeon_device_init(struct radeon_device *rdev,
struct drm_device *ddev,
struct pci_dev *pdev,
@@ -638,11 +711,14 @@ int radeon_device_init(struct radeon_device *rdev,
mutex_init(&rdev->cs_mutex);
mutex_init(&rdev->ib_pool.mutex);
mutex_init(&rdev->cp.mutex);
+ mutex_init(&rdev->dc_hw_i2c_mutex);
if (rdev->family >= CHIP_R600)
spin_lock_init(&rdev->ih.lock);
mutex_init(&rdev->gem.mutex);
+ mutex_init(&rdev->pm.mutex);
rwlock_init(&rdev->fence_drv.lock);
INIT_LIST_HEAD(&rdev->gem.objects);
+ init_waitqueue_head(&rdev->irq.vblank_queue);
/* setup workqueue */
rdev->wq = create_workqueue("radeon");
@@ -692,6 +768,9 @@ int radeon_device_init(struct radeon_device *rdev,
/* this will fail for cards that aren't VGA class devices, just
* ignore it */
vga_client_register(rdev->pdev, rdev, NULL, radeon_vga_set_decode);
+ vga_switcheroo_register_client(rdev->pdev,
+ radeon_switcheroo_set_state,
+ radeon_switcheroo_can_switch);
r = radeon_init(rdev);
if (r)
@@ -723,6 +802,7 @@ void radeon_device_fini(struct radeon_device *rdev)
rdev->shutdown = true;
radeon_fini(rdev);
destroy_workqueue(rdev->wq);
+ vga_switcheroo_unregister_client(rdev->pdev);
vga_client_register(rdev->pdev, NULL, NULL, NULL);
iounmap(rdev->rmmio);
rdev->rmmio = NULL;
@@ -746,6 +826,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
}
rdev = dev->dev_private;
+ if (rdev->powered_down)
+ return 0;
/* unpin the front buffers */
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
@@ -791,6 +873,9 @@ int radeon_resume_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
+ if (rdev->powered_down)
+ return 0;
+
acquire_console_sem();
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 7e17a362b54..ba8d806dcf3 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -68,6 +68,36 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
}
+static void evergreen_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ int i;
+
+ DRM_DEBUG("%d\n", radeon_crtc->crtc_id);
+ WREG32(EVERGREEN_DC_LUT_CONTROL + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_BLUE + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_GREEN + radeon_crtc->crtc_offset, 0);
+ WREG32(EVERGREEN_DC_LUT_BLACK_OFFSET_RED + radeon_crtc->crtc_offset, 0);
+
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_BLUE + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_GREEN + radeon_crtc->crtc_offset, 0xffff);
+ WREG32(EVERGREEN_DC_LUT_WHITE_OFFSET_RED + radeon_crtc->crtc_offset, 0xffff);
+
+ WREG32(EVERGREEN_DC_LUT_RW_MODE, radeon_crtc->crtc_id);
+ WREG32(EVERGREEN_DC_LUT_WRITE_EN_MASK, 0x00000007);
+
+ WREG32(EVERGREEN_DC_LUT_RW_INDEX, 0);
+ for (i = 0; i < 256; i++) {
+ WREG32(EVERGREEN_DC_LUT_30_COLOR,
+ (radeon_crtc->lut_r[i] << 20) |
+ (radeon_crtc->lut_g[i] << 10) |
+ (radeon_crtc->lut_b[i] << 0));
+ }
+}
+
static void legacy_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -100,7 +130,9 @@ void radeon_crtc_load_lut(struct drm_crtc *crtc)
if (!crtc->enabled)
return;
- if (ASIC_IS_AVIVO(rdev))
+ if (ASIC_IS_DCE4(rdev))
+ evergreen_crtc_load_lut(crtc);
+ else if (ASIC_IS_AVIVO(rdev))
avivo_crtc_load_lut(crtc);
else
legacy_crtc_load_lut(crtc);
@@ -361,6 +393,8 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
{
+ struct drm_device *dev = radeon_connector->base.dev;
+ struct radeon_device *rdev = dev->dev_private;
int ret = 0;
if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
@@ -373,11 +407,11 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
if (!radeon_connector->ddc_bus)
return -1;
if (!radeon_connector->edid) {
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
}
-
+ /* some servers provide a hardcoded edid in rom for KVMs */
+ if (!radeon_connector->edid)
+ radeon_connector->edid = radeon_combios_get_hardcoded_edid(rdev);
if (radeon_connector->edid) {
drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
@@ -395,9 +429,7 @@ static int radeon_ddc_dump(struct drm_connector *connector)
if (!radeon_connector->ddc_bus)
return -1;
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
- radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
if (edid) {
kfree(edid);
}
@@ -414,13 +446,13 @@ static inline uint32_t radeon_div(uint64_t n, uint32_t d)
return n;
}
-void radeon_compute_pll(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
+static void radeon_compute_pll_legacy(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
{
uint32_t min_ref_div = pll->min_ref_div;
uint32_t max_ref_div = pll->max_ref_div;
@@ -580,95 +612,194 @@ void radeon_compute_pll(struct radeon_pll *pll,
*post_div_p = best_post_div;
}
-void radeon_compute_pll_avivo(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p)
+static bool
+calc_fb_div(struct radeon_pll *pll,
+ uint32_t freq,
+ uint32_t post_div,
+ uint32_t ref_div,
+ uint32_t *fb_div,
+ uint32_t *fb_div_frac)
{
- fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
- fixed20_12 pll_out_max, pll_out_min;
- fixed20_12 pll_in_max, pll_in_min;
- fixed20_12 reference_freq;
- fixed20_12 error, ffreq, a, b;
-
- pll_out_max.full = rfixed_const(pll->pll_out_max);
- pll_out_min.full = rfixed_const(pll->pll_out_min);
- pll_in_max.full = rfixed_const(pll->pll_in_max);
- pll_in_min.full = rfixed_const(pll->pll_in_min);
- reference_freq.full = rfixed_const(pll->reference_freq);
- do_div(freq, 10);
+ fixed20_12 feedback_divider, a, b;
+ u32 vco_freq;
+
+ vco_freq = freq * post_div;
+ /* feedback_divider = vco_freq * ref_div / pll->reference_freq; */
+ a.full = rfixed_const(pll->reference_freq);
+ feedback_divider.full = rfixed_const(vco_freq);
+ feedback_divider.full = rfixed_div(feedback_divider, a);
+ a.full = rfixed_const(ref_div);
+ feedback_divider.full = rfixed_mul(feedback_divider, a);
+
+ if (pll->flags & RADEON_PLL_USE_FRAC_FB_DIV) {
+ /* feedback_divider = floor((feedback_divider * 10.0) + 0.5) * 0.1; */
+ a.full = rfixed_const(10);
+ feedback_divider.full = rfixed_mul(feedback_divider, a);
+ feedback_divider.full += rfixed_const_half(0);
+ feedback_divider.full = rfixed_floor(feedback_divider);
+ feedback_divider.full = rfixed_div(feedback_divider, a);
+
+ /* *fb_div = floor(feedback_divider); */
+ a.full = rfixed_floor(feedback_divider);
+ *fb_div = rfixed_trunc(a);
+ /* *fb_div_frac = fmod(feedback_divider, 1.0) * 10.0; */
+ a.full = rfixed_const(10);
+ b.full = rfixed_mul(feedback_divider, a);
+
+ feedback_divider.full = rfixed_floor(feedback_divider);
+ feedback_divider.full = rfixed_mul(feedback_divider, a);
+ feedback_divider.full = b.full - feedback_divider.full;
+ *fb_div_frac = rfixed_trunc(feedback_divider);
+ } else {
+ /* *fb_div = floor(feedback_divider + 0.5); */
+ feedback_divider.full += rfixed_const_half(0);
+ feedback_divider.full = rfixed_floor(feedback_divider);
+
+ *fb_div = rfixed_trunc(feedback_divider);
+ *fb_div_frac = 0;
+ }
+
+ if (((*fb_div) < pll->min_feedback_div) || ((*fb_div) > pll->max_feedback_div))
+ return false;
+ else
+ return true;
+}
+
+static bool
+calc_fb_ref_div(struct radeon_pll *pll,
+ uint32_t freq,
+ uint32_t post_div,
+ uint32_t *fb_div,
+ uint32_t *fb_div_frac,
+ uint32_t *ref_div)
+{
+ fixed20_12 ffreq, max_error, error, pll_out, a;
+ u32 vco;
+
ffreq.full = rfixed_const(freq);
- error.full = rfixed_const(100 * 100);
+ /* max_error = ffreq * 0.0025; */
+ a.full = rfixed_const(400);
+ max_error.full = rfixed_div(ffreq, a);
- /* max p */
- p.full = rfixed_div(pll_out_max, ffreq);
- p.full = rfixed_floor(p);
+ for ((*ref_div) = pll->min_ref_div; (*ref_div) < pll->max_ref_div; ++(*ref_div)) {
+ if (calc_fb_div(pll, freq, post_div, (*ref_div), fb_div, fb_div_frac)) {
+ vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
+ vco = vco / ((*ref_div) * 10);
- /* min m */
- m.full = rfixed_div(reference_freq, pll_in_max);
- m.full = rfixed_ceil(m);
+ if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
+ continue;
- while (1) {
- n.full = rfixed_div(ffreq, reference_freq);
- n.full = rfixed_mul(n, m);
- n.full = rfixed_mul(n, p);
+ /* pll_out = vco / post_div; */
+ a.full = rfixed_const(post_div);
+ pll_out.full = rfixed_const(vco);
+ pll_out.full = rfixed_div(pll_out, a);
- f_vco.full = rfixed_div(n, m);
- f_vco.full = rfixed_mul(f_vco, reference_freq);
+ if (pll_out.full >= ffreq.full) {
+ error.full = pll_out.full - ffreq.full;
+ if (error.full <= max_error.full)
+ return true;
+ }
+ }
+ }
+ return false;
+}
- f_pclk.full = rfixed_div(f_vco, p);
+static void radeon_compute_pll_new(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
+{
+ u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
+ u32 best_freq = 0, vco_frequency;
- if (f_pclk.full > ffreq.full)
- error.full = f_pclk.full - ffreq.full;
- else
- error.full = ffreq.full - f_pclk.full;
- error.full = rfixed_div(error, f_pclk);
- a.full = rfixed_const(100 * 100);
- error.full = rfixed_mul(error, a);
-
- a.full = rfixed_mul(m, p);
- a.full = rfixed_div(n, a);
- best_freq.full = rfixed_mul(reference_freq, a);
-
- if (rfixed_trunc(error) < 25)
- break;
-
- a.full = rfixed_const(1);
- m.full = m.full + a.full;
- a.full = rfixed_div(reference_freq, m);
- if (a.full >= pll_in_min.full)
- continue;
+ /* freq = freq / 10; */
+ do_div(freq, 10);
- m.full = rfixed_div(reference_freq, pll_in_max);
- m.full = rfixed_ceil(m);
- a.full= rfixed_const(1);
- p.full = p.full - a.full;
- a.full = rfixed_mul(p, ffreq);
- if (a.full >= pll_out_min.full)
- continue;
- else {
- DRM_ERROR("Unable to find pll dividers\n");
- break;
+ if (pll->flags & RADEON_PLL_USE_POST_DIV) {
+ post_div = pll->post_div;
+ if ((post_div < pll->min_post_div) || (post_div > pll->max_post_div))
+ goto done;
+
+ vco_frequency = freq * post_div;
+ if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+ goto done;
+
+ if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+ ref_div = pll->reference_div;
+ if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+ goto done;
+ if (!calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+ goto done;
+ }
+ } else {
+ for (post_div = pll->max_post_div; post_div >= pll->min_post_div; --post_div) {
+ if (pll->flags & RADEON_PLL_LEGACY) {
+ if ((post_div == 5) ||
+ (post_div == 7) ||
+ (post_div == 9) ||
+ (post_div == 10) ||
+ (post_div == 11))
+ continue;
+ }
+
+ if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1))
+ continue;
+
+ vco_frequency = freq * post_div;
+ if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+ continue;
+ if (pll->flags & RADEON_PLL_USE_REF_DIV) {
+ ref_div = pll->reference_div;
+ if ((ref_div < pll->min_ref_div) || (ref_div > pll->max_ref_div))
+ goto done;
+ if (calc_fb_div(pll, freq, post_div, ref_div, &fb_div, &fb_div_frac))
+ break;
+ } else {
+ if (calc_fb_ref_div(pll, freq, post_div, &fb_div, &fb_div_frac, &ref_div))
+ break;
+ }
}
}
- a.full = rfixed_const(10);
- b.full = rfixed_mul(n, a);
+ best_freq = pll->reference_freq * 10 * fb_div;
+ best_freq += pll->reference_freq * fb_div_frac;
+ best_freq = best_freq / (ref_div * post_div);
- frac_n.full = rfixed_floor(n);
- frac_n.full = rfixed_mul(frac_n, a);
- frac_n.full = b.full - frac_n.full;
+done:
+ if (best_freq == 0)
+ DRM_ERROR("Couldn't find valid PLL dividers\n");
- *dot_clock_p = rfixed_trunc(best_freq);
- *fb_div_p = rfixed_trunc(n);
- *frac_fb_div_p = rfixed_trunc(frac_n);
- *ref_div_p = rfixed_trunc(m);
- *post_div_p = rfixed_trunc(p);
+ *dot_clock_p = best_freq / 10;
+ *fb_div_p = fb_div;
+ *frac_fb_div_p = fb_div_frac;
+ *ref_div_p = ref_div;
+ *post_div_p = post_div;
- DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+ DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
+void radeon_compute_pll(struct radeon_pll *pll,
+ uint64_t freq,
+ uint32_t *dot_clock_p,
+ uint32_t *fb_div_p,
+ uint32_t *frac_fb_div_p,
+ uint32_t *ref_div_p,
+ uint32_t *post_div_p)
+{
+ switch (pll->algo) {
+ case PLL_ALGO_NEW:
+ radeon_compute_pll_new(pll, freq, dot_clock_p, fb_div_p,
+ frac_fb_div_p, ref_div_p, post_div_p);
+ break;
+ case PLL_ALGO_LEGACY:
+ default:
+ radeon_compute_pll_legacy(pll, freq, dot_clock_p, fb_div_p,
+ frac_fb_div_p, ref_div_p, post_div_p);
+ break;
+ }
}
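
For reference, the final step of the new algorithm recovers the dot clock from the chosen dividers as reference_freq * (fb_div + fb_div_frac / 10) / (ref_div * post_div). A small sketch mirroring that integer math with made-up divider values (not taken from any real mode, and assuming the usual 10 kHz units for the reference clock):

static u32 example_dot_clock(void)
{
	u32 ref_freq = 2700;	/* 27 MHz reference, assuming 10 kHz units */
	u32 fb_div = 40, fb_div_frac = 0, ref_div = 4, post_div = 2;
	u32 freq;

	/* freq = ref_freq * (fb_div + fb_div_frac/10) / (ref_div * post_div) */
	freq = ref_freq * 10 * fb_div + ref_freq * fb_div_frac;
	freq = freq / (ref_div * post_div * 10);
	return freq;	/* 13500, i.e. a 135 MHz dot clock */
}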
static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
@@ -679,11 +810,8 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
if (fb->fbdev)
radeonfb_remove(dev, fb);
- if (radeon_fb->obj) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(radeon_fb->obj);
- mutex_unlock(&dev->struct_mutex);
- }
+ if (radeon_fb->obj)
+ drm_gem_object_unreference_unlocked(radeon_fb->obj);
drm_framebuffer_cleanup(fb);
kfree(radeon_fb);
}
@@ -819,7 +947,7 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
int radeon_modeset_init(struct radeon_device *rdev)
{
- int num_crtc = 2, i;
+ int i;
int ret;
drm_mode_config_init(rdev->ddev);
@@ -842,11 +970,23 @@ int radeon_modeset_init(struct radeon_device *rdev)
return ret;
}
+ /* check combios for a valid hardcoded EDID - Sun servers */
+ if (!rdev->is_atom_bios) {
+ /* check for hardcoded EDID in BIOS */
+ radeon_combios_check_hardcoded_edid(rdev);
+ }
+
if (rdev->flags & RADEON_SINGLE_CRTC)
- num_crtc = 1;
+ rdev->num_crtc = 1;
+ else {
+ if (ASIC_IS_DCE4(rdev))
+ rdev->num_crtc = 6;
+ else
+ rdev->num_crtc = 2;
+ }
/* allocate crtcs */
- for (i = 0; i < num_crtc; i++) {
+ for (i = 0; i < rdev->num_crtc; i++) {
radeon_crtc_init(rdev->ddev, i);
}
@@ -863,6 +1003,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
void radeon_modeset_fini(struct radeon_device *rdev)
{
+ kfree(rdev->mode_info.bios_hardcoded_edid);
+
if (rdev->mode_info.mode_config_initialized) {
radeon_hpd_fini(rdev);
drm_mode_config_cleanup(rdev->ddev);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 8ba3de7994d..6eec0ece6a6 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -40,9 +40,11 @@
/*
* KMS wrapper.
+ * - 2.0.0 - initial interface
+ * - 2.1.0 - add square tiling interface
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 0
+#define KMS_DRIVER_MINOR 1
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -86,7 +88,8 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
-int radeon_new_pll = 1;
+int radeon_new_pll = -1;
+int radeon_dynpm = -1;
int radeon_audio = 1;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
@@ -122,9 +125,12 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);
-MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+MODULE_PARM_DESC(new_pll, "Select new PLL code");
module_param_named(new_pll, radeon_new_pll, int, 0444);
+MODULE_PARM_DESC(dynpm, "Disable/Enable dynamic power management (1 = enable)");
+module_param_named(dynpm, radeon_dynpm, int, 0444);
+
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
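
Both new options default to -1, meaning the driver picks per-ASIC; to force them at module load time one would pass, for example:

	modprobe radeon new_pll=1 dynpm=1

(or radeon.new_pll=1 radeon.dynpm=1 on the kernel command line when the driver is built in).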
@@ -339,6 +345,7 @@ static int __init radeon_init(void)
driver = &kms_driver;
driver->driver_features |= DRIVER_MODESET;
driver->num_ioctls = radeon_max_kms_ioctl;
+ radeon_register_atpx_handler();
}
/* if the vga console setting is enabled still
* let modprobe override it */
@@ -348,6 +355,7 @@ static int __init radeon_init(void)
static void __exit radeon_exit(void)
{
drm_exit(driver);
+ radeon_unregister_atpx_handler();
}
module_init(radeon_init);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index c57ad606504..ec55f2b23c2 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -268,6 +268,8 @@ typedef struct drm_radeon_private {
u32 scratch_ages[5];
+ int have_z_offset;
+
/* starting from here on, data is preserved accross an open */
uint32_t flags; /* see radeon_chip_flags */
resource_size_t fb_aper_offset;
@@ -295,6 +297,9 @@ typedef struct drm_radeon_private {
int r700_sc_prim_fifo_size;
int r700_sc_hiz_tile_fifo_size;
int r700_sc_earlyz_tile_fifo_fize;
+ int r600_group_size;
+ int r600_npipes;
+ int r600_nbanks;
struct mutex cs_mutex;
u32 cs_id_scnt;
@@ -310,9 +315,11 @@ typedef struct drm_radeon_buf_priv {
u32 age;
} drm_radeon_buf_priv_t;
+struct drm_buffer;
+
typedef struct drm_radeon_kcmd_buffer {
int bufsz;
- char *buf;
+ struct drm_buffer *buffer;
int nbox;
struct drm_clip_rect __user *boxes;
} drm_radeon_kcmd_buffer_t;
@@ -455,6 +462,15 @@ extern void r600_blit_swap(struct drm_device *dev,
int sx, int sy, int dx, int dy,
int w, int h, int src_pitch, int dst_pitch, int cpp);
+/* atpx handler */
+#if defined(CONFIG_VGA_SWITCHEROO)
+void radeon_register_atpx_handler(void);
+void radeon_unregister_atpx_handler(void);
+#else
+static inline void radeon_register_atpx_handler(void) {}
+static inline void radeon_unregister_atpx_handler(void) {}
+#endif
+
/* Flags for stats.boxes
*/
#define RADEON_BOX_DMA_IDLE 0x1
@@ -2122,4 +2138,32 @@ extern void radeon_commit_ring(drm_radeon_private_t *dev_priv);
write &= mask; \
} while (0)
+/**
+ * Copy the given number of dwords from the drm buffer to the ring buffer.
+ */
+#define OUT_RING_DRM_BUFFER(buf, sz) do { \
+ int _size = (sz) * 4; \
+ struct drm_buffer *_buf = (buf); \
+ int _part_size; \
+ while (_size > 0) { \
+ _part_size = _size; \
+ \
+ if (write + _part_size/4 > mask) \
+ _part_size = ((mask + 1) - write)*4; \
+ \
+ if (drm_buffer_index(_buf) + _part_size > PAGE_SIZE) \
+ _part_size = PAGE_SIZE - drm_buffer_index(_buf);\
+ \
+ \
+ \
+ memcpy(ring + write, &_buf->data[drm_buffer_page(_buf)] \
+ [drm_buffer_index(_buf)], _part_size); \
+ \
+ _size -= _part_size; \
+ write = (write + _part_size/4) & mask; \
+ drm_buffer_advance(_buf, _part_size); \
+ } \
+} while (0)
+
+
#endif /* __RADEON_DRV_H__ */
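
A hypothetical caller of the new OUT_RING_DRM_BUFFER() macro, only to show how it slots between the existing legacy ring macros (BEGIN_RING()/ADVANCE_RING() set up the ring, write and mask locals the macro relies on); example_emit is not part of this patch and sizes are in dwords:

static void example_emit(drm_radeon_private_t *dev_priv,
			 drm_radeon_kcmd_buffer_t *cmdbuf, int dwords)
{
	RING_LOCALS;

	BEGIN_RING(dwords);
	/* copies dwords out of cmdbuf->buffer, wrapping at the ring mask
	 * and at drm_buffer page boundaries as the macro body does */
	OUT_RING_DRM_BUFFER(cmdbuf->buffer, dwords);
	ADVANCE_RING();
}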
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 3c91724457c..bc926ea0a53 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -53,7 +53,7 @@ static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
/* DVO requires 2x ppll clocks depending on tmds chip */
if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
return index_mask;
-
+
count = -1;
list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
@@ -228,6 +228,32 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
return NULL;
}
+static struct radeon_connector_atom_dig *
+radeon_get_atom_connector_priv_from_encoder(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
+ struct radeon_connector_atom_dig *dig_connector;
+
+ if (!rdev->is_atom_bios)
+ return NULL;
+
+ connector = radeon_get_connector_for_encoder(encoder);
+ if (!connector)
+ return NULL;
+
+ radeon_connector = to_radeon_connector(connector);
+
+ if (!radeon_connector->con_priv)
+ return NULL;
+
+ dig_connector = radeon_connector->con_priv;
+
+ return dig_connector;
+}
+
static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -236,6 +262,9 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
+
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
drm_mode_set_crtcinfo(adjusted_mode, 0);
@@ -458,34 +487,20 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
union lvds_encoder_control args;
int index = 0;
int hdmi_detected = 0;
uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
+ if (!dig || !dig_connector)
return;
- radeon_connector = to_radeon_connector(connector);
-
- if (!radeon_encoder->enc_priv)
- return;
-
- dig = radeon_encoder->enc_priv;
-
- if (!radeon_connector->con_priv)
- return;
-
- if (drm_detect_hdmi_monitor(radeon_connector->edid))
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
hdmi_detected = 1;
- dig_connector = radeon_connector->con_priv;
-
memset(&args, 0, sizeof(args));
switch (radeon_encoder->encoder_id) {
@@ -586,7 +601,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
{
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *radeon_dig_connector;
+ struct radeon_connector_atom_dig *dig_connector;
connector = radeon_get_connector_for_encoder(encoder);
if (!connector)
@@ -617,9 +632,9 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
- radeon_dig_connector = radeon_connector->con_priv;
- if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
- (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+ dig_connector = radeon_connector->con_priv;
+ if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+ (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
return ATOM_ENCODER_MODE_DP;
else if (drm_detect_hdmi_monitor(radeon_connector->edid))
return ATOM_ENCODER_MODE_HDMI;
@@ -656,6 +671,18 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* - 2 DIG encoder blocks.
* DIG1/2 can drive UNIPHY0/1/2 link A or link B
*
+ * DCE 4.0
+ * - 3 DIG transmitter blocks UNIPHY0/1/2 (links A and B).
+ * Supports up to 6 digital outputs
+ * - 6 DIG encoder blocks.
+ * - DIG to PHY mapping is hardcoded
+ * DIG1 drives UNIPHY0 link A, A+B
+ * DIG2 drives UNIPHY0 link B
+ * DIG3 drives UNIPHY1 link A, A+B
+ * DIG4 drives UNIPHY1 link B
+ * DIG5 drives UNIPHY2 link A, A+B
+ * DIG6 drives UNIPHY2 link B
+ *
* Routing
* crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
* Examples:
@@ -664,88 +691,78 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
* crtc0 -> dig1 -> UNIPHY2 link A -> LVDS
* crtc1 -> dig2 -> UNIPHY1 link B+A -> TMDS/HDMI
*/
-static void
+
+union dig_encoder_control {
+ DIG_ENCODER_CONTROL_PS_ALLOCATION v1;
+ DIG_ENCODER_CONTROL_PARAMETERS_V2 v2;
+ DIG_ENCODER_CONTROL_PARAMETERS_V3 v3;
+};
+
+void
atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- DIG_ENCODER_CONTROL_PS_ALLOCATION args;
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+ union dig_encoder_control args;
int index = 0, num = 0;
uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
+ if (!dig || !dig_connector)
return;
- radeon_connector = to_radeon_connector(connector);
-
- if (!radeon_connector->con_priv)
- return;
-
- dig_connector = radeon_connector->con_priv;
-
- if (!radeon_encoder->enc_priv)
- return;
-
- dig = radeon_encoder->enc_priv;
-
memset(&args, 0, sizeof(args));
- if (dig->dig_encoder)
- index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
- else
- index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ if (ASIC_IS_DCE4(rdev))
+ index = GetIndexIntoMasterTable(COMMAND, DIGxEncoderControl);
+ else {
+ if (dig->dig_encoder)
+ index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+ else
+ index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+ }
num = dig->dig_encoder + 1;
atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
- args.ucAction = action;
- args.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.ucAction = action;
+ args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
+ args.v1.ucEncoderMode = atombios_get_encoder_mode(encoder);
- if (ASIC_IS_DCE32(rdev)) {
+ if (args.v1.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+ if (dig_connector->dp_clock == 270000)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+ args.v1.ucLaneNum = dig_connector->dp_lane_count;
+ } else if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucLaneNum = 8;
+ else
+ args.v1.ucLaneNum = 4;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ args.v3.acConfig.ucDigSel = dig->dig_encoder;
+ args.v3.ucBitPerColor = PANEL_8BIT_PER_COLOR;
+ } else {
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER2;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- args.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
- break;
- }
- } else {
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER1;
- break;
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- args.ucConfig = ATOM_ENCODER_CONFIG_TRANSMITTER2;
+ args.v1.ucConfig = ATOM_ENCODER_CONFIG_V2_TRANSMITTER3;
break;
}
+ if (dig_connector->linkb)
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+ else
+ args.v1.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
}
- args.ucEncoderMode = atombios_get_encoder_mode(encoder);
-
- if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
- if (dig_connector->dp_clock == 270000)
- args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
- args.ucLaneNum = dig_connector->dp_lane_count;
- } else if (radeon_encoder->pixel_clock > 165000)
- args.ucLaneNum = 8;
- else
- args.ucLaneNum = 4;
-
- if (dig_connector->linkb)
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
- else
- args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
-
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -753,6 +770,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
union dig_transmitter_control {
DIG_TRANSMITTER_CONTROL_PS_ALLOCATION v1;
DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
+ DIG_TRANSMITTER_CONTROL_PARAMETERS_V3 v3;
};
void
@@ -761,37 +779,29 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+ struct drm_connector *connector;
+ struct radeon_connector *radeon_connector;
union dig_transmitter_control args;
int index = 0, num = 0;
uint8_t frev, crev;
- struct radeon_encoder_atom_dig *dig;
- struct drm_connector *connector;
- struct radeon_connector *radeon_connector;
- struct radeon_connector_atom_dig *dig_connector;
bool is_dp = false;
+ int pll_id = 0;
- connector = radeon_get_connector_for_encoder(encoder);
- if (!connector)
+ if (!dig || !dig_connector)
return;
+ connector = radeon_get_connector_for_encoder(encoder);
radeon_connector = to_radeon_connector(connector);
- if (!radeon_encoder->enc_priv)
- return;
-
- dig = radeon_encoder->enc_priv;
-
- if (!radeon_connector->con_priv)
- return;
-
- dig_connector = radeon_connector->con_priv;
-
if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
is_dp = true;
memset(&args, 0, sizeof(args));
- if (ASIC_IS_DCE32(rdev))
+ if (ASIC_IS_DCE32(rdev) || ASIC_IS_DCE4(rdev))
index = GetIndexIntoMasterTable(COMMAND, UNIPHYTransmitterControl);
else {
switch (radeon_encoder->encoder_id) {
@@ -821,7 +831,54 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
}
- if (ASIC_IS_DCE32(rdev)) {
+ if (ASIC_IS_DCE4(rdev)) {
+ if (is_dp)
+ args.v3.ucLaneNum = dig_connector->dp_lane_count;
+ else if (radeon_encoder->pixel_clock > 165000)
+ args.v3.ucLaneNum = 8;
+ else
+ args.v3.ucLaneNum = 4;
+
+ if (dig_connector->linkb) {
+ args.v3.acConfig.ucLinkSel = 1;
+ args.v3.acConfig.ucEncoderSel = 1;
+ }
+
+ /* Select the PLL for the PHY
+ * DP PHY should be clocked from external src if there is
+ * one.
+ */
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ pll_id = radeon_crtc->pll_id;
+ }
+ if (is_dp && rdev->clock.dp_extclk)
+ args.v3.acConfig.ucRefClkSource = 2; /* external src */
+ else
+ args.v3.acConfig.ucRefClkSource = pll_id;
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ args.v3.acConfig.ucTransmitterSel = 0;
+ num = 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ args.v3.acConfig.ucTransmitterSel = 1;
+ num = 1;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ args.v3.acConfig.ucTransmitterSel = 2;
+ num = 2;
+ break;
+ }
+
+ if (is_dp)
+ args.v3.acConfig.fCoherentMode = 1; /* DP requires coherent */
+ else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+ if (dig->coherent_mode)
+ args.v3.acConfig.fCoherentMode = 1;
+ }
+ } else if (ASIC_IS_DCE32(rdev)) {
if (dig->dig_encoder == 1)
args.v2.acConfig.ucEncoderSel = 1;
if (dig_connector->linkb)
@@ -849,7 +906,6 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v2.acConfig.fCoherentMode = 1;
}
} else {
-
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;
if (dig->dig_encoder)
@@ -1024,9 +1080,12 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
-union crtc_sourc_param {
+union crtc_source_param {
SELECT_CRTC_SOURCE_PS_ALLOCATION v1;
SELECT_CRTC_SOURCE_PARAMETERS_V2 v2;
};
@@ -1038,7 +1097,7 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- union crtc_sourc_param args;
+ union crtc_source_param args;
int index = GetIndexIntoMasterTable(COMMAND, SelectCRTC_Source);
uint8_t frev, crev;
struct radeon_encoder_atom_dig *dig;
@@ -1107,10 +1166,26 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
dig = radeon_encoder->enc_priv;
- if (dig->dig_encoder)
- args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
- else
+ switch (dig->dig_encoder) {
+ case 0:
args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+ break;
+ case 1:
+ args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+ break;
+ case 2:
+ args.v2.ucEncoderID = ASIC_INT_DIG3_ENCODER_ID;
+ break;
+ case 3:
+ args.v2.ucEncoderID = ASIC_INT_DIG4_ENCODER_ID;
+ break;
+ case 4:
+ args.v2.ucEncoderID = ASIC_INT_DIG5_ENCODER_ID;
+ break;
+ case 5:
+ args.v2.ucEncoderID = ASIC_INT_DIG6_ENCODER_ID;
+ break;
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
@@ -1167,6 +1242,7 @@ atombios_apply_encoder_quirks(struct drm_encoder *encoder,
}
/* set scaler clears this on some chips */
+ /* XXX check DCE4 */
if (!(radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))) {
if (ASIC_IS_AVIVO(rdev) && (mode->flags & DRM_MODE_FLAG_INTERLACE))
WREG32(AVIVO_D1MODE_DATA_FORMAT + radeon_crtc->crtc_offset,
@@ -1183,6 +1259,33 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder)
struct drm_encoder *test_encoder;
struct radeon_encoder_atom_dig *dig;
uint32_t dig_enc_in_use = 0;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_get_atom_connector_priv_from_encoder(encoder);
+
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
+ if (dig_connector->linkb)
+ return 1;
+ else
+ return 0;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
+ if (dig_connector->linkb)
+ return 3;
+ else
+ return 2;
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
+ if (dig_connector->linkb)
+ return 5;
+ else
+ return 4;
+ break;
+ }
+ }
+
/* on DCE32 and encoder can driver any block so just crtc id */
if (ASIC_IS_DCE32(rdev)) {
return radeon_crtc->crtc_id;
@@ -1254,15 +1357,26 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* disable the encoder and transmitter */
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
- atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
-
- /* setup and enable the encoder and transmitter */
- atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ if (ASIC_IS_DCE4(rdev)) {
+ /* disable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ /* setup and enable the encoder */
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP);
+
+ /* init and enable the transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ } else {
+ /* disable the encoder and transmitter */
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+ atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
+
+ /* setup and enable the encoder and transmitter */
+ atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+ }
break;
case ENCODER_OBJECT_ID_INTERNAL_DDI:
atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1282,7 +1396,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
- r600_hdmi_setmode(encoder, adjusted_mode);
+ /* XXX */
+ if (!ASIC_IS_DCE4(rdev))
+ r600_hdmi_setmode(encoder, adjusted_mode);
}
static bool
@@ -1480,10 +1596,18 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
return;
encoder = &radeon_encoder->base;
- if (rdev->flags & RADEON_SINGLE_CRTC)
+ switch (rdev->num_crtc) {
+ case 1:
encoder->possible_crtcs = 0x1;
- else
+ break;
+ case 2:
+ default:
encoder->possible_crtcs = 0x3;
+ break;
+ case 6:
+ encoder->possible_crtcs = 0x3f;
+ break;
+ }
radeon_encoder->enc_priv = NULL;
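
The possible_crtcs switch above widens the old single/dual-CRTC assumption to six CRTCs on Evergreen; for the counts handled (1, 2, 6) the mask is simply the low num_crtc bits set, with anything else defaulting to the dual-CRTC mask. A hedged sketch of that relationship (the helper name is invented for illustration):

/* Sketch: possible_crtcs as a bitmask with the low num_crtc bits set.
 * 1 crtc -> 0x1, 2 -> 0x3, 6 -> 0x3f, matching the switch above.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t possible_crtcs_mask(unsigned num_crtc)
{
	return (1u << num_crtc) - 1;
}

int main(void)
{
	unsigned counts[] = { 1, 2, 6 };
	for (unsigned i = 0; i < 3; i++)
		printf("%u crtcs -> 0x%x\n", counts[i], possible_crtcs_mask(counts[i]));
	return 0;
}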
diff --git a/drivers/gpu/drm/radeon/radeon_family.h b/drivers/gpu/drm/radeon/radeon_family.h
index 797972e344a..93c7d5d4191 100644
--- a/drivers/gpu/drm/radeon/radeon_family.h
+++ b/drivers/gpu/drm/radeon/radeon_family.h
@@ -75,6 +75,11 @@ enum radeon_family {
CHIP_RV730,
CHIP_RV710,
CHIP_RV740,
+ CHIP_CEDAR,
+ CHIP_REDWOOD,
+ CHIP_JUNIPER,
+ CHIP_CYPRESS,
+ CHIP_HEMLOCK,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index d71e346e9ab..8fccbf29235 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -39,6 +39,8 @@
#include "drm_fb_helper.h"
+#include <linux/vga_switcheroo.h>
+
struct radeon_fb_device {
struct drm_fb_helper helper;
struct radeon_framebuffer *rfb;
@@ -148,7 +150,6 @@ int radeonfb_create(struct drm_device *dev,
unsigned long tmp;
bool fb_tiled = false; /* useful for testing */
u32 tiling_flags = 0;
- int crtc_count;
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
@@ -239,11 +240,7 @@ int radeonfb_create(struct drm_device *dev,
rfbdev = info->par;
rfbdev->helper.funcs = &radeon_fb_helper_funcs;
rfbdev->helper.dev = dev;
- if (rdev->flags & RADEON_SINGLE_CRTC)
- crtc_count = 1;
- else
- crtc_count = 2;
- ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, crtc_count,
+ ret = drm_fb_helper_init_crtc_count(&rfbdev->helper, rdev->num_crtc,
RADEONFB_CONN_LIMIT);
if (ret)
goto out_unref;
@@ -257,7 +254,7 @@ int radeonfb_create(struct drm_device *dev,
info->flags = FBINFO_DEFAULT;
info->fbops = &radeonfb_ops;
- tmp = fb_gpuaddr - rdev->mc.vram_location;
+ tmp = fb_gpuaddr - rdev->mc.vram_start;
info->fix.smem_start = rdev->mc.aper_base + tmp;
info->fix.smem_len = size;
info->screen_base = fbptr;
@@ -291,6 +288,7 @@ int radeonfb_create(struct drm_device *dev,
rfbdev->rdev = rdev;
mutex_unlock(&rdev->ddev->struct_mutex);
+ vga_switcheroo_client_fb_set(rdev->ddev->pdev, info);
return 0;
out_unref:
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index e73d56e83fa..1770d3c07fd 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -139,6 +139,7 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
+ u64 page_base;
if (!rdev->gart.ready) {
	WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -151,9 +152,11 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
pci_unmap_page(rdev->pdev, rdev->gart.pages_addr[p],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
rdev->gart.pages[p] = NULL;
- rdev->gart.pages_addr[p] = 0;
+ rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
+ page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, 0);
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@@ -199,8 +202,26 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
return 0;
}
+void radeon_gart_restore(struct radeon_device *rdev)
+{
+ int i, j, t;
+ u64 page_base;
+
+ for (i = 0, t = 0; i < rdev->gart.num_cpu_pages; i++) {
+ page_base = rdev->gart.pages_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ radeon_gart_set_page(rdev, t, page_base);
+ page_base += RADEON_GPU_PAGE_SIZE;
+ }
+ }
+ mb();
+ radeon_gart_tlb_flush(rdev);
+}
+
int radeon_gart_init(struct radeon_device *rdev)
{
+ int r, i;
+
if (rdev->gart.pages) {
return 0;
}
@@ -209,6 +230,9 @@ int radeon_gart_init(struct radeon_device *rdev)
DRM_ERROR("Page size is smaller than GPU page size!\n");
return -EINVAL;
}
+ r = radeon_dummy_page_init(rdev);
+ if (r)
+ return r;
/* Compute table size */
rdev->gart.num_cpu_pages = rdev->mc.gtt_size / PAGE_SIZE;
rdev->gart.num_gpu_pages = rdev->mc.gtt_size / RADEON_GPU_PAGE_SIZE;
@@ -227,6 +251,10 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
+ /* set GART entry to point to the dummy page by default */
+ for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
+ rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
+ }
return 0;
}
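
With the changes above, a GART entry that is not bound to real memory now points at a single dummy page instead of address 0, and the new radeon_gart_restore() can rewrite the whole table from pages_addr[] when it needs rebuilding. A user-space sketch of that bookkeeping, with simplified types and sizes in place of the driver's GART structures:

/* Sketch of the dummy-page GART bookkeeping: unbound slots reference
 * one shared dummy page, and a restore pass rewrites every entry.
 * Types, sizes and the page table itself are simplified stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_PAGES		8
#define GPU_PAGES_PER_CPU_PAGE	1	/* PAGE_SIZE / RADEON_GPU_PAGE_SIZE */
#define GPU_PAGE_SIZE		4096

static uint64_t dummy_addr = 0xdead000;	/* stands in for dummy_page.addr */
static uint64_t pages_addr[NUM_PAGES];	/* per-CPU-page DMA address */
static uint64_t gart_table[NUM_PAGES * GPU_PAGES_PER_CPU_PAGE];

static void gart_init(void)
{
	for (int i = 0; i < NUM_PAGES; i++)
		pages_addr[i] = dummy_addr;	/* default: dummy page */
}

static void gart_bind(int slot, uint64_t dma_addr)
{
	pages_addr[slot] = dma_addr;
}

static void gart_unbind(int slot)
{
	pages_addr[slot] = dummy_addr;	/* never leave a zero entry */
}

static void gart_restore(void)
{
	for (int i = 0, t = 0; i < NUM_PAGES; i++)
		for (int j = 0; j < GPU_PAGES_PER_CPU_PAGE; j++, t++)
			gart_table[t] = pages_addr[i] + (uint64_t)j * GPU_PAGE_SIZE;
}

int main(void)
{
	gart_init();
	gart_bind(2, 0x100000);
	gart_unbind(2);
	gart_restore();
	printf("entry 2 -> %#llx (dummy)\n", (unsigned long long)gart_table[2]);
	return 0;
}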
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index db8e9a355a0..ef92d147d8f 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -69,9 +69,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
if (r != -ERESTARTSYS)
DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
- mutex_lock(&rdev->ddev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&rdev->ddev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
gobj->driver_private = robj;
@@ -202,14 +200,10 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
}
r = drm_gem_handle_create(filp, gobj, &handle);
if (r) {
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_handle_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_handle_unreference_unlocked(gobj);
args->handle = handle;
return 0;
}
@@ -236,9 +230,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
r = radeon_gem_set_domain(gobj, args->read_domains, args->write_domain);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -255,9 +247,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
}
robj = gobj->driver_private;
args->addr_ptr = radeon_bo_mmap_offset(robj);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return 0;
}
@@ -288,9 +278,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
default:
break;
}
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -311,9 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
/* callback hw specific functions if any */
if (robj->rdev->asic->ioctl_wait_idle)
robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -331,9 +317,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
robj = gobj->driver_private;
r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
@@ -356,8 +340,6 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
radeon_bo_unreserve(rbo);
out:
- mutex_lock(&dev->struct_mutex);
- drm_gem_object_unreference(gobj);
- mutex_unlock(&dev->struct_mutex);
+ drm_gem_object_unreference_unlocked(gobj);
return r;
}
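
Every ioctl in this file previously dropped its GEM reference under struct_mutex by hand; drm_gem_object_unreference_unlocked() handles the required locking itself, so the three-line lock/unref/unlock pattern collapses to one call. A generic sketch of that refactor, with made-up names standing in for the DRM helpers:

/* Sketch of folding a lock/unref/unlock pattern into one helper.
 * gem_object and put_object_*() are illustrative stand-ins, not the
 * real DRM API.
 */
#include <pthread.h>
#include <stdio.h>

struct gem_object {
	int refcount;
	pthread_mutex_t *lock;	/* the lock the old callers took by hand */
};

static void put_object_locked(struct gem_object *obj)
{
	obj->refcount--;	/* caller already holds obj->lock */
}

/* new-style helper: does its own locking, callers just call it */
static void put_object_unlocked(struct gem_object *obj)
{
	pthread_mutex_lock(obj->lock);
	put_object_locked(obj);
	pthread_mutex_unlock(obj->lock);
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct gem_object obj = { .refcount = 1, .lock = &lock };
	put_object_unlocked(&obj);
	printf("refcount now %d\n", obj.refcount);
	return 0;
}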
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index da3da1e89d0..4ae50c19589 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -26,6 +26,7 @@
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
+#include "atom.h"
/**
* radeon_ddc_probe
@@ -59,7 +60,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
}
-void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
+static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
@@ -71,13 +72,25 @@ void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
*/
if (rec->hw_capable) {
if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
- if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+ u32 reg;
+
+ if (rdev->family >= CHIP_RV350)
+ reg = RADEON_GPIO_MONID;
+ else if ((rdev->family == CHIP_R300) ||
+ (rdev->family == CHIP_R350))
+ reg = RADEON_GPIO_DVI_DDC;
+ else
+ reg = RADEON_GPIO_CRT2_DDC;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ if (rec->a_clk_reg == reg) {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
} else {
WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
}
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
}
}
@@ -168,6 +181,692 @@ static void set_data(void *i2c_priv, int data)
WREG32(rec->en_data_reg, val);
}
+static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
+{
+ struct radeon_pll *spll = &rdev->clock.spll;
+ u32 sclk = radeon_get_engine_clock(rdev);
+ u32 prescale = 0;
+ u32 n, m;
+ u8 loop;
+ int i2c_clock;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ n = (spll->reference_freq) / (4 * 6);
+ for (loop = 1; loop < 255; loop++) {
+ if ((loop * (loop - 1)) > n)
+ break;
+ }
+ m = loop - 1;
+ prescale = m | (loop << 8);
+ break;
+ case CHIP_RV380:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ sclk = radeon_get_engine_clock(rdev);
+ prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+ break;
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ /* todo */
+ break;
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ i2c_clock = 50;
+ sclk = radeon_get_engine_clock(rdev);
+ if (rdev->family == CHIP_R520)
+ prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
+ else
+ prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV670:
+ /* todo */
+ break;
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ /* todo */
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ /* todo */
+ break;
+ default:
+ DRM_ERROR("i2c: unhandled radeon chip\n");
+ break;
+ }
+ return prescale;
+}
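
For the R420/RV380-class branch above, the prescale value packs a fixed divider of 128 in the low byte and a coarse divider plus one in the bits above it. A quick arithmetic sketch of that packing, assuming sclk is in the 10 kHz units the surrounding code appears to use (the helper name is invented for illustration):

/* Sketch of the RV380/R4xx-class prescale packing used above:
 * low byte = fixed divider 128, upper bits = coarse divider + 1.
 * Assumes sclk is in 10 kHz units, as the *10 in the formula suggests.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t i2c_prescale_r4xx(uint32_t sclk)
{
	return (((sclk * 10) / (4 * 128 * 100) + 1) << 8) + 128;
}

int main(void)
{
	uint32_t sclk = 40000;	/* 400 MHz engine clock in 10 kHz units */
	printf("prescale = 0x%08x\n", i2c_prescale_r4xx(sclk));
	return 0;
}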
+
+
+/* hw i2c engine for r1xx-4xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ struct i2c_msg *p;
+ int i, j, k, ret = num;
+ u32 prescale;
+ u32 i2c_cntl_0, i2c_cntl_1, i2c_data;
+ u32 tmp, reg;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ /* take the pm lock since we need a constant sclk */
+ mutex_lock(&rdev->pm.mutex);
+
+ prescale = radeon_get_i2c_prescale(rdev);
+
+ reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
+ RADEON_I2C_START |
+ RADEON_I2C_STOP |
+ RADEON_I2C_GO);
+
+ if (rdev->is_atom_bios) {
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+ }
+
+ if (rec->mm_i2c) {
+ i2c_cntl_0 = RADEON_I2C_CNTL_0;
+ i2c_cntl_1 = RADEON_I2C_CNTL_1;
+ i2c_data = RADEON_I2C_DATA;
+ } else {
+ i2c_cntl_0 = RADEON_DVI_I2C_CNTL_0;
+ i2c_cntl_1 = RADEON_DVI_I2C_CNTL_1;
+ i2c_data = RADEON_DVI_I2C_DATA;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_RS300:
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ /* no gpio select bit */
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_R200:
+ /* only bit 4 on r200 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_MONID:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_RV250:
+ case CHIP_RV280:
+ /* bits 3 and 4 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+ break;
+ case RADEON_GPIO_CRT2_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ /* only bit 4 on r300/r350 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ /* bits 3 and 4 */
+ switch (rec->mask_clk_reg) {
+ case RADEON_GPIO_VGA_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1);
+ break;
+ case RADEON_GPIO_DVI_DDC:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC2);
+ break;
+ case RADEON_GPIO_MONID:
+ reg |= R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+ break;
+ default:
+ DRM_ERROR("unsupported asic\n");
+ ret = -EINVAL;
+ goto done;
+ break;
+ }
+ }
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, (p->addr << 1) & 0xff);
+ WREG32(i2c_data, 0);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ for (j = 0; j < p->len; j++) {
+ if (p->flags & I2C_M_RD) {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, ((p->addr << 1) & 0xff) | 0x1);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg | RADEON_I2C_RECEIVE);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ p->buf[j] = RREG32(i2c_data) & 0xff;
+ } else {
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+ WREG32(i2c_data, (p->addr << 1) & 0xff);
+ WREG32(i2c_data, p->buf[j]);
+ WREG32(i2c_cntl_1, ((1 << RADEON_I2C_DATA_COUNT_SHIFT) |
+ (1 << RADEON_I2C_ADDR_COUNT_SHIFT) |
+ RADEON_I2C_EN |
+ (48 << RADEON_I2C_TIME_LIMIT_SHIFT)));
+ WREG32(i2c_cntl_0, reg);
+ for (k = 0; k < 32; k++) {
+ udelay(10);
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_GO)
+ continue;
+ tmp = RREG32(i2c_cntl_0);
+ if (tmp & RADEON_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(i2c_cntl_0, tmp | RADEON_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ }
+ }
+ }
+
+done:
+ WREG32(i2c_cntl_0, 0);
+ WREG32(i2c_cntl_1, 0);
+ WREG32(i2c_cntl_0, (RADEON_I2C_DONE |
+ RADEON_I2C_NACK |
+ RADEON_I2C_HALT |
+ RADEON_I2C_SOFT_RST));
+
+ if (rdev->is_atom_bios) {
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+ }
+
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+ return ret;
+}
+
+/* hw i2c engine for r5xx hardware
+ * hw can buffer up to 15 bytes
+ */
+static int r500_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ struct i2c_msg *p;
+ int i, j, remaining, current_count, buffer_offset, ret = num;
+ u32 prescale;
+ u32 tmp, reg;
+ u32 saved1, saved2;
+
+ mutex_lock(&rdev->dc_hw_i2c_mutex);
+ /* take the pm lock since we need a constant sclk */
+ mutex_lock(&rdev->pm.mutex);
+
+ prescale = radeon_get_i2c_prescale(rdev);
+
+ /* clear gpio mask bits */
+ tmp = RREG32(rec->mask_clk_reg);
+ tmp &= ~rec->mask_clk_mask;
+ WREG32(rec->mask_clk_reg, tmp);
+ tmp = RREG32(rec->mask_clk_reg);
+
+ tmp = RREG32(rec->mask_data_reg);
+ tmp &= ~rec->mask_data_mask;
+ WREG32(rec->mask_data_reg, tmp);
+ tmp = RREG32(rec->mask_data_reg);
+
+ /* clear pin values */
+ tmp = RREG32(rec->a_clk_reg);
+ tmp &= ~rec->a_clk_mask;
+ WREG32(rec->a_clk_reg, tmp);
+ tmp = RREG32(rec->a_clk_reg);
+
+ tmp = RREG32(rec->a_data_reg);
+ tmp &= ~rec->a_data_mask;
+ WREG32(rec->a_data_reg, tmp);
+ tmp = RREG32(rec->a_data_reg);
+
+ /* set the pins to input */
+ tmp = RREG32(rec->en_clk_reg);
+ tmp &= ~rec->en_clk_mask;
+ WREG32(rec->en_clk_reg, tmp);
+ tmp = RREG32(rec->en_clk_reg);
+
+ tmp = RREG32(rec->en_data_reg);
+ tmp &= ~rec->en_data_mask;
+ WREG32(rec->en_data_reg, tmp);
+ tmp = RREG32(rec->en_data_reg);
+
+	/* flag the hw i2c engine busy and save the current i2c control state */
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp | ATOM_S6_HW_I2C_BUSY_STATE);
+ saved1 = RREG32(AVIVO_DC_I2C_CONTROL1);
+ saved2 = RREG32(0x494);
+ WREG32(0x494, saved2 | 0x1);
+
+ WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_WANTS_TO_USE_I2C);
+ for (i = 0; i < 50; i++) {
+ udelay(1);
+ if (RREG32(AVIVO_DC_I2C_ARBITRATION) & AVIVO_DC_I2C_SW_CAN_USE_I2C)
+ break;
+ }
+ if (i == 50) {
+ DRM_ERROR("failed to get i2c bus\n");
+ ret = -EBUSY;
+ goto done;
+ }
+
+ reg = AVIVO_DC_I2C_START | AVIVO_DC_I2C_STOP | AVIVO_DC_I2C_EN;
+ switch (rec->mask_clk_reg) {
+ case AVIVO_DC_GPIO_DDC1_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC1);
+ break;
+ case AVIVO_DC_GPIO_DDC2_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC2);
+ break;
+ case AVIVO_DC_GPIO_DDC3_MASK:
+ reg |= AVIVO_DC_I2C_PIN_SELECT(AVIVO_SEL_DDC3);
+ break;
+ default:
+ DRM_ERROR("gpio not supported with hw i2c\n");
+ ret = -EINVAL;
+ goto done;
+ }
+
+ /* check for bus probe */
+ p = &msgs[0];
+ if ((num == 1) && (p->len == 0)) {
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+ WREG32(AVIVO_DC_I2C_DATA, 0);
+
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(1) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ goto done;
+ }
+
+ for (i = 0; i < num; i++) {
+ p = &msgs[i];
+ remaining = p->len;
+ buffer_offset = 0;
+ if (p->flags & I2C_M_RD) {
+ while (remaining) {
+ if (remaining > 15)
+ current_count = 15;
+ else
+ current_count = remaining;
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, ((p->addr << 1) & 0xff) | 0x1);
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(current_count) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg | AVIVO_DC_I2C_RECEIVE);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c read error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ for (j = 0; j < current_count; j++)
+ p->buf[buffer_offset + j] = RREG32(AVIVO_DC_I2C_DATA) & 0xff;
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ } else {
+ while (remaining) {
+ if (remaining > 15)
+ current_count = 15;
+ else
+ current_count = remaining;
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_DATA, (p->addr << 1) & 0xff);
+ for (j = 0; j < current_count; j++)
+ WREG32(AVIVO_DC_I2C_DATA, p->buf[buffer_offset + j]);
+
+ WREG32(AVIVO_DC_I2C_CONTROL3, AVIVO_DC_I2C_TIME_LIMIT(48));
+ WREG32(AVIVO_DC_I2C_CONTROL2, (AVIVO_DC_I2C_ADDR_COUNT(1) |
+ AVIVO_DC_I2C_DATA_COUNT(current_count) |
+ (prescale << 16)));
+ WREG32(AVIVO_DC_I2C_CONTROL1, reg);
+ WREG32(AVIVO_DC_I2C_STATUS1, AVIVO_DC_I2C_GO);
+ for (j = 0; j < 200; j++) {
+ udelay(50);
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_GO)
+ continue;
+ tmp = RREG32(AVIVO_DC_I2C_STATUS1);
+ if (tmp & AVIVO_DC_I2C_DONE)
+ break;
+ else {
+ DRM_DEBUG("i2c write error 0x%08x\n", tmp);
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_ABORT);
+ ret = -EIO;
+ goto done;
+ }
+ }
+ remaining -= current_count;
+ buffer_offset += current_count;
+ }
+ }
+ }
+
+done:
+ WREG32(AVIVO_DC_I2C_STATUS1, (AVIVO_DC_I2C_DONE |
+ AVIVO_DC_I2C_NACK |
+ AVIVO_DC_I2C_HALT));
+ WREG32(AVIVO_DC_I2C_RESET, AVIVO_DC_I2C_SOFT_RESET);
+ udelay(1);
+ WREG32(AVIVO_DC_I2C_RESET, 0);
+
+ WREG32(AVIVO_DC_I2C_ARBITRATION, AVIVO_DC_I2C_SW_DONE_USING_I2C);
+ WREG32(AVIVO_DC_I2C_CONTROL1, saved1);
+ WREG32(0x494, saved2);
+ tmp = RREG32(RADEON_BIOS_6_SCRATCH);
+ tmp &= ~ATOM_S6_HW_I2C_BUSY_STATE;
+ WREG32(RADEON_BIOS_6_SCRATCH, tmp);
+
+ mutex_unlock(&rdev->pm.mutex);
+ mutex_unlock(&rdev->dc_hw_i2c_mutex);
+
+ return ret;
+}
+
+static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ int ret;
+
+ radeon_i2c_do_lock(i2c, 1);
+ ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
+ radeon_i2c_do_lock(i2c, 0);
+
+ return ret;
+}
+
+static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msgs, int num)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+ struct radeon_device *rdev = i2c->dev->dev_private;
+ struct radeon_i2c_bus_rec *rec = &i2c->rec;
+ int ret;
+
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ case CHIP_RS400:
+ case CHIP_RS480:
+ if (rec->hw_capable)
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+ else
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_RS600:
+ case CHIP_RS690:
+ case CHIP_RS740:
+ /* XXX fill in hw i2c implementation */
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_RV515:
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ if (rec->hw_capable) {
+ if (rec->mm_i2c)
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+ else
+ ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
+ } else
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV670:
+ /* XXX fill in hw i2c implementation */
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RS780:
+ case CHIP_RS880:
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ /* XXX fill in hw i2c implementation */
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ /* XXX fill in hw i2c implementation */
+ ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ break;
+ default:
+ DRM_ERROR("i2c: unhandled radeon chip\n");
+ ret = -EIO;
+ break;
+ }
+
+ return ret;
+}
+
+static u32 radeon_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm radeon_i2c_algo = {
+ .master_xfer = radeon_i2c_xfer,
+ .functionality = radeon_i2c_func,
+};
+
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
@@ -179,23 +878,36 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
if (i2c == NULL)
return NULL;
- i2c->adapter.owner = THIS_MODULE;
- i2c->dev = dev;
- i2c_set_adapdata(&i2c->adapter, i2c);
- i2c->adapter.algo_data = &i2c->algo.bit;
- i2c->algo.bit.setsda = set_data;
- i2c->algo.bit.setscl = set_clock;
- i2c->algo.bit.getsda = get_data;
- i2c->algo.bit.getscl = get_clock;
- i2c->algo.bit.udelay = 20;
+ /* set the internal bit adapter */
+ i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
+ i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
+ sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
+ i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
+ i2c->algo.radeon.bit_data.setsda = set_data;
+ i2c->algo.radeon.bit_data.setscl = set_clock;
+ i2c->algo.radeon.bit_data.getsda = get_data;
+ i2c->algo.radeon.bit_data.getscl = get_clock;
+ i2c->algo.radeon.bit_data.udelay = 20;
	/* vesa says 2.2 ms is enough, but 1 jiffy doesn't always seem to
	 * be enough; 2 jiffies is a lot more reliable */
- i2c->algo.bit.timeout = 2;
- i2c->algo.bit.data = i2c;
+ i2c->algo.radeon.bit_data.timeout = 2;
+ i2c->algo.radeon.bit_data.data = i2c;
+ ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register internal bit i2c %s\n", name);
+ goto out_free;
+ }
+ /* set the radeon i2c adapter */
+ i2c->dev = dev;
i2c->rec = *rec;
- ret = i2c_bit_add_bus(&i2c->adapter);
+ i2c->adapter.owner = THIS_MODULE;
+ i2c_set_adapdata(&i2c->adapter, i2c);
+ sprintf(i2c->adapter.name, "Radeon i2c %s", name);
+ i2c->adapter.algo_data = &i2c->algo.radeon;
+ i2c->adapter.algo = &radeon_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
if (ret) {
- DRM_INFO("Failed to register i2c %s\n", name);
+ DRM_ERROR("Failed to register i2c %s\n", name);
goto out_free;
}
@@ -237,11 +949,19 @@ out_free:
}
-
void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
+ i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
+ i2c_del_adapter(&i2c->adapter);
+ kfree(i2c);
+}
+
+void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
+{
+ if (!i2c)
+ return;
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
@@ -252,10 +972,10 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
return NULL;
}
-void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
- u8 slave_addr,
- u8 addr,
- u8 *val)
+void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val)
{
u8 out_buf[2];
u8 in_buf[2];
@@ -286,10 +1006,10 @@ void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
}
}
-void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
- u8 slave_addr,
- u8 addr,
- u8 val)
+void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 val)
{
uint8_t out_buf[2];
struct i2c_msg msg = {
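
The register-level engines added in this file sit behind a single i2c algorithm: radeon_i2c_xfer() takes the hardware path where one exists for the ASIC and bus, and otherwise falls back to the bit-banged adapter that is now registered as a second, internal adapter. A minimal sketch of that dispatch shape, with placeholder callbacks instead of the real i2c_algorithm plumbing:

/* Sketch of the hw-engine-or-bitbang dispatch used by radeon_i2c_xfer().
 * The bus record and transfer callbacks are simplified placeholders.
 */
#include <stdio.h>

struct i2c_bus {
	int hw_capable;			/* bus routed to a hw engine? */
	int asic_has_hw_i2c;		/* does this chip implement it yet? */
	int (*hw_xfer)(const char *msg);	/* hw engine path */
	int (*sw_xfer)(const char *msg);	/* bit-banged fallback */
};

static int hw_xfer(const char *msg) { printf("hw:  %s\n", msg); return 0; }
static int sw_xfer(const char *msg) { printf("sw:  %s\n", msg); return 0; }

static int bus_xfer(struct i2c_bus *bus, const char *msg)
{
	if (bus->hw_capable && bus->asic_has_hw_i2c)
		return bus->hw_xfer(msg);
	return bus->sw_xfer(msg);	/* everything else bit-bangs */
}

int main(void)
{
	/* hw i2c not implemented for this chip yet -> falls back to sw */
	struct i2c_bus bus = { 1, 0, hw_xfer, sw_xfer };
	bus_xfer(&bus, "EDID read");
	return 0;
}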
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f23b05606eb..20ec276e759 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -30,6 +30,8 @@
#include "radeon.h"
#include "radeon_drm.h"
+#include <linux/vga_switcheroo.h>
+
int radeon_driver_unload_kms(struct drm_device *dev)
{
struct radeon_device *rdev = dev->dev_private;
@@ -136,6 +138,7 @@ int radeon_driver_firstopen_kms(struct drm_device *dev)
void radeon_driver_lastclose_kms(struct drm_device *dev)
{
+ vga_switcheroo_process_delayed_switch();
}
int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
@@ -276,17 +279,17 @@ struct drm_ioctl_desc radeon_ioctls_kms[] = {
DRM_IOCTL_DEF(DRM_RADEON_SURF_ALLOC, radeon_surface_alloc_kms, DRM_AUTH),
DRM_IOCTL_DEF(DRM_RADEON_SURF_FREE, radeon_surface_free_kms, DRM_AUTH),
/* KMS */
- DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_INFO, radeon_gem_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_CREATE, radeon_gem_create_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_MMAP, radeon_gem_mmap_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_DOMAIN, radeon_gem_set_domain_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PREAD, radeon_gem_pread_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_PWRITE, radeon_gem_pwrite_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_WAIT_IDLE, radeon_gem_wait_idle_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_CS, radeon_cs_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_INFO, radeon_info_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_SET_TILING, radeon_gem_set_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_GET_TILING, radeon_gem_get_tiling_ioctl, DRM_AUTH|DRM_UNLOCKED),
+ DRM_IOCTL_DEF(DRM_RADEON_GEM_BUSY, radeon_gem_busy_ioctl, DRM_AUTH|DRM_UNLOCKED),
};
int radeon_max_kms_ioctl = DRM_ARRAY_SIZE(radeon_ioctls_kms);
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index b6d8081e124..df23d6a01d0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -403,7 +403,7 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
/* if scanout was in GTT this really wouldn't work */
/* crtc offset is from display base addr not FB location */
- radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
+ radeon_crtc->legacy_display_base_addr = rdev->mc.vram_start;
base -= radeon_crtc->legacy_display_base_addr;
@@ -582,29 +582,6 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC_V_SYNC_POL
: 0));
- /* TODO -> Dell Server */
- if (0) {
- uint32_t disp_hw_debug = RREG32(RADEON_DISP_HW_DEBUG);
- uint32_t tv_dac_cntl = RREG32(RADEON_TV_DAC_CNTL);
- uint32_t dac2_cntl = RREG32(RADEON_DAC_CNTL2);
- uint32_t crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL);
-
- dac2_cntl &= ~RADEON_DAC2_DAC_CLK_SEL;
- dac2_cntl |= RADEON_DAC2_DAC2_CLK_SEL;
-
- /* For CRT on DAC2, don't turn it on if BIOS didn't
- enable it, even it's detected.
- */
- disp_hw_debug |= RADEON_CRT2_DISP1_SEL;
- tv_dac_cntl &= ~((1<<2) | (3<<8) | (7<<24) | (0xff<<16));
- tv_dac_cntl |= (0x03 | (2<<8) | (0x58<<16));
-
- WREG32(RADEON_TV_DAC_CNTL, tv_dac_cntl);
- WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
- WREG32(RADEON_DAC_CNTL2, dac2_cntl);
- WREG32(RADEON_CRTC2_GEN_CNTL, crtc2_gen_cntl);
- }
-
if (radeon_crtc->crtc_id) {
uint32_t crtc2_gen_cntl;
uint32_t disp2_merge_cntl;
@@ -726,6 +703,10 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
pll = &rdev->clock.p1pll;
pll->flags = RADEON_PLL_LEGACY;
+ if (radeon_new_pll == 1)
+ pll->algo = PLL_ALGO_NEW;
+ else
+ pll->algo = PLL_ALGO_LEGACY;
if (mode->clock > 200000) /* range limits??? */
pll->flags |= RADEON_PLL_PREFER_HIGH_FB_DIV;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index 38e45e231ef..cf389ce50a8 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
@@ -115,6 +115,9 @@ static void radeon_legacy_lvds_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_lvds_prepare(struct drm_encoder *encoder)
@@ -214,6 +217,11 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* adjust pm to upcoming mode change */
+ radeon_pm_compute_clocks(rdev);
/* set the active encoder to connector routing */
radeon_encoder_set_active_device(encoder);
@@ -285,6 +293,9 @@ static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_primary_dac_prepare(struct drm_encoder *encoder)
@@ -470,6 +481,9 @@ static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_int_prepare(struct drm_encoder *encoder)
@@ -635,6 +649,9 @@ static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tmds_ext_prepare(struct drm_encoder *encoder)
@@ -842,6 +859,9 @@ static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
radeon_atombios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
else
radeon_combios_encoder_dpms_scratch_regs(encoder, (mode == DRM_MODE_DPMS_ON) ? true : false);
+
+ /* adjust pm to dpms change */
+ radeon_pm_compute_clocks(rdev);
}
static void radeon_legacy_tv_dac_prepare(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index e81b2aeb6a8..1702b820aa4 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -83,6 +83,8 @@ struct radeon_i2c_bus_rec {
bool valid;
/* id used by atom */
uint8_t i2c_id;
+ /* id used by atom */
+ uint8_t hpd_id;
/* can be used with hw i2c engine */
bool hw_capable;
/* uses multi-media i2c engine */
@@ -113,6 +115,7 @@ struct radeon_tmds_pll {
#define RADEON_MAX_BIOS_CONNECTOR 16
+/* pll flags */
#define RADEON_PLL_USE_BIOS_DIVS (1 << 0)
#define RADEON_PLL_NO_ODD_POST_DIV (1 << 1)
#define RADEON_PLL_USE_REF_DIV (1 << 2)
@@ -127,6 +130,12 @@ struct radeon_tmds_pll {
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define RADEON_PLL_USE_POST_DIV (1 << 12)
+/* pll algo */
+enum radeon_pll_algo {
+ PLL_ALGO_LEGACY,
+ PLL_ALGO_NEW
+};
+
struct radeon_pll {
/* reference frequency */
uint32_t reference_freq;
@@ -157,6 +166,13 @@ struct radeon_pll {
/* pll id */
uint32_t id;
+ /* pll algo */
+ enum radeon_pll_algo algo;
+};
+
+struct i2c_algo_radeon_data {
+ struct i2c_adapter bit_adapter;
+ struct i2c_algo_bit_data bit_data;
};
struct radeon_i2c_chan {
@@ -164,7 +180,7 @@ struct radeon_i2c_chan {
struct drm_device *dev;
union {
struct i2c_algo_dp_aux_data dp;
- struct i2c_algo_bit_data bit;
+ struct i2c_algo_radeon_data radeon;
} algo;
struct radeon_i2c_bus_rec rec;
};
@@ -193,7 +209,7 @@ struct radeon_mode_info {
struct card_info *atom_card_info;
enum radeon_connector_table connector_table;
bool mode_config_initialized;
- struct radeon_crtc *crtcs[2];
+ struct radeon_crtc *crtcs[6];
/* DVI-I properties */
struct drm_property *coherent_mode_property;
/* DAC enable load detect */
@@ -202,7 +218,8 @@ struct radeon_mode_info {
struct drm_property *tv_std_property;
/* legacy TMDS PLL detect */
struct drm_property *tmds_pll_property;
-
+ /* hardcoded DFP edid from BIOS */
+ struct edid *bios_hardcoded_edid;
};
#define MAX_H_CODE_TIMING_LEN 32
@@ -237,6 +254,7 @@ struct radeon_crtc {
fixed20_12 vsc;
fixed20_12 hsc;
struct drm_display_mode native_mode;
+ int pll_id;
};
struct radeon_encoder_primary_dac {
@@ -303,6 +321,7 @@ struct radeon_encoder_atom_dig {
/* atom lvds */
uint32_t lvds_misc;
uint16_t panel_pwr_delay;
+ enum radeon_pll_algo pll_algo;
struct radeon_atom_ss *ss;
/* panel mode */
struct drm_display_mode native_mode;
@@ -398,6 +417,7 @@ extern void dp_link_train(struct drm_encoder *encoder,
struct drm_connector *connector);
extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action);
extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
int action, uint8_t lane_num,
uint8_t lane_set);
@@ -411,14 +431,15 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
-extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
- u8 slave_addr,
- u8 addr,
- u8 *val);
-extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
- u8 slave_addr,
- u8 addr,
- u8 val);
+extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
+ u8 slave_addr,
+ u8 addr,
+ u8 *val);
+extern void radeon_i2c_put_byte(struct radeon_i2c_chan *i2c,
+ u8 slave_addr,
+ u8 addr,
+ u8 val);
extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
@@ -432,14 +453,6 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
uint32_t *ref_div_p,
uint32_t *post_div_p);
-extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
- uint64_t freq,
- uint32_t *dot_clock_p,
- uint32_t *fb_div_p,
- uint32_t *frac_fb_div_p,
- uint32_t *ref_div_p,
- uint32_t *post_div_p);
-
extern void radeon_setup_encoder_clones(struct drm_device *dev);
struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
@@ -473,6 +486,9 @@ extern int radeon_crtc_cursor_set(struct drm_crtc *crtc,
extern int radeon_crtc_cursor_move(struct drm_crtc *crtc,
int x, int y);
+extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
+extern struct edid *
+radeon_combios_get_hardcoded_edid(struct radeon_device *rdev);
extern bool radeon_atom_get_clock_info(struct drm_device *dev);
extern bool radeon_combios_get_clock_info(struct drm_device *dev);
extern struct radeon_encoder_atom_dig *
@@ -531,7 +547,6 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
void radeon_legacy_init_crtc(struct drm_device *dev,
struct radeon_crtc *radeon_crtc);
-extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
void radeon_get_clock_info(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index f1da370928e..fc9d00ac6b1 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -178,7 +178,6 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
int r, i;
- radeon_ttm_placement_from_domain(bo, domain);
if (bo->pin_count) {
bo->pin_count++;
if (gpu_addr)
@@ -186,6 +185,8 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
+ /* force to pin into visible video ram */
+ bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
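
The extra placement.lpfn assignment above caps pinned buffers at the CPU-visible part of VRAM, since scanout and cursor buffers must stay mappable through the PCI aperture. The limit is just the visible size expressed in pages; a small sketch of that conversion, with invented names and sizes:

/* Sketch: clamp pinned placements to CPU-visible VRAM, expressed in pages.
 * Names and sizes are illustrative only.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t visible_vram_size = 256ull << 20;		/* 256 MiB aperture */
	uint64_t lpfn = visible_vram_size >> PAGE_SHIFT;	/* last allowed page */

	uint64_t bo_start_pfn = 60000;				/* hypothetical buffer */
	printf("limit = %llu pages, buffer %s the visible range\n",
	       (unsigned long long)lpfn,
	       bo_start_pfn < lpfn ? "fits in" : "exceeds");
	return 0;
}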
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 8bce64cdc32..d4d1c39a0e9 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -18,21 +18,413 @@
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Rafał Miłecki <zajec5@gmail.com>
+ * Alex Deucher <alexdeucher@gmail.com>
*/
#include "drmP.h"
#include "radeon.h"
+#include "avivod.h"
-int radeon_debugfs_pm_init(struct radeon_device *rdev);
+#define RADEON_IDLE_LOOP_MS 100
+#define RADEON_RECLOCK_DELAY_MS 200
+#define RADEON_WAIT_VBLANK_TIMEOUT 200
+
+static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
+static void radeon_pm_set_clocks(struct radeon_device *rdev);
+static void radeon_pm_idle_work_handler(struct work_struct *work);
+static int radeon_debugfs_pm_init(struct radeon_device *rdev);
+
+static const char *pm_state_names[4] = {
+ "PM_STATE_DISABLED",
+ "PM_STATE_MINIMUM",
+ "PM_STATE_PAUSED",
+ "PM_STATE_ACTIVE"
+};
+
+static const char *pm_state_types[5] = {
+ "Default",
+ "Powersave",
+ "Battery",
+ "Balanced",
+ "Performance",
+};
+
+static void radeon_print_power_mode_info(struct radeon_device *rdev)
+{
+ int i, j;
+ bool is_default;
+
+ DRM_INFO("%d Power State(s)\n", rdev->pm.num_power_states);
+ for (i = 0; i < rdev->pm.num_power_states; i++) {
+ if (rdev->pm.default_power_state == &rdev->pm.power_state[i])
+ is_default = true;
+ else
+ is_default = false;
+ DRM_INFO("State %d %s %s\n", i,
+ pm_state_types[rdev->pm.power_state[i].type],
+ is_default ? "(default)" : "");
+ if ((rdev->flags & RADEON_IS_PCIE) && !(rdev->flags & RADEON_IS_IGP))
+ DRM_INFO("\t%d PCIE Lanes\n", rdev->pm.power_state[i].non_clock_info.pcie_lanes);
+ DRM_INFO("\t%d Clock Mode(s)\n", rdev->pm.power_state[i].num_clock_modes);
+ for (j = 0; j < rdev->pm.power_state[i].num_clock_modes; j++) {
+ if (rdev->flags & RADEON_IS_IGP)
+ DRM_INFO("\t\t%d engine: %d\n",
+ j,
+ rdev->pm.power_state[i].clock_info[j].sclk * 10);
+ else
+ DRM_INFO("\t\t%d engine/memory: %d/%d\n",
+ j,
+ rdev->pm.power_state[i].clock_info[j].sclk * 10,
+ rdev->pm.power_state[i].clock_info[j].mclk * 10);
+ }
+ }
+}
+
+static struct radeon_power_state * radeon_pick_power_state(struct radeon_device *rdev,
+ enum radeon_pm_state_type type)
+{
+ int i, j;
+ enum radeon_pm_state_type wanted_types[2];
+ int wanted_count;
+
+ switch (type) {
+ case POWER_STATE_TYPE_DEFAULT:
+ default:
+ return rdev->pm.default_power_state;
+ case POWER_STATE_TYPE_POWERSAVE:
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ wanted_types[0] = POWER_STATE_TYPE_POWERSAVE;
+ wanted_types[1] = POWER_STATE_TYPE_BATTERY;
+ wanted_count = 2;
+ } else {
+ wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
+ wanted_count = 1;
+ }
+ break;
+ case POWER_STATE_TYPE_BATTERY:
+ if (rdev->flags & RADEON_IS_MOBILITY) {
+ wanted_types[0] = POWER_STATE_TYPE_BATTERY;
+ wanted_types[1] = POWER_STATE_TYPE_POWERSAVE;
+ wanted_count = 2;
+ } else {
+ wanted_types[0] = POWER_STATE_TYPE_PERFORMANCE;
+ wanted_count = 1;
+ }
+ break;
+ case POWER_STATE_TYPE_BALANCED:
+ case POWER_STATE_TYPE_PERFORMANCE:
+ wanted_types[0] = type;
+ wanted_count = 1;
+ break;
+ }
+
+ for (i = 0; i < wanted_count; i++) {
+ for (j = 0; j < rdev->pm.num_power_states; j++) {
+ if (rdev->pm.power_state[j].type == wanted_types[i])
+ return &rdev->pm.power_state[j];
+ }
+ }
+
+ return rdev->pm.default_power_state;
+}
+
+static struct radeon_pm_clock_info * radeon_pick_clock_mode(struct radeon_device *rdev,
+ struct radeon_power_state *power_state,
+ enum radeon_pm_clock_mode_type type)
+{
+ switch (type) {
+ case POWER_MODE_TYPE_DEFAULT:
+ default:
+ return power_state->default_clock_mode;
+ case POWER_MODE_TYPE_LOW:
+ return &power_state->clock_info[0];
+ case POWER_MODE_TYPE_MID:
+ if (power_state->num_clock_modes > 2)
+ return &power_state->clock_info[1];
+ else
+ return &power_state->clock_info[0];
+ break;
+ case POWER_MODE_TYPE_HIGH:
+ return &power_state->clock_info[power_state->num_clock_modes - 1];
+ }
+
+}
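
radeon_pick_power_state() and radeon_pick_clock_mode() above turn a requested class (battery, powersave, performance, ...) into a concrete entry from the table parsed out of the BIOS, falling back to the default state when nothing matches. A compact user-space sketch of that two-level lookup, with simplified types in place of the driver's power state structures:

/* Sketch of the two-level power state lookup: pick a state by type,
 * then a clock mode (low/mid/high) inside it. Simplified types only.
 */
#include <stdio.h>

enum state_type { ST_DEFAULT, ST_BATTERY, ST_POWERSAVE, ST_PERFORMANCE };
enum mode_type  { MODE_LOW, MODE_MID, MODE_HIGH };

struct power_state {
	enum state_type type;
	int num_modes;
	int sclk[3];		/* engine clock per clock mode, 10 kHz units */
};

static struct power_state states[] = {
	{ ST_BATTERY,     2, { 11000, 30000 } },
	{ ST_PERFORMANCE, 3, { 11000, 30000, 60000 } },
};
static struct power_state *default_state = &states[1];

static struct power_state *pick_state(enum state_type wanted)
{
	for (unsigned i = 0; i < sizeof(states) / sizeof(states[0]); i++)
		if (states[i].type == wanted)
			return &states[i];
	return default_state;	/* nothing matched */
}

static int pick_mode(const struct power_state *ps, enum mode_type m)
{
	switch (m) {
	case MODE_LOW:	return ps->sclk[0];
	case MODE_MID:	return ps->sclk[ps->num_modes > 2 ? 1 : 0];
	default:	return ps->sclk[ps->num_modes - 1];
	}
}

int main(void)
{
	struct power_state *ps = pick_state(ST_BATTERY);
	printf("battery low clock: %d (10 kHz units)\n", pick_mode(ps, MODE_LOW));
	return 0;
}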
+
+static void radeon_get_power_state(struct radeon_device *rdev,
+ enum radeon_pm_action action)
+{
+ switch (action) {
+ case PM_ACTION_MINIMUM:
+ rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_BATTERY);
+ rdev->pm.requested_clock_mode =
+ radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_LOW);
+ break;
+ case PM_ACTION_DOWNCLOCK:
+ rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_POWERSAVE);
+ rdev->pm.requested_clock_mode =
+ radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_MID);
+ break;
+ case PM_ACTION_UPCLOCK:
+ rdev->pm.requested_power_state = radeon_pick_power_state(rdev, POWER_STATE_TYPE_DEFAULT);
+ rdev->pm.requested_clock_mode =
+ radeon_pick_clock_mode(rdev, rdev->pm.requested_power_state, POWER_MODE_TYPE_HIGH);
+ break;
+ case PM_ACTION_NONE:
+ default:
+		DRM_ERROR("Requested mode for undefined action\n");
+ return;
+ }
+ DRM_INFO("Requested: e: %d m: %d p: %d\n",
+ rdev->pm.requested_clock_mode->sclk,
+ rdev->pm.requested_clock_mode->mclk,
+ rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+}
+
+static void radeon_set_power_state(struct radeon_device *rdev)
+{
+ /* if *_clock_mode are the same, *_power_state are as well */
+ if (rdev->pm.requested_clock_mode == rdev->pm.current_clock_mode)
+ return;
+
+ DRM_INFO("Setting: e: %d m: %d p: %d\n",
+ rdev->pm.requested_clock_mode->sclk,
+ rdev->pm.requested_clock_mode->mclk,
+ rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+ /* set pcie lanes */
+ /* set voltage */
+ /* set engine clock */
+ radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
+ /* set memory clock */
+
+ rdev->pm.current_power_state = rdev->pm.requested_power_state;
+ rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
+}
int radeon_pm_init(struct radeon_device *rdev)
{
+ rdev->pm.state = PM_STATE_DISABLED;
+ rdev->pm.planned_action = PM_ACTION_NONE;
+ rdev->pm.downclocked = false;
+
+ if (rdev->bios) {
+ if (rdev->is_atom_bios)
+ radeon_atombios_get_power_modes(rdev);
+ else
+ radeon_combios_get_power_modes(rdev);
+ radeon_print_power_mode_info(rdev);
+ }
+
if (radeon_debugfs_pm_init(rdev)) {
DRM_ERROR("Failed to register debugfs file for PM!\n");
}
+ INIT_DELAYED_WORK(&rdev->pm.idle_work, radeon_pm_idle_work_handler);
+
+ if (radeon_dynpm != -1 && radeon_dynpm) {
+ rdev->pm.state = PM_STATE_PAUSED;
+ DRM_INFO("radeon: dynamic power management enabled\n");
+ }
+
+ DRM_INFO("radeon: power management initialized\n");
+
return 0;
}
+void radeon_pm_compute_clocks(struct radeon_device *rdev)
+{
+ struct drm_device *ddev = rdev->ddev;
+ struct drm_connector *connector;
+ struct radeon_crtc *radeon_crtc;
+ int count = 0;
+
+ if (rdev->pm.state == PM_STATE_DISABLED)
+ return;
+
+ mutex_lock(&rdev->pm.mutex);
+
+ rdev->pm.active_crtcs = 0;
+ list_for_each_entry(connector,
+ &ddev->mode_config.connector_list, head) {
+ if (connector->encoder &&
+ connector->dpms != DRM_MODE_DPMS_OFF) {
+ radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
+ rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
+ ++count;
+ }
+ }
+
+ if (count > 1) {
+ if (rdev->pm.state == PM_STATE_ACTIVE) {
+ cancel_delayed_work(&rdev->pm.idle_work);
+
+ rdev->pm.state = PM_STATE_PAUSED;
+ rdev->pm.planned_action = PM_ACTION_UPCLOCK;
+ if (rdev->pm.downclocked)
+ radeon_pm_set_clocks(rdev);
+
+ DRM_DEBUG("radeon: dynamic power management deactivated\n");
+ }
+ } else if (count == 1) {
+ /* TODO: Increase clocks if needed for current mode */
+
+ if (rdev->pm.state == PM_STATE_MINIMUM) {
+ rdev->pm.state = PM_STATE_ACTIVE;
+ rdev->pm.planned_action = PM_ACTION_UPCLOCK;
+ radeon_pm_set_clocks(rdev);
+
+ queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ }
+ else if (rdev->pm.state == PM_STATE_PAUSED) {
+ rdev->pm.state = PM_STATE_ACTIVE;
+ queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+ DRM_DEBUG("radeon: dynamic power management activated\n");
+ }
+ }
+ else { /* count == 0 */
+ if (rdev->pm.state != PM_STATE_MINIMUM) {
+ cancel_delayed_work(&rdev->pm.idle_work);
+
+ rdev->pm.state = PM_STATE_MINIMUM;
+ rdev->pm.planned_action = PM_ACTION_MINIMUM;
+ radeon_pm_set_clocks(rdev);
+ }
+ }
+
+ mutex_unlock(&rdev->pm.mutex);
+}
+
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish)
+{
+ u32 stat_crtc1 = 0, stat_crtc2 = 0;
+ bool in_vbl = true;
+
+ if (ASIC_IS_AVIVO(rdev)) {
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ stat_crtc1 = RREG32(D1CRTC_STATUS);
+ if (!(stat_crtc1 & 1))
+ in_vbl = false;
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ stat_crtc2 = RREG32(D2CRTC_STATUS);
+ if (!(stat_crtc2 & 1))
+ in_vbl = false;
+ }
+ }
+ if (in_vbl == false)
+ DRM_INFO("not in vbl for pm change %08x %08x at %s\n", stat_crtc1,
+ stat_crtc2, finish ? "exit" : "entry");
+ return in_vbl;
+}
+static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
+{
+ /*radeon_fence_wait_last(rdev);*/
+ switch (rdev->pm.planned_action) {
+ case PM_ACTION_UPCLOCK:
+ rdev->pm.downclocked = false;
+ break;
+ case PM_ACTION_DOWNCLOCK:
+ rdev->pm.downclocked = true;
+ break;
+ case PM_ACTION_MINIMUM:
+ break;
+ case PM_ACTION_NONE:
+ DRM_ERROR("%s: PM_ACTION_NONE\n", __func__);
+ break;
+ }
+
+ /* check if we are in vblank */
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_power_state(rdev);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ rdev->pm.planned_action = PM_ACTION_NONE;
+}
+
+static void radeon_pm_set_clocks(struct radeon_device *rdev)
+{
+ radeon_get_power_state(rdev, rdev->pm.planned_action);
+ mutex_lock(&rdev->cp.mutex);
+
+ if (rdev->pm.active_crtcs & (1 << 0)) {
+ rdev->pm.req_vblank |= (1 << 0);
+ drm_vblank_get(rdev->ddev, 0);
+ }
+ if (rdev->pm.active_crtcs & (1 << 1)) {
+ rdev->pm.req_vblank |= (1 << 1);
+ drm_vblank_get(rdev->ddev, 1);
+ }
+ if (rdev->pm.active_crtcs)
+ wait_event_interruptible_timeout(
+ rdev->irq.vblank_queue, 0,
+ msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+ if (rdev->pm.req_vblank & (1 << 0)) {
+ rdev->pm.req_vblank &= ~(1 << 0);
+ drm_vblank_put(rdev->ddev, 0);
+ }
+ if (rdev->pm.req_vblank & (1 << 1)) {
+ rdev->pm.req_vblank &= ~(1 << 1);
+ drm_vblank_put(rdev->ddev, 1);
+ }
+
+ radeon_pm_set_clocks_locked(rdev);
+ mutex_unlock(&rdev->cp.mutex);
+}
+
+static void radeon_pm_idle_work_handler(struct work_struct *work)
+{
+ struct radeon_device *rdev;
+ rdev = container_of(work, struct radeon_device,
+ pm.idle_work.work);
+
+ mutex_lock(&rdev->pm.mutex);
+ if (rdev->pm.state == PM_STATE_ACTIVE) {
+ unsigned long irq_flags;
+ int not_processed = 0;
+
+ read_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
+ if (!list_empty(&rdev->fence_drv.emited)) {
+ struct list_head *ptr;
+ list_for_each(ptr, &rdev->fence_drv.emited) {
+			/* count up to 3, that's enough info */
+ if (++not_processed >= 3)
+ break;
+ }
+ }
+ read_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
+
+ if (not_processed >= 3) { /* should upclock */
+ if (rdev->pm.planned_action == PM_ACTION_DOWNCLOCK) {
+ rdev->pm.planned_action = PM_ACTION_NONE;
+ } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
+ rdev->pm.downclocked) {
+ rdev->pm.planned_action =
+ PM_ACTION_UPCLOCK;
+ rdev->pm.action_timeout = jiffies +
+ msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+ }
+ } else if (not_processed == 0) { /* should downclock */
+ if (rdev->pm.planned_action == PM_ACTION_UPCLOCK) {
+ rdev->pm.planned_action = PM_ACTION_NONE;
+ } else if (rdev->pm.planned_action == PM_ACTION_NONE &&
+ !rdev->pm.downclocked) {
+ rdev->pm.planned_action =
+ PM_ACTION_DOWNCLOCK;
+ rdev->pm.action_timeout = jiffies +
+ msecs_to_jiffies(RADEON_RECLOCK_DELAY_MS);
+ }
+ }
+
+ if (rdev->pm.planned_action != PM_ACTION_NONE &&
+ jiffies > rdev->pm.action_timeout) {
+ radeon_pm_set_clocks(rdev);
+ }
+ }
+ mutex_unlock(&rdev->pm.mutex);
+
+ queue_delayed_work(rdev->wq, &rdev->pm.idle_work,
+ msecs_to_jiffies(RADEON_IDLE_LOOP_MS));
+}
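
The idle worker above uses the number of outstanding fences as a rough load signal: three or more pending means upclock, zero means downclock, and a planned change only fires once its RADEON_RECLOCK_DELAY_MS timeout expires, which debounces rapid load swings. A small sketch of that hysteresis, with plain integers standing in for jiffies and the fence list:

/* Sketch of the fence-count reclock heuristic with a debounce timeout.
 * Plain ints stand in for jiffies and the fence list length.
 */
#include <stdio.h>

enum action { NONE, UPCLOCK, DOWNCLOCK };

struct pm {
	enum action planned;
	int downclocked;
	long action_timeout;	/* "time" after which the change may fire */
};

static void pm_tick(struct pm *pm, int pending_fences, long now, long delay)
{
	if (pending_fences >= 3) {		/* busy: want high clocks */
		if (pm->planned == DOWNCLOCK)
			pm->planned = NONE;
		else if (pm->planned == NONE && pm->downclocked) {
			pm->planned = UPCLOCK;
			pm->action_timeout = now + delay;
		}
	} else if (pending_fences == 0) {	/* idle: want low clocks */
		if (pm->planned == UPCLOCK)
			pm->planned = NONE;
		else if (pm->planned == NONE && !pm->downclocked) {
			pm->planned = DOWNCLOCK;
			pm->action_timeout = now + delay;
		}
	}
	if (pm->planned != NONE && now > pm->action_timeout) {
		pm->downclocked = (pm->planned == DOWNCLOCK);
		printf("reclock: %s\n", pm->downclocked ? "down" : "up");
		pm->planned = NONE;
	}
}

int main(void)
{
	struct pm pm = { NONE, 0, 0 };
	for (long t = 0; t < 10; t++)
		pm_tick(&pm, 0, t, 2);	/* stays idle: downclocks once */
	return 0;
}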
+
/*
* Debugfs info
*/
@@ -44,11 +436,14 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct radeon_device *rdev = dev->dev_private;
+ seq_printf(m, "state: %s\n", pm_state_names[rdev->pm.state]);
seq_printf(m, "default engine clock: %u0 kHz\n", rdev->clock.default_sclk);
seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
seq_printf(m, "default memory clock: %u0 kHz\n", rdev->clock.default_mclk);
if (rdev->asic->get_memory_clock)
seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
+ if (rdev->asic->get_pcie_lanes)
+ seq_printf(m, "PCIE lanes: %d\n", radeon_get_pcie_lanes(rdev));
return 0;
}
@@ -58,7 +453,7 @@ static struct drm_info_list radeon_pm_info_list[] = {
};
#endif
-int radeon_debugfs_pm_init(struct radeon_device *rdev)
+static int radeon_debugfs_pm_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
return radeon_debugfs_add_files(rdev, radeon_pm_info_list, ARRAY_SIZE(radeon_pm_info_list));
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 6d0a009dd4a..5c0dc082d33 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -54,7 +54,7 @@
#include "r300_reg.h"
#include "r500_reg.h"
#include "r600_reg.h"
-
+#include "evergreen_reg.h"
#define RADEON_MC_AGP_LOCATION 0x014c
#define RADEON_MC_AGP_START_MASK 0x0000FFFF
@@ -1060,32 +1060,38 @@
/* Multimedia I2C bus */
#define RADEON_I2C_CNTL_0 0x0090
-#define RADEON_I2C_DONE (1 << 0)
-#define RADEON_I2C_NACK (1 << 1)
-#define RADEON_I2C_HALT (1 << 2)
-#define RADEON_I2C_SOFT_RST (1 << 5)
-#define RADEON_I2C_DRIVE_EN (1 << 6)
-#define RADEON_I2C_DRIVE_SEL (1 << 7)
-#define RADEON_I2C_START (1 << 8)
-#define RADEON_I2C_STOP (1 << 9)
-#define RADEON_I2C_RECEIVE (1 << 10)
-#define RADEON_I2C_ABORT (1 << 11)
-#define RADEON_I2C_GO (1 << 12)
-#define RADEON_I2C_PRESCALE_SHIFT 16
+# define RADEON_I2C_DONE (1 << 0)
+# define RADEON_I2C_NACK (1 << 1)
+# define RADEON_I2C_HALT (1 << 2)
+# define RADEON_I2C_SOFT_RST (1 << 5)
+# define RADEON_I2C_DRIVE_EN (1 << 6)
+# define RADEON_I2C_DRIVE_SEL (1 << 7)
+# define RADEON_I2C_START (1 << 8)
+# define RADEON_I2C_STOP (1 << 9)
+# define RADEON_I2C_RECEIVE (1 << 10)
+# define RADEON_I2C_ABORT (1 << 11)
+# define RADEON_I2C_GO (1 << 12)
+# define RADEON_I2C_PRESCALE_SHIFT 16
#define RADEON_I2C_CNTL_1 0x0094
-#define RADEON_I2C_DATA_COUNT_SHIFT 0
-#define RADEON_I2C_ADDR_COUNT_SHIFT 4
-#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
-#define RADEON_I2C_SEL (1 << 16)
-#define RADEON_I2C_EN (1 << 17)
-#define RADEON_I2C_TIME_LIMIT_SHIFT 24
+# define RADEON_I2C_DATA_COUNT_SHIFT 0
+# define RADEON_I2C_ADDR_COUNT_SHIFT 4
+# define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT 8
+# define RADEON_I2C_SEL (1 << 16)
+# define RADEON_I2C_EN (1 << 17)
+# define RADEON_I2C_TIME_LIMIT_SHIFT 24
#define RADEON_I2C_DATA 0x0098
#define RADEON_DVI_I2C_CNTL_0 0x02e0
# define R200_DVI_I2C_PIN_SEL(x) ((x) << 3)
-# define R200_SEL_DDC1 0 /* 0x60 - VGA_DDC */
-# define R200_SEL_DDC2 1 /* 0x64 - DVI_DDC */
-# define R200_SEL_DDC3 2 /* 0x68 - MONID_DDC */
+# define R200_SEL_DDC1 0 /* depends on asic */
+# define R200_SEL_DDC2 1 /* depends on asic */
+# define R200_SEL_DDC3 2 /* depends on asic */
+# define RADEON_SW_WANTS_TO_USE_DVI_I2C (1 << 13)
+# define RADEON_SW_CAN_USE_DVI_I2C (1 << 13)
+# define RADEON_SW_DONE_USING_DVI_I2C (1 << 14)
+# define RADEON_HW_NEEDS_DVI_I2C (1 << 14)
+# define RADEON_ABORT_HW_DVI_I2C (1 << 15)
+# define RADEON_HW_USING_DVI_I2C (1 << 15)
#define RADEON_DVI_I2C_CNTL_1 0x02e4
#define RADEON_DVI_I2C_DATA 0x02e8
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 6579eb4c1f2..e50513a6273 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -34,6 +34,36 @@
int radeon_debugfs_ib_init(struct radeon_device *rdev);
+void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
+{
+ struct radeon_ib *ib, *n;
+
+ list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
+ list_del(&ib->list);
+ vfree(ib->ptr);
+ kfree(ib);
+ }
+}
+
+void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
+{
+ struct radeon_ib *bib;
+
+ bib = kmalloc(sizeof(*bib), GFP_KERNEL);
+ if (bib == NULL)
+ return;
+ bib->ptr = vmalloc(ib->length_dw * 4);
+ if (bib->ptr == NULL) {
+ kfree(bib);
+ return;
+ }
+ memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
+ bib->length_dw = ib->length_dw;
+ mutex_lock(&rdev->ib_pool.mutex);
+ list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
+ mutex_unlock(&rdev->ib_pool.mutex);
+}
+
/*
* IB.
*/
@@ -142,6 +172,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
if (rdev->ib_pool.robj)
return 0;
+ INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
/* Allocate 1M object buffer */
r = radeon_bo_create(rdev, NULL, RADEON_IB_POOL_SIZE*64*1024,
true, RADEON_GEM_DOMAIN_GTT,
@@ -192,6 +223,8 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
return;
}
mutex_lock(&rdev->ib_pool.mutex);
+ radeon_ib_bogus_cleanup(rdev);
+
if (rdev->ib_pool.robj) {
r = radeon_bo_reserve(rdev->ib_pool.robj, false);
if (likely(r == 0)) {
@@ -349,15 +382,49 @@ static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
return 0;
}
+static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct radeon_device *rdev = node->info_ent->data;
+ struct radeon_ib *ib;
+ unsigned i;
+
+ mutex_lock(&rdev->ib_pool.mutex);
+ if (list_empty(&rdev->ib_pool.bogus_ib)) {
+ mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "no bogus IB recorded\n");
+ return 0;
+ }
+ ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
+ list_del_init(&ib->list);
+ mutex_unlock(&rdev->ib_pool.mutex);
+ seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
+ for (i = 0; i < ib->length_dw; i++) {
+ seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
+ }
+ vfree(ib->ptr);
+ kfree(ib);
+ return 0;
+}
+
static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
+
+static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
+ {"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
+};
#endif
int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
+ int r;
+ radeon_debugfs_ib_bogus_info_list[0].data = rdev;
+ r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
+ if (r)
+ return r;
for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 067167cb39c..3c32f840dcd 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -29,6 +29,7 @@
#include "drmP.h"
#include "drm.h"
+#include "drm_buffer.h"
#include "drm_sarea.h"
#include "radeon_drm.h"
#include "radeon_drv.h"
@@ -91,21 +92,27 @@ static __inline__ int radeon_check_and_fixup_offset(drm_radeon_private_t *
static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
dev_priv,
struct drm_file *file_priv,
- int id, u32 *data)
+ int id, struct drm_buffer *buf)
{
+ u32 *data;
switch (id) {
case RADEON_EMIT_PP_MISC:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[(RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4])) {
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_RB3D_DEPTHOFFSET - RADEON_PP_MISC) / 4);
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid depth buffer offset\n");
return -EINVAL;
}
+ dev_priv->have_z_offset = 1;
break;
case RADEON_EMIT_PP_CNTL:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[(RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4])) {
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_RB3D_COLOROFFSET - RADEON_PP_CNTL) / 4);
+
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid colour buffer offset\n");
return -EINVAL;
}
@@ -117,8 +124,8 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_PP_TXOFFSET_3:
case R200_EMIT_PP_TXOFFSET_4:
case R200_EMIT_PP_TXOFFSET_5:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[0])) {
+ data = drm_buffer_pointer_to_dword(buf, 0);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid R200 texture offset\n");
return -EINVAL;
}
@@ -127,8 +134,9 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_TXFILTER_0:
case RADEON_EMIT_PP_TXFILTER_1:
case RADEON_EMIT_PP_TXFILTER_2:
- if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &data[(RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4])) {
+ data = drm_buffer_pointer_to_dword(buf,
+ (RADEON_PP_TXOFFSET_0 - RADEON_PP_TXFILTER_0) / 4);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, data)) {
DRM_ERROR("Invalid R100 texture offset\n");
return -EINVAL;
}
@@ -142,9 +150,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case R200_EMIT_PP_CUBIC_OFFSETS_5:{
int i;
for (i = 0; i < 5; i++) {
+ data = drm_buffer_pointer_to_dword(buf, i);
if (radeon_check_and_fixup_offset(dev_priv,
file_priv,
- &data[i])) {
+ data)) {
DRM_ERROR
("Invalid R200 cubic texture offset\n");
return -EINVAL;
@@ -158,9 +167,10 @@ static __inline__ int radeon_check_and_fixup_packets(drm_radeon_private_t *
case RADEON_EMIT_PP_CUBIC_OFFSETS_T2:{
int i;
for (i = 0; i < 5; i++) {
+ data = drm_buffer_pointer_to_dword(buf, i);
if (radeon_check_and_fixup_offset(dev_priv,
file_priv,
- &data[i])) {
+ data)) {
DRM_ERROR
("Invalid R100 cubic texture offset\n");
return -EINVAL;
@@ -269,23 +279,24 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
cmdbuf,
unsigned int *cmdsz)
{
- u32 *cmd = (u32 *) cmdbuf->buf;
+ u32 *cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 0);
u32 offset, narrays;
int count, i, k;
- *cmdsz = 2 + ((cmd[0] & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+ count = ((*cmd & RADEON_CP_PACKET_COUNT_MASK) >> 16);
+ *cmdsz = 2 + count;
- if ((cmd[0] & 0xc0000000) != RADEON_CP_PACKET3) {
+ if ((*cmd & 0xc0000000) != RADEON_CP_PACKET3) {
DRM_ERROR("Not a type 3 packet\n");
return -EINVAL;
}
- if (4 * *cmdsz > cmdbuf->bufsz) {
+ if (4 * *cmdsz > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR("Packet size larger than size of data provided\n");
return -EINVAL;
}
- switch(cmd[0] & 0xff00) {
+ switch (*cmd & 0xff00) {
/* XXX Are there old drivers needing other packets? */
case RADEON_3D_DRAW_IMMD:
@@ -312,7 +323,6 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
break;
case RADEON_3D_LOAD_VBPNTR:
- count = (cmd[0] >> 16) & 0x3fff;
if (count > 18) { /* 12 arrays max */
DRM_ERROR("Too large payload in 3D_LOAD_VBPNTR (count=%d)\n",
@@ -321,13 +331,16 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
}
/* carefully check packet contents */
- narrays = cmd[1] & ~0xc000;
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+
+ narrays = *cmd & ~0xc000;
k = 0;
i = 2;
while ((k < narrays) && (i < (count + 2))) {
i++; /* skip attribute field */
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
if (radeon_check_and_fixup_offset(dev_priv, file_priv,
- &cmd[i])) {
+ cmd)) {
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
k, i);
@@ -338,8 +351,10 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
if (k == narrays)
break;
/* have one more to process, they come in pairs */
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, i);
+
if (radeon_check_and_fixup_offset(dev_priv,
- file_priv, &cmd[i]))
+ file_priv, cmd))
{
DRM_ERROR
("Invalid offset (k=%d i=%d) in 3D_LOAD_VBPNTR packet.\n",
@@ -363,7 +378,9 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
DRM_ERROR("Invalid 3d packet for r200-class chip\n");
return -EINVAL;
}
- if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[1])) {
+
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
DRM_ERROR("Invalid rndr_gen_indx offset\n");
return -EINVAL;
}
@@ -374,12 +391,15 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
DRM_ERROR("Invalid 3d packet for r100-class chip\n");
return -EINVAL;
}
- if ((cmd[1] & 0x8000ffff) != 0x80000810) {
- DRM_ERROR("Invalid indx_buffer reg address %08X\n", cmd[1]);
+
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if ((*cmd & 0x8000ffff) != 0x80000810) {
+ DRM_ERROR("Invalid indx_buffer reg address %08X\n", *cmd);
return -EINVAL;
}
- if (radeon_check_and_fixup_offset(dev_priv, file_priv, &cmd[2])) {
- DRM_ERROR("Invalid indx_buffer offset is %08X\n", cmd[2]);
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ if (radeon_check_and_fixup_offset(dev_priv, file_priv, cmd)) {
+ DRM_ERROR("Invalid indx_buffer offset is %08X\n", *cmd);
return -EINVAL;
}
break;
@@ -388,31 +408,34 @@ static __inline__ int radeon_check_and_fixup_packet3(drm_radeon_private_t *
case RADEON_CNTL_PAINT_MULTI:
case RADEON_CNTL_BITBLT_MULTI:
/* MSB of opcode: next DWORD GUI_CNTL */
- if (cmd[1] & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
+ cmd = drm_buffer_pointer_to_dword(cmdbuf->buffer, 1);
+ if (*cmd & (RADEON_GMC_SRC_PITCH_OFFSET_CNTL
| RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[2] << 10;
+ u32 *cmd2 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 2);
+ offset = *cmd2 << 10;
if (radeon_check_and_fixup_offset
(dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid first packet offset\n");
return -EINVAL;
}
- cmd[2] = (cmd[2] & 0xffc00000) | offset >> 10;
+ *cmd2 = (*cmd2 & 0xffc00000) | offset >> 10;
}
- if ((cmd[1] & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
- (cmd[1] & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
- offset = cmd[3] << 10;
+ if ((*cmd & RADEON_GMC_SRC_PITCH_OFFSET_CNTL) &&
+ (*cmd & RADEON_GMC_DST_PITCH_OFFSET_CNTL)) {
+ u32 *cmd3 = drm_buffer_pointer_to_dword(cmdbuf->buffer, 3);
+ offset = *cmd3 << 10;
if (radeon_check_and_fixup_offset
(dev_priv, file_priv, &offset)) {
DRM_ERROR("Invalid second packet offset\n");
return -EINVAL;
}
- cmd[3] = (cmd[3] & 0xffc00000) | offset >> 10;
+ *cmd3 = (*cmd3 & 0xffc00000) | offset >> 10;
}
break;
default:
- DRM_ERROR("Invalid packet type %x\n", cmd[0] & 0xff00);
+ DRM_ERROR("Invalid packet type %x\n", *cmd & 0xff00);
return -EINVAL;
}
@@ -876,6 +899,11 @@ static void radeon_cp_dispatch_clear(struct drm_device * dev,
if (tmp & RADEON_BACK)
flags |= RADEON_FRONT;
}
+ if (flags & (RADEON_DEPTH|RADEON_STENCIL)) {
+ if (!dev_priv->have_z_offset)
+ printk_once(KERN_ERR "radeon: illegal depth clear request. Buggy mesa detected - please update.\n");
+ flags &= ~(RADEON_DEPTH | RADEON_STENCIL);
+ }
if (flags & (RADEON_FRONT | RADEON_BACK)) {
@@ -2611,7 +2639,6 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
{
int id = (int)header.packet.packet_id;
int sz, reg;
- int *data = (int *)cmdbuf->buf;
RING_LOCALS;
if (id >= RADEON_MAX_STATE_PACKETS)
@@ -2620,23 +2647,22 @@ static int radeon_emit_packets(drm_radeon_private_t * dev_priv,
sz = packet[id].len;
reg = packet[id].start;
- if (sz * sizeof(int) > cmdbuf->bufsz) {
+ if (sz * sizeof(u32) > drm_buffer_unprocessed(cmdbuf->buffer)) {
DRM_ERROR("Packet size provided larger than data provided\n");
return -EINVAL;
}
- if (radeon_check_and_fixup_packets(dev_priv, file_priv, id, data)) {
+ if (radeon_check_and_fixup_packets(dev_priv, file_priv, id,
+ cmdbuf->buffer)) {
DRM_ERROR("Packet verification failed\n");
return -EINVAL;
}
BEGIN_RING(sz + 1);
OUT_RING(CP_PACKET0(reg, (sz - 1)));
- OUT_RING_TABLE(data, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2653,10 +2679,8 @@ static __inline__ int radeon_emit_scalars(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2675,10 +2699,8 @@ static __inline__ int radeon_emit_scalars2(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_SCALAR_INDX_REG, 0));
OUT_RING(start | (stride << RADEON_SCAL_INDX_DWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_SCALAR_DATA_REG, sz - 1));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2696,11 +2718,9 @@ static __inline__ int radeon_emit_vectors(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
OUT_RING(start | (stride << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2714,7 +2734,7 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
if (!sz)
return 0;
- if (sz * 4 > cmdbuf->bufsz)
+ if (sz * 4 > drm_buffer_unprocessed(cmdbuf->buffer))
return -EINVAL;
BEGIN_RING(5 + sz);
@@ -2722,11 +2742,9 @@ static __inline__ int radeon_emit_veclinear(drm_radeon_private_t *dev_priv,
OUT_RING(CP_PACKET0(RADEON_SE_TCL_VECTOR_INDX_REG, 0));
OUT_RING(start | (1 << RADEON_VEC_INDX_OCTWORD_STRIDE_SHIFT));
OUT_RING(CP_PACKET0_TABLE(RADEON_SE_TCL_VECTOR_DATA_REG, (sz - 1)));
- OUT_RING_TABLE(cmdbuf->buf, sz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, sz);
ADVANCE_RING();
- cmdbuf->buf += sz * sizeof(int);
- cmdbuf->bufsz -= sz * sizeof(int);
return 0;
}
@@ -2748,11 +2766,9 @@ static int radeon_emit_packet3(struct drm_device * dev,
}
BEGIN_RING(cmdsz);
- OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
ADVANCE_RING();
- cmdbuf->buf += cmdsz * 4;
- cmdbuf->bufsz -= cmdsz * 4;
return 0;
}
@@ -2805,16 +2821,16 @@ static int radeon_emit_packet3_cliprect(struct drm_device *dev,
}
BEGIN_RING(cmdsz);
- OUT_RING_TABLE(cmdbuf->buf, cmdsz);
+ OUT_RING_DRM_BUFFER(cmdbuf->buffer, cmdsz);
ADVANCE_RING();
} while (++i < cmdbuf->nbox);
if (cmdbuf->nbox == 1)
cmdbuf->nbox = 0;
+ return 0;
out:
- cmdbuf->buf += cmdsz * 4;
- cmdbuf->bufsz -= cmdsz * 4;
+ drm_buffer_advance(cmdbuf->buffer, cmdsz * 4);
return 0;
}
@@ -2847,16 +2863,16 @@ static int radeon_emit_wait(struct drm_device * dev, int flags)
return 0;
}
-static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
+static int radeon_cp_cmdbuf(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
struct drm_device_dma *dma = dev->dma;
struct drm_buf *buf = NULL;
+ drm_radeon_cmd_header_t stack_header;
int idx;
drm_radeon_kcmd_buffer_t *cmdbuf = data;
- drm_radeon_cmd_header_t header;
- int orig_nbox, orig_bufsz;
- char *kbuf = NULL;
+ int orig_nbox;
LOCK_TEST_WITH_RETURN(dev, file_priv);
@@ -2871,17 +2887,16 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
* races between checking values and using those values in other code,
* and simply to avoid a lot of function calls to copy in data.
*/
- orig_bufsz = cmdbuf->bufsz;
- if (orig_bufsz != 0) {
- kbuf = kmalloc(cmdbuf->bufsz, GFP_KERNEL);
- if (kbuf == NULL)
- return -ENOMEM;
- if (DRM_COPY_FROM_USER(kbuf, (void __user *)cmdbuf->buf,
- cmdbuf->bufsz)) {
- kfree(kbuf);
- return -EFAULT;
- }
- cmdbuf->buf = kbuf;
+ if (cmdbuf->bufsz != 0) {
+ int rv;
+ void __user *buffer = cmdbuf->buffer;
+ rv = drm_buffer_alloc(&cmdbuf->buffer, cmdbuf->bufsz);
+ if (rv)
+ return rv;
+ rv = drm_buffer_copy_from_user(cmdbuf->buffer, buffer,
+ cmdbuf->bufsz);
+ if (rv) {
+ drm_buffer_free(cmdbuf->buffer);
+ return rv;
+ }
}
orig_nbox = cmdbuf->nbox;
@@ -2890,24 +2905,24 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
int temp;
temp = r300_do_cp_cmdbuf(dev, file_priv, cmdbuf);
- if (orig_bufsz != 0)
- kfree(kbuf);
+ if (cmdbuf->bufsz != 0)
+ drm_buffer_free(cmdbuf->buffer);
return temp;
}
/* microcode_version != r300 */
- while (cmdbuf->bufsz >= sizeof(header)) {
+ while (drm_buffer_unprocessed(cmdbuf->buffer) >= sizeof(stack_header)) {
- header.i = *(int *)cmdbuf->buf;
- cmdbuf->buf += sizeof(header);
- cmdbuf->bufsz -= sizeof(header);
+ drm_radeon_cmd_header_t *header;
+ header = drm_buffer_read_object(cmdbuf->buffer,
+ sizeof(stack_header), &stack_header);
- switch (header.header.cmd_type) {
+ switch (header->header.cmd_type) {
case RADEON_CMD_PACKET:
DRM_DEBUG("RADEON_CMD_PACKET\n");
if (radeon_emit_packets
- (dev_priv, file_priv, header, cmdbuf)) {
+ (dev_priv, file_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_packets failed\n");
goto err;
}
@@ -2915,7 +2930,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_SCALARS:
DRM_DEBUG("RADEON_CMD_SCALARS\n");
- if (radeon_emit_scalars(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_scalars(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_scalars failed\n");
goto err;
}
@@ -2923,7 +2938,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_VECTORS:
DRM_DEBUG("RADEON_CMD_VECTORS\n");
- if (radeon_emit_vectors(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_vectors(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_vectors failed\n");
goto err;
}
@@ -2931,7 +2946,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_DMA_DISCARD:
DRM_DEBUG("RADEON_CMD_DMA_DISCARD\n");
- idx = header.dma.buf_idx;
+ idx = header->dma.buf_idx;
if (idx < 0 || idx >= dma->buf_count) {
DRM_ERROR("buffer index %d (of %d max)\n",
idx, dma->buf_count - 1);
@@ -2968,7 +2983,7 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_SCALARS2:
DRM_DEBUG("RADEON_CMD_SCALARS2\n");
- if (radeon_emit_scalars2(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_scalars2(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_scalars2 failed\n");
goto err;
}
@@ -2976,37 +2991,37 @@ static int radeon_cp_cmdbuf(struct drm_device *dev, void *data, struct drm_file
case RADEON_CMD_WAIT:
DRM_DEBUG("RADEON_CMD_WAIT\n");
- if (radeon_emit_wait(dev, header.wait.flags)) {
+ if (radeon_emit_wait(dev, header->wait.flags)) {
DRM_ERROR("radeon_emit_wait failed\n");
goto err;
}
break;
case RADEON_CMD_VECLINEAR:
DRM_DEBUG("RADEON_CMD_VECLINEAR\n");
- if (radeon_emit_veclinear(dev_priv, header, cmdbuf)) {
+ if (radeon_emit_veclinear(dev_priv, *header, cmdbuf)) {
DRM_ERROR("radeon_emit_veclinear failed\n");
goto err;
}
break;
default:
- DRM_ERROR("bad cmd_type %d at %p\n",
- header.header.cmd_type,
- cmdbuf->buf - sizeof(header));
+ DRM_ERROR("bad cmd_type %d at byte %d\n",
+ header->header.cmd_type,
+ cmdbuf->buffer->iterator);
goto err;
}
}
- if (orig_bufsz != 0)
- kfree(kbuf);
+ if (cmdbuf->bufsz != 0)
+ drm_buffer_free(cmdbuf->buffer);
DRM_DEBUG("DONE\n");
COMMIT_RING();
return 0;
err:
- if (orig_bufsz != 0)
- kfree(kbuf);
+ if (cmdbuf->bufsz != 0)
+ drm_buffer_free(cmdbuf->buffer);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/radeon/radeon_test.c b/drivers/gpu/drm/radeon/radeon_test.c
index 9f5e2f929da..313c96bc09d 100644
--- a/drivers/gpu/drm/radeon/radeon_test.c
+++ b/drivers/gpu/drm/radeon/radeon_test.c
@@ -186,7 +186,7 @@ void radeon_test_moves(struct radeon_device *rdev)
radeon_bo_kunmap(gtt_obj[i]);
DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
- gtt_addr - rdev->mc.gtt_location);
+ gtt_addr - rdev->mc.gtt_start);
}
out_cleanup:
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 58b5adf974c..43c5ab34b63 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_TT:
- man->gpu_offset = rdev->mc.gtt_location;
+ man->gpu_offset = rdev->mc.gtt_start;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
break;
case TTM_PL_VRAM:
/* "On-card" video ram */
- man->gpu_offset = rdev->mc.vram_location;
+ man->gpu_offset = rdev->mc.vram_start;
man->flags = TTM_MEMTYPE_FLAG_FIXED |
TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -262,10 +262,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
switch (old_mem->mem_type) {
case TTM_PL_VRAM:
- old_start += rdev->mc.vram_location;
+ old_start += rdev->mc.vram_start;
break;
case TTM_PL_TT:
- old_start += rdev->mc.gtt_location;
+ old_start += rdev->mc.gtt_start;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
@@ -273,10 +273,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo,
}
switch (new_mem->mem_type) {
case TTM_PL_VRAM:
- new_start += rdev->mc.vram_location;
+ new_start += rdev->mc.vram_start;
break;
case TTM_PL_TT:
- new_start += rdev->mc.gtt_location;
+ new_start += rdev->mc.gtt_start;
break;
default:
DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
new file mode 100644
index 00000000000..8f414a5f520
--- /dev/null
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -0,0 +1,837 @@
+r600 0x9400
+0x000287A0 R7xx_CB_SHADER_CONTROL
+0x00028230 R7xx_PA_SC_EDGERULE
+0x000286C8 R7xx_SPI_THREAD_GROUPING
+0x00008D8C R7xx_SQ_DYN_GPR_CNTL_PS_FLUSH_REQ
+0x000088C4 VGT_CACHE_INVALIDATION
+0x00028A50 VGT_ENHANCE
+0x000088CC VGT_ES_PER_GS
+0x00028A2C VGT_GROUP_DECR
+0x00028A28 VGT_GROUP_FIRST_DECR
+0x00028A24 VGT_GROUP_PRIM_TYPE
+0x00028A30 VGT_GROUP_VECT_0_CNTL
+0x00028A38 VGT_GROUP_VECT_0_FMT_CNTL
+0x00028A34 VGT_GROUP_VECT_1_CNTL
+0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
+0x00028A40 VGT_GS_MODE
+0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x000088C8 VGT_GS_PER_ES
+0x000088E8 VGT_GS_PER_VS
+0x000088D4 VGT_GS_VERTEX_REUSE
+0x00028A14 VGT_HOS_CNTL
+0x00028A18 VGT_HOS_MAX_TESS_LEVEL
+0x00028A1C VGT_HOS_MIN_TESS_LEVEL
+0x00028A20 VGT_HOS_REUSE_DEPTH
+0x0000895C VGT_INDEX_TYPE
+0x00028408 VGT_INDX_OFFSET
+0x00028AA0 VGT_INSTANCE_STEP_RATE_0
+0x00028AA4 VGT_INSTANCE_STEP_RATE_1
+0x000088C0 VGT_LAST_COPY_STATE
+0x00028400 VGT_MAX_VTX_INDX
+0x000088D8 VGT_MC_LAT_CNTL
+0x00028404 VGT_MIN_VTX_INDX
+0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
+0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
+0x00008970 VGT_NUM_INDICES
+0x00008974 VGT_NUM_INSTANCES
+0x00028A10 VGT_OUTPUT_PATH_CNTL
+0x00028C5C VGT_OUT_DEALLOC_CNTL
+0x00028A84 VGT_PRIMITIVEID_EN
+0x00008958 VGT_PRIMITIVE_TYPE
+0x00028AB4 VGT_REUSE_OFF
+0x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
+0x00028AB8 VGT_VTX_CNT_EN
+0x000088B0 VGT_VTX_VECT_EJECT_REG
+0x00028810 PA_CL_CLIP_CNTL
+0x00008A14 PA_CL_ENHANCE
+0x00028C14 PA_CL_GB_HORZ_CLIP_ADJ
+0x00028C18 PA_CL_GB_HORZ_DISC_ADJ
+0x00028C0C PA_CL_GB_VERT_CLIP_ADJ
+0x00028C10 PA_CL_GB_VERT_DISC_ADJ
+0x00028820 PA_CL_NANINF_CNTL
+0x00028E1C PA_CL_POINT_CULL_RAD
+0x00028E18 PA_CL_POINT_SIZE
+0x00028E10 PA_CL_POINT_X_RAD
+0x00028E14 PA_CL_POINT_Y_RAD
+0x00028E2C PA_CL_UCP_0_W
+0x00028E3C PA_CL_UCP_1_W
+0x00028E4C PA_CL_UCP_2_W
+0x00028E5C PA_CL_UCP_3_W
+0x00028E6C PA_CL_UCP_4_W
+0x00028E7C PA_CL_UCP_5_W
+0x00028E20 PA_CL_UCP_0_X
+0x00028E30 PA_CL_UCP_1_X
+0x00028E40 PA_CL_UCP_2_X
+0x00028E50 PA_CL_UCP_3_X
+0x00028E60 PA_CL_UCP_4_X
+0x00028E70 PA_CL_UCP_5_X
+0x00028E24 PA_CL_UCP_0_Y
+0x00028E34 PA_CL_UCP_1_Y
+0x00028E44 PA_CL_UCP_2_Y
+0x00028E54 PA_CL_UCP_3_Y
+0x00028E64 PA_CL_UCP_4_Y
+0x00028E74 PA_CL_UCP_5_Y
+0x00028E28 PA_CL_UCP_0_Z
+0x00028E38 PA_CL_UCP_1_Z
+0x00028E48 PA_CL_UCP_2_Z
+0x00028E58 PA_CL_UCP_3_Z
+0x00028E68 PA_CL_UCP_4_Z
+0x00028E78 PA_CL_UCP_5_Z
+0x00028440 PA_CL_VPORT_XOFFSET_0
+0x00028458 PA_CL_VPORT_XOFFSET_1
+0x00028470 PA_CL_VPORT_XOFFSET_2
+0x00028488 PA_CL_VPORT_XOFFSET_3
+0x000284A0 PA_CL_VPORT_XOFFSET_4
+0x000284B8 PA_CL_VPORT_XOFFSET_5
+0x000284D0 PA_CL_VPORT_XOFFSET_6
+0x000284E8 PA_CL_VPORT_XOFFSET_7
+0x00028500 PA_CL_VPORT_XOFFSET_8
+0x00028518 PA_CL_VPORT_XOFFSET_9
+0x00028530 PA_CL_VPORT_XOFFSET_10
+0x00028548 PA_CL_VPORT_XOFFSET_11
+0x00028560 PA_CL_VPORT_XOFFSET_12
+0x00028578 PA_CL_VPORT_XOFFSET_13
+0x00028590 PA_CL_VPORT_XOFFSET_14
+0x000285A8 PA_CL_VPORT_XOFFSET_15
+0x0002843C PA_CL_VPORT_XSCALE_0
+0x00028454 PA_CL_VPORT_XSCALE_1
+0x0002846C PA_CL_VPORT_XSCALE_2
+0x00028484 PA_CL_VPORT_XSCALE_3
+0x0002849C PA_CL_VPORT_XSCALE_4
+0x000284B4 PA_CL_VPORT_XSCALE_5
+0x000284CC PA_CL_VPORT_XSCALE_6
+0x000284E4 PA_CL_VPORT_XSCALE_7
+0x000284FC PA_CL_VPORT_XSCALE_8
+0x00028514 PA_CL_VPORT_XSCALE_9
+0x0002852C PA_CL_VPORT_XSCALE_10
+0x00028544 PA_CL_VPORT_XSCALE_11
+0x0002855C PA_CL_VPORT_XSCALE_12
+0x00028574 PA_CL_VPORT_XSCALE_13
+0x0002858C PA_CL_VPORT_XSCALE_14
+0x000285A4 PA_CL_VPORT_XSCALE_15
+0x00028448 PA_CL_VPORT_YOFFSET_0
+0x00028460 PA_CL_VPORT_YOFFSET_1
+0x00028478 PA_CL_VPORT_YOFFSET_2
+0x00028490 PA_CL_VPORT_YOFFSET_3
+0x000284A8 PA_CL_VPORT_YOFFSET_4
+0x000284C0 PA_CL_VPORT_YOFFSET_5
+0x000284D8 PA_CL_VPORT_YOFFSET_6
+0x000284F0 PA_CL_VPORT_YOFFSET_7
+0x00028508 PA_CL_VPORT_YOFFSET_8
+0x00028520 PA_CL_VPORT_YOFFSET_9
+0x00028538 PA_CL_VPORT_YOFFSET_10
+0x00028550 PA_CL_VPORT_YOFFSET_11
+0x00028568 PA_CL_VPORT_YOFFSET_12
+0x00028580 PA_CL_VPORT_YOFFSET_13
+0x00028598 PA_CL_VPORT_YOFFSET_14
+0x000285B0 PA_CL_VPORT_YOFFSET_15
+0x00028444 PA_CL_VPORT_YSCALE_0
+0x0002845C PA_CL_VPORT_YSCALE_1
+0x00028474 PA_CL_VPORT_YSCALE_2
+0x0002848C PA_CL_VPORT_YSCALE_3
+0x000284A4 PA_CL_VPORT_YSCALE_4
+0x000284BC PA_CL_VPORT_YSCALE_5
+0x000284D4 PA_CL_VPORT_YSCALE_6
+0x000284EC PA_CL_VPORT_YSCALE_7
+0x00028504 PA_CL_VPORT_YSCALE_8
+0x0002851C PA_CL_VPORT_YSCALE_9
+0x00028534 PA_CL_VPORT_YSCALE_10
+0x0002854C PA_CL_VPORT_YSCALE_11
+0x00028564 PA_CL_VPORT_YSCALE_12
+0x0002857C PA_CL_VPORT_YSCALE_13
+0x00028594 PA_CL_VPORT_YSCALE_14
+0x000285AC PA_CL_VPORT_YSCALE_15
+0x00028450 PA_CL_VPORT_ZOFFSET_0
+0x00028468 PA_CL_VPORT_ZOFFSET_1
+0x00028480 PA_CL_VPORT_ZOFFSET_2
+0x00028498 PA_CL_VPORT_ZOFFSET_3
+0x000284B0 PA_CL_VPORT_ZOFFSET_4
+0x000284C8 PA_CL_VPORT_ZOFFSET_5
+0x000284E0 PA_CL_VPORT_ZOFFSET_6
+0x000284F8 PA_CL_VPORT_ZOFFSET_7
+0x00028510 PA_CL_VPORT_ZOFFSET_8
+0x00028528 PA_CL_VPORT_ZOFFSET_9
+0x00028540 PA_CL_VPORT_ZOFFSET_10
+0x00028558 PA_CL_VPORT_ZOFFSET_11
+0x00028570 PA_CL_VPORT_ZOFFSET_12
+0x00028588 PA_CL_VPORT_ZOFFSET_13
+0x000285A0 PA_CL_VPORT_ZOFFSET_14
+0x000285B8 PA_CL_VPORT_ZOFFSET_15
+0x0002844C PA_CL_VPORT_ZSCALE_0
+0x00028464 PA_CL_VPORT_ZSCALE_1
+0x0002847C PA_CL_VPORT_ZSCALE_2
+0x00028494 PA_CL_VPORT_ZSCALE_3
+0x000284AC PA_CL_VPORT_ZSCALE_4
+0x000284C4 PA_CL_VPORT_ZSCALE_5
+0x000284DC PA_CL_VPORT_ZSCALE_6
+0x000284F4 PA_CL_VPORT_ZSCALE_7
+0x0002850C PA_CL_VPORT_ZSCALE_8
+0x00028524 PA_CL_VPORT_ZSCALE_9
+0x0002853C PA_CL_VPORT_ZSCALE_10
+0x00028554 PA_CL_VPORT_ZSCALE_11
+0x0002856C PA_CL_VPORT_ZSCALE_12
+0x00028584 PA_CL_VPORT_ZSCALE_13
+0x0002859C PA_CL_VPORT_ZSCALE_14
+0x000285B4 PA_CL_VPORT_ZSCALE_15
+0x0002881C PA_CL_VS_OUT_CNTL
+0x00028818 PA_CL_VTE_CNTL
+0x00028C48 PA_SC_AA_MASK
+0x00008B40 PA_SC_AA_SAMPLE_LOCS_2S
+0x00008B44 PA_SC_AA_SAMPLE_LOCS_4S
+0x00008B48 PA_SC_AA_SAMPLE_LOCS_8S_WD0
+0x00008B4C PA_SC_AA_SAMPLE_LOCS_8S_WD1
+0x00028C20 PA_SC_AA_SAMPLE_LOCS_8S_WD1_MCTX
+0x00028C1C PA_SC_AA_SAMPLE_LOCS_MCTX
+0x00028214 PA_SC_CLIPRECT_0_BR
+0x0002821C PA_SC_CLIPRECT_1_BR
+0x00028224 PA_SC_CLIPRECT_2_BR
+0x0002822C PA_SC_CLIPRECT_3_BR
+0x00028210 PA_SC_CLIPRECT_0_TL
+0x00028218 PA_SC_CLIPRECT_1_TL
+0x00028220 PA_SC_CLIPRECT_2_TL
+0x00028228 PA_SC_CLIPRECT_3_TL
+0x0002820C PA_SC_CLIPRECT_RULE
+0x00008BF0 PA_SC_ENHANCE
+0x00028244 PA_SC_GENERIC_SCISSOR_BR
+0x00028240 PA_SC_GENERIC_SCISSOR_TL
+0x00028C00 PA_SC_LINE_CNTL
+0x00028A0C PA_SC_LINE_STIPPLE
+0x00008B10 PA_SC_LINE_STIPPLE_STATE
+0x00028A4C PA_SC_MODE_CNTL
+0x00028A48 PA_SC_MPASS_PS_CNTL
+0x00008B20 PA_SC_MULTI_CHIP_CNTL
+0x00028034 PA_SC_SCREEN_SCISSOR_BR
+0x00028030 PA_SC_SCREEN_SCISSOR_TL
+0x00028254 PA_SC_VPORT_SCISSOR_0_BR
+0x0002825C PA_SC_VPORT_SCISSOR_1_BR
+0x00028264 PA_SC_VPORT_SCISSOR_2_BR
+0x0002826C PA_SC_VPORT_SCISSOR_3_BR
+0x00028274 PA_SC_VPORT_SCISSOR_4_BR
+0x0002827C PA_SC_VPORT_SCISSOR_5_BR
+0x00028284 PA_SC_VPORT_SCISSOR_6_BR
+0x0002828C PA_SC_VPORT_SCISSOR_7_BR
+0x00028294 PA_SC_VPORT_SCISSOR_8_BR
+0x0002829C PA_SC_VPORT_SCISSOR_9_BR
+0x000282A4 PA_SC_VPORT_SCISSOR_10_BR
+0x000282AC PA_SC_VPORT_SCISSOR_11_BR
+0x000282B4 PA_SC_VPORT_SCISSOR_12_BR
+0x000282BC PA_SC_VPORT_SCISSOR_13_BR
+0x000282C4 PA_SC_VPORT_SCISSOR_14_BR
+0x000282CC PA_SC_VPORT_SCISSOR_15_BR
+0x00028250 PA_SC_VPORT_SCISSOR_0_TL
+0x00028258 PA_SC_VPORT_SCISSOR_1_TL
+0x00028260 PA_SC_VPORT_SCISSOR_2_TL
+0x00028268 PA_SC_VPORT_SCISSOR_3_TL
+0x00028270 PA_SC_VPORT_SCISSOR_4_TL
+0x00028278 PA_SC_VPORT_SCISSOR_5_TL
+0x00028280 PA_SC_VPORT_SCISSOR_6_TL
+0x00028288 PA_SC_VPORT_SCISSOR_7_TL
+0x00028290 PA_SC_VPORT_SCISSOR_8_TL
+0x00028298 PA_SC_VPORT_SCISSOR_9_TL
+0x000282A0 PA_SC_VPORT_SCISSOR_10_TL
+0x000282A8 PA_SC_VPORT_SCISSOR_11_TL
+0x000282B0 PA_SC_VPORT_SCISSOR_12_TL
+0x000282B8 PA_SC_VPORT_SCISSOR_13_TL
+0x000282C0 PA_SC_VPORT_SCISSOR_14_TL
+0x000282C8 PA_SC_VPORT_SCISSOR_15_TL
+0x000282D4 PA_SC_VPORT_ZMAX_0
+0x000282DC PA_SC_VPORT_ZMAX_1
+0x000282E4 PA_SC_VPORT_ZMAX_2
+0x000282EC PA_SC_VPORT_ZMAX_3
+0x000282F4 PA_SC_VPORT_ZMAX_4
+0x000282FC PA_SC_VPORT_ZMAX_5
+0x00028304 PA_SC_VPORT_ZMAX_6
+0x0002830C PA_SC_VPORT_ZMAX_7
+0x00028314 PA_SC_VPORT_ZMAX_8
+0x0002831C PA_SC_VPORT_ZMAX_9
+0x00028324 PA_SC_VPORT_ZMAX_10
+0x0002832C PA_SC_VPORT_ZMAX_11
+0x00028334 PA_SC_VPORT_ZMAX_12
+0x0002833C PA_SC_VPORT_ZMAX_13
+0x00028344 PA_SC_VPORT_ZMAX_14
+0x0002834C PA_SC_VPORT_ZMAX_15
+0x000282D0 PA_SC_VPORT_ZMIN_0
+0x000282D8 PA_SC_VPORT_ZMIN_1
+0x000282E0 PA_SC_VPORT_ZMIN_2
+0x000282E8 PA_SC_VPORT_ZMIN_3
+0x000282F0 PA_SC_VPORT_ZMIN_4
+0x000282F8 PA_SC_VPORT_ZMIN_5
+0x00028300 PA_SC_VPORT_ZMIN_6
+0x00028308 PA_SC_VPORT_ZMIN_7
+0x00028310 PA_SC_VPORT_ZMIN_8
+0x00028318 PA_SC_VPORT_ZMIN_9
+0x00028320 PA_SC_VPORT_ZMIN_10
+0x00028328 PA_SC_VPORT_ZMIN_11
+0x00028330 PA_SC_VPORT_ZMIN_12
+0x00028338 PA_SC_VPORT_ZMIN_13
+0x00028340 PA_SC_VPORT_ZMIN_14
+0x00028348 PA_SC_VPORT_ZMIN_15
+0x00028200 PA_SC_WINDOW_OFFSET
+0x00028208 PA_SC_WINDOW_SCISSOR_BR
+0x00028204 PA_SC_WINDOW_SCISSOR_TL
+0x00028A08 PA_SU_LINE_CNTL
+0x00028A04 PA_SU_POINT_MINMAX
+0x00028A00 PA_SU_POINT_SIZE
+0x00028E0C PA_SU_POLY_OFFSET_BACK_OFFSET
+0x00028E08 PA_SU_POLY_OFFSET_BACK_SCALE
+0x00028DFC PA_SU_POLY_OFFSET_CLAMP
+0x00028DF8 PA_SU_POLY_OFFSET_DB_FMT_CNTL
+0x00028E04 PA_SU_POLY_OFFSET_FRONT_OFFSET
+0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
+0x00028814 PA_SU_SC_MODE_CNTL
+0x00028C08 PA_SU_VTX_CNTL
+0x00008C00 SQ_CONFIG
+0x00008C04 SQ_GPR_RESOURCE_MGMT_1
+0x00008C08 SQ_GPR_RESOURCE_MGMT_2
+0x00008C10 SQ_STACK_RESOURCE_MGMT_1
+0x00008C14 SQ_STACK_RESOURCE_MGMT_2
+0x00008C0C SQ_THREAD_RESOURCE_MGMT
+0x00028380 SQ_VTX_SEMANTIC_0
+0x00028384 SQ_VTX_SEMANTIC_1
+0x00028388 SQ_VTX_SEMANTIC_2
+0x0002838C SQ_VTX_SEMANTIC_3
+0x00028390 SQ_VTX_SEMANTIC_4
+0x00028394 SQ_VTX_SEMANTIC_5
+0x00028398 SQ_VTX_SEMANTIC_6
+0x0002839C SQ_VTX_SEMANTIC_7
+0x000283A0 SQ_VTX_SEMANTIC_8
+0x000283A4 SQ_VTX_SEMANTIC_9
+0x000283A8 SQ_VTX_SEMANTIC_10
+0x000283AC SQ_VTX_SEMANTIC_11
+0x000283B0 SQ_VTX_SEMANTIC_12
+0x000283B4 SQ_VTX_SEMANTIC_13
+0x000283B8 SQ_VTX_SEMANTIC_14
+0x000283BC SQ_VTX_SEMANTIC_15
+0x000283C0 SQ_VTX_SEMANTIC_16
+0x000283C4 SQ_VTX_SEMANTIC_17
+0x000283C8 SQ_VTX_SEMANTIC_18
+0x000283CC SQ_VTX_SEMANTIC_19
+0x000283D0 SQ_VTX_SEMANTIC_20
+0x000283D4 SQ_VTX_SEMANTIC_21
+0x000283D8 SQ_VTX_SEMANTIC_22
+0x000283DC SQ_VTX_SEMANTIC_23
+0x000283E0 SQ_VTX_SEMANTIC_24
+0x000283E4 SQ_VTX_SEMANTIC_25
+0x000283E8 SQ_VTX_SEMANTIC_26
+0x000283EC SQ_VTX_SEMANTIC_27
+0x000283F0 SQ_VTX_SEMANTIC_28
+0x000283F4 SQ_VTX_SEMANTIC_29
+0x000283F8 SQ_VTX_SEMANTIC_30
+0x000283FC SQ_VTX_SEMANTIC_31
+0x000288E0 SQ_VTX_SEMANTIC_CLEAR
+0x0003CFF4 SQ_VTX_START_INST_LOC
+0x0003C000 SQ_TEX_SAMPLER_WORD0_0
+0x0003C004 SQ_TEX_SAMPLER_WORD1_0
+0x0003C008 SQ_TEX_SAMPLER_WORD2_0
+0x00030000 SQ_ALU_CONSTANT0_0
+0x00030004 SQ_ALU_CONSTANT1_0
+0x00030008 SQ_ALU_CONSTANT2_0
+0x0003000C SQ_ALU_CONSTANT3_0
+0x0003E380 SQ_BOOL_CONST_0
+0x0003E384 SQ_BOOL_CONST_1
+0x0003E388 SQ_BOOL_CONST_2
+0x0003E200 SQ_LOOP_CONST_0
+0x0003E200 SQ_LOOP_CONST_DX10_0
+0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
+0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
+0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
+0x000281CC SQ_ALU_CONST_BUFFER_SIZE_GS_3
+0x000281D0 SQ_ALU_CONST_BUFFER_SIZE_GS_4
+0x000281D4 SQ_ALU_CONST_BUFFER_SIZE_GS_5
+0x000281D8 SQ_ALU_CONST_BUFFER_SIZE_GS_6
+0x000281DC SQ_ALU_CONST_BUFFER_SIZE_GS_7
+0x000281E0 SQ_ALU_CONST_BUFFER_SIZE_GS_8
+0x000281E4 SQ_ALU_CONST_BUFFER_SIZE_GS_9
+0x000281E8 SQ_ALU_CONST_BUFFER_SIZE_GS_10
+0x000281EC SQ_ALU_CONST_BUFFER_SIZE_GS_11
+0x000281F0 SQ_ALU_CONST_BUFFER_SIZE_GS_12
+0x000281F4 SQ_ALU_CONST_BUFFER_SIZE_GS_13
+0x000281F8 SQ_ALU_CONST_BUFFER_SIZE_GS_14
+0x000281FC SQ_ALU_CONST_BUFFER_SIZE_GS_15
+0x00028140 SQ_ALU_CONST_BUFFER_SIZE_PS_0
+0x00028144 SQ_ALU_CONST_BUFFER_SIZE_PS_1
+0x00028148 SQ_ALU_CONST_BUFFER_SIZE_PS_2
+0x0002814C SQ_ALU_CONST_BUFFER_SIZE_PS_3
+0x00028150 SQ_ALU_CONST_BUFFER_SIZE_PS_4
+0x00028154 SQ_ALU_CONST_BUFFER_SIZE_PS_5
+0x00028158 SQ_ALU_CONST_BUFFER_SIZE_PS_6
+0x0002815C SQ_ALU_CONST_BUFFER_SIZE_PS_7
+0x00028160 SQ_ALU_CONST_BUFFER_SIZE_PS_8
+0x00028164 SQ_ALU_CONST_BUFFER_SIZE_PS_9
+0x00028168 SQ_ALU_CONST_BUFFER_SIZE_PS_10
+0x0002816C SQ_ALU_CONST_BUFFER_SIZE_PS_11
+0x00028170 SQ_ALU_CONST_BUFFER_SIZE_PS_12
+0x00028174 SQ_ALU_CONST_BUFFER_SIZE_PS_13
+0x00028178 SQ_ALU_CONST_BUFFER_SIZE_PS_14
+0x0002817C SQ_ALU_CONST_BUFFER_SIZE_PS_15
+0x00028180 SQ_ALU_CONST_BUFFER_SIZE_VS_0
+0x00028184 SQ_ALU_CONST_BUFFER_SIZE_VS_1
+0x00028188 SQ_ALU_CONST_BUFFER_SIZE_VS_2
+0x0002818C SQ_ALU_CONST_BUFFER_SIZE_VS_3
+0x00028190 SQ_ALU_CONST_BUFFER_SIZE_VS_4
+0x00028194 SQ_ALU_CONST_BUFFER_SIZE_VS_5
+0x00028198 SQ_ALU_CONST_BUFFER_SIZE_VS_6
+0x0002819C SQ_ALU_CONST_BUFFER_SIZE_VS_7
+0x000281A0 SQ_ALU_CONST_BUFFER_SIZE_VS_8
+0x000281A4 SQ_ALU_CONST_BUFFER_SIZE_VS_9
+0x000281A8 SQ_ALU_CONST_BUFFER_SIZE_VS_10
+0x000281AC SQ_ALU_CONST_BUFFER_SIZE_VS_11
+0x000281B0 SQ_ALU_CONST_BUFFER_SIZE_VS_12
+0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
+0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
+0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
+0x000289C0 SQ_ALU_CONST_CACHE_GS_0
+0x000289C4 SQ_ALU_CONST_CACHE_GS_1
+0x000289C8 SQ_ALU_CONST_CACHE_GS_2
+0x000289CC SQ_ALU_CONST_CACHE_GS_3
+0x000289D0 SQ_ALU_CONST_CACHE_GS_4
+0x000289D4 SQ_ALU_CONST_CACHE_GS_5
+0x000289D8 SQ_ALU_CONST_CACHE_GS_6
+0x000289DC SQ_ALU_CONST_CACHE_GS_7
+0x000289E0 SQ_ALU_CONST_CACHE_GS_8
+0x000289E4 SQ_ALU_CONST_CACHE_GS_9
+0x000289E8 SQ_ALU_CONST_CACHE_GS_10
+0x000289EC SQ_ALU_CONST_CACHE_GS_11
+0x000289F0 SQ_ALU_CONST_CACHE_GS_12
+0x000289F4 SQ_ALU_CONST_CACHE_GS_13
+0x000289F8 SQ_ALU_CONST_CACHE_GS_14
+0x000289FC SQ_ALU_CONST_CACHE_GS_15
+0x00028940 SQ_ALU_CONST_CACHE_PS_0
+0x00028944 SQ_ALU_CONST_CACHE_PS_1
+0x00028948 SQ_ALU_CONST_CACHE_PS_2
+0x0002894C SQ_ALU_CONST_CACHE_PS_3
+0x00028950 SQ_ALU_CONST_CACHE_PS_4
+0x00028954 SQ_ALU_CONST_CACHE_PS_5
+0x00028958 SQ_ALU_CONST_CACHE_PS_6
+0x0002895C SQ_ALU_CONST_CACHE_PS_7
+0x00028960 SQ_ALU_CONST_CACHE_PS_8
+0x00028964 SQ_ALU_CONST_CACHE_PS_9
+0x00028968 SQ_ALU_CONST_CACHE_PS_10
+0x0002896C SQ_ALU_CONST_CACHE_PS_11
+0x00028970 SQ_ALU_CONST_CACHE_PS_12
+0x00028974 SQ_ALU_CONST_CACHE_PS_13
+0x00028978 SQ_ALU_CONST_CACHE_PS_14
+0x0002897C SQ_ALU_CONST_CACHE_PS_15
+0x00028980 SQ_ALU_CONST_CACHE_VS_0
+0x00028984 SQ_ALU_CONST_CACHE_VS_1
+0x00028988 SQ_ALU_CONST_CACHE_VS_2
+0x0002898C SQ_ALU_CONST_CACHE_VS_3
+0x00028990 SQ_ALU_CONST_CACHE_VS_4
+0x00028994 SQ_ALU_CONST_CACHE_VS_5
+0x00028998 SQ_ALU_CONST_CACHE_VS_6
+0x0002899C SQ_ALU_CONST_CACHE_VS_7
+0x000289A0 SQ_ALU_CONST_CACHE_VS_8
+0x000289A4 SQ_ALU_CONST_CACHE_VS_9
+0x000289A8 SQ_ALU_CONST_CACHE_VS_10
+0x000289AC SQ_ALU_CONST_CACHE_VS_11
+0x000289B0 SQ_ALU_CONST_CACHE_VS_12
+0x000289B4 SQ_ALU_CONST_CACHE_VS_13
+0x000289B8 SQ_ALU_CONST_CACHE_VS_14
+0x000289BC SQ_ALU_CONST_CACHE_VS_15
+0x000288D8 SQ_PGM_CF_OFFSET_ES
+0x000288DC SQ_PGM_CF_OFFSET_FS
+0x000288D4 SQ_PGM_CF_OFFSET_GS
+0x000288CC SQ_PGM_CF_OFFSET_PS
+0x000288D0 SQ_PGM_CF_OFFSET_VS
+0x00028854 SQ_PGM_EXPORTS_PS
+0x00028890 SQ_PGM_RESOURCES_ES
+0x000288A4 SQ_PGM_RESOURCES_FS
+0x0002887C SQ_PGM_RESOURCES_GS
+0x00028850 SQ_PGM_RESOURCES_PS
+0x00028868 SQ_PGM_RESOURCES_VS
+0x00009100 SPI_CONFIG_CNTL
+0x0000913C SPI_CONFIG_CNTL_1
+0x000286DC SPI_FOG_CNTL
+0x000286E4 SPI_FOG_FUNC_BIAS
+0x000286E0 SPI_FOG_FUNC_SCALE
+0x000286D8 SPI_INPUT_Z
+0x000286D4 SPI_INTERP_CONTROL_0
+0x00028644 SPI_PS_INPUT_CNTL_0
+0x00028648 SPI_PS_INPUT_CNTL_1
+0x0002864C SPI_PS_INPUT_CNTL_2
+0x00028650 SPI_PS_INPUT_CNTL_3
+0x00028654 SPI_PS_INPUT_CNTL_4
+0x00028658 SPI_PS_INPUT_CNTL_5
+0x0002865C SPI_PS_INPUT_CNTL_6
+0x00028660 SPI_PS_INPUT_CNTL_7
+0x00028664 SPI_PS_INPUT_CNTL_8
+0x00028668 SPI_PS_INPUT_CNTL_9
+0x0002866C SPI_PS_INPUT_CNTL_10
+0x00028670 SPI_PS_INPUT_CNTL_11
+0x00028674 SPI_PS_INPUT_CNTL_12
+0x00028678 SPI_PS_INPUT_CNTL_13
+0x0002867C SPI_PS_INPUT_CNTL_14
+0x00028680 SPI_PS_INPUT_CNTL_15
+0x00028684 SPI_PS_INPUT_CNTL_16
+0x00028688 SPI_PS_INPUT_CNTL_17
+0x0002868C SPI_PS_INPUT_CNTL_18
+0x00028690 SPI_PS_INPUT_CNTL_19
+0x00028694 SPI_PS_INPUT_CNTL_20
+0x00028698 SPI_PS_INPUT_CNTL_21
+0x0002869C SPI_PS_INPUT_CNTL_22
+0x000286A0 SPI_PS_INPUT_CNTL_23
+0x000286A4 SPI_PS_INPUT_CNTL_24
+0x000286A8 SPI_PS_INPUT_CNTL_25
+0x000286AC SPI_PS_INPUT_CNTL_26
+0x000286B0 SPI_PS_INPUT_CNTL_27
+0x000286B4 SPI_PS_INPUT_CNTL_28
+0x000286B8 SPI_PS_INPUT_CNTL_29
+0x000286BC SPI_PS_INPUT_CNTL_30
+0x000286C0 SPI_PS_INPUT_CNTL_31
+0x000286CC SPI_PS_IN_CONTROL_0
+0x000286D0 SPI_PS_IN_CONTROL_1
+0x000286C4 SPI_VS_OUT_CONFIG
+0x00028614 SPI_VS_OUT_ID_0
+0x00028618 SPI_VS_OUT_ID_1
+0x0002861C SPI_VS_OUT_ID_2
+0x00028620 SPI_VS_OUT_ID_3
+0x00028624 SPI_VS_OUT_ID_4
+0x00028628 SPI_VS_OUT_ID_5
+0x0002862C SPI_VS_OUT_ID_6
+0x00028630 SPI_VS_OUT_ID_7
+0x00028634 SPI_VS_OUT_ID_8
+0x00028638 SPI_VS_OUT_ID_9
+0x00028438 SX_ALPHA_REF
+0x00028410 SX_ALPHA_TEST_CONTROL
+0x00028350 SX_MISC
+0x0000A020 SMX_DC_CTL0
+0x0000A024 SMX_DC_CTL1
+0x0000A028 SMX_DC_CTL2
+0x00009608 TC_CNTL
+0x00009604 TC_INVALIDATE
+0x00009490 TD_CNTL
+0x00009400 TD_FILTER4
+0x00009404 TD_FILTER4_1
+0x00009408 TD_FILTER4_2
+0x0000940C TD_FILTER4_3
+0x00009410 TD_FILTER4_4
+0x00009414 TD_FILTER4_5
+0x00009418 TD_FILTER4_6
+0x0000941C TD_FILTER4_7
+0x00009420 TD_FILTER4_8
+0x00009424 TD_FILTER4_9
+0x00009428 TD_FILTER4_10
+0x0000942C TD_FILTER4_11
+0x00009430 TD_FILTER4_12
+0x00009434 TD_FILTER4_13
+0x00009438 TD_FILTER4_14
+0x0000943C TD_FILTER4_15
+0x00009440 TD_FILTER4_16
+0x00009444 TD_FILTER4_17
+0x00009448 TD_FILTER4_18
+0x0000944C TD_FILTER4_19
+0x00009450 TD_FILTER4_20
+0x00009454 TD_FILTER4_21
+0x00009458 TD_FILTER4_22
+0x0000945C TD_FILTER4_23
+0x00009460 TD_FILTER4_24
+0x00009464 TD_FILTER4_25
+0x00009468 TD_FILTER4_26
+0x0000946C TD_FILTER4_27
+0x00009470 TD_FILTER4_28
+0x00009474 TD_FILTER4_29
+0x00009478 TD_FILTER4_30
+0x0000947C TD_FILTER4_31
+0x00009480 TD_FILTER4_32
+0x00009484 TD_FILTER4_33
+0x00009488 TD_FILTER4_34
+0x0000948C TD_FILTER4_35
+0x0000A80C TD_GS_SAMPLER0_BORDER_ALPHA
+0x0000A81C TD_GS_SAMPLER1_BORDER_ALPHA
+0x0000A82C TD_GS_SAMPLER2_BORDER_ALPHA
+0x0000A83C TD_GS_SAMPLER3_BORDER_ALPHA
+0x0000A84C TD_GS_SAMPLER4_BORDER_ALPHA
+0x0000A85C TD_GS_SAMPLER5_BORDER_ALPHA
+0x0000A86C TD_GS_SAMPLER6_BORDER_ALPHA
+0x0000A87C TD_GS_SAMPLER7_BORDER_ALPHA
+0x0000A88C TD_GS_SAMPLER8_BORDER_ALPHA
+0x0000A89C TD_GS_SAMPLER9_BORDER_ALPHA
+0x0000A8AC TD_GS_SAMPLER10_BORDER_ALPHA
+0x0000A8BC TD_GS_SAMPLER11_BORDER_ALPHA
+0x0000A8CC TD_GS_SAMPLER12_BORDER_ALPHA
+0x0000A8DC TD_GS_SAMPLER13_BORDER_ALPHA
+0x0000A8EC TD_GS_SAMPLER14_BORDER_ALPHA
+0x0000A8FC TD_GS_SAMPLER15_BORDER_ALPHA
+0x0000A90C TD_GS_SAMPLER16_BORDER_ALPHA
+0x0000A91C TD_GS_SAMPLER17_BORDER_ALPHA
+0x0000A808 TD_GS_SAMPLER0_BORDER_BLUE
+0x0000A818 TD_GS_SAMPLER1_BORDER_BLUE
+0x0000A828 TD_GS_SAMPLER2_BORDER_BLUE
+0x0000A838 TD_GS_SAMPLER3_BORDER_BLUE
+0x0000A848 TD_GS_SAMPLER4_BORDER_BLUE
+0x0000A858 TD_GS_SAMPLER5_BORDER_BLUE
+0x0000A868 TD_GS_SAMPLER6_BORDER_BLUE
+0x0000A878 TD_GS_SAMPLER7_BORDER_BLUE
+0x0000A888 TD_GS_SAMPLER8_BORDER_BLUE
+0x0000A898 TD_GS_SAMPLER9_BORDER_BLUE
+0x0000A8A8 TD_GS_SAMPLER10_BORDER_BLUE
+0x0000A8B8 TD_GS_SAMPLER11_BORDER_BLUE
+0x0000A8C8 TD_GS_SAMPLER12_BORDER_BLUE
+0x0000A8D8 TD_GS_SAMPLER13_BORDER_BLUE
+0x0000A8E8 TD_GS_SAMPLER14_BORDER_BLUE
+0x0000A8F8 TD_GS_SAMPLER15_BORDER_BLUE
+0x0000A908 TD_GS_SAMPLER16_BORDER_BLUE
+0x0000A918 TD_GS_SAMPLER17_BORDER_BLUE
+0x0000A804 TD_GS_SAMPLER0_BORDER_GREEN
+0x0000A814 TD_GS_SAMPLER1_BORDER_GREEN
+0x0000A824 TD_GS_SAMPLER2_BORDER_GREEN
+0x0000A834 TD_GS_SAMPLER3_BORDER_GREEN
+0x0000A844 TD_GS_SAMPLER4_BORDER_GREEN
+0x0000A854 TD_GS_SAMPLER5_BORDER_GREEN
+0x0000A864 TD_GS_SAMPLER6_BORDER_GREEN
+0x0000A874 TD_GS_SAMPLER7_BORDER_GREEN
+0x0000A884 TD_GS_SAMPLER8_BORDER_GREEN
+0x0000A894 TD_GS_SAMPLER9_BORDER_GREEN
+0x0000A8A4 TD_GS_SAMPLER10_BORDER_GREEN
+0x0000A8B4 TD_GS_SAMPLER11_BORDER_GREEN
+0x0000A8C4 TD_GS_SAMPLER12_BORDER_GREEN
+0x0000A8D4 TD_GS_SAMPLER13_BORDER_GREEN
+0x0000A8E4 TD_GS_SAMPLER14_BORDER_GREEN
+0x0000A8F4 TD_GS_SAMPLER15_BORDER_GREEN
+0x0000A904 TD_GS_SAMPLER16_BORDER_GREEN
+0x0000A914 TD_GS_SAMPLER17_BORDER_GREEN
+0x0000A800 TD_GS_SAMPLER0_BORDER_RED
+0x0000A810 TD_GS_SAMPLER1_BORDER_RED
+0x0000A820 TD_GS_SAMPLER2_BORDER_RED
+0x0000A830 TD_GS_SAMPLER3_BORDER_RED
+0x0000A840 TD_GS_SAMPLER4_BORDER_RED
+0x0000A850 TD_GS_SAMPLER5_BORDER_RED
+0x0000A860 TD_GS_SAMPLER6_BORDER_RED
+0x0000A870 TD_GS_SAMPLER7_BORDER_RED
+0x0000A880 TD_GS_SAMPLER8_BORDER_RED
+0x0000A890 TD_GS_SAMPLER9_BORDER_RED
+0x0000A8A0 TD_GS_SAMPLER10_BORDER_RED
+0x0000A8B0 TD_GS_SAMPLER11_BORDER_RED
+0x0000A8C0 TD_GS_SAMPLER12_BORDER_RED
+0x0000A8D0 TD_GS_SAMPLER13_BORDER_RED
+0x0000A8E0 TD_GS_SAMPLER14_BORDER_RED
+0x0000A8F0 TD_GS_SAMPLER15_BORDER_RED
+0x0000A900 TD_GS_SAMPLER16_BORDER_RED
+0x0000A910 TD_GS_SAMPLER17_BORDER_RED
+0x0000A40C TD_PS_SAMPLER0_BORDER_ALPHA
+0x0000A41C TD_PS_SAMPLER1_BORDER_ALPHA
+0x0000A42C TD_PS_SAMPLER2_BORDER_ALPHA
+0x0000A43C TD_PS_SAMPLER3_BORDER_ALPHA
+0x0000A44C TD_PS_SAMPLER4_BORDER_ALPHA
+0x0000A45C TD_PS_SAMPLER5_BORDER_ALPHA
+0x0000A46C TD_PS_SAMPLER6_BORDER_ALPHA
+0x0000A47C TD_PS_SAMPLER7_BORDER_ALPHA
+0x0000A48C TD_PS_SAMPLER8_BORDER_ALPHA
+0x0000A49C TD_PS_SAMPLER9_BORDER_ALPHA
+0x0000A4AC TD_PS_SAMPLER10_BORDER_ALPHA
+0x0000A4BC TD_PS_SAMPLER11_BORDER_ALPHA
+0x0000A4CC TD_PS_SAMPLER12_BORDER_ALPHA
+0x0000A4DC TD_PS_SAMPLER13_BORDER_ALPHA
+0x0000A4EC TD_PS_SAMPLER14_BORDER_ALPHA
+0x0000A4FC TD_PS_SAMPLER15_BORDER_ALPHA
+0x0000A50C TD_PS_SAMPLER16_BORDER_ALPHA
+0x0000A51C TD_PS_SAMPLER17_BORDER_ALPHA
+0x0000A408 TD_PS_SAMPLER0_BORDER_BLUE
+0x0000A418 TD_PS_SAMPLER1_BORDER_BLUE
+0x0000A428 TD_PS_SAMPLER2_BORDER_BLUE
+0x0000A438 TD_PS_SAMPLER3_BORDER_BLUE
+0x0000A448 TD_PS_SAMPLER4_BORDER_BLUE
+0x0000A458 TD_PS_SAMPLER5_BORDER_BLUE
+0x0000A468 TD_PS_SAMPLER6_BORDER_BLUE
+0x0000A478 TD_PS_SAMPLER7_BORDER_BLUE
+0x0000A488 TD_PS_SAMPLER8_BORDER_BLUE
+0x0000A498 TD_PS_SAMPLER9_BORDER_BLUE
+0x0000A4A8 TD_PS_SAMPLER10_BORDER_BLUE
+0x0000A4B8 TD_PS_SAMPLER11_BORDER_BLUE
+0x0000A4C8 TD_PS_SAMPLER12_BORDER_BLUE
+0x0000A4D8 TD_PS_SAMPLER13_BORDER_BLUE
+0x0000A4E8 TD_PS_SAMPLER14_BORDER_BLUE
+0x0000A4F8 TD_PS_SAMPLER15_BORDER_BLUE
+0x0000A508 TD_PS_SAMPLER16_BORDER_BLUE
+0x0000A518 TD_PS_SAMPLER17_BORDER_BLUE
+0x0000A404 TD_PS_SAMPLER0_BORDER_GREEN
+0x0000A414 TD_PS_SAMPLER1_BORDER_GREEN
+0x0000A424 TD_PS_SAMPLER2_BORDER_GREEN
+0x0000A434 TD_PS_SAMPLER3_BORDER_GREEN
+0x0000A444 TD_PS_SAMPLER4_BORDER_GREEN
+0x0000A454 TD_PS_SAMPLER5_BORDER_GREEN
+0x0000A464 TD_PS_SAMPLER6_BORDER_GREEN
+0x0000A474 TD_PS_SAMPLER7_BORDER_GREEN
+0x0000A484 TD_PS_SAMPLER8_BORDER_GREEN
+0x0000A494 TD_PS_SAMPLER9_BORDER_GREEN
+0x0000A4A4 TD_PS_SAMPLER10_BORDER_GREEN
+0x0000A4B4 TD_PS_SAMPLER11_BORDER_GREEN
+0x0000A4C4 TD_PS_SAMPLER12_BORDER_GREEN
+0x0000A4D4 TD_PS_SAMPLER13_BORDER_GREEN
+0x0000A4E4 TD_PS_SAMPLER14_BORDER_GREEN
+0x0000A4F4 TD_PS_SAMPLER15_BORDER_GREEN
+0x0000A504 TD_PS_SAMPLER16_BORDER_GREEN
+0x0000A514 TD_PS_SAMPLER17_BORDER_GREEN
+0x0000A400 TD_PS_SAMPLER0_BORDER_RED
+0x0000A410 TD_PS_SAMPLER1_BORDER_RED
+0x0000A420 TD_PS_SAMPLER2_BORDER_RED
+0x0000A430 TD_PS_SAMPLER3_BORDER_RED
+0x0000A440 TD_PS_SAMPLER4_BORDER_RED
+0x0000A450 TD_PS_SAMPLER5_BORDER_RED
+0x0000A460 TD_PS_SAMPLER6_BORDER_RED
+0x0000A470 TD_PS_SAMPLER7_BORDER_RED
+0x0000A480 TD_PS_SAMPLER8_BORDER_RED
+0x0000A490 TD_PS_SAMPLER9_BORDER_RED
+0x0000A4A0 TD_PS_SAMPLER10_BORDER_RED
+0x0000A4B0 TD_PS_SAMPLER11_BORDER_RED
+0x0000A4C0 TD_PS_SAMPLER12_BORDER_RED
+0x0000A4D0 TD_PS_SAMPLER13_BORDER_RED
+0x0000A4E0 TD_PS_SAMPLER14_BORDER_RED
+0x0000A4F0 TD_PS_SAMPLER15_BORDER_RED
+0x0000A500 TD_PS_SAMPLER16_BORDER_RED
+0x0000A510 TD_PS_SAMPLER17_BORDER_RED
+0x0000AA00 TD_PS_SAMPLER0_CLEARTYPE_KERNEL
+0x0000AA04 TD_PS_SAMPLER1_CLEARTYPE_KERNEL
+0x0000AA08 TD_PS_SAMPLER2_CLEARTYPE_KERNEL
+0x0000AA0C TD_PS_SAMPLER3_CLEARTYPE_KERNEL
+0x0000AA10 TD_PS_SAMPLER4_CLEARTYPE_KERNEL
+0x0000AA14 TD_PS_SAMPLER5_CLEARTYPE_KERNEL
+0x0000AA18 TD_PS_SAMPLER6_CLEARTYPE_KERNEL
+0x0000AA1C TD_PS_SAMPLER7_CLEARTYPE_KERNEL
+0x0000AA20 TD_PS_SAMPLER8_CLEARTYPE_KERNEL
+0x0000AA24 TD_PS_SAMPLER9_CLEARTYPE_KERNEL
+0x0000AA28 TD_PS_SAMPLER10_CLEARTYPE_KERNEL
+0x0000AA2C TD_PS_SAMPLER11_CLEARTYPE_KERNEL
+0x0000AA30 TD_PS_SAMPLER12_CLEARTYPE_KERNEL
+0x0000AA34 TD_PS_SAMPLER13_CLEARTYPE_KERNEL
+0x0000AA38 TD_PS_SAMPLER14_CLEARTYPE_KERNEL
+0x0000AA3C TD_PS_SAMPLER15_CLEARTYPE_KERNEL
+0x0000AA40 TD_PS_SAMPLER16_CLEARTYPE_KERNEL
+0x0000AA44 TD_PS_SAMPLER17_CLEARTYPE_KERNEL
+0x0000A60C TD_VS_SAMPLER0_BORDER_ALPHA
+0x0000A61C TD_VS_SAMPLER1_BORDER_ALPHA
+0x0000A62C TD_VS_SAMPLER2_BORDER_ALPHA
+0x0000A63C TD_VS_SAMPLER3_BORDER_ALPHA
+0x0000A64C TD_VS_SAMPLER4_BORDER_ALPHA
+0x0000A65C TD_VS_SAMPLER5_BORDER_ALPHA
+0x0000A66C TD_VS_SAMPLER6_BORDER_ALPHA
+0x0000A67C TD_VS_SAMPLER7_BORDER_ALPHA
+0x0000A68C TD_VS_SAMPLER8_BORDER_ALPHA
+0x0000A69C TD_VS_SAMPLER9_BORDER_ALPHA
+0x0000A6AC TD_VS_SAMPLER10_BORDER_ALPHA
+0x0000A6BC TD_VS_SAMPLER11_BORDER_ALPHA
+0x0000A6CC TD_VS_SAMPLER12_BORDER_ALPHA
+0x0000A6DC TD_VS_SAMPLER13_BORDER_ALPHA
+0x0000A6EC TD_VS_SAMPLER14_BORDER_ALPHA
+0x0000A6FC TD_VS_SAMPLER15_BORDER_ALPHA
+0x0000A70C TD_VS_SAMPLER16_BORDER_ALPHA
+0x0000A71C TD_VS_SAMPLER17_BORDER_ALPHA
+0x0000A608 TD_VS_SAMPLER0_BORDER_BLUE
+0x0000A618 TD_VS_SAMPLER1_BORDER_BLUE
+0x0000A628 TD_VS_SAMPLER2_BORDER_BLUE
+0x0000A638 TD_VS_SAMPLER3_BORDER_BLUE
+0x0000A648 TD_VS_SAMPLER4_BORDER_BLUE
+0x0000A658 TD_VS_SAMPLER5_BORDER_BLUE
+0x0000A668 TD_VS_SAMPLER6_BORDER_BLUE
+0x0000A678 TD_VS_SAMPLER7_BORDER_BLUE
+0x0000A688 TD_VS_SAMPLER8_BORDER_BLUE
+0x0000A698 TD_VS_SAMPLER9_BORDER_BLUE
+0x0000A6A8 TD_VS_SAMPLER10_BORDER_BLUE
+0x0000A6B8 TD_VS_SAMPLER11_BORDER_BLUE
+0x0000A6C8 TD_VS_SAMPLER12_BORDER_BLUE
+0x0000A6D8 TD_VS_SAMPLER13_BORDER_BLUE
+0x0000A6E8 TD_VS_SAMPLER14_BORDER_BLUE
+0x0000A6F8 TD_VS_SAMPLER15_BORDER_BLUE
+0x0000A708 TD_VS_SAMPLER16_BORDER_BLUE
+0x0000A718 TD_VS_SAMPLER17_BORDER_BLUE
+0x0000A604 TD_VS_SAMPLER0_BORDER_GREEN
+0x0000A614 TD_VS_SAMPLER1_BORDER_GREEN
+0x0000A624 TD_VS_SAMPLER2_BORDER_GREEN
+0x0000A634 TD_VS_SAMPLER3_BORDER_GREEN
+0x0000A644 TD_VS_SAMPLER4_BORDER_GREEN
+0x0000A654 TD_VS_SAMPLER5_BORDER_GREEN
+0x0000A664 TD_VS_SAMPLER6_BORDER_GREEN
+0x0000A674 TD_VS_SAMPLER7_BORDER_GREEN
+0x0000A684 TD_VS_SAMPLER8_BORDER_GREEN
+0x0000A694 TD_VS_SAMPLER9_BORDER_GREEN
+0x0000A6A4 TD_VS_SAMPLER10_BORDER_GREEN
+0x0000A6B4 TD_VS_SAMPLER11_BORDER_GREEN
+0x0000A6C4 TD_VS_SAMPLER12_BORDER_GREEN
+0x0000A6D4 TD_VS_SAMPLER13_BORDER_GREEN
+0x0000A6E4 TD_VS_SAMPLER14_BORDER_GREEN
+0x0000A6F4 TD_VS_SAMPLER15_BORDER_GREEN
+0x0000A704 TD_VS_SAMPLER16_BORDER_GREEN
+0x0000A714 TD_VS_SAMPLER17_BORDER_GREEN
+0x0000A600 TD_VS_SAMPLER0_BORDER_RED
+0x0000A610 TD_VS_SAMPLER1_BORDER_RED
+0x0000A620 TD_VS_SAMPLER2_BORDER_RED
+0x0000A630 TD_VS_SAMPLER3_BORDER_RED
+0x0000A640 TD_VS_SAMPLER4_BORDER_RED
+0x0000A650 TD_VS_SAMPLER5_BORDER_RED
+0x0000A660 TD_VS_SAMPLER6_BORDER_RED
+0x0000A670 TD_VS_SAMPLER7_BORDER_RED
+0x0000A680 TD_VS_SAMPLER8_BORDER_RED
+0x0000A690 TD_VS_SAMPLER9_BORDER_RED
+0x0000A6A0 TD_VS_SAMPLER10_BORDER_RED
+0x0000A6B0 TD_VS_SAMPLER11_BORDER_RED
+0x0000A6C0 TD_VS_SAMPLER12_BORDER_RED
+0x0000A6D0 TD_VS_SAMPLER13_BORDER_RED
+0x0000A6E0 TD_VS_SAMPLER14_BORDER_RED
+0x0000A6F0 TD_VS_SAMPLER15_BORDER_RED
+0x0000A700 TD_VS_SAMPLER16_BORDER_RED
+0x0000A710 TD_VS_SAMPLER17_BORDER_RED
+0x00009508 TA_CNTL_AUX
+0x0002802C DB_DEPTH_CLEAR
+0x00028D24 DB_HTILE_SURFACE
+0x00028D34 DB_PREFETCH_LIMIT
+0x00028D30 DB_PRELOAD_CONTROL
+0x00028D0C DB_RENDER_CONTROL
+0x00028D10 DB_RENDER_OVERRIDE
+0x0002880C DB_SHADER_CONTROL
+0x00028D2C DB_SRESULTS_COMPARE_STATE1
+0x00028430 DB_STENCILREFMASK
+0x00028434 DB_STENCILREFMASK_BF
+0x00028028 DB_STENCIL_CLEAR
+0x00028780 CB_BLEND0_CONTROL
+0x00028784 CB_BLEND1_CONTROL
+0x00028788 CB_BLEND2_CONTROL
+0x0002878C CB_BLEND3_CONTROL
+0x00028790 CB_BLEND4_CONTROL
+0x00028794 CB_BLEND5_CONTROL
+0x00028798 CB_BLEND6_CONTROL
+0x0002879C CB_BLEND7_CONTROL
+0x00028804 CB_BLEND_CONTROL
+0x00028420 CB_BLEND_ALPHA
+0x0002841C CB_BLEND_BLUE
+0x00028418 CB_BLEND_GREEN
+0x00028414 CB_BLEND_RED
+0x0002812C CB_CLEAR_ALPHA
+0x00028128 CB_CLEAR_BLUE
+0x00028124 CB_CLEAR_GREEN
+0x00028120 CB_CLEAR_RED
+0x00028C30 CB_CLRCMP_CONTROL
+0x00028C38 CB_CLRCMP_DST
+0x00028C3C CB_CLRCMP_MSK
+0x00028C34 CB_CLRCMP_SRC
+0x00028100 CB_COLOR0_MASK
+0x00028104 CB_COLOR1_MASK
+0x00028108 CB_COLOR2_MASK
+0x0002810C CB_COLOR3_MASK
+0x00028110 CB_COLOR4_MASK
+0x00028114 CB_COLOR5_MASK
+0x00028118 CB_COLOR6_MASK
+0x0002811C CB_COLOR7_MASK
+0x00028080 CB_COLOR0_VIEW
+0x00028084 CB_COLOR1_VIEW
+0x00028088 CB_COLOR2_VIEW
+0x0002808C CB_COLOR3_VIEW
+0x00028090 CB_COLOR4_VIEW
+0x00028094 CB_COLOR5_VIEW
+0x00028098 CB_COLOR6_VIEW
+0x0002809C CB_COLOR7_VIEW
+0x00028808 CB_COLOR_CONTROL
+0x0002842C CB_FOG_BLUE
+0x00028428 CB_FOG_GREEN
+0x00028424 CB_FOG_RED
+0x00008040 WAIT_UNTIL
+0x00008950 CC_GC_SHADER_PIPE_CONFIG
+0x00008954 GC_USER_SHADER_PIPE_CONFIG
+0x00009714 VC_ENHANCE
+0x00009830 DB_DEBUG
+0x00009838 DB_WATERMARKS
+0x00028D28 DB_SRESULTS_COMPARE_STATE0
+0x00028D44 DB_ALPHA_TO_MASK
+0x00009504 TA_CNTL
+0x00009700 VC_CNTL
+0x00009718 VC_CONFIG
+0x0000A02C SMX_DC_MC_INTF_CTL
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 287fcebfb4e..626d51891ee 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -113,6 +113,7 @@ int rs400_gart_enable(struct radeon_device *rdev)
uint32_t size_reg;
uint32_t tmp;
+ radeon_gart_restore(rdev);
tmp = RREG32_MC(RS690_AIC_CTRL_SCRATCH);
tmp |= RS690_DIS_OUT_OF_PCI_GART_ACCESS;
WREG32_MC(RS690_AIC_CTRL_SCRATCH, tmp);
@@ -150,9 +151,8 @@ int rs400_gart_enable(struct radeon_device *rdev)
WREG32(RADEON_AGP_BASE, 0xFFFFFFFF);
WREG32(RS480_AGP_BASE_2, 0);
}
- tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
- tmp = REG_SET(RS690_MC_AGP_TOP, tmp >> 16);
- tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_location >> 16);
+ tmp = REG_SET(RS690_MC_AGP_TOP, rdev->mc.gtt_end >> 16);
+ tmp |= REG_SET(RS690_MC_AGP_START, rdev->mc.gtt_start >> 16);
if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) {
WREG32_MC(RS690_MCCFG_AGP_LOCATION, tmp);
tmp = RREG32(RADEON_BUS_CNTL) & ~RS600_BUS_MASTER_DIS;
@@ -251,14 +251,19 @@ void rs400_gpu_init(struct radeon_device *rdev)
}
}
-void rs400_vram_info(struct radeon_device *rdev)
+void rs400_mc_init(struct radeon_device *rdev)
{
+ u64 base;
+
rs400_gart_adjust_size(rdev);
+ rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
/* DDR for all card after R300 & IGP */
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
r100_vram_init_sizes(rdev);
+ base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ radeon_gtt_location(rdev, &rdev->mc);
}
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -362,22 +367,6 @@ static int rs400_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
#endif
}
-static int rs400_mc_init(struct radeon_device *rdev)
-{
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- tmp = RREG32(R_00015C_NB_TOM);
- rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- r = radeon_mc_setup(rdev);
- rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
- if (r)
- return r;
- return 0;
-}
-
void rs400_mc_program(struct radeon_device *rdev)
{
struct r100_mc_save save;
@@ -516,12 +505,8 @@ int rs400_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- rs400_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = rs400_mc_init(rdev);
- if (r)
- return r;
+ /* initialize memory controller */
+ rs400_mc_init(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index c3818562a13..47f046b78c6 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -45,23 +45,6 @@
void rs600_gpu_init(struct radeon_device *rdev);
int rs600_mc_wait_for_idle(struct radeon_device *rdev);
-int rs600_mc_init(struct radeon_device *rdev)
-{
- /* read back the MC value from the hw */
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
- rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
- rdev->mc.gtt_location = 0xffffffffUL;
- r = radeon_mc_setup(rdev);
- rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
- if (r)
- return r;
- return 0;
-}
-
/* hpd for digital panel detect/disconnect */
bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
{
@@ -213,6 +196,7 @@ int rs600_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Enable bus master */
tmp = RREG32(R_00004C_BUS_CNTL) & C_00004C_BUS_MASTER_DIS;
WREG32(R_00004C_BUS_CNTL, tmp);
@@ -406,10 +390,14 @@ int rs600_irq_process(struct radeon_device *rdev)
if (G_000044_SW_INT(status))
radeon_fence_process(rdev);
/* Vertical blank interrupts */
- if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int))
+ if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 0);
- if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
+ wake_up(&rdev->irq.vblank_queue);
+ }
+ if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 1);
+ wake_up(&rdev->irq.vblank_queue);
+ }
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
queue_hotplug = true;
DRM_DEBUG("HPD1\n");
@@ -470,22 +458,22 @@ void rs600_gpu_init(struct radeon_device *rdev)
dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
}
-void rs600_vram_info(struct radeon_device *rdev)
+void rs600_mc_init(struct radeon_device *rdev)
{
+ u64 base;
+
+ rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+ rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
-
- rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
- rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ base = RREG32_MC(R_000004_MC_FB_LOCATION);
+ base = G_000004_MC_FB_START(base) << 16;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ radeon_gtt_location(rdev, &rdev->mc);
}
void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -661,12 +649,8 @@ int rs600_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- rs600_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = rs600_mc_init(rdev);
- if (r)
- return r;
+ /* initialize memory controller */
+ rs600_mc_init(rdev);
rs600_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 06e2771aee5..83b9174f76f 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -129,27 +129,21 @@ void rs690_pm_info(struct radeon_device *rdev)
rdev->pm.sideport_bandwidth.full = rfixed_div(rdev->pm.sideport_bandwidth, tmp);
}
-void rs690_vram_info(struct radeon_device *rdev)
+void rs690_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
+ u64 base;
rs400_gart_adjust_size(rdev);
-
rdev->mc.vram_is_ddr = true;
rdev->mc.vram_width = 128;
-
rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
-
rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
- rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
- rdev->mc.real_vram_size = rdev->mc.aper_size;
-
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+ base = G_000100_MC_FB_START(base) << 16;
rs690_pm_info(rdev);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
@@ -160,22 +154,9 @@ void rs690_vram_info(struct radeon_device *rdev)
a.full = rfixed_const(16);
/* core_bandwidth = sclk(Mhz) * 16 */
rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
-}
-
-static int rs690_mc_init(struct radeon_device *rdev)
-{
- int r;
- u32 tmp;
-
- /* Setup GPU memory space */
- tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
- rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
- rdev->mc.gtt_location = 0xFFFFFFFFUL;
- r = radeon_mc_setup(rdev);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
- if (r)
- return r;
- return 0;
+ radeon_vram_location(rdev, &rdev->mc, base);
+ radeon_gtt_location(rdev, &rdev->mc);
}
void rs690_line_buffer_adjust(struct radeon_device *rdev,
@@ -728,12 +709,8 @@ int rs690_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- rs690_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = rs690_mc_init(rdev);
- if (r)
- return r;
+ /* initialize memory controller */
+ rs690_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 0e1e6b8632b..bea747da123 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -277,13 +277,15 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
}
}
-void rv515_vram_info(struct radeon_device *rdev)
+void rv515_mc_init(struct radeon_device *rdev)
{
fixed20_12 a;
rv515_vram_get_type(rdev);
-
r100_vram_init_sizes(rdev);
+ radeon_vram_location(rdev, &rdev->mc, 0);
+ if (!(rdev->flags & RADEON_IS_AGP))
+ radeon_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
@@ -587,12 +589,15 @@ int rv515_init(struct radeon_device *rdev)
radeon_get_clock_info(rdev->ddev);
/* Initialize power management */
radeon_pm_init(rdev);
- /* Get vram informations */
- rv515_vram_info(rdev);
- /* Initialize memory controller (also test AGP) */
- r = r420_mc_init(rdev);
- if (r)
- return r;
+ /* initialize AGP */
+ if (rdev->flags & RADEON_IS_AGP) {
+ r = radeon_agp_init(rdev);
+ if (r) {
+ radeon_agp_disable(rdev);
+ }
+ }
+ /* initialize memory controller */
+ rv515_mc_init(rdev);
rv515_debugfs(rdev);
/* Fence driver */
r = radeon_fence_driver_init(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 03021674d09..37887dee12a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -56,6 +56,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
r = radeon_gart_table_vram_pin(rdev);
if (r)
return r;
+ radeon_gart_restore(rdev);
/* Setup L2 cache */
WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
@@ -273,9 +274,10 @@ static int rv770_cp_load_microcode(struct radeon_device *rdev)
/*
* Core functions
*/
-static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
- u32 num_backends,
- u32 backend_disable_mask)
+static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
+ u32 num_tile_pipes,
+ u32 num_backends,
+ u32 backend_disable_mask)
{
u32 backend_map = 0;
u32 enabled_backends_mask;
@@ -284,6 +286,7 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
u32 swizzle_pipe[R7XX_MAX_PIPES];
u32 cur_backend;
u32 i;
+ bool force_no_swizzle;
if (num_tile_pipes > R7XX_MAX_PIPES)
num_tile_pipes = R7XX_MAX_PIPES;
@@ -313,6 +316,18 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
if (enabled_backends_count != num_backends)
num_backends = enabled_backends_count;
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV730:
+ force_no_swizzle = false;
+ break;
+ case CHIP_RV710:
+ case CHIP_RV740:
+ default:
+ force_no_swizzle = true;
+ break;
+ }
+
memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
switch (num_tile_pipes) {
case 1:
@@ -323,49 +338,100 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
swizzle_pipe[1] = 1;
break;
case 3:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 1;
+ }
break;
case 4:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 3;
- swizzle_pipe[3] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 3;
+ swizzle_pipe[3] = 1;
+ }
break;
case 5:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 1;
- swizzle_pipe[4] = 3;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 1;
+ swizzle_pipe[4] = 3;
+ }
break;
case 6:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 5;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 5;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ }
break;
case 7:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 5;
+ }
break;
case 8:
- swizzle_pipe[0] = 0;
- swizzle_pipe[1] = 2;
- swizzle_pipe[2] = 4;
- swizzle_pipe[3] = 6;
- swizzle_pipe[4] = 3;
- swizzle_pipe[5] = 1;
- swizzle_pipe[6] = 7;
- swizzle_pipe[7] = 5;
+ if (force_no_swizzle) {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 1;
+ swizzle_pipe[2] = 2;
+ swizzle_pipe[3] = 3;
+ swizzle_pipe[4] = 4;
+ swizzle_pipe[5] = 5;
+ swizzle_pipe[6] = 6;
+ swizzle_pipe[7] = 7;
+ } else {
+ swizzle_pipe[0] = 0;
+ swizzle_pipe[1] = 2;
+ swizzle_pipe[2] = 4;
+ swizzle_pipe[3] = 6;
+ swizzle_pipe[4] = 3;
+ swizzle_pipe[5] = 1;
+ swizzle_pipe[6] = 7;
+ swizzle_pipe[7] = 5;
+ }
break;
}
@@ -385,8 +451,10 @@ static u32 r700_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
static void rv770_gpu_init(struct radeon_device *rdev)
{
int i, j, num_qd_pipes;
+ u32 ta_aux_cntl;
u32 sx_debug_1;
u32 smx_dc_ctl0;
+ u32 db_debug3;
u32 num_gs_verts_per_thread;
u32 vgt_gs_per_es;
u32 gs_prim_buffer_depth = 0;
@@ -515,6 +583,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
switch (rdev->config.rv770.max_tile_pipes) {
case 1:
+ default:
gb_tiling_config |= PIPE_TILING(0);
break;
case 2:
@@ -526,16 +595,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
case 8:
gb_tiling_config |= PIPE_TILING(3);
break;
- default:
- break;
}
+ rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
if (rdev->family == CHIP_RV770)
gb_tiling_config |= BANK_TILING(1);
else
gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
+ rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
gb_tiling_config |= GROUP_SIZE(0);
+ rdev->config.rv770.tiling_group_size = 256;
if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
gb_tiling_config |= ROW_TILING(3);
@@ -549,21 +619,27 @@ static void rv770_gpu_init(struct radeon_device *rdev)
gb_tiling_config |= BANK_SWAPS(1);
- if (rdev->family == CHIP_RV740)
- backend_map = 0x28;
- else
- backend_map = r700_get_tile_pipe_to_backend_map(rdev->config.rv770.max_tile_pipes,
- rdev->config.rv770.max_backends,
- (0xff << rdev->config.rv770.max_backends) & 0xff);
- gb_tiling_config |= BACKEND_MAP(backend_map);
+ cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
+ cc_rb_backend_disable |=
+ BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
- cc_gc_shader_pipe_config =
+ cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
+ cc_gc_shader_pipe_config |=
INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
cc_gc_shader_pipe_config |=
INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
- cc_rb_backend_disable =
- BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
+ if (rdev->family == CHIP_RV740)
+ backend_map = 0x28;
+ else
+ backend_map = r700_get_tile_pipe_to_backend_map(rdev,
+ rdev->config.rv770.max_tile_pipes,
+ (R7XX_MAX_BACKENDS -
+ r600_count_pipe_bits((cc_rb_backend_disable &
+ R7XX_MAX_BACKENDS_MASK) >> 16)),
+ (cc_rb_backend_disable >> 16));
+ gb_tiling_config |= BACKEND_MAP(backend_map);
+
WREG32(GB_TILING_CONFIG, gb_tiling_config);
WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
@@ -571,16 +647,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
- WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
- WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
- WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
- WREG32(CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
- R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK);
+ R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -590,10 +663,8 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(CP_MEQ_THRESHOLDS, STQ_SPLIT(0x30));
- WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO |
- SYNC_GRADIENT |
- SYNC_WALKER |
- SYNC_ALIGNER));
+ ta_aux_cntl = RREG32(TA_CNTL_AUX);
+ WREG32(TA_CNTL_AUX, ta_aux_cntl | DISABLE_CUBE_ANISO);
sx_debug_1 = RREG32(SX_DEBUG_1);
sx_debug_1 |= ENABLE_NEW_SMX_ADDRESS;
@@ -604,14 +675,28 @@ static void rv770_gpu_init(struct radeon_device *rdev)
smx_dc_ctl0 |= CACHE_DEPTH((rdev->config.rv770.sx_num_of_sets * 64) - 1);
WREG32(SMX_DC_CTL0, smx_dc_ctl0);
- WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
- GS_FLUSH_CTL(4) |
- ACK_FLUSH_CTL(3) |
- SYNC_FLUSH_CTL));
+ if (rdev->family != CHIP_RV740)
+ WREG32(SMX_EVENT_CTL, (ES_FLUSH_CTL(4) |
+ GS_FLUSH_CTL(4) |
+ ACK_FLUSH_CTL(3) |
+ SYNC_FLUSH_CTL));
- if (rdev->family == CHIP_RV770)
- WREG32(DB_DEBUG3, DB_CLK_OFF_DELAY(0x1f));
- else {
+ db_debug3 = RREG32(DB_DEBUG3);
+ db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
+ switch (rdev->family) {
+ case CHIP_RV770:
+ case CHIP_RV740:
+ db_debug3 |= DB_CLK_OFF_DELAY(0x1f);
+ break;
+ case CHIP_RV710:
+ case CHIP_RV730:
+ default:
+ db_debug3 |= DB_CLK_OFF_DELAY(2);
+ break;
+ }
+ WREG32(DB_DEBUG3, db_debug3);
+
+ if (rdev->family != CHIP_RV770) {
db_debug4 = RREG32(DB_DEBUG4);
db_debug4 |= DISABLE_TILE_COVERED_FOR_PS_ITER;
WREG32(DB_DEBUG4, db_debug4);
@@ -640,10 +725,10 @@ static void rv770_gpu_init(struct radeon_device *rdev)
ALU_UPDATE_FIFO_HIWATER(0x8));
switch (rdev->family) {
case CHIP_RV770:
- sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
- break;
case CHIP_RV730:
case CHIP_RV710:
+ sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x1);
+ break;
case CHIP_RV740:
default:
sq_ms_fifo_sizes |= FETCH_FIFO_HIWATER(0x4);
@@ -816,45 +901,13 @@ int rv770_mc_init(struct radeon_device *rdev)
/* Setup GPU memory space */
rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
-
- if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+ rdev->mc.visible_vram_size = rdev->mc.aper_size;
+ /* FIXME remove this once we support unmappable VRAM */
+ if (rdev->mc.mc_vram_size > rdev->mc.aper_size) {
rdev->mc.mc_vram_size = rdev->mc.aper_size;
-
- if (rdev->mc.real_vram_size > rdev->mc.aper_size)
rdev->mc.real_vram_size = rdev->mc.aper_size;
-
- if (rdev->flags & RADEON_IS_AGP) {
- /* gtt_size is setup by radeon_agp_init */
- rdev->mc.gtt_location = rdev->mc.agp_base;
- tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
- /* Try to put vram before or after AGP because we
- * we want SYSTEM_APERTURE to cover both VRAM and
- * AGP so that GPU can catch out of VRAM/AGP access
- */
- if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
- /* Enough place before */
- rdev->mc.vram_location = rdev->mc.gtt_location -
- rdev->mc.mc_vram_size;
- } else if (tmp > rdev->mc.mc_vram_size) {
- /* Enough place after */
- rdev->mc.vram_location = rdev->mc.gtt_location +
- rdev->mc.gtt_size;
- } else {
- /* Try to setup VRAM then AGP might not
- * not work on some card
- */
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- }
- } else {
- rdev->mc.vram_location = 0x00000000UL;
- rdev->mc.gtt_location = rdev->mc.mc_vram_size;
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
}
- rdev->mc.vram_start = rdev->mc.vram_location;
- rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
- rdev->mc.gtt_start = rdev->mc.gtt_location;
- rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
+ r600_vram_gtt_location(rdev, &rdev->mc);
/* FIXME: we should enforce default clock in case GPU is not in
* default setup
*/
@@ -863,6 +916,7 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
return 0;
}
+
int rv770_gpu_reset(struct radeon_device *rdev)
{
/* FIXME: implement any rv770 specific bits */
@@ -1038,6 +1092,7 @@ int rv770_init(struct radeon_device *rdev)
r = radeon_fence_driver_init(rdev);
if (r)
return r;
+ /* initialize AGP */
if (rdev->flags & RADEON_IS_AGP) {
r = radeon_agp_init(rdev);
if (r)
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index a1367ab6f26..9506f8cb99e 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -343,4 +343,6 @@
#define WAIT_UNTIL 0x8040
+#define SRBM_STATUS 0x0E50
+
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 3d47a2c1232..a759170763b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -480,7 +480,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
void *from_virtual;
void *to_virtual;
int i;
- int ret;
+ int ret = -ENOMEM;
if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
@@ -499,8 +499,10 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; ++i) {
from_page = read_mapping_page(swap_space, i, NULL);
- if (IS_ERR(from_page))
+ if (IS_ERR(from_page)) {
+ ret = PTR_ERR(from_page);
goto out_err;
+ }
to_page = __ttm_tt_get_page(ttm, i);
if (unlikely(to_page == NULL))
goto out_err;
@@ -523,7 +525,7 @@ static int ttm_tt_swapin(struct ttm_tt *ttm)
return 0;
out_err:
ttm_tt_free_alloced_pages(ttm);
- return -ENOMEM;
+ return ret;
}
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
@@ -535,6 +537,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
void *from_virtual;
void *to_virtual;
int i;
+ int ret = -ENOMEM;
BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
BUG_ON(ttm->caching_state != tt_cached);
@@ -557,7 +560,7 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
0);
if (unlikely(IS_ERR(swap_storage))) {
printk(KERN_ERR "Failed allocating swap storage.\n");
- return -ENOMEM;
+ return PTR_ERR(swap_storage);
}
} else
swap_storage = persistant_swap_storage;
@@ -569,9 +572,10 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
if (unlikely(from_page == NULL))
continue;
to_page = read_mapping_page(swap_space, i, NULL);
- if (unlikely(to_page == NULL))
+ if (unlikely(IS_ERR(to_page))) {
+ ret = PTR_ERR(to_page);
goto out_err;
-
+ }
preempt_disable();
from_virtual = kmap_atomic(from_page, KM_USER0);
to_virtual = kmap_atomic(to_page, KM_USER1);
@@ -595,5 +599,5 @@ out_err:
if (!persistant_swap_storage)
fput(swap_storage);
- return -ENOMEM;
+ return ret;
}
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
index 0920492cea0..61ab4daf0bb 100644
--- a/drivers/gpu/vga/Kconfig
+++ b/drivers/gpu/vga/Kconfig
@@ -16,3 +16,14 @@ config VGA_ARB_MAX_GPUS
help
Reserves space in the kernel to maintain resource locking for
multiple GPUS. The overhead for each GPU is very small.
+
+config VGA_SWITCHEROO
+ bool "Laptop Hybrid Grapics - GPU switching support"
+ depends on X86
+ depends on ACPI
+ help
+ Many laptops released in 2008/9/10 have two GPUs with a multiplexer
+ to switch between them. This adds support for dynamic switching when
+ X isn't running and delayed switching until the next logoff. This
+ feature is called hybrid graphics, ATI PowerXpress, and Nvidia
+ HybridPower.
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile
index 7cc8c1ed645..14ca30b75d0 100644
--- a/drivers/gpu/vga/Makefile
+++ b/drivers/gpu/vga/Makefile
@@ -1 +1,2 @@
obj-$(CONFIG_VGA_ARB) += vgaarb.o
+obj-$(CONFIG_VGA_SWITCHEROO) += vga_switcheroo.o
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
new file mode 100644
index 00000000000..d6d1149d525
--- /dev/null
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -0,0 +1,450 @@
+/*
+ * Copyright (c) 2010 Red Hat Inc.
+ * Author : Dave Airlie <airlied@redhat.com>
+ *
+ *
+ * Licensed under GPLv2
+ *
+ * vga_switcheroo.c - Support for laptops with dual GPUs using one set of outputs
+
+ Switcher interface - methods required for ATPX and DCM
+ - switchto - this throws the output MUX switch
+ - discrete_set_power - sets the power state for the discrete card
+
+ GPU driver interface
+ - set_gpu_state - this should do the equivalent of suspend/resume for the card
+ - this should *not* set the discrete power state
+ - can_switch - check if the device is in a position to switch now
+ */
+
+#include <linux/module.h>
+#include <linux/dmi.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/fb.h>
+
+#include <linux/pci.h>
+#include <linux/vga_switcheroo.h>
+
+struct vga_switcheroo_client {
+ struct pci_dev *pdev;
+ struct fb_info *fb_info;
+ int pwr_state;
+ void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state);
+ bool (*can_switch)(struct pci_dev *pdev);
+ int id;
+ bool active;
+};
+
+static DEFINE_MUTEX(vgasr_mutex);
+
+struct vgasr_priv {
+
+ bool active;
+ bool delayed_switch_active;
+ enum vga_switcheroo_client_id delayed_client_id;
+
+ struct dentry *debugfs_root;
+ struct dentry *switch_file;
+
+ int registered_clients;
+ struct vga_switcheroo_client clients[VGA_SWITCHEROO_MAX_CLIENTS];
+
+ struct vga_switcheroo_handler *handler;
+};
+
+static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv);
+static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv);
+
+/* only one switcheroo per system */
+static struct vgasr_priv vgasr_priv;
+
+int vga_switcheroo_register_handler(struct vga_switcheroo_handler *handler)
+{
+ mutex_lock(&vgasr_mutex);
+ if (vgasr_priv.handler) {
+ mutex_unlock(&vgasr_mutex);
+ return -EINVAL;
+ }
+
+ vgasr_priv.handler = handler;
+ mutex_unlock(&vgasr_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(vga_switcheroo_register_handler);
+
+void vga_switcheroo_unregister_handler(void)
+{
+ mutex_lock(&vgasr_mutex);
+ vgasr_priv.handler = NULL;
+ mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_unregister_handler);
+
+static void vga_switcheroo_enable(void)
+{
+ int i;
+ int ret;
+ /* call the handler to init */
+ vgasr_priv.handler->init();
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ ret = vgasr_priv.handler->get_client_id(vgasr_priv.clients[i].pdev);
+ if (ret < 0)
+ return;
+
+ vgasr_priv.clients[i].id = ret;
+ }
+ vga_switcheroo_debugfs_init(&vgasr_priv);
+ vgasr_priv.active = true;
+}
+
+int vga_switcheroo_register_client(struct pci_dev *pdev,
+ void (*set_gpu_state)(struct pci_dev *pdev, enum vga_switcheroo_state),
+ bool (*can_switch)(struct pci_dev *pdev))
+{
+ int index;
+
+ mutex_lock(&vgasr_mutex);
+ /* don't do IGD vs DIS here */
+ if (vgasr_priv.registered_clients & 1)
+ index = 1;
+ else
+ index = 0;
+
+ vgasr_priv.clients[index].pwr_state = VGA_SWITCHEROO_ON;
+ vgasr_priv.clients[index].pdev = pdev;
+ vgasr_priv.clients[index].set_gpu_state = set_gpu_state;
+ vgasr_priv.clients[index].can_switch = can_switch;
+ vgasr_priv.clients[index].id = -1;
+ if (pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW)
+ vgasr_priv.clients[index].active = true;
+
+ vgasr_priv.registered_clients |= (1 << index);
+
+ /* if we get two clients + handler */
+ if (vgasr_priv.registered_clients == 0x3 && vgasr_priv.handler) {
+ printk(KERN_INFO "vga_switcheroo: enabled\n");
+ vga_switcheroo_enable();
+ }
+ mutex_unlock(&vgasr_mutex);
+ return 0;
+}
+EXPORT_SYMBOL(vga_switcheroo_register_client);
+
+void vga_switcheroo_unregister_client(struct pci_dev *pdev)
+{
+ int i;
+
+ mutex_lock(&vgasr_mutex);
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].pdev == pdev) {
+ vgasr_priv.registered_clients &= ~(1 << i);
+ break;
+ }
+ }
+
+ printk(KERN_INFO "vga_switcheroo: disabled\n");
+ vga_switcheroo_debugfs_fini(&vgasr_priv);
+ vgasr_priv.active = false;
+ mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_unregister_client);
+
+void vga_switcheroo_client_fb_set(struct pci_dev *pdev,
+ struct fb_info *info)
+{
+ int i;
+
+ mutex_lock(&vgasr_mutex);
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].pdev == pdev) {
+ vgasr_priv.clients[i].fb_info = info;
+ break;
+ }
+ }
+ mutex_unlock(&vgasr_mutex);
+}
+EXPORT_SYMBOL(vga_switcheroo_client_fb_set);
+
+static int vga_switcheroo_show(struct seq_file *m, void *v)
+{
+ int i;
+ mutex_lock(&vgasr_mutex);
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ seq_printf(m, "%d:%c:%s:%s\n", i,
+ vgasr_priv.clients[i].active ? '+' : ' ',
+ vgasr_priv.clients[i].pwr_state ? "Pwr" : "Off",
+ pci_name(vgasr_priv.clients[i].pdev));
+ }
+ mutex_unlock(&vgasr_mutex);
+ return 0;
+}
+
+static int vga_switcheroo_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, vga_switcheroo_show, NULL);
+}
+
+static int vga_switchon(struct vga_switcheroo_client *client)
+{
+ int ret;
+
+ ret = vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_ON);
+ /* call the driver callback to turn on device */
+ client->set_gpu_state(client->pdev, VGA_SWITCHEROO_ON);
+ client->pwr_state = VGA_SWITCHEROO_ON;
+ return 0;
+}
+
+static int vga_switchoff(struct vga_switcheroo_client *client)
+{
+ /* call the driver callback to turn off device */
+ client->set_gpu_state(client->pdev, VGA_SWITCHEROO_OFF);
+ vgasr_priv.handler->power_state(client->id, VGA_SWITCHEROO_OFF);
+ client->pwr_state = VGA_SWITCHEROO_OFF;
+ return 0;
+}
+
+static int vga_switchto(struct vga_switcheroo_client *new_client)
+{
+ int ret;
+ int i;
+ struct vga_switcheroo_client *active = NULL;
+
+ if (new_client->active == true)
+ return 0;
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].active == true) {
+ active = &vgasr_priv.clients[i];
+ break;
+ }
+ }
+ if (!active)
+ return 0;
+
+ /* power up the first device */
+ ret = pci_enable_device(new_client->pdev);
+ if (ret)
+ return ret;
+
+ if (new_client->pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(new_client);
+
+ /* swap shadow resource to denote boot VGA device has changed so X starts on new device */
+ active->active = false;
+
+ active->pdev->resource[PCI_ROM_RESOURCE].flags &= ~IORESOURCE_ROM_SHADOW;
+ new_client->pdev->resource[PCI_ROM_RESOURCE].flags |= IORESOURCE_ROM_SHADOW;
+
+ if (new_client->fb_info) {
+ struct fb_event event;
+ event.info = new_client->fb_info;
+ fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
+ }
+
+ ret = vgasr_priv.handler->switchto(new_client->id);
+ if (ret)
+ return ret;
+
+ if (active->pwr_state == VGA_SWITCHEROO_ON)
+ vga_switchoff(active);
+
+ new_client->active = true;
+ return 0;
+}
+
+static ssize_t
+vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char usercmd[64];
+ const char *pdev_name;
+ int i, ret;
+ bool delay = false, can_switch;
+ int client_id = -1;
+ struct vga_switcheroo_client *client = NULL;
+
+ if (cnt > 63)
+ cnt = 63;
+
+ if (copy_from_user(usercmd, ubuf, cnt))
+ return -EFAULT;
+
+ mutex_lock(&vgasr_mutex);
+
+ if (!vgasr_priv.active) {
+ mutex_unlock(&vgasr_mutex);
+ return -EINVAL;
+ }
+
+ /* pwr off the device not in use */
+ if (strncmp(usercmd, "OFF", 3) == 0) {
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].active)
+ continue;
+ if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_ON)
+ vga_switchoff(&vgasr_priv.clients[i]);
+ }
+ goto out;
+ }
+ /* pwr on the device not in use */
+ if (strncmp(usercmd, "ON", 2) == 0) {
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].active)
+ continue;
+ if (vgasr_priv.clients[i].pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(&vgasr_priv.clients[i]);
+ }
+ goto out;
+ }
+
+ /* request a delayed switch - test whether we can switch now */
+ if (strncmp(usercmd, "DIGD", 4) == 0) {
+ client_id = VGA_SWITCHEROO_IGD;
+ delay = true;
+ }
+
+ if (strncmp(usercmd, "DDIS", 4) == 0) {
+ client_id = VGA_SWITCHEROO_DIS;
+ delay = true;
+ }
+
+ if (strncmp(usercmd, "IGD", 3) == 0)
+ client_id = VGA_SWITCHEROO_IGD;
+
+ if (strncmp(usercmd, "DIS", 3) == 0)
+ client_id = VGA_SWITCHEROO_DIS;
+
+ if (client_id == -1)
+ goto out;
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].id == client_id) {
+ client = &vgasr_priv.clients[i];
+ break;
+ }
+ }
+
+ vgasr_priv.delayed_switch_active = false;
+ /* okay we want a switch - test if devices are willing to switch */
+ can_switch = true;
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
+ if (can_switch == false) {
+ printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
+ break;
+ }
+ }
+
+ if (can_switch == false && delay == false)
+ goto out;
+
+ if (can_switch == true) {
+ pdev_name = pci_name(client->pdev);
+ ret = vga_switchto(client);
+ if (ret)
+ printk(KERN_ERR "vga_switcheroo: switching failed %d\n", ret);
+ } else {
+ printk(KERN_INFO "vga_switcheroo: setting delayed switch to client %d\n", client->id);
+ vgasr_priv.delayed_switch_active = true;
+ vgasr_priv.delayed_client_id = client_id;
+
+ /* we should at least power up the card to
+ make the switch faster */
+ if (client->pwr_state == VGA_SWITCHEROO_OFF)
+ vga_switchon(client);
+ }
+
+out:
+ mutex_unlock(&vgasr_mutex);
+ return cnt;
+}
+
+static const struct file_operations vga_switcheroo_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = vga_switcheroo_debugfs_open,
+ .write = vga_switcheroo_debugfs_write,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void vga_switcheroo_debugfs_fini(struct vgasr_priv *priv)
+{
+ if (priv->switch_file) {
+ debugfs_remove(priv->switch_file);
+ priv->switch_file = NULL;
+ }
+ if (priv->debugfs_root) {
+ debugfs_remove(priv->debugfs_root);
+ priv->debugfs_root = NULL;
+ }
+}
+
+static int vga_switcheroo_debugfs_init(struct vgasr_priv *priv)
+{
+ /* already initialised */
+ if (priv->debugfs_root)
+ return 0;
+ priv->debugfs_root = debugfs_create_dir("vgaswitcheroo", NULL);
+
+ if (!priv->debugfs_root) {
+ printk(KERN_ERR "vga_switcheroo: Cannot create /sys/kernel/debug/vgaswitcheroo\n");
+ goto fail;
+ }
+
+ priv->switch_file = debugfs_create_file("switch", 0644,
+ priv->debugfs_root, NULL, &vga_switcheroo_debugfs_fops);
+ if (!priv->switch_file) {
+ printk(KERN_ERR "vga_switcheroo: cannot create /sys/kernel/debug/vgaswitcheroo/switch\n");
+ goto fail;
+ }
+ return 0;
+fail:
+ vga_switcheroo_debugfs_fini(priv);
+ return -1;
+}
+
+int vga_switcheroo_process_delayed_switch(void)
+{
+ struct vga_switcheroo_client *client = NULL;
+ const char *pdev_name;
+ bool can_switch = true;
+ int i;
+ int ret;
+ int err = -EINVAL;
+
+ mutex_lock(&vgasr_mutex);
+ if (!vgasr_priv.delayed_switch_active)
+ goto err;
+
+ printk(KERN_INFO "vga_switcheroo: processing delayed switch to %d\n", vgasr_priv.delayed_client_id);
+
+ for (i = 0; i < VGA_SWITCHEROO_MAX_CLIENTS; i++) {
+ if (vgasr_priv.clients[i].id == vgasr_priv.delayed_client_id)
+ client = &vgasr_priv.clients[i];
+ can_switch = vgasr_priv.clients[i].can_switch(vgasr_priv.clients[i].pdev);
+ if (can_switch == false) {
+ printk(KERN_ERR "vga_switcheroo: client %d refused switch\n", i);
+ break;
+ }
+ }
+
+ if (can_switch == false || client == NULL)
+ goto err;
+
+ pdev_name = pci_name(client->pdev);
+ ret = vga_switchto(client);
+ if (ret)
+ printk(KERN_ERR "vga_switcheroo: delayed switching failed %d\n", ret);
+
+ vgasr_priv.delayed_switch_active = false;
+ err = 0;
+err:
+ mutex_unlock(&vgasr_mutex);
+ return err;
+}
+EXPORT_SYMBOL(vga_switcheroo_process_delayed_switch);
+
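The switcher interface described in the header comment of this new file maps onto the entry points it exports. As a minimal, hypothetical sketch (the foo_* names are invented for illustration and are not part of this patch), a platform handler and a GPU driver would hook in roughly as follows, assuming the callback shapes visible in vga_switcheroo_register_handler() and vga_switcheroo_register_client():

/* Hypothetical sketch only: illustrates the vga_switcheroo entry points. */
#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

/* Platform (ATPX/DCM-style) handler: throws the MUX and discrete power. */
static int foo_switchto(enum vga_switcheroo_client_id id)
{
	/* program the output multiplexer for client 'id' */
	return 0;
}

static int foo_power_state(enum vga_switcheroo_client_id id,
			   enum vga_switcheroo_state state)
{
	/* set the discrete GPU power rail on or off */
	return 0;
}

static int foo_handler_init(void)
{
	return 0;
}

static int foo_get_client_id(struct pci_dev *pdev)
{
	/* decide whether pdev is the integrated or the discrete GPU */
	return VGA_SWITCHEROO_IGD;
}

static struct vga_switcheroo_handler foo_handler = {
	.switchto	= foo_switchto,
	.power_state	= foo_power_state,
	.init		= foo_handler_init,
	.get_client_id	= foo_get_client_id,
};

/* GPU driver side: suspend/resume-like state change, no discrete power. */
static void foo_set_gpu_state(struct pci_dev *pdev,
			      enum vga_switcheroo_state state)
{
	/* suspend or resume the device, but do not touch discrete power */
}

static bool foo_can_switch(struct pci_dev *pdev)
{
	/* e.g. refuse while the device is busy */
	return true;
}

static int foo_register(struct pci_dev *pdev)
{
	int ret = vga_switcheroo_register_handler(&foo_handler);
	if (ret)
		return ret;
	return vga_switcheroo_register_client(pdev, foo_set_gpu_state,
					      foo_can_switch);
}

Once a handler and two clients are registered, switching is driven from user space through the debugfs file created above (/sys/kernel/debug/vgaswitcheroo/switch), which accepts the ON/OFF/IGD/DIS/DIGD/DDIS commands parsed in vga_switcheroo_debugfs_write().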
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 867e08433e4..433602aed46 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -265,9 +265,10 @@ static int hiddev_release(struct inode * inode, struct file * file)
static int hiddev_open(struct inode *inode, struct file *file)
{
struct hiddev_list *list;
- int res;
+ int res, i;
- int i = iminor(inode) - HIDDEV_MINOR_BASE;
+ lock_kernel();
+ i = iminor(inode) - HIDDEV_MINOR_BASE;
if (i >= HIDDEV_MINORS || i < 0 || !hiddev_table[i])
return -ENODEV;
@@ -313,10 +314,12 @@ static int hiddev_open(struct inode *inode, struct file *file)
usbhid_open(hid);
}
+ unlock_kernel();
return 0;
bail:
file->private_data = NULL;
kfree(list);
+ unlock_kernel();
return res;
}
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 68cf87749a4..e4595e6147b 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -170,6 +170,16 @@ config SENSORS_ADM9240
This driver can also be built as a module. If so, the module
will be called adm9240.
+config SENSORS_ADT7411
+ tristate "Analog Devices ADT7411"
+ depends on I2C && EXPERIMENTAL
+ help
+ If you say yes here you get support for the Analog Devices
+ ADT7411 voltage and temperature monitoring chip.
+
+ This driver can also be built as a module. If so, the module
+ will be called adt7411.
+
config SENSORS_ADT7462
tristate "Analog Devices ADT7462"
depends on I2C && EXPERIMENTAL
@@ -190,20 +200,6 @@ config SENSORS_ADT7470
This driver can also be built as a module. If so, the module
will be called adt7470.
-config SENSORS_ADT7473
- tristate "Analog Devices ADT7473 (DEPRECATED)"
- depends on I2C && EXPERIMENTAL
- select SENSORS_ADT7475
- help
- If you say yes here you get support for the Analog Devices
- ADT7473 temperature monitoring chips.
-
- This driver is deprecated, you should use the adt7475 driver
- instead.
-
- This driver can also be built as a module. If so, the module
- will be called adt7473.
-
config SENSORS_ADT7475
tristate "Analog Devices ADT7473, ADT7475, ADT7476 and ADT7490"
depends on I2C && EXPERIMENTAL
@@ -216,6 +212,19 @@ config SENSORS_ADT7475
This driver can also be built as a module. If so, the module
will be called adt7475.
+config SENSORS_ASC7621
+ tristate "Andigilog aSC7621"
+ depends on HWMON && I2C
+ help
+ If you say yes here you get support for the aSC7621
+ family of SMBus sensor chips found on most Intel X48, X38, 975,
+ 965 and 945 desktop boards. Currently supported chips:
+ aSC7621
+ aSC7621a
+
+ This driver can also be built as a module. If so, the module
+ will be called asc7621.
+
config SENSORS_K8TEMP
tristate "AMD Athlon64/FX or Opteron temperature sensor"
depends on X86 && PCI && EXPERIMENTAL
@@ -563,9 +572,10 @@ config SENSORS_LM90
depends on I2C
help
If you say yes here you get support for National Semiconductor LM90,
- LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, and Maxim
+ LM86, LM89 and LM99, Analog Devices ADM1032 and ADT7461, Maxim
MAX6646, MAX6647, MAX6648, MAX6649, MAX6657, MAX6658, MAX6659,
- MAX6680, MAX6681 and MAX6692 sensor chips.
+ MAX6680, MAX6681 and MAX6692, and Winbond/Nuvoton W83L771AWG/ASG
+ sensor chips.
This driver can also be built as a module. If so, the module
will be called lm90.
@@ -909,7 +919,8 @@ config SENSORS_W83793
select HWMON_VID
help
If you say yes here you get support for the Winbond W83793
- hardware monitoring chip.
+ hardware monitoring chip, including support for the integrated
+ watchdog.
This driver can also be built as a module. If so, the module
will be called w83793.
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 4bc215c0953..4aa1a3d112a 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -29,12 +29,13 @@ obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o
obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o
obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o
obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o
+obj-$(CONFIG_SENSORS_ADT7411) += adt7411.o
obj-$(CONFIG_SENSORS_ADT7462) += adt7462.o
obj-$(CONFIG_SENSORS_ADT7470) += adt7470.o
-obj-$(CONFIG_SENSORS_ADT7473) += adt7473.o
obj-$(CONFIG_SENSORS_ADT7475) += adt7475.o
obj-$(CONFIG_SENSORS_APPLESMC) += applesmc.o
obj-$(CONFIG_SENSORS_AMS) += ams/
+obj-$(CONFIG_SENSORS_ASC7621) += asc7621.o
obj-$(CONFIG_SENSORS_ATXP1) += atxp1.o
obj-$(CONFIG_SENSORS_CORETEMP) += coretemp.o
obj-$(CONFIG_SENSORS_DME1737) += dme1737.o
diff --git a/drivers/hwmon/adcxx.c b/drivers/hwmon/adcxx.c
index 5e9e095f113..74d9c5195e4 100644
--- a/drivers/hwmon/adcxx.c
+++ b/drivers/hwmon/adcxx.c
@@ -62,18 +62,23 @@ static ssize_t adcxx_read(struct device *dev,
struct spi_device *spi = to_spi_device(dev);
struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
struct adcxx *adc = dev_get_drvdata(&spi->dev);
- u8 tx_buf[2] = { attr->index << 3 }; /* other bits are don't care */
+ u8 tx_buf[2];
u8 rx_buf[2];
int status;
- int value;
+ u32 value;
if (mutex_lock_interruptible(&adc->lock))
return -ERESTARTSYS;
- status = spi_write_then_read(spi, tx_buf, sizeof(tx_buf),
- rx_buf, sizeof(rx_buf));
+ if (adc->channels == 1) {
+ status = spi_read(spi, rx_buf, sizeof(rx_buf));
+ } else {
+ tx_buf[0] = attr->index << 3; /* other bits are don't care */
+ status = spi_write_then_read(spi, tx_buf, sizeof(tx_buf),
+ rx_buf, sizeof(rx_buf));
+ }
if (status < 0) {
- dev_warn(dev, "spi_write_then_read failed with status %d\n",
+ dev_warn(dev, "SPI synch. transfer failed with status %d\n",
status);
goto out;
}
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c
new file mode 100644
index 00000000000..3471884e42d
--- /dev/null
+++ b/drivers/hwmon/adt7411.c
@@ -0,0 +1,366 @@
+/*
+ * Driver for the ADT7411 (I2C/SPI 8 channel 10 bit ADC & temperature-sensor)
+ *
+ * Copyright (C) 2008, 2010 Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * TODO: SPI, support for external temperature sensor
+ * use power-down mode for suspend?, interrupt handling?
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+#define ADT7411_REG_INT_TEMP_VDD_LSB 0x03
+#define ADT7411_REG_EXT_TEMP_AIN14_LSB 0x04
+#define ADT7411_REG_VDD_MSB 0x06
+#define ADT7411_REG_INT_TEMP_MSB 0x07
+#define ADT7411_REG_EXT_TEMP_AIN1_MSB 0x08
+
+#define ADT7411_REG_CFG1 0x18
+#define ADT7411_CFG1_START_MONITOR (1 << 0)
+
+#define ADT7411_REG_CFG2 0x19
+#define ADT7411_CFG2_DISABLE_AVG (1 << 5)
+
+#define ADT7411_REG_CFG3 0x1a
+#define ADT7411_CFG3_ADC_CLK_225 (1 << 0)
+#define ADT7411_CFG3_REF_VDD (1 << 4)
+
+#define ADT7411_REG_DEVICE_ID 0x4d
+#define ADT7411_REG_MANUFACTURER_ID 0x4e
+
+#define ADT7411_DEVICE_ID 0x2
+#define ADT7411_MANUFACTURER_ID 0x41
+
+static const unsigned short normal_i2c[] = { 0x48, 0x4a, 0x4b, I2C_CLIENT_END };
+
+struct adt7411_data {
+ struct mutex device_lock; /* for "atomic" device accesses */
+ struct mutex update_lock;
+ unsigned long next_update;
+ int vref_cached;
+ struct device *hwmon_dev;
+};
+
+/*
+ * When reading a register containing (up to 4) lsb, all associated
+ * msb-registers get locked by the hardware. After _one_ of those msb is read,
+ * _all_ are unlocked. In order to use this locking correctly, reading lsb/msb
+ * is protected here with a mutex, too.
+ */
+static int adt7411_read_10_bit(struct i2c_client *client, u8 lsb_reg,
+ u8 msb_reg, u8 lsb_shift)
+{
+ struct adt7411_data *data = i2c_get_clientdata(client);
+ int val, tmp;
+
+ mutex_lock(&data->device_lock);
+
+ val = i2c_smbus_read_byte_data(client, lsb_reg);
+ if (val < 0)
+ goto exit_unlock;
+
+ tmp = (val >> lsb_shift) & 3;
+ val = i2c_smbus_read_byte_data(client, msb_reg);
+
+ if (val >= 0)
+ val = (val << 2) | tmp;
+
+ exit_unlock:
+ mutex_unlock(&data->device_lock);
+
+ return val;
+}
+
+static int adt7411_modify_bit(struct i2c_client *client, u8 reg, u8 bit,
+ bool flag)
+{
+ struct adt7411_data *data = i2c_get_clientdata(client);
+ int ret, val;
+
+ mutex_lock(&data->device_lock);
+
+ ret = i2c_smbus_read_byte_data(client, reg);
+ if (ret < 0)
+ goto exit_unlock;
+
+ if (flag)
+ val = ret | bit;
+ else
+ val = ret & ~bit;
+
+ ret = i2c_smbus_write_byte_data(client, reg, val);
+
+ exit_unlock:
+ mutex_unlock(&data->device_lock);
+ return ret;
+}
+
+static ssize_t adt7411_show_vdd(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
+ ADT7411_REG_VDD_MSB, 2);
+
+ return ret < 0 ? ret : sprintf(buf, "%u\n", ret * 7000 / 1024);
+}
+
+static ssize_t adt7411_show_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ int val = adt7411_read_10_bit(client, ADT7411_REG_INT_TEMP_VDD_LSB,
+ ADT7411_REG_INT_TEMP_MSB, 0);
+
+ if (val < 0)
+ return val;
+
+ val = val & 0x200 ? val - 0x400 : val; /* 10 bit signed */
+
+ return sprintf(buf, "%d\n", val * 250);
+}
+
+static ssize_t adt7411_show_input(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int nr = to_sensor_dev_attr(attr)->index;
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7411_data *data = i2c_get_clientdata(client);
+ int val;
+ u8 lsb_reg, lsb_shift;
+
+ mutex_lock(&data->update_lock);
+ if (time_after_eq(jiffies, data->next_update)) {
+ val = i2c_smbus_read_byte_data(client, ADT7411_REG_CFG3);
+ if (val < 0)
+ goto exit_unlock;
+
+ if (val & ADT7411_CFG3_REF_VDD) {
+ val = adt7411_read_10_bit(client,
+ ADT7411_REG_INT_TEMP_VDD_LSB,
+ ADT7411_REG_VDD_MSB, 2);
+ if (val < 0)
+ goto exit_unlock;
+
+ data->vref_cached = val * 7000 / 1024;
+ } else {
+ data->vref_cached = 2250;
+ }
+
+ data->next_update = jiffies + HZ;
+ }
+
+ lsb_reg = ADT7411_REG_EXT_TEMP_AIN14_LSB + (nr >> 2);
+ lsb_shift = 2 * (nr & 0x03);
+ val = adt7411_read_10_bit(client, lsb_reg,
+ ADT7411_REG_EXT_TEMP_AIN1_MSB + nr, lsb_shift);
+ if (val < 0)
+ goto exit_unlock;
+
+ val = sprintf(buf, "%u\n", val * data->vref_cached / 1024);
+ exit_unlock:
+ mutex_unlock(&data->update_lock);
+ return val;
+}
+
+static ssize_t adt7411_show_bit(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute_2 *attr2 = to_sensor_dev_attr_2(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ int ret = i2c_smbus_read_byte_data(client, attr2->index);
+
+ return ret < 0 ? ret : sprintf(buf, "%u\n", !!(ret & attr2->nr));
+}
+
+static ssize_t adt7411_set_bit(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct sensor_device_attribute_2 *s_attr2 = to_sensor_dev_attr_2(attr);
+ struct i2c_client *client = to_i2c_client(dev);
+ struct adt7411_data *data = i2c_get_clientdata(client);
+ int ret;
+ unsigned long flag;
+
+ ret = strict_strtoul(buf, 0, &flag);
+ if (ret || flag > 1)
+ return -EINVAL;
+
+ ret = adt7411_modify_bit(client, s_attr2->index, s_attr2->nr, flag);
+
+ /* force update */
+ mutex_lock(&data->update_lock);
+ data->next_update = jiffies;
+ mutex_unlock(&data->update_lock);
+
+ return ret < 0 ? ret : count;
+}
+
+#define ADT7411_BIT_ATTR(__name, __reg, __bit) \
+ SENSOR_DEVICE_ATTR_2(__name, S_IRUGO | S_IWUSR, adt7411_show_bit, \
+ adt7411_set_bit, __bit, __reg)
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, adt7411_show_temp, NULL);
+static DEVICE_ATTR(in0_input, S_IRUGO, adt7411_show_vdd, NULL);
+static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, adt7411_show_input, NULL, 0);
+static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, adt7411_show_input, NULL, 1);
+static SENSOR_DEVICE_ATTR(in3_input, S_IRUGO, adt7411_show_input, NULL, 2);
+static SENSOR_DEVICE_ATTR(in4_input, S_IRUGO, adt7411_show_input, NULL, 3);
+static SENSOR_DEVICE_ATTR(in5_input, S_IRUGO, adt7411_show_input, NULL, 4);
+static SENSOR_DEVICE_ATTR(in6_input, S_IRUGO, adt7411_show_input, NULL, 5);
+static SENSOR_DEVICE_ATTR(in7_input, S_IRUGO, adt7411_show_input, NULL, 6);
+static SENSOR_DEVICE_ATTR(in8_input, S_IRUGO, adt7411_show_input, NULL, 7);
+static ADT7411_BIT_ATTR(no_average, ADT7411_REG_CFG2, ADT7411_CFG2_DISABLE_AVG);
+static ADT7411_BIT_ATTR(fast_sampling, ADT7411_REG_CFG3, ADT7411_CFG3_ADC_CLK_225);
+static ADT7411_BIT_ATTR(adc_ref_vdd, ADT7411_REG_CFG3, ADT7411_CFG3_REF_VDD);
+
+static struct attribute *adt7411_attrs[] = {
+ &dev_attr_temp1_input.attr,
+ &dev_attr_in0_input.attr,
+ &sensor_dev_attr_in1_input.dev_attr.attr,
+ &sensor_dev_attr_in2_input.dev_attr.attr,
+ &sensor_dev_attr_in3_input.dev_attr.attr,
+ &sensor_dev_attr_in4_input.dev_attr.attr,
+ &sensor_dev_attr_in5_input.dev_attr.attr,
+ &sensor_dev_attr_in6_input.dev_attr.attr,
+ &sensor_dev_attr_in7_input.dev_attr.attr,
+ &sensor_dev_attr_in8_input.dev_attr.attr,
+ &sensor_dev_attr_no_average.dev_attr.attr,
+ &sensor_dev_attr_fast_sampling.dev_attr.attr,
+ &sensor_dev_attr_adc_ref_vdd.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group adt7411_attr_grp = {
+ .attrs = adt7411_attrs,
+};
+
+static int adt7411_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ int val;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ val = i2c_smbus_read_byte_data(client, ADT7411_REG_MANUFACTURER_ID);
+ if (val < 0 || val != ADT7411_MANUFACTURER_ID) {
+ dev_dbg(&client->dev, "Wrong manufacturer ID. Got %d, "
+ "expected %d\n", val, ADT7411_MANUFACTURER_ID);
+ return -ENODEV;
+ }
+
+ val = i2c_smbus_read_byte_data(client, ADT7411_REG_DEVICE_ID);
+ if (val < 0 || val != ADT7411_DEVICE_ID) {
+ dev_dbg(&client->dev, "Wrong device ID. Got %d, "
+ "expected %d\n", val, ADT7411_DEVICE_ID);
+ return -ENODEV;
+ }
+
+ strlcpy(info->type, "adt7411", I2C_NAME_SIZE);
+
+ return 0;
+}
+
+static int __devinit adt7411_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct adt7411_data *data;
+ int ret;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ mutex_init(&data->device_lock);
+ mutex_init(&data->update_lock);
+
+ ret = adt7411_modify_bit(client, ADT7411_REG_CFG1,
+ ADT7411_CFG1_START_MONITOR, 1);
+ if (ret < 0)
+ goto exit_free;
+
+ /* force update on first occasion */
+ data->next_update = jiffies;
+
+ ret = sysfs_create_group(&client->dev.kobj, &adt7411_attr_grp);
+ if (ret)
+ goto exit_free;
+
+ data->hwmon_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->hwmon_dev)) {
+ ret = PTR_ERR(data->hwmon_dev);
+ goto exit_remove;
+ }
+
+ dev_info(&client->dev, "successfully registered\n");
+
+ return 0;
+
+ exit_remove:
+ sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp);
+ exit_free:
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+ return ret;
+}
+
+static int __devexit adt7411_remove(struct i2c_client *client)
+{
+ struct adt7411_data *data = i2c_get_clientdata(client);
+
+ hwmon_device_unregister(data->hwmon_dev);
+ sysfs_remove_group(&client->dev.kobj, &adt7411_attr_grp);
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id adt7411_id[] = {
+ { "adt7411", 0 },
+ { }
+};
+
+static struct i2c_driver adt7411_driver = {
+ .driver = {
+ .name = "adt7411",
+ },
+ .probe = adt7411_probe,
+ .remove = __devexit_p(adt7411_remove),
+ .id_table = adt7411_id,
+ .detect = adt7411_detect,
+ .address_list = normal_i2c,
+ .class = I2C_CLASS_HWMON,
+};
+
+static int __init sensors_adt7411_init(void)
+{
+ return i2c_add_driver(&adt7411_driver);
+}
+module_init(sensors_adt7411_init)
+
+static void __exit sensors_adt7411_exit(void)
+{
+ i2c_del_driver(&adt7411_driver);
+}
+module_exit(sensors_adt7411_exit)
+
+MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de> and "
+ "Wolfram Sang <w.sang@pengutronix.de>");
+MODULE_DESCRIPTION("ADT7411 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/hwmon/adt7473.c b/drivers/hwmon/adt7473.c
deleted file mode 100644
index 434576f61c8..00000000000
--- a/drivers/hwmon/adt7473.c
+++ /dev/null
@@ -1,1180 +0,0 @@
-/*
- * A hwmon driver for the Analog Devices ADT7473
- * Copyright (C) 2007 IBM
- *
- * Author: Darrick J. Wong <djwong@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/i2c.h>
-#include <linux/hwmon.h>
-#include <linux/hwmon-sysfs.h>
-#include <linux/err.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/log2.h>
-
-/* Addresses to scan */
-static const unsigned short normal_i2c[] = { 0x2C, 0x2D, 0x2E, I2C_CLIENT_END };
-
-/* ADT7473 registers */
-#define ADT7473_REG_BASE_ADDR 0x20
-
-#define ADT7473_REG_VOLT_BASE_ADDR 0x21
-#define ADT7473_REG_VOLT_MIN_BASE_ADDR 0x46
-
-#define ADT7473_REG_TEMP_BASE_ADDR 0x25
-#define ADT7473_REG_TEMP_LIMITS_BASE_ADDR 0x4E
-#define ADT7473_REG_TEMP_TMIN_BASE_ADDR 0x67
-#define ADT7473_REG_TEMP_TMAX_BASE_ADDR 0x6A
-
-#define ADT7473_REG_FAN_BASE_ADDR 0x28
-#define ADT7473_REG_FAN_MIN_BASE_ADDR 0x54
-
-#define ADT7473_REG_PWM_BASE_ADDR 0x30
-#define ADT7473_REG_PWM_MIN_BASE_ADDR 0x64
-#define ADT7473_REG_PWM_MAX_BASE_ADDR 0x38
-#define ADT7473_REG_PWM_BHVR_BASE_ADDR 0x5C
-#define ADT7473_PWM_BHVR_MASK 0xE0
-#define ADT7473_PWM_BHVR_SHIFT 5
-
-#define ADT7473_REG_CFG1 0x40
-#define ADT7473_CFG1_START 0x01
-#define ADT7473_CFG1_READY 0x04
-#define ADT7473_REG_CFG2 0x73
-#define ADT7473_REG_CFG3 0x78
-#define ADT7473_REG_CFG4 0x7D
-#define ADT7473_CFG4_MAX_DUTY_AT_OVT 0x08
-#define ADT7473_REG_CFG5 0x7C
-#define ADT7473_CFG5_TEMP_TWOS 0x01
-#define ADT7473_CFG5_TEMP_OFFSET 0x02
-
-#define ADT7473_REG_DEVICE 0x3D
-#define ADT7473_VENDOR 0x41
-#define ADT7473_REG_VENDOR 0x3E
-#define ADT7473_DEVICE 0x73
-#define ADT7473_REG_REVISION 0x3F
-#define ADT7473_REV_68 0x68
-#define ADT7473_REV_69 0x69
-
-#define ADT7473_REG_ALARM1 0x41
-#define ADT7473_VCCP_ALARM 0x02
-#define ADT7473_VCC_ALARM 0x04
-#define ADT7473_R1T_ALARM 0x10
-#define ADT7473_LT_ALARM 0x20
-#define ADT7473_R2T_ALARM 0x40
-#define ADT7473_OOL 0x80
-#define ADT7473_REG_ALARM2 0x42
-#define ADT7473_OVT_ALARM 0x02
-#define ADT7473_FAN1_ALARM 0x04
-#define ADT7473_FAN2_ALARM 0x08
-#define ADT7473_FAN3_ALARM 0x10
-#define ADT7473_FAN4_ALARM 0x20
-#define ADT7473_R1T_SHORT 0x40
-#define ADT7473_R2T_SHORT 0x80
-
-#define ALARM2(x) ((x) << 8)
-
-#define ADT7473_VOLT_COUNT 2
-#define ADT7473_REG_VOLT(x) (ADT7473_REG_VOLT_BASE_ADDR + (x))
-#define ADT7473_REG_VOLT_MIN(x) (ADT7473_REG_VOLT_MIN_BASE_ADDR + ((x) * 2))
-#define ADT7473_REG_VOLT_MAX(x) (ADT7473_REG_VOLT_MIN_BASE_ADDR + \
- ((x) * 2) + 1)
-
-#define ADT7473_TEMP_COUNT 3
-#define ADT7473_REG_TEMP(x) (ADT7473_REG_TEMP_BASE_ADDR + (x))
-#define ADT7473_REG_TEMP_MIN(x) (ADT7473_REG_TEMP_LIMITS_BASE_ADDR + ((x) * 2))
-#define ADT7473_REG_TEMP_MAX(x) (ADT7473_REG_TEMP_LIMITS_BASE_ADDR + \
- ((x) * 2) + 1)
-#define ADT7473_REG_TEMP_TMIN(x) (ADT7473_REG_TEMP_TMIN_BASE_ADDR + (x))
-#define ADT7473_REG_TEMP_TMAX(x) (ADT7473_REG_TEMP_TMAX_BASE_ADDR + (x))
-
-#define ADT7473_FAN_COUNT 4
-#define ADT7473_REG_FAN(x) (ADT7473_REG_FAN_BASE_ADDR + ((x) * 2))
-#define ADT7473_REG_FAN_MIN(x) (ADT7473_REG_FAN_MIN_BASE_ADDR + ((x) * 2))
-
-#define ADT7473_PWM_COUNT 3
-#define ADT7473_REG_PWM(x) (ADT7473_REG_PWM_BASE_ADDR + (x))
-#define ADT7473_REG_PWM_MAX(x) (ADT7473_REG_PWM_MAX_BASE_ADDR + (x))
-#define ADT7473_REG_PWM_MIN(x) (ADT7473_REG_PWM_MIN_BASE_ADDR + (x))
-#define ADT7473_REG_PWM_BHVR(x) (ADT7473_REG_PWM_BHVR_BASE_ADDR + (x))
-
-/* How often do we reread sensors values? (In jiffies) */
-#define SENSOR_REFRESH_INTERVAL (2 * HZ)
-
-/* How often do we reread sensor limit values? (In jiffies) */
-#define LIMIT_REFRESH_INTERVAL (60 * HZ)
-
-/* datasheet says to divide this number by the fan reading to get fan rpm */
-#define FAN_PERIOD_TO_RPM(x) ((90000 * 60) / (x))
-#define FAN_RPM_TO_PERIOD FAN_PERIOD_TO_RPM
-#define FAN_PERIOD_INVALID 65535
-#define FAN_DATA_VALID(x) ((x) && (x) != FAN_PERIOD_INVALID)
-
-struct adt7473_data {
- struct device *hwmon_dev;
- struct attribute_group attrs;
- struct mutex lock;
- char sensors_valid;
- char limits_valid;
- unsigned long sensors_last_updated; /* In jiffies */
- unsigned long limits_last_updated; /* In jiffies */
-
- u8 volt[ADT7473_VOLT_COUNT];
- s8 volt_min[ADT7473_VOLT_COUNT];
- s8 volt_max[ADT7473_VOLT_COUNT];
-
- s8 temp[ADT7473_TEMP_COUNT];
- s8 temp_min[ADT7473_TEMP_COUNT];
- s8 temp_max[ADT7473_TEMP_COUNT];
- s8 temp_tmin[ADT7473_TEMP_COUNT];
- /* This is called the !THERM limit in the datasheet */
- s8 temp_tmax[ADT7473_TEMP_COUNT];
-
- u16 fan[ADT7473_FAN_COUNT];
- u16 fan_min[ADT7473_FAN_COUNT];
-
- u8 pwm[ADT7473_PWM_COUNT];
- u8 pwm_max[ADT7473_PWM_COUNT];
- u8 pwm_min[ADT7473_PWM_COUNT];
- u8 pwm_behavior[ADT7473_PWM_COUNT];
-
- u8 temp_twos_complement;
- u8 temp_offset;
-
- u16 alarm;
- u8 max_duty_at_overheat;
-};
-
-static int adt7473_probe(struct i2c_client *client,
- const struct i2c_device_id *id);
-static int adt7473_detect(struct i2c_client *client,
- struct i2c_board_info *info);
-static int adt7473_remove(struct i2c_client *client);
-
-static const struct i2c_device_id adt7473_id[] = {
- { "adt7473", 0 },
- { }
-};
-
-static struct i2c_driver adt7473_driver = {
- .class = I2C_CLASS_HWMON,
- .driver = {
- .name = "adt7473",
- },
- .probe = adt7473_probe,
- .remove = adt7473_remove,
- .id_table = adt7473_id,
- .detect = adt7473_detect,
- .address_list = normal_i2c,
-};
-
-/*
- * 16-bit registers on the ADT7473 are low-byte first. The data sheet says
- * that the low byte must be read before the high byte.
- */
-static inline int adt7473_read_word_data(struct i2c_client *client, u8 reg)
-{
- u16 foo;
- foo = i2c_smbus_read_byte_data(client, reg);
- foo |= ((u16)i2c_smbus_read_byte_data(client, reg + 1) << 8);
- return foo;
-}
-
-static inline int adt7473_write_word_data(struct i2c_client *client, u8 reg,
- u16 value)
-{
- return i2c_smbus_write_byte_data(client, reg, value & 0xFF)
- && i2c_smbus_write_byte_data(client, reg + 1, value >> 8);
-}
-
-static void adt7473_init_client(struct i2c_client *client)
-{
- int reg = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG1);
-
- if (!(reg & ADT7473_CFG1_READY)) {
- dev_err(&client->dev, "Chip not ready.\n");
- } else {
- /* start monitoring */
- i2c_smbus_write_byte_data(client, ADT7473_REG_CFG1,
- reg | ADT7473_CFG1_START);
- }
-}
-
-static struct adt7473_data *adt7473_update_device(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- unsigned long local_jiffies = jiffies;
- u8 cfg;
- int i;
-
- mutex_lock(&data->lock);
- if (time_before(local_jiffies, data->sensors_last_updated +
- SENSOR_REFRESH_INTERVAL)
- && data->sensors_valid)
- goto no_sensor_update;
-
- for (i = 0; i < ADT7473_VOLT_COUNT; i++)
- data->volt[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_VOLT(i));
-
- /* Determine temperature encoding */
- cfg = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG5);
- data->temp_twos_complement = (cfg & ADT7473_CFG5_TEMP_TWOS);
-
- /*
- * What does this do? it implies a variable temperature sensor
- * offset, but the datasheet doesn't say anything about this bit
- * and other parts of the datasheet imply that "offset64" mode
- * means that you shift temp values by -64 if the above bit was set.
- */
- data->temp_offset = (cfg & ADT7473_CFG5_TEMP_OFFSET);
-
- for (i = 0; i < ADT7473_TEMP_COUNT; i++)
- data->temp[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_TEMP(i));
-
- for (i = 0; i < ADT7473_FAN_COUNT; i++)
- data->fan[i] = adt7473_read_word_data(client,
- ADT7473_REG_FAN(i));
-
- for (i = 0; i < ADT7473_PWM_COUNT; i++)
- data->pwm[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_PWM(i));
-
- data->alarm = i2c_smbus_read_byte_data(client, ADT7473_REG_ALARM1);
- if (data->alarm & ADT7473_OOL)
- data->alarm |= ALARM2(i2c_smbus_read_byte_data(client,
- ADT7473_REG_ALARM2));
-
- data->sensors_last_updated = local_jiffies;
- data->sensors_valid = 1;
-
-no_sensor_update:
- if (time_before(local_jiffies, data->limits_last_updated +
- LIMIT_REFRESH_INTERVAL)
- && data->limits_valid)
- goto out;
-
- for (i = 0; i < ADT7473_VOLT_COUNT; i++) {
- data->volt_min[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_VOLT_MIN(i));
- data->volt_max[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_VOLT_MAX(i));
- }
-
- for (i = 0; i < ADT7473_TEMP_COUNT; i++) {
- data->temp_min[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_TEMP_MIN(i));
- data->temp_max[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_TEMP_MAX(i));
- data->temp_tmin[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_TEMP_TMIN(i));
- data->temp_tmax[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_TEMP_TMAX(i));
- }
-
- for (i = 0; i < ADT7473_FAN_COUNT; i++)
- data->fan_min[i] = adt7473_read_word_data(client,
- ADT7473_REG_FAN_MIN(i));
-
- for (i = 0; i < ADT7473_PWM_COUNT; i++) {
- data->pwm_max[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_PWM_MAX(i));
- data->pwm_min[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_PWM_MIN(i));
- data->pwm_behavior[i] = i2c_smbus_read_byte_data(client,
- ADT7473_REG_PWM_BHVR(i));
- }
-
- i = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG4);
- data->max_duty_at_overheat = !!(i & ADT7473_CFG4_MAX_DUTY_AT_OVT);
-
- data->limits_last_updated = local_jiffies;
- data->limits_valid = 1;
-
-out:
- mutex_unlock(&data->lock);
- return data;
-}
-
-/*
- * Conversions
- */
-
-/* IN values are scaled according to built-in resistors */
-static const int adt7473_scaling[] = { /* .001 Volts */
- 2250, 3300
-};
-#define SCALE(val, from, to) (((val) * (to) + ((from) / 2)) / (from))
-
-static int decode_volt(int volt_index, u8 raw)
-{
- return SCALE(raw, 192, adt7473_scaling[volt_index]);
-}
-
-static u8 encode_volt(int volt_index, int cooked)
-{
- int raw = SCALE(cooked, adt7473_scaling[volt_index], 192);
- return SENSORS_LIMIT(raw, 0, 255);
-}
-
-static ssize_t show_volt_min(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n",
- decode_volt(attr->index, data->volt_min[attr->index]));
-}
-
-static ssize_t set_volt_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long volt;
-
- if (strict_strtol(buf, 10, &volt))
- return -EINVAL;
-
- volt = encode_volt(attr->index, volt);
-
- mutex_lock(&data->lock);
- data->volt_min[attr->index] = volt;
- i2c_smbus_write_byte_data(client, ADT7473_REG_VOLT_MIN(attr->index),
- volt);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_volt_max(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n",
- decode_volt(attr->index, data->volt_max[attr->index]));
-}
-
-static ssize_t set_volt_max(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long volt;
-
- if (strict_strtol(buf, 10, &volt))
- return -EINVAL;
-
- volt = encode_volt(attr->index, volt);
-
- mutex_lock(&data->lock);
- data->volt_max[attr->index] = volt;
- i2c_smbus_write_byte_data(client, ADT7473_REG_VOLT_MAX(attr->index),
- volt);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_volt(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
-
- return sprintf(buf, "%d\n",
- decode_volt(attr->index, data->volt[attr->index]));
-}
-
-/*
- * This chip can report temperature data either as a two's complement
- * number in the range -128 to 127, or as an unsigned number that must
- * be offset by 64.
- */
-static int decode_temp(u8 twos_complement, u8 raw)
-{
- return twos_complement ? (s8)raw : raw - 64;
-}
-
-static u8 encode_temp(u8 twos_complement, int cooked)
-{
- u8 ret = twos_complement ? cooked & 0xFF : cooked + 64;
- return SENSORS_LIMIT(ret, 0, 255);
-}
-
-static ssize_t show_temp_min(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n", 1000 * decode_temp(
- data->temp_twos_complement,
- data->temp_min[attr->index]));
-}
-
-static ssize_t set_temp_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = encode_temp(data->temp_twos_complement, temp);
-
- mutex_lock(&data->lock);
- data->temp_min[attr->index] = temp;
- i2c_smbus_write_byte_data(client, ADT7473_REG_TEMP_MIN(attr->index),
- temp);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_temp_max(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n", 1000 * decode_temp(
- data->temp_twos_complement,
- data->temp_max[attr->index]));
-}
-
-static ssize_t set_temp_max(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = encode_temp(data->temp_twos_complement, temp);
-
- mutex_lock(&data->lock);
- data->temp_max[attr->index] = temp;
- i2c_smbus_write_byte_data(client, ADT7473_REG_TEMP_MAX(attr->index),
- temp);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_temp(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n", 1000 * decode_temp(
- data->temp_twos_complement,
- data->temp[attr->index]));
-}
-
-static ssize_t show_fan_min(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
-
- if (FAN_DATA_VALID(data->fan_min[attr->index]))
- return sprintf(buf, "%d\n",
- FAN_PERIOD_TO_RPM(data->fan_min[attr->index]));
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t set_fan_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp) || !temp)
- return -EINVAL;
-
- temp = FAN_RPM_TO_PERIOD(temp);
- temp = SENSORS_LIMIT(temp, 1, 65534);
-
- mutex_lock(&data->lock);
- data->fan_min[attr->index] = temp;
- adt7473_write_word_data(client, ADT7473_REG_FAN_MIN(attr->index), temp);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_fan(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
-
- if (FAN_DATA_VALID(data->fan[attr->index]))
- return sprintf(buf, "%d\n",
- FAN_PERIOD_TO_RPM(data->fan[attr->index]));
- else
- return sprintf(buf, "0\n");
-}
-
-static ssize_t show_max_duty_at_crit(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n", data->max_duty_at_overheat);
-}
-
-static ssize_t set_max_duty_at_crit(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- u8 reg;
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- mutex_lock(&data->lock);
- data->max_duty_at_overheat = !!temp;
- reg = i2c_smbus_read_byte_data(client, ADT7473_REG_CFG4);
- if (temp)
- reg |= ADT7473_CFG4_MAX_DUTY_AT_OVT;
- else
- reg &= ~ADT7473_CFG4_MAX_DUTY_AT_OVT;
- i2c_smbus_write_byte_data(client, ADT7473_REG_CFG4, reg);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_pwm(struct device *dev, struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm[attr->index]);
-}
-
-static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr,
- const char *buf, size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- temp = SENSORS_LIMIT(temp, 0, 255);
-
- mutex_lock(&data->lock);
- data->pwm[attr->index] = temp;
- i2c_smbus_write_byte_data(client, ADT7473_REG_PWM(attr->index), temp);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_pwm_max(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm_max[attr->index]);
-}
-
-static ssize_t set_pwm_max(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- temp = SENSORS_LIMIT(temp, 0, 255);
-
- mutex_lock(&data->lock);
- data->pwm_max[attr->index] = temp;
- i2c_smbus_write_byte_data(client, ADT7473_REG_PWM_MAX(attr->index),
- temp);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_pwm_min(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n", data->pwm_min[attr->index]);
-}
-
-static ssize_t set_pwm_min(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- temp = SENSORS_LIMIT(temp, 0, 255);
-
- mutex_lock(&data->lock);
- data->pwm_min[attr->index] = temp;
- i2c_smbus_write_byte_data(client, ADT7473_REG_PWM_MIN(attr->index),
- temp);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_temp_tmax(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n", 1000 * decode_temp(
- data->temp_twos_complement,
- data->temp_tmax[attr->index]));
-}
-
-static ssize_t set_temp_tmax(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = encode_temp(data->temp_twos_complement, temp);
-
- mutex_lock(&data->lock);
- data->temp_tmax[attr->index] = temp;
- i2c_smbus_write_byte_data(client, ADT7473_REG_TEMP_TMAX(attr->index),
- temp);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_temp_tmin(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- return sprintf(buf, "%d\n", 1000 * decode_temp(
- data->temp_twos_complement,
- data->temp_tmin[attr->index]));
-}
-
-static ssize_t set_temp_tmin(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- temp = DIV_ROUND_CLOSEST(temp, 1000);
- temp = encode_temp(data->temp_twos_complement, temp);
-
- mutex_lock(&data->lock);
- data->temp_tmin[attr->index] = temp;
- i2c_smbus_write_byte_data(client, ADT7473_REG_TEMP_TMIN(attr->index),
- temp);
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_pwm_enable(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
-
- switch (data->pwm_behavior[attr->index] >> ADT7473_PWM_BHVR_SHIFT) {
- case 3:
- return sprintf(buf, "0\n");
- case 7:
- return sprintf(buf, "1\n");
- default:
- return sprintf(buf, "2\n");
- }
-}
-
-static ssize_t set_pwm_enable(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- u8 reg;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- switch (temp) {
- case 0:
- temp = 3;
- break;
- case 1:
- temp = 7;
- break;
- case 2:
- /* Enter automatic mode with fans off */
- temp = 4;
- break;
- default:
- return -EINVAL;
- }
-
- mutex_lock(&data->lock);
- reg = i2c_smbus_read_byte_data(client,
- ADT7473_REG_PWM_BHVR(attr->index));
- reg = (temp << ADT7473_PWM_BHVR_SHIFT) |
- (reg & ~ADT7473_PWM_BHVR_MASK);
- i2c_smbus_write_byte_data(client, ADT7473_REG_PWM_BHVR(attr->index),
- reg);
- data->pwm_behavior[attr->index] = reg;
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_pwm_auto_temp(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
- int bhvr = data->pwm_behavior[attr->index] >> ADT7473_PWM_BHVR_SHIFT;
-
- switch (bhvr) {
- case 3:
- case 4:
- case 7:
- return sprintf(buf, "0\n");
- case 0:
- case 1:
- case 5:
- case 6:
- return sprintf(buf, "%d\n", bhvr + 1);
- case 2:
- return sprintf(buf, "4\n");
- }
- /* shouldn't ever get here */
- BUG();
-}
-
-static ssize_t set_pwm_auto_temp(struct device *dev,
- struct device_attribute *devattr,
- const char *buf,
- size_t count)
-{
- u8 reg;
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct i2c_client *client = to_i2c_client(dev);
- struct adt7473_data *data = i2c_get_clientdata(client);
- long temp;
-
- if (strict_strtol(buf, 10, &temp))
- return -EINVAL;
-
- switch (temp) {
- case 1:
- case 2:
- case 6:
- case 7:
- temp--;
- break;
- case 0:
- temp = 4;
- break;
- default:
- return -EINVAL;
- }
-
- mutex_lock(&data->lock);
- reg = i2c_smbus_read_byte_data(client,
- ADT7473_REG_PWM_BHVR(attr->index));
- reg = (temp << ADT7473_PWM_BHVR_SHIFT) |
- (reg & ~ADT7473_PWM_BHVR_MASK);
- i2c_smbus_write_byte_data(client, ADT7473_REG_PWM_BHVR(attr->index),
- reg);
- data->pwm_behavior[attr->index] = reg;
- mutex_unlock(&data->lock);
-
- return count;
-}
-
-static ssize_t show_alarm(struct device *dev,
- struct device_attribute *devattr,
- char *buf)
-{
- struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
- struct adt7473_data *data = adt7473_update_device(dev);
-
- if (data->alarm & attr->index)
- return sprintf(buf, "1\n");
- else
- return sprintf(buf, "0\n");
-}
-
-
-static SENSOR_DEVICE_ATTR(in1_max, S_IWUSR | S_IRUGO, show_volt_max,
- set_volt_max, 0);
-static SENSOR_DEVICE_ATTR(in2_max, S_IWUSR | S_IRUGO, show_volt_max,
- set_volt_max, 1);
-
-static SENSOR_DEVICE_ATTR(in1_min, S_IWUSR | S_IRUGO, show_volt_min,
- set_volt_min, 0);
-static SENSOR_DEVICE_ATTR(in2_min, S_IWUSR | S_IRUGO, show_volt_min,
- set_volt_min, 1);
-
-static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, show_volt, NULL, 0);
-static SENSOR_DEVICE_ATTR(in2_input, S_IRUGO, show_volt, NULL, 1);
-
-static SENSOR_DEVICE_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL,
- ADT7473_VCCP_ALARM);
-static SENSOR_DEVICE_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL,
- ADT7473_VCC_ALARM);
-
-static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_temp_max,
- set_temp_max, 0);
-static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp_max,
- set_temp_max, 1);
-static SENSOR_DEVICE_ATTR(temp3_max, S_IWUSR | S_IRUGO, show_temp_max,
- set_temp_max, 2);
-
-static SENSOR_DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, show_temp_min,
- set_temp_min, 0);
-static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp_min,
- set_temp_min, 1);
-static SENSOR_DEVICE_ATTR(temp3_min, S_IWUSR | S_IRUGO, show_temp_min,
- set_temp_min, 2);
-
-static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1);
-static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2);
-
-static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL,
- ADT7473_R1T_ALARM | ALARM2(ADT7473_R1T_SHORT));
-static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL,
- ADT7473_LT_ALARM);
-static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL,
- ADT7473_R2T_ALARM | ALARM2(ADT7473_R2T_SHORT));
-
-static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min,
- set_fan_min, 0);
-static SENSOR_DEVICE_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min,
- set_fan_min, 1);
-static SENSOR_DEVICE_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min,
- set_fan_min, 2);
-static SENSOR_DEVICE_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min,
- set_fan_min, 3);
-
-static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
-static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1);
-static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2);
-static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3);
-
-static SENSOR_DEVICE_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL,
- ALARM2(ADT7473_FAN1_ALARM));
-static SENSOR_DEVICE_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL,
- ALARM2(ADT7473_FAN2_ALARM));
-static SENSOR_DEVICE_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL,
- ALARM2(ADT7473_FAN3_ALARM));
-static SENSOR_DEVICE_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL,
- ALARM2(ADT7473_FAN4_ALARM));
-
-static SENSOR_DEVICE_ATTR(pwm_use_point2_pwm_at_crit, S_IWUSR | S_IRUGO,
- show_max_duty_at_crit, set_max_duty_at_crit, 0);
-
-static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 0);
-static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 1);
-static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, set_pwm, 2);
-
-static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IWUSR | S_IRUGO,
- show_pwm_min, set_pwm_min, 0);
-static SENSOR_DEVICE_ATTR(pwm2_auto_point1_pwm, S_IWUSR | S_IRUGO,
- show_pwm_min, set_pwm_min, 1);
-static SENSOR_DEVICE_ATTR(pwm3_auto_point1_pwm, S_IWUSR | S_IRUGO,
- show_pwm_min, set_pwm_min, 2);
-
-static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IWUSR | S_IRUGO,
- show_pwm_max, set_pwm_max, 0);
-static SENSOR_DEVICE_ATTR(pwm2_auto_point2_pwm, S_IWUSR | S_IRUGO,
- show_pwm_max, set_pwm_max, 1);
-static SENSOR_DEVICE_ATTR(pwm3_auto_point2_pwm, S_IWUSR | S_IRUGO,
- show_pwm_max, set_pwm_max, 2);
-
-static SENSOR_DEVICE_ATTR(temp1_auto_point1_temp, S_IWUSR | S_IRUGO,
- show_temp_tmin, set_temp_tmin, 0);
-static SENSOR_DEVICE_ATTR(temp2_auto_point1_temp, S_IWUSR | S_IRUGO,
- show_temp_tmin, set_temp_tmin, 1);
-static SENSOR_DEVICE_ATTR(temp3_auto_point1_temp, S_IWUSR | S_IRUGO,
- show_temp_tmin, set_temp_tmin, 2);
-
-static SENSOR_DEVICE_ATTR(temp1_auto_point2_temp, S_IWUSR | S_IRUGO,
- show_temp_tmax, set_temp_tmax, 0);
-static SENSOR_DEVICE_ATTR(temp2_auto_point2_temp, S_IWUSR | S_IRUGO,
- show_temp_tmax, set_temp_tmax, 1);
-static SENSOR_DEVICE_ATTR(temp3_auto_point2_temp, S_IWUSR | S_IRUGO,
- show_temp_tmax, set_temp_tmax, 2);
-
-static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- set_pwm_enable, 0);
-static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- set_pwm_enable, 1);
-static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable,
- set_pwm_enable, 2);
-
-static SENSOR_DEVICE_ATTR(pwm1_auto_channels_temp, S_IWUSR | S_IRUGO,
- show_pwm_auto_temp, set_pwm_auto_temp, 0);
-static SENSOR_DEVICE_ATTR(pwm2_auto_channels_temp, S_IWUSR | S_IRUGO,
- show_pwm_auto_temp, set_pwm_auto_temp, 1);
-static SENSOR_DEVICE_ATTR(pwm3_auto_channels_temp, S_IWUSR | S_IRUGO,
- show_pwm_auto_temp, set_pwm_auto_temp, 2);
-
-static struct attribute *adt7473_attr[] =
-{
- &sensor_dev_attr_in1_max.dev_attr.attr,
- &sensor_dev_attr_in2_max.dev_attr.attr,
- &sensor_dev_attr_in1_min.dev_attr.attr,
- &sensor_dev_attr_in2_min.dev_attr.attr,
- &sensor_dev_attr_in1_input.dev_attr.attr,
- &sensor_dev_attr_in2_input.dev_attr.attr,
- &sensor_dev_attr_in1_alarm.dev_attr.attr,
- &sensor_dev_attr_in2_alarm.dev_attr.attr,
-
- &sensor_dev_attr_temp1_max.dev_attr.attr,
- &sensor_dev_attr_temp2_max.dev_attr.attr,
- &sensor_dev_attr_temp3_max.dev_attr.attr,
- &sensor_dev_attr_temp1_min.dev_attr.attr,
- &sensor_dev_attr_temp2_min.dev_attr.attr,
- &sensor_dev_attr_temp3_min.dev_attr.attr,
- &sensor_dev_attr_temp1_input.dev_attr.attr,
- &sensor_dev_attr_temp2_input.dev_attr.attr,
- &sensor_dev_attr_temp3_input.dev_attr.attr,
- &sensor_dev_attr_temp1_alarm.dev_attr.attr,
- &sensor_dev_attr_temp2_alarm.dev_attr.attr,
- &sensor_dev_attr_temp3_alarm.dev_attr.attr,
- &sensor_dev_attr_temp1_auto_point1_temp.dev_attr.attr,
- &sensor_dev_attr_temp2_auto_point1_temp.dev_attr.attr,
- &sensor_dev_attr_temp3_auto_point1_temp.dev_attr.attr,
- &sensor_dev_attr_temp1_auto_point2_temp.dev_attr.attr,
- &sensor_dev_attr_temp2_auto_point2_temp.dev_attr.attr,
- &sensor_dev_attr_temp3_auto_point2_temp.dev_attr.attr,
-
- &sensor_dev_attr_fan1_min.dev_attr.attr,
- &sensor_dev_attr_fan2_min.dev_attr.attr,
- &sensor_dev_attr_fan3_min.dev_attr.attr,
- &sensor_dev_attr_fan4_min.dev_attr.attr,
- &sensor_dev_attr_fan1_input.dev_attr.attr,
- &sensor_dev_attr_fan2_input.dev_attr.attr,
- &sensor_dev_attr_fan3_input.dev_attr.attr,
- &sensor_dev_attr_fan4_input.dev_attr.attr,
- &sensor_dev_attr_fan1_alarm.dev_attr.attr,
- &sensor_dev_attr_fan2_alarm.dev_attr.attr,
- &sensor_dev_attr_fan3_alarm.dev_attr.attr,
- &sensor_dev_attr_fan4_alarm.dev_attr.attr,
-
- &sensor_dev_attr_pwm_use_point2_pwm_at_crit.dev_attr.attr,
-
- &sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm2.dev_attr.attr,
- &sensor_dev_attr_pwm3.dev_attr.attr,
- &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
- &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
- &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
- &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
- &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
- &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
-
- &sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm2_enable.dev_attr.attr,
- &sensor_dev_attr_pwm3_enable.dev_attr.attr,
- &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
- &sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
- &sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
-
- NULL
-};
-
-/* Return 0 if detection is successful, -ENODEV otherwise */
-static int adt7473_detect(struct i2c_client *client,
- struct i2c_board_info *info)
-{
- struct i2c_adapter *adapter = client->adapter;
- int vendor, device, revision;
-
- if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
- return -ENODEV;
-
- vendor = i2c_smbus_read_byte_data(client, ADT7473_REG_VENDOR);
- if (vendor != ADT7473_VENDOR)
- return -ENODEV;
-
- device = i2c_smbus_read_byte_data(client, ADT7473_REG_DEVICE);
- if (device != ADT7473_DEVICE)
- return -ENODEV;
-
- revision = i2c_smbus_read_byte_data(client, ADT7473_REG_REVISION);
- if (revision != ADT7473_REV_68 && revision != ADT7473_REV_69)
- return -ENODEV;
-
- strlcpy(info->type, "adt7473", I2C_NAME_SIZE);
-
- return 0;
-}
-
-static int adt7473_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
-{
- struct adt7473_data *data;
- int err;
-
- data = kzalloc(sizeof(struct adt7473_data), GFP_KERNEL);
- if (!data) {
- err = -ENOMEM;
- goto exit;
- }
-
- i2c_set_clientdata(client, data);
- mutex_init(&data->lock);
-
- dev_info(&client->dev, "%s chip found\n", client->name);
-
- /* Initialize the ADT7473 chip */
- adt7473_init_client(client);
-
- /* Register sysfs hooks */
- data->attrs.attrs = adt7473_attr;
- err = sysfs_create_group(&client->dev.kobj, &data->attrs);
- if (err)
- goto exit_free;
-
- data->hwmon_dev = hwmon_device_register(&client->dev);
- if (IS_ERR(data->hwmon_dev)) {
- err = PTR_ERR(data->hwmon_dev);
- goto exit_remove;
- }
-
- return 0;
-
-exit_remove:
- sysfs_remove_group(&client->dev.kobj, &data->attrs);
-exit_free:
- kfree(data);
-exit:
- return err;
-}
-
-static int adt7473_remove(struct i2c_client *client)
-{
- struct adt7473_data *data = i2c_get_clientdata(client);
-
- hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&client->dev.kobj, &data->attrs);
- kfree(data);
- return 0;
-}
-
-static int __init adt7473_init(void)
-{
- pr_notice("The adt7473 driver is deprecated, please use the adt7475 "
- "driver instead\n");
- return i2c_add_driver(&adt7473_driver);
-}
-
-static void __exit adt7473_exit(void)
-{
- i2c_del_driver(&adt7473_driver);
-}
-
-MODULE_AUTHOR("Darrick J. Wong <djwong@us.ibm.com>");
-MODULE_DESCRIPTION("ADT7473 driver");
-MODULE_LICENSE("GPL");
-
-module_init(adt7473_init);
-module_exit(adt7473_exit);
diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c
new file mode 100644
index 00000000000..7f948105d8a
--- /dev/null
+++ b/drivers/hwmon/asc7621.c
@@ -0,0 +1,1255 @@
+/*
+ * asc7621.c - Part of lm_sensors, Linux kernel modules for hardware monitoring
+ * Copyright (c) 2007, 2010 George Joseph <george.joseph@fairview5.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/i2c.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Addresses to scan */
+static unsigned short normal_i2c[] = {
+ 0x2c, 0x2d, 0x2e, I2C_CLIENT_END
+};
+
+enum asc7621_type {
+ asc7621,
+ asc7621a
+};
+
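+/*
+ * Update intervals, in jiffies: high-priority registers are refreshed
+ * roughly every 1.5 seconds, low-priority registers roughly once a minute.
+ */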
+#define INTERVAL_HIGH (HZ + HZ / 2)
+#define INTERVAL_LOW (1 * 60 * HZ)
+#define PRI_NONE 0
+#define PRI_LOW 1
+#define PRI_HIGH 2
+#define FIRST_CHIP asc7621
+#define LAST_CHIP asc7621a
+
+struct asc7621_chip {
+ char *name;
+ enum asc7621_type chip_type;
+ u8 company_reg;
+ u8 company_id;
+ u8 verstep_reg;
+ u8 verstep_id;
+ unsigned short *addresses;
+};
+
+static struct asc7621_chip asc7621_chips[] = {
+ {
+ .name = "asc7621",
+ .chip_type = asc7621,
+ .company_reg = 0x3e,
+ .company_id = 0x61,
+ .verstep_reg = 0x3f,
+ .verstep_id = 0x6c,
+ .addresses = normal_i2c,
+ },
+ {
+ .name = "asc7621a",
+ .chip_type = asc7621a,
+ .company_reg = 0x3e,
+ .company_id = 0x61,
+ .verstep_reg = 0x3f,
+ .verstep_id = 0x6d,
+ .addresses = normal_i2c,
+ },
+};
+
+/*
+ * Defines the highest register to be used, not the count.
+ * The actual count will probably be smaller because of gaps
+ * in the implementation (unused register locations).
+ * This define will safely set the array size of both the parameter
+ * and data arrays.
+ * This comes from the data sheet register description table.
+ */
+#define LAST_REGISTER 0xff
+
+struct asc7621_data {
+ struct i2c_client client;
+ struct device *class_dev;
+ struct mutex update_lock;
+ int valid; /* !=0 if following fields are valid */
+ unsigned long last_high_reading; /* In jiffies */
+ unsigned long last_low_reading; /* In jiffies */
+ /*
+ * Registers we care about occupy the corresponding index
+ * in the array. Registers we don't care about are left
+ * at 0.
+ */
+ u8 reg[LAST_REGISTER + 1];
+};
+
+/*
+ * Macro to get the parent asc7621_param structure
+ * from a sensor_device_attribute passed into the
+ * show/store functions.
+ */
+#define to_asc7621_param(_sda) \
+ container_of(_sda, struct asc7621_param, sda)
+
+/*
+ * Each parameter to be retrieved needs an asc7621_param structure
+ * allocated. It contains the sensor_device_attribute structure
+ * and the control info needed to retrieve the value from the register map.
+ */
+struct asc7621_param {
+ struct sensor_device_attribute sda;
+ u8 priority;
+ u8 msb[3];
+ u8 lsb[3];
+ u8 mask[3];
+ u8 shift[3];
+};
+
+/*
+ * This is the map that ultimately indicates whether we'll be
+ * retrieving a register value or not, and at what frequency.
+ */
+static u8 asc7621_register_priorities[255];
+
+static struct asc7621_data *asc7621_update_device(struct device *dev);
+
+static inline u8 read_byte(struct i2c_client *client, u8 reg)
+{
+ int res = i2c_smbus_read_byte_data(client, reg);
+ if (res < 0) {
+ dev_err(&client->dev,
+ "Unable to read from register 0x%02x.\n", reg);
+ return 0;
+ };
+ return res & 0xff;
+}
+
+static inline int write_byte(struct i2c_client *client, u8 reg, u8 data)
+{
+ int res = i2c_smbus_write_byte_data(client, reg, data);
+ if (res < 0) {
+ dev_err(&client->dev,
+ "Unable to write value 0x%02x to register 0x%02x.\n",
+ data, reg);
+ };
+ return res;
+}
+
+/*
+ * Data Handlers
+ * Each function handles the formatting, storage
+ * and retrieval of like parameters.
+ */
+
+#define SETUP_SHOW_data_param(d, a) \
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(a); \
+ struct asc7621_data *data = asc7621_update_device(d); \
+ struct asc7621_param *param = to_asc7621_param(sda)
+
+#define SETUP_STORE_data_param(d, a) \
+ struct sensor_device_attribute *sda = to_sensor_dev_attr(a); \
+ struct i2c_client *client = to_i2c_client(d); \
+ struct asc7621_data *data = i2c_get_clientdata(client); \
+ struct asc7621_param *param = to_asc7621_param(sda)
+
+/*
+ * u8 is just what it sounds like...an unsigned byte with no
+ * special formatting.
+ */
+static ssize_t show_u8(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+
+ return sprintf(buf, "%u\n", data->reg[param->msb[0]]);
+}
+
+static ssize_t store_u8(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ reqval = SENSORS_LIMIT(reqval, 0, 255);
+
+ mutex_lock(&data->update_lock);
+ data->reg[param->msb[0]] = reqval;
+ write_byte(client, param->msb[0], reqval);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+/*
+ * Many of the config values occupy only a few bits of a register.
+ */
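+/*
+ * Worked example (illustrative values): with mask[0] = 0x07 and shift[0] = 4,
+ * a cached register value of 0x5a is reported by show_bitmask() as
+ * (0x5a >> 4) & 0x07 = 5; store_bitmask() rewrites only those three bits and
+ * preserves the rest of the register.
+ */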
+static ssize_t show_bitmask(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+
+ return sprintf(buf, "%u\n",
+ (data->reg[param->msb[0]] >> param->
+ shift[0]) & param->mask[0]);
+}
+
+static ssize_t store_bitmask(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval;
+ u8 currval;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ reqval = SENSORS_LIMIT(reqval, 0, param->mask[0]);
+
+ reqval = (reqval & param->mask[0]) << param->shift[0];
+
+ mutex_lock(&data->update_lock);
+ currval = read_byte(client, param->msb[0]);
+ reqval |= (currval & ~(param->mask[0] << param->shift[0]));
+ data->reg[param->msb[0]] = reqval;
+ write_byte(client, param->msb[0], reqval);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+/*
+ * 16-bit fan RPM values are reported by the device as the number of
+ * 11.111 us periods (90 kHz) between full fan rotations. Therefore:
+ * RPM = (90000 * 60) / register value
+ */
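+/*
+ * For example, a (hypothetical) tach reading of 4500 periods is reported as
+ * 5400000 / 4500 = 1200 RPM; readings of 0 and 0xffff are treated as the
+ * special cases -1 and 0 by show_fan16() below.
+ */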
+static ssize_t show_fan16(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u16 regval;
+
+ mutex_lock(&data->update_lock);
+ regval = (data->reg[param->msb[0]] << 8) | data->reg[param->lsb[0]];
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%u\n",
+ (regval == 0 ? -1 : (regval) ==
+ 0xffff ? 0 : 5400000 / regval));
+}
+
+static ssize_t store_fan16(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ reqval =
+ (SENSORS_LIMIT((reqval) <= 0 ? 0 : 5400000 / (reqval), 0, 65534));
+
+ mutex_lock(&data->update_lock);
+ data->reg[param->msb[0]] = (reqval >> 8) & 0xff;
+ data->reg[param->lsb[0]] = reqval & 0xff;
+ write_byte(client, param->msb[0], data->reg[param->msb[0]]);
+ write_byte(client, param->lsb[0], data->reg[param->lsb[0]]);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+/*
+ * Voltages are scaled in the device so that the nominal voltage
+ * is 3/4ths of the 0-255 range (i.e. 192).
+ * If all voltages are 'normal' then all voltage registers will
+ * read 0xC0. This doesn't help us if we don't have a point of reference.
+ * The data sheet, however, provides the full-scale value for each input,
+ * which is stored in asc7621_in_scaling. The sda->index parameter value
+ * provides the index into asc7621_in_scaling.
+ *
+ * NOTE: The chip expects the first 2 inputs to be 2.5 and 2.25 volts
+ * respectively. That doesn't mean that's what the motherboard provides. :)
+ */
+
+static int asc7621_in_scaling[] = {
+ 3320, 3000, 4380, 6640, 16000
+};
+
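+/*
+ * Worked example (illustrative reading): for the input scaled to 6640 mV
+ * full scale, a nominal MSB reading of 0xc0 (192) decodes to
+ * (192 * 6640) / 256 = 4980 mV before the 2-bit LSB correction is added.
+ */
+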
+static ssize_t show_in10(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u16 regval;
+ u8 nr = sda->index;
+
+ mutex_lock(&data->update_lock);
+ regval = (data->reg[param->msb[0]] * asc7621_in_scaling[nr]) / 256;
+
+ /* The LSB value is a 2-bit scaling of the MSB's LSbit value.
+	 * I.e. if the maximum voltage for this input is 6640 millivolts, then
+	 * an MSB register value of 0 = 0 mV and 255 = 6640 mV.
+	 * A 1-step change therefore represents 25.9 mV (6640 / 256).
+	 * The extra 2 bits therefore represent increments of 6.48 mV.
+ */
+ regval += ((asc7621_in_scaling[nr] / 256) / 4) *
+ (data->reg[param->lsb[0]] >> 6);
+
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%u\n", regval);
+}
+
+/* 8 bit voltage values (the mins and maxs) */
+static ssize_t show_in8(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u8 nr = sda->index;
+
+ return sprintf(buf, "%u\n",
+ ((data->reg[param->msb[0]] *
+ asc7621_in_scaling[nr]) / 256));
+}
+
+static ssize_t store_in8(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval;
+ u8 nr = sda->index;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ reqval = SENSORS_LIMIT(reqval, 0, asc7621_in_scaling[nr]);
+
+ reqval = (reqval * 255 + 128) / asc7621_in_scaling[nr];
+
+ mutex_lock(&data->update_lock);
+ data->reg[param->msb[0]] = reqval;
+ write_byte(client, param->msb[0], reqval);
+ mutex_unlock(&data->update_lock);
+
+ return count;
+}
+
+static ssize_t show_temp8(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+
+ return sprintf(buf, "%d\n", ((s8) data->reg[param->msb[0]]) * 1000);
+}
+
+static ssize_t store_temp8(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval;
+ s8 temp;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ reqval = SENSORS_LIMIT(reqval, -127000, 127000);
+
+ temp = reqval / 1000;
+
+ mutex_lock(&data->update_lock);
+ data->reg[param->msb[0]] = temp;
+ write_byte(client, param->msb[0], temp);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+/*
+ * Temperatures that occupy 2 bytes always have the whole
+ * number of degrees in the MSB with some part of the LSB
+ * indicating fractional degrees.
+ */
+
+/* mmmmmmmm.llxxxxxx */
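+/*
+ * Worked example (illustrative reading): an MSB of 0x2a (42) with the top
+ * two bits of the LSB equal to 0b10 decodes to 42 * 1000 + 2 * 250 = 42500,
+ * i.e. 42.5 degrees C.
+ */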
+static ssize_t show_temp10(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u8 msb, lsb;
+ int temp;
+
+ mutex_lock(&data->update_lock);
+ msb = data->reg[param->msb[0]];
+ lsb = (data->reg[param->lsb[0]] >> 6) & 0x03;
+ temp = (((s8) msb) * 1000) + (lsb * 250);
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", temp);
+}
+
+/* mmmmmm.ll */
+static ssize_t show_temp62(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u8 regval = data->reg[param->msb[0]];
+ int temp = ((s8) (regval & 0xfc) * 1000) + ((regval & 0x03) * 250);
+
+ return sprintf(buf, "%d\n", temp);
+}
+
+static ssize_t store_temp62(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval, i, f;
+ s8 temp;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ reqval = SENSORS_LIMIT(reqval, -32000, 31750);
+ i = reqval / 1000;
+ f = reqval - (i * 1000);
+ temp = i << 2;
+ temp |= f / 250;
+
+ mutex_lock(&data->update_lock);
+ data->reg[param->msb[0]] = temp;
+ write_byte(client, param->msb[0], temp);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+/*
+ * The ASC7621 doesn't provide an "auto_point2". Instead, you
+ * specify auto_point1 and a range. To keep with the sysfs
+ * hwmon specs, we synthesize auto_point2 from them.
+ */
+
+static u32 asc7621_range_map[] = {
+ 2000, 2500, 3330, 4000, 5000, 6670, 8000, 10000,
+ 13330, 16000, 20000, 26670, 32000, 40000, 53330, 80000,
+};
+
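+/*
+ * Worked example (illustrative values): with auto_point1 programmed to
+ * 40 degrees C and a range field of 4, asc7621_range_map[4] = 5000, so the
+ * synthesized auto_point2 reads back as 40000 + 5000 = 45000 (45 degrees C).
+ */
+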
+static ssize_t show_ap2_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ long auto_point1;
+ u8 regval;
+ int temp;
+
+ mutex_lock(&data->update_lock);
+ auto_point1 = ((s8) data->reg[param->msb[1]]) * 1000;
+ regval =
+ ((data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0]);
+ temp = auto_point1 + asc7621_range_map[SENSORS_LIMIT(regval, 0, 15)];
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%d\n", temp);
+
+}
+
+static ssize_t store_ap2_temp(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval, auto_point1;
+ int i;
+ u8 currval, newval = 0;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ auto_point1 = data->reg[param->msb[1]] * 1000;
+ reqval = SENSORS_LIMIT(reqval, auto_point1 + 2000, auto_point1 + 80000);
+
+ for (i = ARRAY_SIZE(asc7621_range_map) - 1; i >= 0; i--) {
+ if (reqval >= auto_point1 + asc7621_range_map[i]) {
+ newval = i;
+ break;
+ }
+ }
+
+ newval = (newval & param->mask[0]) << param->shift[0];
+ currval = read_byte(client, param->msb[0]);
+ newval |= (currval & ~(param->mask[0] << param->shift[0]));
+ data->reg[param->msb[0]] = newval;
+ write_byte(client, param->msb[0], newval);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_pwm_ac(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u8 config, altbit, regval;
+ u8 map[] = {
+ 0x01, 0x02, 0x04, 0x1f, 0x00, 0x06, 0x07, 0x10,
+ 0x08, 0x0f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f
+ };
+
+ mutex_lock(&data->update_lock);
+ config = (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
+ altbit = (data->reg[param->msb[1]] >> param->shift[1]) & param->mask[1];
+ regval = config | (altbit << 3);
+ mutex_unlock(&data->update_lock);
+
+ return sprintf(buf, "%u\n", map[SENSORS_LIMIT(regval, 0, 15)]);
+}
+
+static ssize_t store_pwm_ac(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ unsigned long reqval;
+ u8 currval, config, altbit, newval;
+ u16 map[] = {
+ 0x04, 0x00, 0x01, 0xff, 0x02, 0xff, 0x05, 0x06,
+ 0x08, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0f,
+ 0x07, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x03,
+ };
+
+ if (strict_strtoul(buf, 10, &reqval))
+ return -EINVAL;
+
+ if (reqval > 31)
+ return -EINVAL;
+
+ reqval = map[reqval];
+ if (reqval == 0xff)
+ return -EINVAL;
+
+ config = reqval & 0x07;
+ altbit = (reqval >> 3) & 0x01;
+
+ config = (config & param->mask[0]) << param->shift[0];
+ altbit = (altbit & param->mask[1]) << param->shift[1];
+
+ mutex_lock(&data->update_lock);
+ currval = read_byte(client, param->msb[0]);
+ newval = config | (currval & ~(param->mask[0] << param->shift[0]));
+ newval = altbit | (newval & ~(param->mask[1] << param->shift[1]));
+ data->reg[param->msb[0]] = newval;
+ write_byte(client, param->msb[0], newval);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_pwm_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u8 config, altbit, minoff, val, newval;
+
+ mutex_lock(&data->update_lock);
+ config = (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
+ altbit = (data->reg[param->msb[1]] >> param->shift[1]) & param->mask[1];
+ minoff = (data->reg[param->msb[2]] >> param->shift[2]) & param->mask[2];
+ mutex_unlock(&data->update_lock);
+
+ val = config | (altbit << 3);
+ newval = 0;
+
+ if (val == 3 || val >= 10)
+ newval = 255;
+ else if (val == 4)
+ newval = 0;
+ else if (val == 7)
+ newval = 1;
+ else if (minoff == 1)
+ newval = 2;
+ else
+ newval = 3;
+
+ return sprintf(buf, "%u\n", newval);
+}
+
+static ssize_t store_pwm_enable(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval;
+ u8 currval, config, altbit, newval, minoff = 255;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ switch (reqval) {
+ case 0:
+ newval = 0x04;
+ break;
+ case 1:
+ newval = 0x07;
+ break;
+ case 2:
+ newval = 0x00;
+ minoff = 1;
+ break;
+ case 3:
+ newval = 0x00;
+ minoff = 0;
+ break;
+ case 255:
+ newval = 0x03;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ config = newval & 0x07;
+ altbit = (newval >> 3) & 0x01;
+
+ mutex_lock(&data->update_lock);
+ config = (config & param->mask[0]) << param->shift[0];
+ altbit = (altbit & param->mask[1]) << param->shift[1];
+ currval = read_byte(client, param->msb[0]);
+ newval = config | (currval & ~(param->mask[0] << param->shift[0]));
+ newval = altbit | (newval & ~(param->mask[1] << param->shift[1]));
+ data->reg[param->msb[0]] = newval;
+ write_byte(client, param->msb[0], newval);
+ if (minoff < 255) {
+ minoff = (minoff & param->mask[2]) << param->shift[2];
+ currval = read_byte(client, param->msb[2]);
+ newval =
+ minoff | (currval & ~(param->mask[2] << param->shift[2]));
+ data->reg[param->msb[2]] = newval;
+ write_byte(client, param->msb[2], newval);
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static u32 asc7621_pwm_freq_map[] = {
+ 10, 15, 23, 30, 38, 47, 62, 94,
+ 23000, 24000, 25000, 26000, 27000, 28000, 29000, 30000
+};
+
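+/*
+ * Only the exact frequencies listed above are accepted by store_pwm_freq();
+ * for example, writing 25000 selects table index 10, while any other value
+ * returns -EINVAL.
+ */
+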
+static ssize_t show_pwm_freq(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u8 regval =
+ (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
+
+ regval = SENSORS_LIMIT(regval, 0, 15);
+
+ return sprintf(buf, "%u\n", asc7621_pwm_freq_map[regval]);
+}
+
+static ssize_t store_pwm_freq(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ unsigned long reqval;
+ u8 currval, newval = 255;
+ int i;
+
+ if (strict_strtoul(buf, 10, &reqval))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(asc7621_pwm_freq_map); i++) {
+ if (reqval == asc7621_pwm_freq_map[i]) {
+ newval = i;
+ break;
+ }
+ }
+ if (newval == 255)
+ return -EINVAL;
+
+ newval = (newval & param->mask[0]) << param->shift[0];
+
+ mutex_lock(&data->update_lock);
+ currval = read_byte(client, param->msb[0]);
+ newval |= (currval & ~(param->mask[0] << param->shift[0]));
+ data->reg[param->msb[0]] = newval;
+ write_byte(client, param->msb[0], newval);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static u32 asc7621_pwm_auto_spinup_map[] = {
+ 0, 100, 250, 400, 700, 1000, 2000, 4000
+};
+
+static ssize_t show_pwm_ast(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u8 regval =
+ (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
+
+ regval = SENSORS_LIMIT(regval, 0, 7);
+
+ return sprintf(buf, "%u\n", asc7621_pwm_auto_spinup_map[regval]);
+
+}
+
+static ssize_t store_pwm_ast(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval;
+ u8 currval, newval = 255;
+ u32 i;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(asc7621_pwm_auto_spinup_map); i++) {
+ if (reqval == asc7621_pwm_auto_spinup_map[i]) {
+ newval = i;
+ break;
+ }
+ }
+ if (newval == 255)
+ return -EINVAL;
+
+ newval = (newval & param->mask[0]) << param->shift[0];
+
+ mutex_lock(&data->update_lock);
+ currval = read_byte(client, param->msb[0]);
+ newval |= (currval & ~(param->mask[0] << param->shift[0]));
+ data->reg[param->msb[0]] = newval;
+ write_byte(client, param->msb[0], newval);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static u32 asc7621_temp_smoothing_time_map[] = {
+ 35000, 17600, 11800, 7000, 4400, 3000, 1600, 800
+};
+
+static ssize_t show_temp_st(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ SETUP_SHOW_data_param(dev, attr);
+ u8 regval =
+ (data->reg[param->msb[0]] >> param->shift[0]) & param->mask[0];
+ regval = SENSORS_LIMIT(regval, 0, 7);
+
+ return sprintf(buf, "%u\n", asc7621_temp_smoothing_time_map[regval]);
+}
+
+static ssize_t store_temp_st(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ SETUP_STORE_data_param(dev, attr);
+ long reqval;
+ u8 currval, newval = 255;
+ u32 i;
+
+ if (strict_strtol(buf, 10, &reqval))
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(asc7621_temp_smoothing_time_map); i++) {
+ if (reqval == asc7621_temp_smoothing_time_map[i]) {
+ newval = i;
+ break;
+ }
+ }
+
+ if (newval == 255)
+ return -EINVAL;
+
+ newval = (newval & param->mask[0]) << param->shift[0];
+
+ mutex_lock(&data->update_lock);
+ currval = read_byte(client, param->msb[0]);
+ newval |= (currval & ~(param->mask[0] << param->shift[0]));
+ data->reg[param->msb[0]] = newval;
+ write_byte(client, param->msb[0], newval);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+/*
+ * End of data handlers
+ *
+ * These defines do nothing more than make the table easier
+ * to read when wrapped at column 80.
+ */
+
+/*
+ * Creates a variable-length array initializer.
+ * VAA(1,3,5,7) would produce {1,3,5,7}
+ */
+#define VAA(args...) {args}
+
+#define PREAD(name, n, pri, rm, rl, m, s, r) \
+ {.sda = SENSOR_ATTR(name, S_IRUGO, show_##r, NULL, n), \
+ .priority = pri, .msb[0] = rm, .lsb[0] = rl, .mask[0] = m, \
+ .shift[0] = s,}
+
+#define PWRITE(name, n, pri, rm, rl, m, s, r) \
+ {.sda = SENSOR_ATTR(name, S_IRUGO | S_IWUSR, show_##r, store_##r, n), \
+ .priority = pri, .msb[0] = rm, .lsb[0] = rl, .mask[0] = m, \
+ .shift[0] = s,}
+
+/*
+ * PWRITEM assumes that the initializers for the .msb, .lsb, .mask and .shift
+ * were created using the VAA macro.
+ */
+#define PWRITEM(name, n, pri, rm, rl, m, s, r) \
+ {.sda = SENSOR_ATTR(name, S_IRUGO | S_IWUSR, show_##r, store_##r, n), \
+ .priority = pri, .msb = rm, .lsb = rl, .mask = m, .shift = s,}
+
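+/*
+ * As an illustration, the first entry below,
+ * PREAD(in0_input, 0, PRI_HIGH, 0x20, 0x13, 0, 0, in10),
+ * expands to a read-only in0_input attribute backed by show_in10(), with
+ * priority PRI_HIGH, msb[0] = 0x20, lsb[0] = 0x13 and sda.index = 0.
+ */
+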
+static struct asc7621_param asc7621_params[] = {
+ PREAD(in0_input, 0, PRI_HIGH, 0x20, 0x13, 0, 0, in10),
+ PREAD(in1_input, 1, PRI_HIGH, 0x21, 0x18, 0, 0, in10),
+ PREAD(in2_input, 2, PRI_HIGH, 0x22, 0x11, 0, 0, in10),
+ PREAD(in3_input, 3, PRI_HIGH, 0x23, 0x12, 0, 0, in10),
+ PREAD(in4_input, 4, PRI_HIGH, 0x24, 0x14, 0, 0, in10),
+
+ PWRITE(in0_min, 0, PRI_LOW, 0x44, 0, 0, 0, in8),
+ PWRITE(in1_min, 1, PRI_LOW, 0x46, 0, 0, 0, in8),
+ PWRITE(in2_min, 2, PRI_LOW, 0x48, 0, 0, 0, in8),
+ PWRITE(in3_min, 3, PRI_LOW, 0x4a, 0, 0, 0, in8),
+ PWRITE(in4_min, 4, PRI_LOW, 0x4c, 0, 0, 0, in8),
+
+ PWRITE(in0_max, 0, PRI_LOW, 0x45, 0, 0, 0, in8),
+ PWRITE(in1_max, 1, PRI_LOW, 0x47, 0, 0, 0, in8),
+ PWRITE(in2_max, 2, PRI_LOW, 0x49, 0, 0, 0, in8),
+ PWRITE(in3_max, 3, PRI_LOW, 0x4b, 0, 0, 0, in8),
+ PWRITE(in4_max, 4, PRI_LOW, 0x4d, 0, 0, 0, in8),
+
+ PREAD(in0_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 0, bitmask),
+ PREAD(in1_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 1, bitmask),
+ PREAD(in2_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 2, bitmask),
+ PREAD(in3_alarm, 3, PRI_LOW, 0x41, 0, 0x01, 3, bitmask),
+ PREAD(in4_alarm, 4, PRI_LOW, 0x42, 0, 0x01, 0, bitmask),
+
+ PREAD(fan1_input, 0, PRI_HIGH, 0x29, 0x28, 0, 0, fan16),
+ PREAD(fan2_input, 1, PRI_HIGH, 0x2b, 0x2a, 0, 0, fan16),
+ PREAD(fan3_input, 2, PRI_HIGH, 0x2d, 0x2c, 0, 0, fan16),
+ PREAD(fan4_input, 3, PRI_HIGH, 0x2f, 0x2e, 0, 0, fan16),
+
+ PWRITE(fan1_min, 0, PRI_LOW, 0x55, 0x54, 0, 0, fan16),
+ PWRITE(fan2_min, 1, PRI_LOW, 0x57, 0x56, 0, 0, fan16),
+ PWRITE(fan3_min, 2, PRI_LOW, 0x59, 0x58, 0, 0, fan16),
+ PWRITE(fan4_min, 3, PRI_LOW, 0x5b, 0x5a, 0, 0, fan16),
+
+ PREAD(fan1_alarm, 0, PRI_LOW, 0x42, 0, 0x01, 0, bitmask),
+ PREAD(fan2_alarm, 1, PRI_LOW, 0x42, 0, 0x01, 1, bitmask),
+ PREAD(fan3_alarm, 2, PRI_LOW, 0x42, 0, 0x01, 2, bitmask),
+ PREAD(fan4_alarm, 3, PRI_LOW, 0x42, 0, 0x01, 3, bitmask),
+
+ PREAD(temp1_input, 0, PRI_HIGH, 0x25, 0x10, 0, 0, temp10),
+ PREAD(temp2_input, 1, PRI_HIGH, 0x26, 0x15, 0, 0, temp10),
+ PREAD(temp3_input, 2, PRI_HIGH, 0x27, 0x16, 0, 0, temp10),
+ PREAD(temp4_input, 3, PRI_HIGH, 0x33, 0x17, 0, 0, temp10),
+ PREAD(temp5_input, 4, PRI_HIGH, 0xf7, 0xf6, 0, 0, temp10),
+ PREAD(temp6_input, 5, PRI_HIGH, 0xf9, 0xf8, 0, 0, temp10),
+ PREAD(temp7_input, 6, PRI_HIGH, 0xfb, 0xfa, 0, 0, temp10),
+ PREAD(temp8_input, 7, PRI_HIGH, 0xfd, 0xfc, 0, 0, temp10),
+
+ PWRITE(temp1_min, 0, PRI_LOW, 0x4e, 0, 0, 0, temp8),
+ PWRITE(temp2_min, 1, PRI_LOW, 0x50, 0, 0, 0, temp8),
+ PWRITE(temp3_min, 2, PRI_LOW, 0x52, 0, 0, 0, temp8),
+ PWRITE(temp4_min, 3, PRI_LOW, 0x34, 0, 0, 0, temp8),
+
+ PWRITE(temp1_max, 0, PRI_LOW, 0x4f, 0, 0, 0, temp8),
+ PWRITE(temp2_max, 1, PRI_LOW, 0x51, 0, 0, 0, temp8),
+ PWRITE(temp3_max, 2, PRI_LOW, 0x53, 0, 0, 0, temp8),
+ PWRITE(temp4_max, 3, PRI_LOW, 0x35, 0, 0, 0, temp8),
+
+ PREAD(temp1_alarm, 0, PRI_LOW, 0x41, 0, 0x01, 4, bitmask),
+ PREAD(temp2_alarm, 1, PRI_LOW, 0x41, 0, 0x01, 5, bitmask),
+ PREAD(temp3_alarm, 2, PRI_LOW, 0x41, 0, 0x01, 6, bitmask),
+ PREAD(temp4_alarm, 3, PRI_LOW, 0x43, 0, 0x01, 0, bitmask),
+
+ PWRITE(temp1_source, 0, PRI_LOW, 0x02, 0, 0x07, 4, bitmask),
+ PWRITE(temp2_source, 1, PRI_LOW, 0x02, 0, 0x07, 0, bitmask),
+ PWRITE(temp3_source, 2, PRI_LOW, 0x03, 0, 0x07, 4, bitmask),
+ PWRITE(temp4_source, 3, PRI_LOW, 0x03, 0, 0x07, 0, bitmask),
+
+ PWRITE(temp1_smoothing_enable, 0, PRI_LOW, 0x62, 0, 0x01, 3, bitmask),
+ PWRITE(temp2_smoothing_enable, 1, PRI_LOW, 0x63, 0, 0x01, 7, bitmask),
+ PWRITE(temp3_smoothing_enable, 2, PRI_LOW, 0x64, 0, 0x01, 3, bitmask),
+ PWRITE(temp4_smoothing_enable, 3, PRI_LOW, 0x3c, 0, 0x01, 3, bitmask),
+
+ PWRITE(temp1_smoothing_time, 0, PRI_LOW, 0x62, 0, 0x07, 0, temp_st),
+ PWRITE(temp2_smoothing_time, 1, PRI_LOW, 0x63, 0, 0x07, 4, temp_st),
+ PWRITE(temp3_smoothing_time, 2, PRI_LOW, 0x63, 0, 0x07, 0, temp_st),
+ PWRITE(temp4_smoothing_time, 3, PRI_LOW, 0x3c, 0, 0x07, 0, temp_st),
+
+ PWRITE(temp1_auto_point1_temp_hyst, 0, PRI_LOW, 0x6d, 0, 0x0f, 4,
+ bitmask),
+ PWRITE(temp2_auto_point1_temp_hyst, 1, PRI_LOW, 0x6d, 0, 0x0f, 0,
+ bitmask),
+ PWRITE(temp3_auto_point1_temp_hyst, 2, PRI_LOW, 0x6e, 0, 0x0f, 4,
+ bitmask),
+ PWRITE(temp4_auto_point1_temp_hyst, 3, PRI_LOW, 0x6e, 0, 0x0f, 0,
+ bitmask),
+
+ PREAD(temp1_auto_point2_temp_hyst, 0, PRI_LOW, 0x6d, 0, 0x0f, 4,
+ bitmask),
+ PREAD(temp2_auto_point2_temp_hyst, 1, PRI_LOW, 0x6d, 0, 0x0f, 0,
+ bitmask),
+ PREAD(temp3_auto_point2_temp_hyst, 2, PRI_LOW, 0x6e, 0, 0x0f, 4,
+ bitmask),
+ PREAD(temp4_auto_point2_temp_hyst, 3, PRI_LOW, 0x6e, 0, 0x0f, 0,
+ bitmask),
+
+ PWRITE(temp1_auto_point1_temp, 0, PRI_LOW, 0x67, 0, 0, 0, temp8),
+ PWRITE(temp2_auto_point1_temp, 1, PRI_LOW, 0x68, 0, 0, 0, temp8),
+ PWRITE(temp3_auto_point1_temp, 2, PRI_LOW, 0x69, 0, 0, 0, temp8),
+ PWRITE(temp4_auto_point1_temp, 3, PRI_LOW, 0x3b, 0, 0, 0, temp8),
+
+ PWRITEM(temp1_auto_point2_temp, 0, PRI_LOW, VAA(0x5f, 0x67), VAA(0),
+ VAA(0x0f), VAA(4), ap2_temp),
+ PWRITEM(temp2_auto_point2_temp, 1, PRI_LOW, VAA(0x60, 0x68), VAA(0),
+ VAA(0x0f), VAA(4), ap2_temp),
+ PWRITEM(temp3_auto_point2_temp, 2, PRI_LOW, VAA(0x61, 0x69), VAA(0),
+ VAA(0x0f), VAA(4), ap2_temp),
+ PWRITEM(temp4_auto_point2_temp, 3, PRI_LOW, VAA(0x3c, 0x3b), VAA(0),
+ VAA(0x0f), VAA(4), ap2_temp),
+
+ PWRITE(temp1_crit, 0, PRI_LOW, 0x6a, 0, 0, 0, temp8),
+ PWRITE(temp2_crit, 1, PRI_LOW, 0x6b, 0, 0, 0, temp8),
+ PWRITE(temp3_crit, 2, PRI_LOW, 0x6c, 0, 0, 0, temp8),
+ PWRITE(temp4_crit, 3, PRI_LOW, 0x3d, 0, 0, 0, temp8),
+
+ PWRITE(temp5_enable, 4, PRI_LOW, 0x0e, 0, 0x01, 0, bitmask),
+ PWRITE(temp6_enable, 5, PRI_LOW, 0x0e, 0, 0x01, 1, bitmask),
+ PWRITE(temp7_enable, 6, PRI_LOW, 0x0e, 0, 0x01, 2, bitmask),
+ PWRITE(temp8_enable, 7, PRI_LOW, 0x0e, 0, 0x01, 3, bitmask),
+
+ PWRITE(remote1_offset, 0, PRI_LOW, 0x1c, 0, 0, 0, temp62),
+ PWRITE(remote2_offset, 1, PRI_LOW, 0x1d, 0, 0, 0, temp62),
+
+ PWRITE(pwm1, 0, PRI_HIGH, 0x30, 0, 0, 0, u8),
+ PWRITE(pwm2, 1, PRI_HIGH, 0x31, 0, 0, 0, u8),
+ PWRITE(pwm3, 2, PRI_HIGH, 0x32, 0, 0, 0, u8),
+
+ PWRITE(pwm1_invert, 0, PRI_LOW, 0x5c, 0, 0x01, 4, bitmask),
+ PWRITE(pwm2_invert, 1, PRI_LOW, 0x5d, 0, 0x01, 4, bitmask),
+ PWRITE(pwm3_invert, 2, PRI_LOW, 0x5e, 0, 0x01, 4, bitmask),
+
+ PWRITEM(pwm1_enable, 0, PRI_LOW, VAA(0x5c, 0x5c, 0x62), VAA(0, 0, 0),
+ VAA(0x07, 0x01, 0x01), VAA(5, 3, 5), pwm_enable),
+ PWRITEM(pwm2_enable, 1, PRI_LOW, VAA(0x5d, 0x5d, 0x62), VAA(0, 0, 0),
+ VAA(0x07, 0x01, 0x01), VAA(5, 3, 6), pwm_enable),
+ PWRITEM(pwm3_enable, 2, PRI_LOW, VAA(0x5e, 0x5e, 0x62), VAA(0, 0, 0),
+ VAA(0x07, 0x01, 0x01), VAA(5, 3, 7), pwm_enable),
+
+ PWRITEM(pwm1_auto_channels, 0, PRI_LOW, VAA(0x5c, 0x5c), VAA(0, 0),
+ VAA(0x07, 0x01), VAA(5, 3), pwm_ac),
+ PWRITEM(pwm2_auto_channels, 1, PRI_LOW, VAA(0x5d, 0x5d), VAA(0, 0),
+ VAA(0x07, 0x01), VAA(5, 3), pwm_ac),
+ PWRITEM(pwm3_auto_channels, 2, PRI_LOW, VAA(0x5e, 0x5e), VAA(0, 0),
+ VAA(0x07, 0x01), VAA(5, 3), pwm_ac),
+
+ PWRITE(pwm1_auto_point1_pwm, 0, PRI_LOW, 0x64, 0, 0, 0, u8),
+ PWRITE(pwm2_auto_point1_pwm, 1, PRI_LOW, 0x65, 0, 0, 0, u8),
+ PWRITE(pwm3_auto_point1_pwm, 2, PRI_LOW, 0x66, 0, 0, 0, u8),
+
+ PWRITE(pwm1_auto_point2_pwm, 0, PRI_LOW, 0x38, 0, 0, 0, u8),
+ PWRITE(pwm2_auto_point2_pwm, 1, PRI_LOW, 0x39, 0, 0, 0, u8),
+ PWRITE(pwm3_auto_point2_pwm, 2, PRI_LOW, 0x3a, 0, 0, 0, u8),
+
+ PWRITE(pwm1_freq, 0, PRI_LOW, 0x5f, 0, 0x0f, 0, pwm_freq),
+ PWRITE(pwm2_freq, 1, PRI_LOW, 0x60, 0, 0x0f, 0, pwm_freq),
+ PWRITE(pwm3_freq, 2, PRI_LOW, 0x61, 0, 0x0f, 0, pwm_freq),
+
+ PREAD(pwm1_auto_zone_assigned, 0, PRI_LOW, 0, 0, 0x03, 2, bitmask),
+ PREAD(pwm2_auto_zone_assigned, 1, PRI_LOW, 0, 0, 0x03, 4, bitmask),
+ PREAD(pwm3_auto_zone_assigned, 2, PRI_LOW, 0, 0, 0x03, 6, bitmask),
+
+ PWRITE(pwm1_auto_spinup_time, 0, PRI_LOW, 0x5c, 0, 0x07, 0, pwm_ast),
+ PWRITE(pwm2_auto_spinup_time, 1, PRI_LOW, 0x5d, 0, 0x07, 0, pwm_ast),
+ PWRITE(pwm3_auto_spinup_time, 2, PRI_LOW, 0x5e, 0, 0x07, 0, pwm_ast),
+
+ PWRITE(peci_enable, 0, PRI_LOW, 0x40, 0, 0x01, 4, bitmask),
+ PWRITE(peci_avg, 0, PRI_LOW, 0x36, 0, 0x07, 0, bitmask),
+ PWRITE(peci_domain, 0, PRI_LOW, 0x36, 0, 0x01, 3, bitmask),
+ PWRITE(peci_legacy, 0, PRI_LOW, 0x36, 0, 0x01, 4, bitmask),
+ PWRITE(peci_diode, 0, PRI_LOW, 0x0e, 0, 0x07, 4, bitmask),
+ PWRITE(peci_4domain, 0, PRI_LOW, 0x0e, 0, 0x01, 4, bitmask),
+
+};
+
+static struct asc7621_data *asc7621_update_device(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct asc7621_data *data = i2c_get_clientdata(client);
+ int i;
+
+/*
+ * The asc7621 chips guarantee consistent reads of multi-byte values
+ * regardless of the order of the reads. No special logic is needed
+ * so we can just read the registers in whatever order they appear
+ * in the asc7621_params array.
+ */
+
+ mutex_lock(&data->update_lock);
+
+ /* Read all the high priority registers */
+
+ if (!data->valid ||
+ time_after(jiffies, data->last_high_reading + INTERVAL_HIGH)) {
+
+ for (i = 0; i < ARRAY_SIZE(asc7621_register_priorities); i++) {
+ if (asc7621_register_priorities[i] == PRI_HIGH) {
+ data->reg[i] =
+ i2c_smbus_read_byte_data(client, i) & 0xff;
+ }
+ }
+ data->last_high_reading = jiffies;
+ } /* last_high_reading */
+
+ /* Read all the low priority registers. */
+
+ if (!data->valid ||
+ time_after(jiffies, data->last_low_reading + INTERVAL_LOW)) {
+
+ for (i = 0; i < ARRAY_SIZE(asc7621_register_priorities); i++) {
+ if (asc7621_register_priorities[i] == PRI_LOW) {
+ data->reg[i] =
+ i2c_smbus_read_byte_data(client, i) & 0xff;
+ }
+ }
+ data->last_low_reading = jiffies;
+ } /* last_low_reading */
+
+ data->valid = 1;
+
+ mutex_unlock(&data->update_lock);
+
+ return data;
+}
+
+/*
+ * Standard detection and initialization below
+ *
+ * Helper function that checks if an address is valid
+ * for a particular chip.
+ */
+
+static inline int valid_address_for_chip(int chip_type, int address)
+{
+ int i;
+
+ for (i = 0; asc7621_chips[chip_type].addresses[i] != I2C_CLIENT_END;
+ i++) {
+ if (asc7621_chips[chip_type].addresses[i] == address)
+ return 1;
+ }
+ return 0;
+}
+
+static void asc7621_init_client(struct i2c_client *client)
+{
+ int value;
+
+ /* Warn if part was not "READY" */
+
+ value = read_byte(client, 0x40);
+
+ if (value & 0x02) {
+ dev_err(&client->dev,
+ "Client (%d,0x%02x) config is locked.\n",
+ i2c_adapter_id(client->adapter), client->addr);
+ }
+ if (!(value & 0x04)) {
+ dev_err(&client->dev, "Client (%d,0x%02x) is not ready.\n",
+ i2c_adapter_id(client->adapter), client->addr);
+ }
+
+/*
+ * Start monitoring
+ *
+ * Try to clear LOCK, set START, and preserve everything else.
+ */
+ value = (value & ~0x02) | 0x01;
+ write_byte(client, 0x40, value & 0xff);
+
+}
+
+static int
+asc7621_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ struct asc7621_data *data;
+ int i, err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -EIO;
+
+ data = kzalloc(sizeof(struct asc7621_data), GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, data);
+ data->valid = 0;
+ mutex_init(&data->update_lock);
+
+ /* Initialize the asc7621 chip */
+ asc7621_init_client(client);
+
+ /* Create the sysfs entries */
+ for (i = 0; i < ARRAY_SIZE(asc7621_params); i++) {
+ err =
+ device_create_file(&client->dev,
+ &(asc7621_params[i].sda.dev_attr));
+ if (err)
+ goto exit_remove;
+ }
+
+ data->class_dev = hwmon_device_register(&client->dev);
+ if (IS_ERR(data->class_dev)) {
+ err = PTR_ERR(data->class_dev);
+ goto exit_remove;
+ }
+
+ return 0;
+
+exit_remove:
+ for (i = 0; i < ARRAY_SIZE(asc7621_params); i++) {
+ device_remove_file(&client->dev,
+ &(asc7621_params[i].sda.dev_attr));
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+ return err;
+}
+
+static int asc7621_detect(struct i2c_client *client,
+ struct i2c_board_info *info)
+{
+ struct i2c_adapter *adapter = client->adapter;
+ int company, verstep, chip_index;
+ struct device *dev;
+
+ dev = &client->dev;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
+ return -ENODEV;
+
+ for (chip_index = FIRST_CHIP; chip_index <= LAST_CHIP; chip_index++) {
+
+ if (!valid_address_for_chip(chip_index, client->addr))
+ continue;
+
+ company = read_byte(client,
+ asc7621_chips[chip_index].company_reg);
+ verstep = read_byte(client,
+ asc7621_chips[chip_index].verstep_reg);
+
+ if (company == asc7621_chips[chip_index].company_id &&
+ verstep == asc7621_chips[chip_index].verstep_id) {
+ strlcpy(client->name, asc7621_chips[chip_index].name,
+ I2C_NAME_SIZE);
+ strlcpy(info->type, asc7621_chips[chip_index].name,
+ I2C_NAME_SIZE);
+
+ dev_info(&adapter->dev, "Matched %s\n",
+ asc7621_chips[chip_index].name);
+ return 0;
+ }
+ }
+
+ return -ENODEV;
+}
+
+static int asc7621_remove(struct i2c_client *client)
+{
+ struct asc7621_data *data = i2c_get_clientdata(client);
+ int i;
+
+ hwmon_device_unregister(data->class_dev);
+
+ for (i = 0; i < ARRAY_SIZE(asc7621_params); i++) {
+ device_remove_file(&client->dev,
+ &(asc7621_params[i].sda.dev_attr));
+ }
+
+ i2c_set_clientdata(client, NULL);
+ kfree(data);
+ return 0;
+}
+
+static const struct i2c_device_id asc7621_id[] = {
+ {"asc7621", asc7621},
+ {"asc7621a", asc7621a},
+ {},
+};
+
+MODULE_DEVICE_TABLE(i2c, asc7621_id);
+
+static struct i2c_driver asc7621_driver = {
+ .class = I2C_CLASS_HWMON,
+ .driver = {
+ .name = "asc7621",
+ },
+ .probe = asc7621_probe,
+ .remove = asc7621_remove,
+ .id_table = asc7621_id,
+ .detect = asc7621_detect,
+ .address_list = normal_i2c,
+};
+
+static int __init sm_asc7621_init(void)
+{
+ int i, j;
+/*
+ * Record the priority of every register referenced by a sysfs
+ * parameter, so the update function never reads a register that
+ * no attribute actually uses.
+ */
+
+ for (i = 0; i < ARRAY_SIZE(asc7621_params); i++) {
+ for (j = 0; j < ARRAY_SIZE(asc7621_params[i].msb); j++)
+ asc7621_register_priorities[asc7621_params[i].msb[j]] =
+ asc7621_params[i].priority;
+ for (j = 0; j < ARRAY_SIZE(asc7621_params[i].lsb); j++)
+ asc7621_register_priorities[asc7621_params[i].lsb[j]] =
+ asc7621_params[i].priority;
+ }
+ return i2c_add_driver(&asc7621_driver);
+}
+
+static void __exit sm_asc7621_exit(void)
+{
+ i2c_del_driver(&asc7621_driver);
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("George Joseph");
+MODULE_DESCRIPTION("Andigilog aSC7621 and aSC7621a driver");
+
+module_init(sm_asc7621_init);
+module_exit(sm_asc7621_exit);
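An editor's illustration, not part of the patch: the update routine above implements a two-tier register cache. Every register referenced by some sysfs attribute is tagged PRI_HIGH or PRI_LOW, and each tier is re-read from the chip only after its own interval expires, so hot values such as fan and temperature readings stay fresh while configuration registers are polled rarely. The stand-alone sketch below mirrors only the structure of asc7621_update_device(); the interval values, the fake read_reg() and the main() harness are illustrative assumptions.

/*
 * Sketch only: a two-tier register cache in plain C.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define NREGS		256
enum { PRI_NONE, PRI_LOW, PRI_HIGH };

#define INTERVAL_HIGH	2	/* seconds; the driver uses jiffies intervals */
#define INTERVAL_LOW	30

struct cache {
	uint8_t reg[NREGS];		/* shadow of the chip's register file    */
	uint8_t prio[NREGS];		/* PRI_* per register, PRI_NONE = skip   */
	time_t last_high, last_low;	/* when each tier was last refreshed     */
	int valid;
};

static int read_reg(int reg)		/* stand-in for i2c_smbus_read_byte_data */
{
	return reg & 0xff;		/* fake data: echo the register address  */
}

static void refresh(struct cache *c, int prio, time_t *last, int interval)
{
	int i;

	if (c->valid && time(NULL) < *last + interval)
		return;			/* this tier is still fresh */

	for (i = 0; i < NREGS; i++)
		if (c->prio[i] == prio)	/* only registers someone actually uses */
			c->reg[i] = read_reg(i);
	*last = time(NULL);
}

static void cache_update(struct cache *c)
{
	refresh(c, PRI_HIGH, &c->last_high, INTERVAL_HIGH);
	refresh(c, PRI_LOW, &c->last_low, INTERVAL_LOW);
	c->valid = 1;
}

int main(void)
{
	struct cache c = { .prio = { [0x30] = PRI_HIGH, [0x62] = PRI_LOW } };

	cache_update(&c);
	printf("reg 0x30 = 0x%02x, reg 0x62 = 0x%02x\n", c.reg[0x30], c.reg[0x62]);
	return 0;
}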
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index fa0728232e7..0627f7a5b9b 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -267,7 +267,7 @@ struct fschmd_data {
struct list_head list; /* member of the watchdog_data_list */
struct kref kref;
struct miscdevice watchdog_miscdev;
- int kind;
+ enum chips kind;
unsigned long watchdog_is_open;
char watchdog_expect_close;
char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
@@ -325,8 +325,7 @@ static ssize_t show_in_value(struct device *dev,
int index = to_sensor_dev_attr(devattr)->index;
struct fschmd_data *data = fschmd_update_device(dev);
- /* fscher / fschrc - 1 as data->kind is an array index, not a chips */
- if (data->kind == (fscher - 1) || data->kind >= (fschrc - 1))
+ if (data->kind == fscher || data->kind >= fschrc)
return sprintf(buf, "%d\n", (data->volt[index] * dmi_vref *
dmi_mult[index]) / 255 + dmi_offset[index]);
else
@@ -492,7 +491,7 @@ static ssize_t show_pwm_auto_point1_pwm(struct device *dev,
int val = data->fan_min[index];
/* 0 = allow turning off (except on the syl), 1-255 = 50-100% */
- if (val || data->kind == fscsyl - 1)
+ if (val || data->kind == fscsyl)
val = val / 2 + 128;
return sprintf(buf, "%d\n", val);
@@ -506,7 +505,7 @@ static ssize_t store_pwm_auto_point1_pwm(struct device *dev,
unsigned long v = simple_strtoul(buf, NULL, 10);
/* reg: 0 = allow turning off (except on the syl), 1-255 = 50-100% */
- if (v || data->kind == fscsyl - 1) {
+ if (v || data->kind == fscsyl) {
v = SENSORS_LIMIT(v, 128, 255);
v = (v - 128) * 2 + 1;
}
@@ -1037,7 +1036,7 @@ static int fschmd_detect(struct i2c_client *client,
else
return -ENODEV;
- strlcpy(info->type, fschmd_id[kind - 1].name, I2C_NAME_SIZE);
+ strlcpy(info->type, fschmd_id[kind].name, I2C_NAME_SIZE);
return 0;
}
@@ -1065,6 +1064,7 @@ static int fschmd_probe(struct i2c_client *client,
(where the client is found through a data ptr instead of the
otherway around) */
data->client = client;
+ data->kind = kind;
if (kind == fscpos) {
/* The Poseidon has hardwired temp limits, fill these
@@ -1085,9 +1085,6 @@ static int fschmd_probe(struct i2c_client *client,
}
}
- /* i2c kind goes from 1-6, we want from 0-5 to address arrays */
- data->kind = kind - 1;
-
/* Read in some never changing registers */
data->revision = i2c_smbus_read_byte_data(client, FSCHMD_REG_REVISION);
data->global_control = i2c_smbus_read_byte_data(client,
diff --git a/drivers/hwmon/g760a.c b/drivers/hwmon/g760a.c
index 19c01a49f6b..09ea12e0a55 100644
--- a/drivers/hwmon/g760a.c
+++ b/drivers/hwmon/g760a.c
@@ -68,7 +68,7 @@ struct g760a_data {
#define PWM_FROM_CNT(cnt) (0xff-(cnt))
#define PWM_TO_CNT(pwm) (0xff-(pwm))
-unsigned int rpm_from_cnt(u8 val, u32 clk, u16 div)
+static inline unsigned int rpm_from_cnt(u8 val, u32 clk, u16 div)
{
return ((val == 0x00) ? 0 : ((clk*30)/(val*div)));
}
diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
index 0ffe84d190b..1002befd87d 100644
--- a/drivers/hwmon/it87.c
+++ b/drivers/hwmon/it87.c
@@ -1,40 +1,40 @@
/*
- it87.c - Part of lm_sensors, Linux kernel modules for hardware
- monitoring.
-
- The IT8705F is an LPC-based Super I/O part that contains UARTs, a
- parallel port, an IR port, a MIDI port, a floppy controller, etc., in
- addition to an Environment Controller (Enhanced Hardware Monitor and
- Fan Controller)
-
- This driver supports only the Environment Controller in the IT8705F and
- similar parts. The other devices are supported by different drivers.
-
- Supports: IT8705F Super I/O chip w/LPC interface
- IT8712F Super I/O chip w/LPC interface
- IT8716F Super I/O chip w/LPC interface
- IT8718F Super I/O chip w/LPC interface
- IT8720F Super I/O chip w/LPC interface
- IT8726F Super I/O chip w/LPC interface
- Sis950 A clone of the IT8705F
-
- Copyright (C) 2001 Chris Gauthron
- Copyright (C) 2005-2007 Jean Delvare <khali@linux-fr.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
-*/
+ * it87.c - Part of lm_sensors, Linux kernel modules for hardware
+ * monitoring.
+ *
+ * The IT8705F is an LPC-based Super I/O part that contains UARTs, a
+ * parallel port, an IR port, a MIDI port, a floppy controller, etc., in
+ * addition to an Environment Controller (Enhanced Hardware Monitor and
+ * Fan Controller)
+ *
+ * This driver supports only the Environment Controller in the IT8705F and
+ * similar parts. The other devices are supported by different drivers.
+ *
+ * Supports: IT8705F Super I/O chip w/LPC interface
+ * IT8712F Super I/O chip w/LPC interface
+ * IT8716F Super I/O chip w/LPC interface
+ * IT8718F Super I/O chip w/LPC interface
+ * IT8720F Super I/O chip w/LPC interface
+ * IT8726F Super I/O chip w/LPC interface
+ * Sis950 A clone of the IT8705F
+ *
+ * Copyright (C) 2001 Chris Gauthron
+ * Copyright (C) 2005-2010 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
#include <linux/module.h>
#include <linux/init.h>
@@ -128,6 +128,7 @@ superio_exit(void)
#define IT87_SIO_GPIO5_REG 0x29
#define IT87_SIO_PINX2_REG 0x2c /* Pin selection */
#define IT87_SIO_VID_REG 0xfc /* VID value */
+#define IT87_SIO_BEEP_PIN_REG 0xf6 /* Beep pin mapping */
/* Update battery voltage after every reading if true */
static int update_vbat;
@@ -187,9 +188,13 @@ static const u8 IT87_REG_FANX_MIN[] = { 0x1b, 0x1c, 0x1d, 0x85, 0x87 };
#define IT87_REG_VIN_ENABLE 0x50
#define IT87_REG_TEMP_ENABLE 0x51
+#define IT87_REG_BEEP_ENABLE 0x5c
#define IT87_REG_CHIPID 0x58
+#define IT87_REG_AUTO_TEMP(nr, i) (0x60 + (nr) * 8 + (i))
+#define IT87_REG_AUTO_PWM(nr, i) (0x65 + (nr) * 8 + (i))
+
#define IN_TO_REG(val) (SENSORS_LIMIT((((val) + 8)/16),0,255))
#define IN_FROM_REG(val) ((val) * 16)
@@ -246,6 +251,7 @@ struct it87_sio_data {
/* Values read from Super-I/O config space */
u8 revision;
u8 vid_value;
+ u8 beep_pin;
/* Features skipped based on config or DMI */
u8 skip_vid;
u8 skip_fan;
@@ -279,9 +285,21 @@ struct it87_data {
u8 vid; /* Register encoding, combined */
u8 vrm;
u32 alarms; /* Register encoding, combined */
+ u8 beeps; /* Register encoding */
u8 fan_main_ctrl; /* Register value */
u8 fan_ctl; /* Register value */
- u8 manual_pwm_ctl[3]; /* manual PWM value set by user */
+
+ /* The following 3 arrays correspond to the same registers. The
+ * meaning of bits 6-0 depends on the value of bit 7, and we want
+ * to preserve settings on mode changes, so we have to track all
+ * values separately. */
+ u8 pwm_ctrl[3]; /* Register value */
+ u8 pwm_duty[3]; /* Manual PWM value set by user (bit 6-0) */
+ u8 pwm_temp_map[3]; /* PWM to temp. chan. mapping (bits 1-0) */
+
+ /* Automatic fan speed control registers */
+ u8 auto_pwm[3][4]; /* [nr][3] is hard-coded */
+ s8 auto_temp[3][5]; /* [nr][0] is point1_temp_hyst */
};
static inline int has_16bit_fans(const struct it87_data *data)
@@ -296,6 +314,15 @@ static inline int has_16bit_fans(const struct it87_data *data)
|| data->type == it8720;
}
+static inline int has_old_autopwm(const struct it87_data *data)
+{
+ /* The old automatic fan speed control interface is implemented
+ by IT8705F chips up to revision F and IT8712F chips up to
+ revision G. */
+ return (data->type == it87 && data->revision < 0x03)
+ || (data->type == it8712 && data->revision < 0x08);
+}
+
static int it87_probe(struct platform_device *pdev);
static int __devexit it87_remove(struct platform_device *pdev);
@@ -352,7 +379,10 @@ static ssize_t set_in_min(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
data->in_min[nr] = IN_TO_REG(val);
@@ -368,7 +398,10 @@ static ssize_t set_in_max(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
data->in_max[nr] = IN_TO_REG(val);
@@ -441,7 +474,10 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
data->temp_high[nr] = TEMP_TO_REG(val);
@@ -456,7 +492,10 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
data->temp_low[nr] = TEMP_TO_REG(val);
@@ -483,8 +522,9 @@ static ssize_t show_sensor(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- u8 reg = data->sensor; /* In case the value is updated while we use it */
-
+ u8 reg = data->sensor; /* In case the value is updated while
+ we use it */
+
if (reg & (1 << nr))
return sprintf(buf, "3\n"); /* thermal diode */
if (reg & (8 << nr))
@@ -498,7 +538,10 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
@@ -511,9 +554,9 @@ static ssize_t set_sensor(struct device *dev, struct device_attribute *attr,
}
/* 3 = thermal diode; 4 = thermistor; 0 = disabled */
if (val == 3)
- data->sensor |= 1 << nr;
+ data->sensor |= 1 << nr;
else if (val == 4)
- data->sensor |= 8 << nr;
+ data->sensor |= 8 << nr;
else if (val != 0) {
mutex_unlock(&data->update_lock);
return -EINVAL;
@@ -531,6 +574,19 @@ show_sensor_offset(2);
show_sensor_offset(3);
/* 3 Fans */
+
+static int pwm_mode(const struct it87_data *data, int nr)
+{
+ int ctrl = data->fan_main_ctrl & (1 << nr);
+
+ if (ctrl == 0) /* Full speed */
+ return 0;
+ if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
+ return 2;
+ else /* Manual mode */
+ return 1;
+}
+
static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
char *buf)
{
@@ -538,7 +594,7 @@ static ssize_t show_fan(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf,"%d\n", FAN_FROM_REG(data->fan[nr],
+ return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[nr],
DIV_FROM_REG(data->fan_div[nr])));
}
static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
@@ -548,8 +604,8 @@ static ssize_t show_fan_min(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf,"%d\n",
- FAN_FROM_REG(data->fan_min[nr], DIV_FROM_REG(data->fan_div[nr])));
+ return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan_min[nr],
+ DIV_FROM_REG(data->fan_div[nr])));
}
static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -560,14 +616,14 @@ static ssize_t show_fan_div(struct device *dev, struct device_attribute *attr,
struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%d\n", DIV_FROM_REG(data->fan_div[nr]));
}
-static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *attr,
- char *buf)
+static ssize_t show_pwm_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf,"%d\n", (data->fan_main_ctrl & (1 << nr)) ? 1 : 0);
+ return sprintf(buf, "%d\n", pwm_mode(data, nr));
}
static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -576,7 +632,7 @@ static ssize_t show_pwm(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = it87_update_device(dev);
- return sprintf(buf,"%d\n", data->manual_pwm_ctl[nr]);
+ return sprintf(buf, "%d\n", PWM_FROM_REG(data->pwm_duty[nr]));
}
static ssize_t show_pwm_freq(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -593,15 +649,24 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- int val = simple_strtol(buf, NULL, 10);
+ long val;
u8 reg;
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
mutex_lock(&data->update_lock);
reg = it87_read_value(data, IT87_REG_FAN_DIV);
switch (nr) {
- case 0: data->fan_div[nr] = reg & 0x07; break;
- case 1: data->fan_div[nr] = (reg >> 3) & 0x07; break;
- case 2: data->fan_div[nr] = (reg & 0x40) ? 3 : 1; break;
+ case 0:
+ data->fan_div[nr] = reg & 0x07;
+ break;
+ case 1:
+ data->fan_div[nr] = (reg >> 3) & 0x07;
+ break;
+ case 2:
+ data->fan_div[nr] = (reg & 0x40) ? 3 : 1;
+ break;
}
data->fan_min[nr] = FAN_TO_REG(val, DIV_FROM_REG(data->fan_div[nr]));
@@ -616,10 +681,13 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
int min;
u8 old;
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
mutex_lock(&data->update_lock);
old = it87_read_value(data, IT87_REG_FAN_DIV);
@@ -651,6 +719,32 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
mutex_unlock(&data->update_lock);
return count;
}
+
+/* Returns 0 if OK, -EINVAL otherwise */
+static int check_trip_points(struct device *dev, int nr)
+{
+ const struct it87_data *data = dev_get_drvdata(dev);
+ int i, err = 0;
+
+ if (has_old_autopwm(data)) {
+ for (i = 0; i < 3; i++) {
+ if (data->auto_temp[nr][i] > data->auto_temp[nr][i + 1])
+ err = -EINVAL;
+ }
+ for (i = 0; i < 2; i++) {
+ if (data->auto_pwm[nr][i] > data->auto_pwm[nr][i + 1])
+ err = -EINVAL;
+ }
+ }
+
+ if (err) {
+ dev_err(dev, "Inconsistent trip points, not switching to "
+ "automatic mode\n");
+ dev_err(dev, "Adjust the trip points and try again\n");
+ }
+ return err;
+}
+
static ssize_t set_pwm_enable(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
@@ -658,7 +752,16 @@ static ssize_t set_pwm_enable(struct device *dev,
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 2)
+ return -EINVAL;
+
+ /* Check trip points before switching to automatic mode */
+ if (val == 2) {
+ if (check_trip_points(dev, nr) < 0)
+ return -EINVAL;
+ }
mutex_lock(&data->update_lock);
@@ -669,16 +772,18 @@ static ssize_t set_pwm_enable(struct device *dev,
it87_write_value(data, IT87_REG_FAN_CTL, tmp | (1 << nr));
/* set on/off mode */
data->fan_main_ctrl &= ~(1 << nr);
- it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
- } else if (val == 1) {
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+ data->fan_main_ctrl);
+ } else {
+ if (val == 1) /* Manual mode */
+ data->pwm_ctrl[nr] = data->pwm_duty[nr];
+ else /* Automatic mode */
+ data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
+ it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
/* set SmartGuardian mode */
data->fan_main_ctrl |= (1 << nr);
- it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
- /* set saved pwm value, clear FAN_CTLX PWM mode bit */
- it87_write_value(data, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
- } else {
- mutex_unlock(&data->update_lock);
- return -EINVAL;
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+ data->fan_main_ctrl);
}
mutex_unlock(&data->update_lock);
@@ -691,15 +796,19 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- int val = simple_strtol(buf, NULL, 10);
+ long val;
- if (val < 0 || val > 255)
+ if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
return -EINVAL;
mutex_lock(&data->update_lock);
- data->manual_pwm_ctl[nr] = val;
- if (data->fan_main_ctrl & (1 << nr))
- it87_write_value(data, IT87_REG_PWM(nr), PWM_TO_REG(data->manual_pwm_ctl[nr]));
+ data->pwm_duty[nr] = PWM_TO_REG(val);
+ /* If we are in manual mode, write the duty cycle immediately;
+ * otherwise, just store it for later use. */
+ if (!(data->pwm_ctrl[nr] & 0x80)) {
+ data->pwm_ctrl[nr] = data->pwm_duty[nr];
+ it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
+ }
mutex_unlock(&data->update_lock);
return count;
}
@@ -707,9 +816,12 @@ static ssize_t set_pwm_freq(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
- unsigned long val = simple_strtoul(buf, NULL, 10);
+ unsigned long val;
int i;
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
+
/* Search for the nearest available frequency */
for (i = 0; i < 7; i++) {
if (val > (pwm_freq[i] + pwm_freq[i+1]) / 2)
@@ -724,6 +836,132 @@ static ssize_t set_pwm_freq(struct device *dev,
return count;
}
+static ssize_t show_pwm_temp_map(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+
+ struct it87_data *data = it87_update_device(dev);
+ int map;
+
+ if (data->pwm_temp_map[nr] < 3)
+ map = 1 << data->pwm_temp_map[nr];
+ else
+ map = 0; /* Should never happen */
+ return sprintf(buf, "%d\n", map);
+}
+static ssize_t set_pwm_temp_map(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+ int nr = sensor_attr->index;
+
+ struct it87_data *data = dev_get_drvdata(dev);
+ long val;
+ u8 reg;
+
+ /* This check can go away if we ever support automatic fan speed
+ control on newer chips. */
+ if (!has_old_autopwm(data)) {
+ dev_notice(dev, "Mapping change disabled for safety reasons\n");
+ return -EINVAL;
+ }
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
+
+ switch (val) {
+ case (1 << 0):
+ reg = 0x00;
+ break;
+ case (1 << 1):
+ reg = 0x01;
+ break;
+ case (1 << 2):
+ reg = 0x02;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->update_lock);
+ data->pwm_temp_map[nr] = reg;
+ /* If we are in automatic mode, write the temp mapping immediately;
+ * otherwise, just store it for later use. */
+ if (data->pwm_ctrl[nr] & 0x80) {
+ data->pwm_ctrl[nr] = 0x80 | data->pwm_temp_map[nr];
+ it87_write_value(data, IT87_REG_PWM(nr), data->pwm_ctrl[nr]);
+ }
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_auto_pwm(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct it87_data *data = it87_update_device(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int point = sensor_attr->index;
+
+ return sprintf(buf, "%d\n", PWM_FROM_REG(data->auto_pwm[nr][point]));
+}
+
+static ssize_t set_auto_pwm(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct it87_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int point = sensor_attr->index;
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0 || val < 0 || val > 255)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->auto_pwm[nr][point] = PWM_TO_REG(val);
+ it87_write_value(data, IT87_REG_AUTO_PWM(nr, point),
+ data->auto_pwm[nr][point]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static ssize_t show_auto_temp(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct it87_data *data = it87_update_device(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int point = sensor_attr->index;
+
+ return sprintf(buf, "%d\n", TEMP_FROM_REG(data->auto_temp[nr][point]));
+}
+
+static ssize_t set_auto_temp(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct it87_data *data = dev_get_drvdata(dev);
+ struct sensor_device_attribute_2 *sensor_attr =
+ to_sensor_dev_attr_2(attr);
+ int nr = sensor_attr->nr;
+ int point = sensor_attr->index;
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0 || val < -128000 || val > 127000)
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->auto_temp[nr][point] = TEMP_TO_REG(val);
+ it87_write_value(data, IT87_REG_AUTO_TEMP(nr, point),
+ data->auto_temp[nr][point]);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
#define show_fan_offset(offset) \
static SENSOR_DEVICE_ATTR(fan##offset##_input, S_IRUGO, \
@@ -744,7 +982,36 @@ static SENSOR_DEVICE_ATTR(pwm##offset, S_IRUGO | S_IWUSR, \
show_pwm, set_pwm, offset - 1); \
static DEVICE_ATTR(pwm##offset##_freq, \
(offset == 1 ? S_IRUGO | S_IWUSR : S_IRUGO), \
- show_pwm_freq, (offset == 1 ? set_pwm_freq : NULL));
+ show_pwm_freq, (offset == 1 ? set_pwm_freq : NULL)); \
+static SENSOR_DEVICE_ATTR(pwm##offset##_auto_channels_temp, \
+ S_IRUGO | S_IWUSR, show_pwm_temp_map, set_pwm_temp_map, \
+ offset - 1); \
+static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_pwm, \
+ S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
+ offset - 1, 0); \
+static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point2_pwm, \
+ S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
+ offset - 1, 1); \
+static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point3_pwm, \
+ S_IRUGO | S_IWUSR, show_auto_pwm, set_auto_pwm, \
+ offset - 1, 2); \
+static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point4_pwm, \
+ S_IRUGO, show_auto_pwm, NULL, offset - 1, 3); \
+static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_temp, \
+ S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
+ offset - 1, 1); \
+static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point1_temp_hyst, \
+ S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
+ offset - 1, 0); \
+static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point2_temp, \
+ S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
+ offset - 1, 2); \
+static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point3_temp, \
+ S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
+ offset - 1, 3); \
+static SENSOR_DEVICE_ATTR_2(pwm##offset##_auto_point4_temp, \
+ S_IRUGO | S_IWUSR, show_auto_temp, set_auto_temp, \
+ offset - 1, 4);
show_pwm_offset(1);
show_pwm_offset(2);
@@ -775,7 +1042,10 @@ static ssize_t set_fan16_min(struct device *dev, struct device_attribute *attr,
struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
int nr = sensor_attr->index;
struct it87_data *data = dev_get_drvdata(dev);
- int val = simple_strtol(buf, NULL, 10);
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0)
+ return -EINVAL;
mutex_lock(&data->update_lock);
data->fan_min[nr] = FAN16_TO_REG(val);
@@ -805,7 +1075,8 @@ show_fan16_offset(4);
show_fan16_offset(5);
/* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%u\n", data->alarms);
@@ -836,27 +1107,78 @@ static SENSOR_DEVICE_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 16);
static SENSOR_DEVICE_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 17);
static SENSOR_DEVICE_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 18);
-static ssize_t
-show_vrm_reg(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_beep(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int bitnr = to_sensor_dev_attr(attr)->index;
+ struct it87_data *data = it87_update_device(dev);
+ return sprintf(buf, "%u\n", (data->beeps >> bitnr) & 1);
+}
+static ssize_t set_beep(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int bitnr = to_sensor_dev_attr(attr)->index;
+ struct it87_data *data = dev_get_drvdata(dev);
+ long val;
+
+ if (strict_strtol(buf, 10, &val) < 0
+ || (val != 0 && val != 1))
+ return -EINVAL;
+
+ mutex_lock(&data->update_lock);
+ data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+ if (val)
+ data->beeps |= (1 << bitnr);
+ else
+ data->beeps &= ~(1 << bitnr);
+ it87_write_value(data, IT87_REG_BEEP_ENABLE, data->beeps);
+ mutex_unlock(&data->update_lock);
+ return count;
+}
+
+static SENSOR_DEVICE_ATTR(in0_beep, S_IRUGO | S_IWUSR,
+ show_beep, set_beep, 1);
+static SENSOR_DEVICE_ATTR(in1_beep, S_IRUGO, show_beep, NULL, 1);
+static SENSOR_DEVICE_ATTR(in2_beep, S_IRUGO, show_beep, NULL, 1);
+static SENSOR_DEVICE_ATTR(in3_beep, S_IRUGO, show_beep, NULL, 1);
+static SENSOR_DEVICE_ATTR(in4_beep, S_IRUGO, show_beep, NULL, 1);
+static SENSOR_DEVICE_ATTR(in5_beep, S_IRUGO, show_beep, NULL, 1);
+static SENSOR_DEVICE_ATTR(in6_beep, S_IRUGO, show_beep, NULL, 1);
+static SENSOR_DEVICE_ATTR(in7_beep, S_IRUGO, show_beep, NULL, 1);
+/* fanX_beep writability is set later */
+static SENSOR_DEVICE_ATTR(fan1_beep, S_IRUGO, show_beep, set_beep, 0);
+static SENSOR_DEVICE_ATTR(fan2_beep, S_IRUGO, show_beep, set_beep, 0);
+static SENSOR_DEVICE_ATTR(fan3_beep, S_IRUGO, show_beep, set_beep, 0);
+static SENSOR_DEVICE_ATTR(fan4_beep, S_IRUGO, show_beep, set_beep, 0);
+static SENSOR_DEVICE_ATTR(fan5_beep, S_IRUGO, show_beep, set_beep, 0);
+static SENSOR_DEVICE_ATTR(temp1_beep, S_IRUGO | S_IWUSR,
+ show_beep, set_beep, 2);
+static SENSOR_DEVICE_ATTR(temp2_beep, S_IRUGO, show_beep, NULL, 2);
+static SENSOR_DEVICE_ATTR(temp3_beep, S_IRUGO, show_beep, NULL, 2);
+
+static ssize_t show_vrm_reg(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct it87_data *data = dev_get_drvdata(dev);
return sprintf(buf, "%u\n", data->vrm);
}
-static ssize_t
-store_vrm_reg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+static ssize_t store_vrm_reg(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
{
struct it87_data *data = dev_get_drvdata(dev);
- u32 val;
+ unsigned long val;
+
+ if (strict_strtoul(buf, 10, &val) < 0)
+ return -EINVAL;
- val = simple_strtoul(buf, NULL, 10);
data->vrm = val;
return count;
}
static DEVICE_ATTR(vrm, S_IRUGO | S_IWUSR, show_vrm_reg, store_vrm_reg);
-static ssize_t
-show_vid_reg(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_vid_reg(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct it87_data *data = it87_update_device(dev);
return sprintf(buf, "%ld\n", (long) vid_from_reg(data->vid, data->vrm));
@@ -931,51 +1253,176 @@ static const struct attribute_group it87_group = {
.attrs = it87_attributes,
};
-static struct attribute *it87_attributes_opt[] = {
+static struct attribute *it87_attributes_beep[] = {
+ &sensor_dev_attr_in0_beep.dev_attr.attr,
+ &sensor_dev_attr_in1_beep.dev_attr.attr,
+ &sensor_dev_attr_in2_beep.dev_attr.attr,
+ &sensor_dev_attr_in3_beep.dev_attr.attr,
+ &sensor_dev_attr_in4_beep.dev_attr.attr,
+ &sensor_dev_attr_in5_beep.dev_attr.attr,
+ &sensor_dev_attr_in6_beep.dev_attr.attr,
+ &sensor_dev_attr_in7_beep.dev_attr.attr,
+
+ &sensor_dev_attr_temp1_beep.dev_attr.attr,
+ &sensor_dev_attr_temp2_beep.dev_attr.attr,
+ &sensor_dev_attr_temp3_beep.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group it87_group_beep = {
+ .attrs = it87_attributes_beep,
+};
+
+static struct attribute *it87_attributes_fan16[5][3+1] = { {
&sensor_dev_attr_fan1_input16.dev_attr.attr,
&sensor_dev_attr_fan1_min16.dev_attr.attr,
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+ NULL
+}, {
&sensor_dev_attr_fan2_input16.dev_attr.attr,
&sensor_dev_attr_fan2_min16.dev_attr.attr,
+ &sensor_dev_attr_fan2_alarm.dev_attr.attr,
+ NULL
+}, {
&sensor_dev_attr_fan3_input16.dev_attr.attr,
&sensor_dev_attr_fan3_min16.dev_attr.attr,
+ &sensor_dev_attr_fan3_alarm.dev_attr.attr,
+ NULL
+}, {
&sensor_dev_attr_fan4_input16.dev_attr.attr,
&sensor_dev_attr_fan4_min16.dev_attr.attr,
+ &sensor_dev_attr_fan4_alarm.dev_attr.attr,
+ NULL
+}, {
&sensor_dev_attr_fan5_input16.dev_attr.attr,
&sensor_dev_attr_fan5_min16.dev_attr.attr,
+ &sensor_dev_attr_fan5_alarm.dev_attr.attr,
+ NULL
+} };
+
+static const struct attribute_group it87_group_fan16[5] = {
+ { .attrs = it87_attributes_fan16[0] },
+ { .attrs = it87_attributes_fan16[1] },
+ { .attrs = it87_attributes_fan16[2] },
+ { .attrs = it87_attributes_fan16[3] },
+ { .attrs = it87_attributes_fan16[4] },
+};
+static struct attribute *it87_attributes_fan[3][4+1] = { {
&sensor_dev_attr_fan1_input.dev_attr.attr,
&sensor_dev_attr_fan1_min.dev_attr.attr,
&sensor_dev_attr_fan1_div.dev_attr.attr,
+ &sensor_dev_attr_fan1_alarm.dev_attr.attr,
+ NULL
+}, {
&sensor_dev_attr_fan2_input.dev_attr.attr,
&sensor_dev_attr_fan2_min.dev_attr.attr,
&sensor_dev_attr_fan2_div.dev_attr.attr,
+ &sensor_dev_attr_fan2_alarm.dev_attr.attr,
+ NULL
+}, {
&sensor_dev_attr_fan3_input.dev_attr.attr,
&sensor_dev_attr_fan3_min.dev_attr.attr,
&sensor_dev_attr_fan3_div.dev_attr.attr,
-
- &sensor_dev_attr_fan1_alarm.dev_attr.attr,
- &sensor_dev_attr_fan2_alarm.dev_attr.attr,
&sensor_dev_attr_fan3_alarm.dev_attr.attr,
- &sensor_dev_attr_fan4_alarm.dev_attr.attr,
- &sensor_dev_attr_fan5_alarm.dev_attr.attr,
+ NULL
+} };
+
+static const struct attribute_group it87_group_fan[3] = {
+ { .attrs = it87_attributes_fan[0] },
+ { .attrs = it87_attributes_fan[1] },
+ { .attrs = it87_attributes_fan[2] },
+};
+
+static const struct attribute_group *
+it87_get_fan_group(const struct it87_data *data)
+{
+ return has_16bit_fans(data) ? it87_group_fan16 : it87_group_fan;
+}
+static struct attribute *it87_attributes_pwm[3][4+1] = { {
&sensor_dev_attr_pwm1_enable.dev_attr.attr,
- &sensor_dev_attr_pwm2_enable.dev_attr.attr,
- &sensor_dev_attr_pwm3_enable.dev_attr.attr,
&sensor_dev_attr_pwm1.dev_attr.attr,
- &sensor_dev_attr_pwm2.dev_attr.attr,
- &sensor_dev_attr_pwm3.dev_attr.attr,
&dev_attr_pwm1_freq.attr,
+ &sensor_dev_attr_pwm1_auto_channels_temp.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_pwm2_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm2.dev_attr.attr,
&dev_attr_pwm2_freq.attr,
+ &sensor_dev_attr_pwm2_auto_channels_temp.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_pwm3_enable.dev_attr.attr,
+ &sensor_dev_attr_pwm3.dev_attr.attr,
&dev_attr_pwm3_freq.attr,
+ &sensor_dev_attr_pwm3_auto_channels_temp.dev_attr.attr,
+ NULL
+} };
+
+static const struct attribute_group it87_group_pwm[3] = {
+ { .attrs = it87_attributes_pwm[0] },
+ { .attrs = it87_attributes_pwm[1] },
+ { .attrs = it87_attributes_pwm[2] },
+};
+static struct attribute *it87_attributes_autopwm[3][9+1] = { {
+ &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_pwm2_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm2_auto_point4_temp.dev_attr.attr,
+ NULL
+}, {
+ &sensor_dev_attr_pwm3_auto_point1_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point2_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point3_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point4_pwm.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point1_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point1_temp_hyst.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point2_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point3_temp.dev_attr.attr,
+ &sensor_dev_attr_pwm3_auto_point4_temp.dev_attr.attr,
+ NULL
+} };
+
+static const struct attribute_group it87_group_autopwm[3] = {
+ { .attrs = it87_attributes_autopwm[0] },
+ { .attrs = it87_attributes_autopwm[1] },
+ { .attrs = it87_attributes_autopwm[2] },
+};
+
+static struct attribute *it87_attributes_fan_beep[] = {
+ &sensor_dev_attr_fan1_beep.dev_attr.attr,
+ &sensor_dev_attr_fan2_beep.dev_attr.attr,
+ &sensor_dev_attr_fan3_beep.dev_attr.attr,
+ &sensor_dev_attr_fan4_beep.dev_attr.attr,
+ &sensor_dev_attr_fan5_beep.dev_attr.attr,
+};
+
+static struct attribute *it87_attributes_vid[] = {
&dev_attr_vrm.attr,
&dev_attr_cpu0_vid.attr,
NULL
};
-static const struct attribute_group it87_group_opt = {
- .attrs = it87_attributes_opt,
+static const struct attribute_group it87_group_vid = {
+ .attrs = it87_attributes_vid,
};
/* SuperIO detection - will change isa_address if a chip is found */
@@ -1035,6 +1482,10 @@ static int __init it87_find(unsigned short *address,
if (sio_data->type == it87) {
/* The IT8705F doesn't have VID pins at all */
sio_data->skip_vid = 1;
+
+ /* The IT8705F has a different LD number for GPIO */
+ superio_select(5);
+ sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
} else {
int reg;
@@ -1068,7 +1519,11 @@ static int __init it87_find(unsigned short *address,
pr_info("it87: in3 is VCC (+5V)\n");
if (reg & (1 << 1))
pr_info("it87: in7 is VCCH (+5V Stand-By)\n");
+
+ sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
}
+ if (sio_data->beep_pin)
+ pr_info("it87: Beeping is supported\n");
/* Disable specific features based on DMI strings */
board_vendor = dmi_get_system_info(DMI_BOARD_VENDOR);
@@ -1093,14 +1548,46 @@ exit:
return err;
}
+static void it87_remove_files(struct device *dev)
+{
+ struct it87_data *data = platform_get_drvdata(pdev);
+ struct it87_sio_data *sio_data = dev->platform_data;
+ const struct attribute_group *fan_group = it87_get_fan_group(data);
+ int i;
+
+ sysfs_remove_group(&dev->kobj, &it87_group);
+ if (sio_data->beep_pin)
+ sysfs_remove_group(&dev->kobj, &it87_group_beep);
+ for (i = 0; i < 5; i++) {
+ if (!(data->has_fan & (1 << i)))
+ continue;
+ sysfs_remove_group(&dev->kobj, &fan_group[i]);
+ if (sio_data->beep_pin)
+ sysfs_remove_file(&dev->kobj,
+ it87_attributes_fan_beep[i]);
+ }
+ for (i = 0; i < 3; i++) {
+ if (sio_data->skip_pwm & (1 << i))
+ continue;
+ sysfs_remove_group(&dev->kobj, &it87_group_pwm[i]);
+ if (has_old_autopwm(data))
+ sysfs_remove_group(&dev->kobj,
+ &it87_group_autopwm[i]);
+ }
+ if (!sio_data->skip_vid)
+ sysfs_remove_group(&dev->kobj, &it87_group_vid);
+}
+
static int __devinit it87_probe(struct platform_device *pdev)
{
struct it87_data *data;
struct resource *res;
struct device *dev = &pdev->dev;
struct it87_sio_data *sio_data = dev->platform_data;
- int err = 0;
+ const struct attribute_group *fan_group;
+ int err = 0, i;
int enable_pwm_interface;
+ int fan_beep_need_rw;
static const char *names[] = {
"it87",
"it8712",
@@ -1118,7 +1605,8 @@ static int __devinit it87_probe(struct platform_device *pdev)
goto ERROR0;
}
- if (!(data = kzalloc(sizeof(struct it87_data), GFP_KERNEL))) {
+ data = kzalloc(sizeof(struct it87_data), GFP_KERNEL);
+ if (!data) {
err = -ENOMEM;
goto ERROR1;
}
@@ -1146,120 +1634,60 @@ static int __devinit it87_probe(struct platform_device *pdev)
it87_init_device(pdev);
/* Register sysfs hooks */
- if ((err = sysfs_create_group(&dev->kobj, &it87_group)))
+ err = sysfs_create_group(&dev->kobj, &it87_group);
+ if (err)
goto ERROR2;
+ if (sio_data->beep_pin) {
+ err = sysfs_create_group(&dev->kobj, &it87_group_beep);
+ if (err)
+ goto ERROR4;
+ }
+
/* Do not create fan files for disabled fans */
- if (has_16bit_fans(data)) {
- /* 16-bit tachometers */
- if (data->has_fan & (1 << 0)) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_fan1_input16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan1_min16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan1_alarm.dev_attr)))
- goto ERROR4;
- }
- if (data->has_fan & (1 << 1)) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_fan2_input16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan2_min16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan2_alarm.dev_attr)))
- goto ERROR4;
- }
- if (data->has_fan & (1 << 2)) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_fan3_input16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan3_min16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan3_alarm.dev_attr)))
- goto ERROR4;
- }
- if (data->has_fan & (1 << 3)) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_fan4_input16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan4_min16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan4_alarm.dev_attr)))
- goto ERROR4;
- }
- if (data->has_fan & (1 << 4)) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_fan5_input16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan5_min16.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan5_alarm.dev_attr)))
- goto ERROR4;
- }
- } else {
- /* 8-bit tachometers with clock divider */
- if (data->has_fan & (1 << 0)) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_fan1_input.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan1_min.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan1_div.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan1_alarm.dev_attr)))
- goto ERROR4;
- }
- if (data->has_fan & (1 << 1)) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_fan2_input.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan2_min.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan2_div.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan2_alarm.dev_attr)))
- goto ERROR4;
- }
- if (data->has_fan & (1 << 2)) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_fan3_input.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan3_min.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan3_div.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_fan3_alarm.dev_attr)))
+ fan_group = it87_get_fan_group(data);
+ fan_beep_need_rw = 1;
+ for (i = 0; i < 5; i++) {
+ if (!(data->has_fan & (1 << i)))
+ continue;
+ err = sysfs_create_group(&dev->kobj, &fan_group[i]);
+ if (err)
+ goto ERROR4;
+
+ if (sio_data->beep_pin) {
+ err = sysfs_create_file(&dev->kobj,
+ it87_attributes_fan_beep[i]);
+ if (err)
goto ERROR4;
+ if (!fan_beep_need_rw)
+ continue;
+
+ /* As we have a single beep enable bit for all fans,
+ * only the first enabled fan has a writable attribute
+ * for it. */
+ if (sysfs_chmod_file(&dev->kobj,
+ it87_attributes_fan_beep[i],
+ S_IRUGO | S_IWUSR))
+ dev_dbg(dev, "chmod +w fan%d_beep failed\n",
+ i + 1);
+ fan_beep_need_rw = 0;
}
}
if (enable_pwm_interface) {
- if (!(sio_data->skip_pwm & (1 << 0))) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_pwm1_enable.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_pwm1.dev_attr))
- || (err = device_create_file(dev,
- &dev_attr_pwm1_freq)))
- goto ERROR4;
- }
- if (!(sio_data->skip_pwm & (1 << 1))) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_pwm2_enable.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_pwm2.dev_attr))
- || (err = device_create_file(dev,
- &dev_attr_pwm2_freq)))
+ for (i = 0; i < 3; i++) {
+ if (sio_data->skip_pwm & (1 << i))
+ continue;
+ err = sysfs_create_group(&dev->kobj,
+ &it87_group_pwm[i]);
+ if (err)
goto ERROR4;
- }
- if (!(sio_data->skip_pwm & (1 << 2))) {
- if ((err = device_create_file(dev,
- &sensor_dev_attr_pwm3_enable.dev_attr))
- || (err = device_create_file(dev,
- &sensor_dev_attr_pwm3.dev_attr))
- || (err = device_create_file(dev,
- &dev_attr_pwm3_freq)))
+
+ if (!has_old_autopwm(data))
+ continue;
+ err = sysfs_create_group(&dev->kobj,
+ &it87_group_autopwm[i]);
+ if (err)
goto ERROR4;
}
}
@@ -1268,10 +1696,8 @@ static int __devinit it87_probe(struct platform_device *pdev)
data->vrm = vid_which_vrm();
/* VID reading from Super-I/O config space if available */
data->vid = sio_data->vid_value;
- if ((err = device_create_file(dev,
- &dev_attr_vrm))
- || (err = device_create_file(dev,
- &dev_attr_cpu0_vid)))
+ err = sysfs_create_group(&dev->kobj, &it87_group_vid);
+ if (err)
goto ERROR4;
}
@@ -1284,8 +1710,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
return 0;
ERROR4:
- sysfs_remove_group(&dev->kobj, &it87_group);
- sysfs_remove_group(&dev->kobj, &it87_group_opt);
+ it87_remove_files(dev);
ERROR2:
platform_set_drvdata(pdev, NULL);
kfree(data);
@@ -1300,8 +1725,7 @@ static int __devexit it87_remove(struct platform_device *pdev)
struct it87_data *data = platform_get_drvdata(pdev);
hwmon_device_unregister(data->hwmon_dev);
- sysfs_remove_group(&pdev->dev.kobj, &it87_group);
- sysfs_remove_group(&pdev->dev.kobj, &it87_group_opt);
+ it87_remove_files(&pdev->dev);
release_region(data->addr, IT87_EC_EXTENT);
platform_set_drvdata(pdev, NULL);
@@ -1387,15 +1811,18 @@ static void __devinit it87_init_device(struct platform_device *pdev)
int tmp, i;
u8 mask;
- /* initialize to sane defaults:
- * - if the chip is in manual pwm mode, this will be overwritten with
- * the actual settings on the chip (so in this case, initialization
- * is not needed)
- * - if in automatic or on/off mode, we could switch to manual mode,
- * read the registers and set manual_pwm_ctl accordingly, but currently
- * this is not implemented, so we initialize to something sane */
+ /* For each PWM channel:
+ * - If it is in automatic mode, setting to manual mode should set
+ * the fan to full speed by default.
+ * - If it is in manual mode, we need a mapping to temperature
+ * channels to use when later switching to automatic mode.
+ * Use a 1:1 mapping by default (we are clueless).
+ * In both cases, the value can (and should) be changed by the user
+ * prior to switching to a different mode. */
for (i = 0; i < 3; i++) {
- data->manual_pwm_ctl[i] = 0xff;
+ data->pwm_temp_map[i] = i;
+ data->pwm_duty[i] = 0x7f; /* Full speed */
+ data->auto_pwm[i][3] = 0x7f; /* Full speed, hard-coded */
}
/* Some chips seem to have default value 0xff for all limit
@@ -1436,7 +1863,8 @@ static void __devinit it87_init_device(struct platform_device *pdev)
if ((data->fan_main_ctrl & mask) == 0) {
/* Enable all fan tachometers */
data->fan_main_ctrl |= mask;
- it87_write_value(data, IT87_REG_FAN_MAIN_CTRL, data->fan_main_ctrl);
+ it87_write_value(data, IT87_REG_FAN_MAIN_CTRL,
+ data->fan_main_ctrl);
}
data->has_fan = (data->fan_main_ctrl >> 4) & 0x07;
@@ -1461,30 +1889,32 @@ static void __devinit it87_init_device(struct platform_device *pdev)
/* Fan input pins may be used for alternative functions */
data->has_fan &= ~sio_data->skip_fan;
- /* Set current fan mode registers and the default settings for the
- * other mode registers */
- for (i = 0; i < 3; i++) {
- if (data->fan_main_ctrl & (1 << i)) {
- /* pwm mode */
- tmp = it87_read_value(data, IT87_REG_PWM(i));
- if (tmp & 0x80) {
- /* automatic pwm - not yet implemented, but
- * leave the settings made by the BIOS alone
- * until a change is requested via the sysfs
- * interface */
- } else {
- /* manual pwm */
- data->manual_pwm_ctl[i] = PWM_FROM_REG(tmp);
- }
- }
- }
-
/* Start monitoring */
it87_write_value(data, IT87_REG_CONFIG,
(it87_read_value(data, IT87_REG_CONFIG) & 0x36)
| (update_vbat ? 0x41 : 0x01));
}
+static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
+{
+ data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr));
+ if (data->pwm_ctrl[nr] & 0x80) /* Automatic mode */
+ data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
+ else /* Manual mode */
+ data->pwm_duty[nr] = data->pwm_ctrl[nr] & 0x7f;
+
+ if (has_old_autopwm(data)) {
+ int i;
+
+ for (i = 0; i < 5; i++)
+ data->auto_temp[nr][i] = it87_read_value(data,
+ IT87_REG_AUTO_TEMP(nr, i));
+ for (i = 0; i < 3; i++)
+ data->auto_pwm[nr][i] = it87_read_value(data,
+ IT87_REG_AUTO_PWM(nr, i));
+ }
+}
+
static struct it87_data *it87_update_device(struct device *dev)
{
struct it87_data *data = dev_get_drvdata(dev);
@@ -1494,24 +1924,22 @@ static struct it87_data *it87_update_device(struct device *dev)
if (time_after(jiffies, data->last_updated + HZ + HZ / 2)
|| !data->valid) {
-
if (update_vbat) {
/* Cleared after each update, so reenable. Value
- returned by this read will be previous value */
+ returned by this read will be previous value */
it87_write_value(data, IT87_REG_CONFIG,
- it87_read_value(data, IT87_REG_CONFIG) | 0x40);
+ it87_read_value(data, IT87_REG_CONFIG) | 0x40);
}
for (i = 0; i <= 7; i++) {
data->in[i] =
- it87_read_value(data, IT87_REG_VIN(i));
+ it87_read_value(data, IT87_REG_VIN(i));
data->in_min[i] =
- it87_read_value(data, IT87_REG_VIN_MIN(i));
+ it87_read_value(data, IT87_REG_VIN_MIN(i));
data->in_max[i] =
- it87_read_value(data, IT87_REG_VIN_MAX(i));
+ it87_read_value(data, IT87_REG_VIN_MAX(i));
}
/* in8 (battery) has no limit registers */
- data->in[8] =
- it87_read_value(data, IT87_REG_VIN(8));
+ data->in[8] = it87_read_value(data, IT87_REG_VIN(8));
for (i = 0; i < 5; i++) {
/* Skip disabled fans */
@@ -1519,7 +1947,7 @@ static struct it87_data *it87_update_device(struct device *dev)
continue;
data->fan_min[i] =
- it87_read_value(data, IT87_REG_FAN_MIN[i]);
+ it87_read_value(data, IT87_REG_FAN_MIN[i]);
data->fan[i] = it87_read_value(data,
IT87_REG_FAN[i]);
/* Add high byte if in 16-bit mode */
@@ -1532,11 +1960,11 @@ static struct it87_data *it87_update_device(struct device *dev)
}
for (i = 0; i < 3; i++) {
data->temp[i] =
- it87_read_value(data, IT87_REG_TEMP(i));
+ it87_read_value(data, IT87_REG_TEMP(i));
data->temp_high[i] =
- it87_read_value(data, IT87_REG_TEMP_HIGH(i));
+ it87_read_value(data, IT87_REG_TEMP_HIGH(i));
data->temp_low[i] =
- it87_read_value(data, IT87_REG_TEMP_LOW(i));
+ it87_read_value(data, IT87_REG_TEMP_LOW(i));
}
/* Newer chips don't have clock dividers */
@@ -1551,9 +1979,13 @@ static struct it87_data *it87_update_device(struct device *dev)
it87_read_value(data, IT87_REG_ALARM1) |
(it87_read_value(data, IT87_REG_ALARM2) << 8) |
(it87_read_value(data, IT87_REG_ALARM3) << 16);
+ data->beeps = it87_read_value(data, IT87_REG_BEEP_ENABLE);
+
data->fan_main_ctrl = it87_read_value(data,
IT87_REG_FAN_MAIN_CTRL);
data->fan_ctl = it87_read_value(data, IT87_REG_FAN_CTL);
+ for (i = 0; i < 3; i++)
+ it87_update_pwm_ctrl(data, i);
data->sensor = it87_read_value(data, IT87_REG_TEMP_ENABLE);
/* The 8705 does not have VID capability.
@@ -1628,7 +2060,7 @@ exit:
static int __init sm_it87_init(void)
{
int err;
- unsigned short isa_address=0;
+ unsigned short isa_address = 0;
struct it87_sio_data sio_data;
memset(&sio_data, 0, sizeof(struct it87_sio_data));
@@ -1640,7 +2072,7 @@ static int __init sm_it87_init(void)
return err;
err = it87_device_add(isa_address, &sio_data);
- if (err){
+ if (err) {
platform_driver_unregister(&it87_driver);
return err;
}
@@ -1661,7 +2093,8 @@ MODULE_DESCRIPTION("IT8705F/8712F/8716F/8718F/8720F/8726F, SiS950 driver");
module_param(update_vbat, bool, 0);
MODULE_PARM_DESC(update_vbat, "Update vbat if set else return powerup value");
module_param(fix_pwm_polarity, bool, 0);
-MODULE_PARM_DESC(fix_pwm_polarity, "Force PWM polarity to active high (DANGEROUS)");
+MODULE_PARM_DESC(fix_pwm_polarity,
+ "Force PWM polarity to active high (DANGEROUS)");
MODULE_LICENSE("GPL");
module_init(sm_it87_init);
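An editor's illustration, not part of the patch: the it87 changes above keep three shadows (pwm_ctrl, pwm_duty, pwm_temp_map) of each IT87_REG_PWM(nr) register because the byte changes meaning with bit 7. In automatic mode bits 1-0 select the temperature channel; in manual mode bits 6-0 hold the duty cycle; without separate shadows the settings of the inactive mode would be lost on a mode switch. The stand-alone sketch below uses the 0x80/0x7f/0x03 masks seen in the code above; the struct and helper names are illustrative, not the driver's API.

/*
 * Sketch only: one PWM control byte split by bit 7 into two shadows.
 */
#include <stdio.h>
#include <stdint.h>

struct pwm_shadow {
	uint8_t ctrl;		/* raw register value                 */
	uint8_t duty;		/* bits 6-0 when in manual mode       */
	uint8_t temp_map;	/* bits 1-0 when in automatic mode    */
};

static void decode(struct pwm_shadow *p, uint8_t reg)
{
	p->ctrl = reg;
	if (reg & 0x80)
		p->temp_map = reg & 0x03;	/* automatic: temp channel map */
	else
		p->duty = reg & 0x7f;		/* manual: duty cycle          */
}

static uint8_t encode(const struct pwm_shadow *p, int automatic)
{
	return automatic ? (uint8_t)(0x80 | p->temp_map) : p->duty;
}

int main(void)
{
	struct pwm_shadow p = { .duty = 0x7f, .temp_map = 0 };

	decode(&p, 0x81);	/* automatic mode, mapped to temp2 */
	printf("switch to manual writes 0x%02x\n", encode(&p, 0));
	printf("switch back to auto writes 0x%02x\n", encode(&p, 1));
	return 0;
}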
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 7c9bdc16742..7cc2708871a 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1,7 +1,7 @@
/*
* lm90.c - Part of lm_sensors, Linux kernel modules for hardware
* monitoring
- * Copyright (C) 2003-2009 Jean Delvare <khali@linux-fr.org>
+ * Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
*
* Based on the lm83 driver. The LM90 is a sensor chip made by National
* Semiconductor. It reports up to two temperatures (its own plus up to
@@ -93,7 +93,8 @@
static const unsigned short normal_i2c[] = {
0x18, 0x19, 0x1a, 0x29, 0x2a, 0x2b, 0x4c, 0x4d, 0x4e, I2C_CLIENT_END };
-enum chips { lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, max6646 };
+enum chips { lm90, adm1032, lm99, lm86, max6657, adt7461, max6680, max6646,
+ w83l771 };
/*
* The LM90 registers
@@ -151,6 +152,7 @@ static int lm90_detect(struct i2c_client *client, struct i2c_board_info *info);
static int lm90_probe(struct i2c_client *client,
const struct i2c_device_id *id);
static void lm90_init_client(struct i2c_client *client);
+static void lm90_alert(struct i2c_client *client, unsigned int flag);
static int lm90_remove(struct i2c_client *client);
static struct lm90_data *lm90_update_device(struct device *dev);
@@ -173,6 +175,7 @@ static const struct i2c_device_id lm90_id[] = {
{ "max6659", max6657 },
{ "max6680", max6680 },
{ "max6681", max6680 },
+ { "w83l771", w83l771 },
{ }
};
MODULE_DEVICE_TABLE(i2c, lm90_id);
@@ -184,6 +187,7 @@ static struct i2c_driver lm90_driver = {
},
.probe = lm90_probe,
.remove = lm90_remove,
+ .alert = lm90_alert,
.id_table = lm90_id,
.detect = lm90_detect,
.address_list = normal_i2c,
@@ -201,6 +205,9 @@ struct lm90_data {
int kind;
int flags;
+ u8 config_orig; /* Original configuration register value */
+ u8 alert_alarms; /* Which alarm bits trigger ALERT# */
+
/* registers values */
s8 temp8[4]; /* 0: local low limit
1: local high limit
@@ -758,6 +765,14 @@ static int lm90_detect(struct i2c_client *new_client,
&& reg_convrate <= 0x07) {
name = "max6646";
}
+ } else
+ if (address == 0x4C
+ && man_id == 0x5C) { /* Winbond/Nuvoton */
+ if ((chip_id & 0xFE) == 0x10 /* W83L771AWG/ASG */
+ && (reg_config1 & 0x2A) == 0x00
+ && reg_convrate <= 0x08) {
+ name = "w83l771";
+ }
}
if (!name) { /* identification failed */
@@ -794,6 +809,19 @@ static int lm90_probe(struct i2c_client *new_client,
new_client->flags &= ~I2C_CLIENT_PEC;
}
+ /* Different devices have different alarm bits triggering the
+ * ALERT# output */
+ switch (data->kind) {
+ case lm90:
+ case lm99:
+ case lm86:
+ data->alert_alarms = 0x7b;
+ break;
+ default:
+ data->alert_alarms = 0x7c;
+ break;
+ }
+
/* Initialize the LM90 chip */
lm90_init_client(new_client);
@@ -830,7 +858,7 @@ exit:
static void lm90_init_client(struct i2c_client *client)
{
- u8 config, config_orig;
+ u8 config;
struct lm90_data *data = i2c_get_clientdata(client);
/*
@@ -842,7 +870,7 @@ static void lm90_init_client(struct i2c_client *client)
dev_warn(&client->dev, "Initialization failed!\n");
return;
}
- config_orig = config;
+ data->config_orig = config;
/* Check Temperature Range Select */
if (data->kind == adt7461) {
@@ -860,7 +888,7 @@ static void lm90_init_client(struct i2c_client *client)
}
config &= 0xBF; /* run */
- if (config != config_orig) /* Only write if changed */
+ if (config != data->config_orig) /* Only write if changed */
i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1, config);
}
@@ -875,10 +903,46 @@ static int lm90_remove(struct i2c_client *client)
device_remove_file(&client->dev,
&sensor_dev_attr_temp2_offset.dev_attr);
+ /* Restore initial configuration */
+ i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
+ data->config_orig);
+
kfree(data);
return 0;
}
+static void lm90_alert(struct i2c_client *client, unsigned int flag)
+{
+ struct lm90_data *data = i2c_get_clientdata(client);
+ u8 config, alarms;
+
+ lm90_read_reg(client, LM90_REG_R_STATUS, &alarms);
+ if ((alarms & 0x7f) == 0) {
+ dev_info(&client->dev, "Everything OK\n");
+ } else {
+ if (alarms & 0x61)
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 1);
+ if (alarms & 0x1a)
+ dev_warn(&client->dev,
+ "temp%d out of range, please check!\n", 2);
+ if (alarms & 0x04)
+ dev_warn(&client->dev,
+ "temp%d diode open, please check!\n", 2);
+
+ /* Disable ALERT# output, because these chips don't implement
+ SMBus alert correctly; they should only hold the alert line
+ low briefly. */
+ if ((data->kind == adm1032 || data->kind == adt7461)
+ && (alarms & data->alert_alarms)) {
+ dev_dbg(&client->dev, "Disabling ALERT#\n");
+ lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
+ i2c_smbus_write_byte_data(client, LM90_REG_W_CONFIG1,
+ config | 0x80);
+ }
+ }
+}
+
static int lm90_read16(struct i2c_client *client, u8 regh, u8 regl, u16 *value)
{
int err;
@@ -966,6 +1030,21 @@ static struct lm90_data *lm90_update_device(struct device *dev)
}
lm90_read_reg(client, LM90_REG_R_STATUS, &data->alarms);
+ /* Re-enable ALERT# output if it was originally enabled and
+ * relevant alarms are all clear */
+ if ((data->config_orig & 0x80) == 0
+ && (data->alarms & data->alert_alarms) == 0) {
+ u8 config;
+
+ lm90_read_reg(client, LM90_REG_R_CONFIG1, &config);
+ if (config & 0x80) {
+ dev_dbg(&client->dev, "Re-enabling ALERT#\n");
+ i2c_smbus_write_byte_data(client,
+ LM90_REG_W_CONFIG1,
+ config & ~0x80);
+ }
+ }
+
data->last_updated = jiffies;
data->valid = 1;
}
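
For reference, the alarm masks used in lm90_alert() above (0x61, 0x1a, 0x04) and the per-chip alert_alarms values (0x7b, 0x7c) decode against the status register roughly as sketched below; the bit names are inferred from those masks, not quoted from the driver or datasheet.

/* Assumed LM90-family status register layout, inferred from the masks
 * used in lm90_alert() above; treat the names as illustrative. */
#define STATUS_LCRIT	(1 << 0)	/* local critical      */
#define STATUS_RCRIT	(1 << 1)	/* remote critical     */
#define STATUS_OPEN	(1 << 2)	/* remote diode open   */
#define STATUS_RLOW	(1 << 3)	/* remote low limit    */
#define STATUS_RHIGH	(1 << 4)	/* remote high limit   */
#define STATUS_LLOW	(1 << 5)	/* local low limit     */
#define STATUS_LHIGH	(1 << 6)	/* local high limit    */
/* 0x61 = the local bits, 0x1a = the remote limit/critical bits,
 * 0x04 = diode open; alert_alarms 0x7c keeps the limit and open bits,
 * while 0x7b (LM90/LM99/LM86) keeps the critical bits but not open. */
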
diff --git a/drivers/hwmon/tmp401.c b/drivers/hwmon/tmp401.c
index a13b30e8d8d..d14a1af9f55 100644
--- a/drivers/hwmon/tmp401.c
+++ b/drivers/hwmon/tmp401.c
@@ -134,7 +134,7 @@ struct tmp401_data {
struct mutex update_lock;
char valid; /* zero until following fields are valid */
unsigned long last_updated; /* in jiffies */
- int kind;
+ enum chips kind;
/* register values */
u8 status;
@@ -524,7 +524,7 @@ static int tmp401_detect(struct i2c_client *client,
if (reg > 15)
return -ENODEV;
- strlcpy(info->type, tmp401_id[kind - 1].name, I2C_NAME_SIZE);
+ strlcpy(info->type, tmp401_id[kind].name, I2C_NAME_SIZE);
return 0;
}
@@ -572,8 +572,7 @@ static int tmp401_probe(struct i2c_client *client,
goto exit_remove;
}
- dev_info(&client->dev, "Detected TI %s chip\n",
- names[data->kind - 1]);
+ dev_info(&client->dev, "Detected TI %s chip\n", names[data->kind]);
return 0;
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c
index 4f7c051e2d7..738c472ece2 100644
--- a/drivers/hwmon/tmp421.c
+++ b/drivers/hwmon/tmp421.c
@@ -61,9 +61,9 @@ static const u8 TMP421_TEMP_LSB[4] = { 0x10, 0x11, 0x12, 0x13 };
#define TMP423_DEVICE_ID 0x23
static const struct i2c_device_id tmp421_id[] = {
- { "tmp421", tmp421 },
- { "tmp422", tmp422 },
- { "tmp423", tmp423 },
+ { "tmp421", 2 },
+ { "tmp422", 3 },
+ { "tmp423", 4 },
{ }
};
MODULE_DEVICE_TABLE(i2c, tmp421_id);
@@ -73,21 +73,23 @@ struct tmp421_data {
struct mutex update_lock;
char valid;
unsigned long last_updated;
- int kind;
+ int channels;
u8 config;
s16 temp[4];
};
static int temp_from_s16(s16 reg)
{
- int temp = reg;
+ /* Mask out status bits */
+ int temp = reg & ~0xf;
return (temp * 1000 + 128) / 256;
}
static int temp_from_u16(u16 reg)
{
- int temp = reg;
+ /* Mask out status bits */
+ int temp = reg & ~0xf;
/* Add offset for extended temperature range. */
temp -= 64 * 256;
@@ -107,7 +109,7 @@ static struct tmp421_data *tmp421_update_device(struct device *dev)
data->config = i2c_smbus_read_byte_data(client,
TMP421_CONFIG_REG_1);
- for (i = 0; i <= data->kind; i++) {
+ for (i = 0; i < data->channels; i++) {
data->temp[i] = i2c_smbus_read_byte_data(client,
TMP421_TEMP_MSB[i]) << 8;
data->temp[i] |= i2c_smbus_read_byte_data(client,
@@ -166,7 +168,7 @@ static mode_t tmp421_is_visible(struct kobject *kobj, struct attribute *a,
devattr = container_of(a, struct device_attribute, attr);
index = to_sensor_dev_attr(devattr)->index;
- if (data->kind > index)
+ if (index < data->channels)
return a->mode;
return 0;
@@ -252,9 +254,9 @@ static int tmp421_detect(struct i2c_client *client,
return -ENODEV;
}
- strlcpy(info->type, tmp421_id[kind - 1].name, I2C_NAME_SIZE);
+ strlcpy(info->type, tmp421_id[kind].name, I2C_NAME_SIZE);
dev_info(&adapter->dev, "Detected TI %s chip at 0x%02x\n",
- names[kind - 1], client->addr);
+ names[kind], client->addr);
return 0;
}
@@ -271,7 +273,7 @@ static int tmp421_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
mutex_init(&data->update_lock);
- data->kind = id->driver_data;
+ data->channels = id->driver_data;
err = tmp421_init_client(client);
if (err)
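
As a quick check of the tmp421 conversion helpers changed above, here is an illustrative userspace calculation; the sample register value is made up.

/* Illustrative check of temp_from_s16(): the low four bits of the
 * register are status flags and must be masked off before converting
 * from 1/256 degree C units to millidegrees C. */
#include <stdio.h>

static int temp_from_s16(short reg)
{
	int temp = reg & ~0xf;			/* mask out status bits */

	return (temp * 1000 + 128) / 256;	/* millidegrees C */
}

int main(void)
{
	/* 0x1903 = 25.0 degrees C with a status bit set in the low nibble */
	printf("%d\n", temp_from_s16(0x1903));	/* prints 25000 */
	return 0;
}
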
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index d47b4c9949c..e6078c9f0e2 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -948,8 +948,7 @@ static int __devinit vt8231_pci_probe(struct pci_dev *dev,
address = val & ~(VT8231_EXTENT - 1);
if (address == 0) {
- dev_err(&dev->dev, "base address not set -\
- upgrade BIOS or use force_addr=0xaddr\n");
+ dev_err(&dev->dev, "base address not set - upgrade BIOS or use force_addr=0xaddr\n");
return -ENODEV;
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 9a2022b6749..9de81a4c15a 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -3,6 +3,10 @@
Copyright (C) 2006 Winbond Electronics Corp.
Yuan Mu
Rudolf Marek <r.marek@assembler.cz>
+ Copyright (C) 2009-2010 Sven Anders <anders@anduras.de>, ANDURAS AG.
+ Watchdog driver part
+ (Based partially on fschmd driver,
+ Copyright 2007-2008 by Hans de Goede)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -35,6 +39,16 @@
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/watchdog.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/kref.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
+
+/* Default values */
+#define WATCHDOG_TIMEOUT 2 /* 2 minute default timeout */
/* Addresses to scan */
static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f,
@@ -51,6 +65,18 @@ static int reset;
module_param(reset, bool, 0);
MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
+static int timeout = WATCHDOG_TIMEOUT; /* default timeout in minutes */
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout,
+ "Watchdog timeout in minutes. 2<= timeout <=255 (default="
+ __MODULE_STRING(WATCHDOG_TIMEOUT) ")");
+
+static int nowayout = WATCHDOG_NOWAYOUT;
+module_param(nowayout, int, 0);
+MODULE_PARM_DESC(nowayout,
+ "Watchdog cannot be stopped once started (default="
+ __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
/*
Address 0x00, 0x0d, 0x0e, 0x0f in all three banks are reserved
as ID, Bank Select registers
@@ -72,6 +98,11 @@ MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended");
#define W83793_REG_VID_LATCHB 0x08
#define W83793_REG_VID_CTRL 0x59
+#define W83793_REG_WDT_LOCK 0x01
+#define W83793_REG_WDT_ENABLE 0x02
+#define W83793_REG_WDT_STATUS 0x03
+#define W83793_REG_WDT_TIMEOUT 0x04
+
static u16 W83793_REG_TEMP_MODE[2] = { 0x5e, 0x5f };
#define TEMP_READ 0
@@ -223,8 +254,37 @@ struct w83793_data {
u8 tolerance[3]; /* Temp tolerance(Smart Fan I/II) */
u8 sf2_pwm[6][7]; /* Smart FanII: Fan duty cycle */
u8 sf2_temp[6][7]; /* Smart FanII: Temp level point */
+
+ /* watchdog */
+ struct i2c_client *client;
+ struct mutex watchdog_lock;
+ struct list_head list; /* member of the watchdog_data_list */
+ struct kref kref;
+ struct miscdevice watchdog_miscdev;
+ unsigned long watchdog_is_open;
+ char watchdog_expect_close;
+ char watchdog_name[10]; /* must be unique to avoid sysfs conflict */
+ unsigned int watchdog_caused_reboot;
+ int watchdog_timeout; /* watchdog timeout in minutes */
};
+/* Somewhat ugly :( global data pointer list with all devices, so that
+   we can find our device data, as when using misc_register there is no
+   other method to get to one's device data from the open file-op or
+   from the reboot notifier callback. */
+static LIST_HEAD(watchdog_data_list);
+
+/* Note this lock not only protects list access, but also data.kref access */
+static DEFINE_MUTEX(watchdog_data_mutex);
+
+/* Release our data struct when we're detached from the i2c client *and* all
+ references to our watchdog device are released */
+static void w83793_release_resources(struct kref *ref)
+{
+ struct w83793_data *data = container_of(ref, struct w83793_data, kref);
+ kfree(data);
+}
+
static u8 w83793_read_value(struct i2c_client *client, u16 reg);
static int w83793_write_value(struct i2c_client *client, u16 reg, u8 value);
static int w83793_probe(struct i2c_client *client,
@@ -1063,14 +1123,349 @@ static void w83793_init_client(struct i2c_client *client)
/* Start monitoring */
w83793_write_value(client, W83793_REG_CONFIG,
w83793_read_value(client, W83793_REG_CONFIG) | 0x01);
+}
+
+/*
+ * Watchdog routines
+ */
+
+static int watchdog_set_timeout(struct w83793_data *data, int timeout)
+{
+ int ret, mtimeout;
+
+ mtimeout = DIV_ROUND_UP(timeout, 60);
+
+ if (mtimeout > 255)
+ return -EINVAL;
+
+ mutex_lock(&data->watchdog_lock);
+ if (!data->client) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ data->watchdog_timeout = mtimeout;
+
+ /* Set Timeout value (in Minutes) */
+ w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
+ data->watchdog_timeout);
+
+ ret = mtimeout * 60;
+
+leave:
+ mutex_unlock(&data->watchdog_lock);
+ return ret;
+}
+
+static int watchdog_get_timeout(struct w83793_data *data)
+{
+ int timeout;
+
+ mutex_lock(&data->watchdog_lock);
+ timeout = data->watchdog_timeout * 60;
+ mutex_unlock(&data->watchdog_lock);
+
+ return timeout;
+}
+
+static int watchdog_trigger(struct w83793_data *data)
+{
+ int ret = 0;
+
+ mutex_lock(&data->watchdog_lock);
+ if (!data->client) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ /* Set Timeout value (in Minutes) */
+ w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
+ data->watchdog_timeout);
+
+leave:
+ mutex_unlock(&data->watchdog_lock);
+ return ret;
+}
+
+static int watchdog_enable(struct w83793_data *data)
+{
+ int ret = 0;
+
+ mutex_lock(&data->watchdog_lock);
+ if (!data->client) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ /* Set initial timeout */
+ w83793_write_value(data->client, W83793_REG_WDT_TIMEOUT,
+ data->watchdog_timeout);
+
+ /* Enable Soft Watchdog */
+ w83793_write_value(data->client, W83793_REG_WDT_LOCK, 0x55);
+
+leave:
+ mutex_unlock(&data->watchdog_lock);
+ return ret;
+}
+
+static int watchdog_disable(struct w83793_data *data)
+{
+ int ret = 0;
+
+ mutex_lock(&data->watchdog_lock);
+ if (!data->client) {
+ ret = -ENODEV;
+ goto leave;
+ }
+
+ /* Disable Soft Watchdog */
+ w83793_write_value(data->client, W83793_REG_WDT_LOCK, 0xAA);
+
+leave:
+ mutex_unlock(&data->watchdog_lock);
+ return ret;
+}
+
+static int watchdog_open(struct inode *inode, struct file *filp)
+{
+ struct w83793_data *pos, *data = NULL;
+ int watchdog_is_open;
+
+ /* We get called from drivers/char/misc.c with misc_mtx held, and we
+ call misc_register() from w83793_probe() with watchdog_data_mutex
+ held; as misc_register() takes the misc_mtx lock, this is a possible
+ deadlock, so we use mutex_trylock here. */
+ if (!mutex_trylock(&watchdog_data_mutex))
+ return -ERESTARTSYS;
+ list_for_each_entry(pos, &watchdog_data_list, list) {
+ if (pos->watchdog_miscdev.minor == iminor(inode)) {
+ data = pos;
+ break;
+ }
+ }
+
+ /* Check, if device is already open */
+ watchdog_is_open = test_and_set_bit(0, &data->watchdog_is_open);
+
+ /* Increase data reference counter (if not already done).
+ Note we can never not have found data, so we don't check for this */
+ if (!watchdog_is_open)
+ kref_get(&data->kref);
+
+ mutex_unlock(&watchdog_data_mutex);
+
+ /* Check, if device is already open and possibly issue error */
+ if (watchdog_is_open)
+ return -EBUSY;
+
+ /* Enable Soft Watchdog */
+ watchdog_enable(data);
+
+ /* Store pointer to data into filp's private data */
+ filp->private_data = data;
+
+ return nonseekable_open(inode, filp);
+}
+
+static int watchdog_close(struct inode *inode, struct file *filp)
+{
+ struct w83793_data *data = filp->private_data;
+ if (data->watchdog_expect_close) {
+ watchdog_disable(data);
+ data->watchdog_expect_close = 0;
+ } else {
+ watchdog_trigger(data);
+ dev_crit(&data->client->dev,
+ "unexpected close, not stopping watchdog!\n");
+ }
+
+ clear_bit(0, &data->watchdog_is_open);
+
+ /* Decrease data reference counter */
+ mutex_lock(&watchdog_data_mutex);
+ kref_put(&data->kref, w83793_release_resources);
+ mutex_unlock(&watchdog_data_mutex);
+
+ return 0;
+}
+
+static ssize_t watchdog_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *offset)
+{
+ size_t ret;
+ struct w83793_data *data = filp->private_data;
+
+ if (count) {
+ if (!nowayout) {
+ size_t i;
+
+ /* Clear it in case it was set with a previous write */
+ data->watchdog_expect_close = 0;
+
+ for (i = 0; i != count; i++) {
+ char c;
+ if (get_user(c, buf + i))
+ return -EFAULT;
+ if (c == 'V')
+ data->watchdog_expect_close = 1;
+ }
+ }
+ ret = watchdog_trigger(data);
+ if (ret < 0)
+ return ret;
+ }
+ return count;
+}
+
+static int watchdog_ioctl(struct inode *inode, struct file *filp,
+ unsigned int cmd, unsigned long arg)
+{
+ static struct watchdog_info ident = {
+ .options = WDIOF_KEEPALIVEPING |
+ WDIOF_SETTIMEOUT |
+ WDIOF_CARDRESET,
+ .identity = "w83793 watchdog"
+ };
+
+ int val, ret = 0;
+ struct w83793_data *data = filp->private_data;
+
+ switch (cmd) {
+ case WDIOC_GETSUPPORT:
+ if (!nowayout)
+ ident.options |= WDIOF_MAGICCLOSE;
+ if (copy_to_user((void __user *)arg, &ident, sizeof(ident)))
+ ret = -EFAULT;
+ break;
+
+ case WDIOC_GETSTATUS:
+ val = data->watchdog_caused_reboot ? WDIOF_CARDRESET : 0;
+ ret = put_user(val, (int __user *)arg);
+ break;
+
+ case WDIOC_GETBOOTSTATUS:
+ ret = put_user(0, (int __user *)arg);
+ break;
+
+ case WDIOC_KEEPALIVE:
+ ret = watchdog_trigger(data);
+ break;
+
+ case WDIOC_GETTIMEOUT:
+ val = watchdog_get_timeout(data);
+ ret = put_user(val, (int __user *)arg);
+ break;
+
+ case WDIOC_SETTIMEOUT:
+ if (get_user(val, (int __user *)arg)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = watchdog_set_timeout(data, val);
+ if (ret > 0)
+ ret = put_user(ret, (int __user *)arg);
+ break;
+
+ case WDIOC_SETOPTIONS:
+ if (get_user(val, (int __user *)arg)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ if (val & WDIOS_DISABLECARD)
+ ret = watchdog_disable(data);
+ else if (val & WDIOS_ENABLECARD)
+ ret = watchdog_enable(data);
+ else
+ ret = -EINVAL;
+
+ break;
+ default:
+ ret = -ENOTTY;
+ }
+
+ return ret;
+}
+
+static const struct file_operations watchdog_fops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .open = watchdog_open,
+ .release = watchdog_close,
+ .write = watchdog_write,
+ .ioctl = watchdog_ioctl,
+};
+
+/*
+ * Notifier for system down
+ */
+
+static int watchdog_notify_sys(struct notifier_block *this, unsigned long code,
+ void *unused)
+{
+ struct w83793_data *data = NULL;
+
+ if (code == SYS_DOWN || code == SYS_HALT) {
+
+ /* Disable each registered watchdog */
+ mutex_lock(&watchdog_data_mutex);
+ list_for_each_entry(data, &watchdog_data_list, list) {
+ if (data->watchdog_miscdev.minor)
+ watchdog_disable(data);
+ }
+ mutex_unlock(&watchdog_data_mutex);
+ }
+
+ return NOTIFY_DONE;
}
+/*
+ * The WDT needs to learn about soft shutdowns in order to
+ * turn the timebomb registers off.
+ */
+
+static struct notifier_block watchdog_notifier = {
+ .notifier_call = watchdog_notify_sys,
+};
+
+/*
+ * Init / remove routines
+ */
+
static int w83793_remove(struct i2c_client *client)
{
struct w83793_data *data = i2c_get_clientdata(client);
struct device *dev = &client->dev;
- int i;
+ int i, tmp;
+
+ /* Unregister the watchdog (if registered) */
+ if (data->watchdog_miscdev.minor) {
+ misc_deregister(&data->watchdog_miscdev);
+
+ if (data->watchdog_is_open) {
+ dev_warn(&client->dev,
+ "i2c client detached with watchdog open! "
+ "Stopping watchdog.\n");
+ watchdog_disable(data);
+ }
+
+ mutex_lock(&watchdog_data_mutex);
+ list_del(&data->list);
+ mutex_unlock(&watchdog_data_mutex);
+
+ /* Tell the watchdog code the client is gone */
+ mutex_lock(&data->watchdog_lock);
+ data->client = NULL;
+ mutex_unlock(&data->watchdog_lock);
+ }
+
+ /* Reset Configuration Register to Disable Watch Dog Registers */
+ tmp = w83793_read_value(client, W83793_REG_CONFIG);
+ w83793_write_value(client, W83793_REG_CONFIG, tmp & ~0x04);
+
+ unregister_reboot_notifier(&watchdog_notifier);
hwmon_device_unregister(data->hwmon_dev);
@@ -1099,7 +1494,10 @@ static int w83793_remove(struct i2c_client *client)
if (data->lm75[1] != NULL)
i2c_unregister_device(data->lm75[1]);
- kfree(data);
+ /* Decrease data reference counter */
+ mutex_lock(&watchdog_data_mutex);
+ kref_put(&data->kref, w83793_release_resources);
+ mutex_unlock(&watchdog_data_mutex);
return 0;
}
@@ -1203,6 +1601,7 @@ static int w83793_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
+ const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 };
struct w83793_data *data;
int i, tmp, val, err;
int files_fan = ARRAY_SIZE(w83793_left_fan) / 7;
@@ -1218,6 +1617,14 @@ static int w83793_probe(struct i2c_client *client,
i2c_set_clientdata(client, data);
data->bank = i2c_smbus_read_byte_data(client, W83793_REG_BANKSEL);
mutex_init(&data->update_lock);
+ mutex_init(&data->watchdog_lock);
+ INIT_LIST_HEAD(&data->list);
+ kref_init(&data->kref);
+
+ /* Store client pointer in our data struct for watchdog usage
+ (where the client is found through a data ptr instead of the
+ other way around) */
+ data->client = client;
err = w83793_detect_subclients(client);
if (err)
@@ -1380,8 +1787,77 @@ static int w83793_probe(struct i2c_client *client,
goto exit_remove;
}
+ /* Watchdog initialization */
+
+ /* Register boot notifier */
+ err = register_reboot_notifier(&watchdog_notifier);
+ if (err != 0) {
+ dev_err(&client->dev,
+ "cannot register reboot notifier (err=%d)\n", err);
+ goto exit_devunreg;
+ }
+
+ /* Enable Watchdog registers.
+ Set Configuration Register to Enable Watch Dog Registers
+ (Bit 2) = XXXX, X1XX. */
+ tmp = w83793_read_value(client, W83793_REG_CONFIG);
+ w83793_write_value(client, W83793_REG_CONFIG, tmp | 0x04);
+
+ /* Set the default watchdog timeout */
+ data->watchdog_timeout = timeout;
+
+ /* Check, if last reboot was caused by watchdog */
+ data->watchdog_caused_reboot =
+ w83793_read_value(data->client, W83793_REG_WDT_STATUS) & 0x01;
+
+ /* Disable Soft Watchdog during initialization */
+ watchdog_disable(data);
+
+ /* We take the data_mutex lock early so that watchdog_open() cannot
+ run when misc_register() has completed, but we've not yet added
+ our data to the watchdog_data_list (and set the default timeout) */
+ mutex_lock(&watchdog_data_mutex);
+ for (i = 0; i < ARRAY_SIZE(watchdog_minors); i++) {
+ /* Register our watchdog part */
+ snprintf(data->watchdog_name, sizeof(data->watchdog_name),
+ "watchdog%c", (i == 0) ? '\0' : ('0' + i));
+ data->watchdog_miscdev.name = data->watchdog_name;
+ data->watchdog_miscdev.fops = &watchdog_fops;
+ data->watchdog_miscdev.minor = watchdog_minors[i];
+
+ err = misc_register(&data->watchdog_miscdev);
+ if (err == -EBUSY)
+ continue;
+ if (err) {
+ data->watchdog_miscdev.minor = 0;
+ dev_err(&client->dev,
+ "Registering watchdog chardev: %d\n", err);
+ break;
+ }
+
+ list_add(&data->list, &watchdog_data_list);
+
+ dev_info(&client->dev,
+ "Registered watchdog chardev major 10, minor: %d\n",
+ watchdog_minors[i]);
+ break;
+ }
+ if (i == ARRAY_SIZE(watchdog_minors)) {
+ data->watchdog_miscdev.minor = 0;
+ dev_warn(&client->dev, "Couldn't register watchdog chardev "
+ "(due to no free minor)\n");
+ }
+
+ mutex_unlock(&watchdog_data_mutex);
+
return 0;
+ /* Unregister hwmon device */
+
+exit_devunreg:
+
+ hwmon_device_unregister(data->hwmon_dev);
+
/* Unregister sysfs hooks */
exit_remove:
@@ -1628,7 +2104,7 @@ static void __exit sensors_w83793_exit(void)
i2c_del_driver(&w83793_driver);
}
-MODULE_AUTHOR("Yuan Mu");
+MODULE_AUTHOR("Yuan Mu, Sven Anders");
MODULE_DESCRIPTION("w83793 driver");
MODULE_LICENSE("GPL");
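
The watchdog part added above exposes the standard Linux watchdog character device. A minimal userspace sketch follows; the /dev/watchdog node name and the lack of error handling are assumptions for illustration only.

/* Minimal sketch of driving the new w83793 watchdog from userspace.
 * Assumes the miscdevice was registered as /dev/watchdog. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 180;	/* seconds; the driver rounds up to minutes */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);	/* 180 s -> 3 minutes */
	ioctl(fd, WDIOC_KEEPALIVE, 0);		/* ping the chip */
	write(fd, "V", 1);	/* magic close, honoured unless nowayout is set */
	close(fd);		/* stops the watchdog */
	return 0;
}
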
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 8d8a00e5a30..02ce9cff5fc 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -61,6 +61,16 @@ config I2C_HELPER_AUTO
In doubt, say Y.
+config I2C_SMBUS
+ tristate "SMBus-specific protocols" if !I2C_HELPER_AUTO
+ help
+ Say Y here if you want support for SMBus extensions to the I2C
+ specification. At the moment, the only supported extension is
+ the SMBus alert protocol.
+
+ This support is also available as a module. If so, the module
+ will be called i2c-smbus.
+
source drivers/i2c/algos/Kconfig
source drivers/i2c/busses/Kconfig
source drivers/i2c/chips/Kconfig
diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile
index ba26e6cbe74..acd0250c16a 100644
--- a/drivers/i2c/Makefile
+++ b/drivers/i2c/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o
obj-$(CONFIG_I2C) += i2c-core.o
+obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o
obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o
obj-y += busses/ chips/ algos/
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 78d42aae008..dcdaf8e675b 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -453,8 +453,6 @@ static int pca_init(struct i2c_adapter *adap)
*/
int raise_fall_time;
- struct i2c_algo_pca_data *pca_data = adap->algo_data;
-
/* Ignore the reset function from the module,
* we can use the parallel bus reset
*/
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 737f05200b1..4cc3807bd31 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -77,7 +77,7 @@ config I2C_AMD8111
will be called i2c-amd8111.
config I2C_I801
- tristate "Intel 82801 (ICH)"
+ tristate "Intel 82801 (ICH/PCH)"
depends on PCI
help
If you say yes to this option, support will be included for the Intel
@@ -97,7 +97,8 @@ config I2C_I801
ICH9
Tolapai
ICH10
- PCH
+ 3400/5 Series (PCH)
+ Cougar Point (PCH)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -580,6 +581,7 @@ config I2C_PARPORT
tristate "Parallel port adapter"
depends on PARPORT
select I2C_ALGOBIT
+ select I2C_SMBUS
help
This supports parallel port I2C adapters such as the ones made by
Philips or Velleman, Analog Devices evaluation boards, and more.
@@ -603,6 +605,7 @@ config I2C_PARPORT
config I2C_PARPORT_LIGHT
tristate "Parallel port adapter (light)"
select I2C_ALGOBIT
+ select I2C_SMBUS
help
This supports parallel port I2C adapters such as the ones made by
Philips or Velleman, Analog Devices evaluation boards, and more.
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 8de7d7b87bb..bd8f1e4d9e6 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -480,7 +480,7 @@ static struct i2c_adapter ali1535_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id ali1535_ids[] = {
+static const struct pci_device_id ali1535_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ },
};
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index 4687af40dd5..a409cfcf062 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -417,7 +417,7 @@ static void __devexit ali1563_remove(struct pci_dev * dev)
ali1563_shutdown(dev);
}
-static struct pci_device_id __devinitdata ali1563_id_table[] = {
+static const struct pci_device_id ali1563_id_table[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1563) },
{},
};
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index e7e3205f128..659f63f5e4a 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -477,7 +477,7 @@ static struct i2c_adapter ali15x3_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id ali15x3_ids[] = {
+static const struct pci_device_id ali15x3_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index 8f0b90ef8c7..c5a9fa488e7 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -308,7 +308,7 @@ static const char* chipname[] = {
"nVidia nForce", "AMD8111",
};
-static struct pci_device_id amd756_ids[] = {
+static const struct pci_device_id amd756_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_740B),
.driver_data = AMD756 },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7413),
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index 5b4ad86ca16..d0dc970d737 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -351,7 +351,7 @@ static const struct i2c_algorithm smbus_algorithm = {
};
-static struct pci_device_id amd8111_ids[] = {
+static const struct pci_device_id amd8111_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8111_SMBUS2) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-designware.c b/drivers/i2c/busses/i2c-designware.c
index 9e18ef97f15..3e72b69aa7f 100644
--- a/drivers/i2c/busses/i2c-designware.c
+++ b/drivers/i2c/busses/i2c-designware.c
@@ -497,13 +497,13 @@ static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
int i;
if (abort_source & DW_IC_TX_ABRT_NOACK) {
- for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
dev_dbg(dev->dev,
"%s: %s\n", __func__, abort_sources[i]);
return -EREMOTEIO;
}
- for_each_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
+ for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);
if (abort_source & DW_IC_TX_ARB_LOST)
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index bec9b845dd1..c767295ad1f 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -105,7 +105,7 @@ static struct i2c_adapter hydra_adap = {
.algo_data = &hydra_bit_data,
};
-static struct pci_device_id hydra_ids[] = {
+static const struct pci_device_id hydra_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_HYDRA) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index df6ab553f97..9da5b05cdb5 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -41,7 +41,8 @@
Tolapai 0x5032 32 hard yes yes yes
ICH10 0x3a30 32 hard yes yes yes
ICH10 0x3a60 32 hard yes yes yes
- PCH 0x3b30 32 hard yes yes yes
+ 3400/5 Series (PCH) 0x3b30 32 hard yes yes yes
+ Cougar Point (PCH) 0x1c22 32 hard yes yes yes
Features supported by this driver:
Software PEC no
@@ -561,7 +562,7 @@ static struct i2c_adapter i801_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id i801_ids[] = {
+static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_2) },
@@ -578,6 +579,7 @@ static struct pci_device_id i801_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_4) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) },
{ 0, }
};
@@ -707,6 +709,7 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
case PCI_DEVICE_ID_INTEL_ICH10_4:
case PCI_DEVICE_ID_INTEL_ICH10_5:
case PCI_DEVICE_ID_INTEL_PCH_SMBUS:
+ case PCI_DEVICE_ID_INTEL_CPT_SMBUS:
i801_features |= FEATURE_I2C_BLOCK_READ;
/* fall through */
case PCI_DEVICE_ID_INTEL_82801DB_3:
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index dba6eb053e2..69c22f79f23 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -256,7 +256,7 @@ static struct i2c_adapter sch_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sch_ids[] = {
+static const struct pci_device_id sch_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ec11d1c4e77..4a700587ef1 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -308,7 +308,7 @@ static struct i2c_algorithm smbus_algorithm = {
};
-static struct pci_device_id nforce2_ids[] = {
+static const struct pci_device_id nforce2_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS) },
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 322c5691e38..5f41ec0f72d 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport-light.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
Based on older i2c-velleman.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,10 +27,12 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/i2c-smbus.h>
#include <asm/io.h>
#include "i2c-parport.h"
@@ -43,6 +45,10 @@ static u16 base;
module_param(base, ushort, 0);
MODULE_PARM_DESC(base, "Base I/O address");
+static int irq;
+module_param(irq, int, 0);
+MODULE_PARM_DESC(irq, "IRQ (optional)");
+
/* ----- Low-level parallel port access ----------------------------------- */
static inline void port_write(unsigned char p, unsigned char d)
@@ -119,6 +125,16 @@ static struct i2c_adapter parport_adapter = {
.name = "Parallel port adapter (light)",
};
+/* SMBus alert support */
+static struct i2c_smbus_alert_setup alert_data = {
+ .alert_edge_triggered = 1,
+};
+static struct i2c_client *ara;
+static struct lineop parport_ctrl_irq = {
+ .val = (1 << 4),
+ .port = CTRL,
+};
+
static int __devinit i2c_parport_probe(struct platform_device *pdev)
{
int err;
@@ -127,18 +143,39 @@ static int __devinit i2c_parport_probe(struct platform_device *pdev)
parport_setsda(NULL, 1);
parport_setscl(NULL, 1);
/* Other init if needed (power on...) */
- if (adapter_parm[type].init.val)
+ if (adapter_parm[type].init.val) {
line_set(1, &adapter_parm[type].init);
+ /* Give powered devices some time to settle */
+ msleep(100);
+ }
parport_adapter.dev.parent = &pdev->dev;
err = i2c_bit_add_bus(&parport_adapter);
- if (err)
+ if (err) {
dev_err(&pdev->dev, "Unable to register with I2C\n");
- return err;
+ return err;
+ }
+
+ /* Setup SMBus alert if supported */
+ if (adapter_parm[type].smbus_alert && irq) {
+ alert_data.irq = irq;
+ ara = i2c_setup_smbus_alert(&parport_adapter, &alert_data);
+ if (ara)
+ line_set(1, &parport_ctrl_irq);
+ else
+ dev_warn(&pdev->dev, "Failed to register ARA client\n");
+ }
+
+ return 0;
}
static int __devexit i2c_parport_remove(struct platform_device *pdev)
{
+ if (ara) {
+ line_set(0, &parport_ctrl_irq);
+ i2c_unregister_device(ara);
+ ara = NULL;
+ }
i2c_del_adapter(&parport_adapter);
/* Un-init if needed (power off...) */
@@ -205,6 +242,9 @@ static int __init i2c_parport_init(void)
if (!request_region(base, 3, DRVNAME))
return -EBUSY;
+ if (irq != 0)
+ pr_info(DRVNAME ": using irq %d\n", irq);
+
if (!adapter_parm[type].getscl.val)
parport_algo_data.getscl = NULL;
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index 0d8998610c7..220fca7f23a 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.c I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2007 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
Based on older i2c-philips-par.c driver
Copyright (C) 1995-2000 Simon G. Vogl
@@ -27,9 +27,11 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/delay.h>
#include <linux/parport.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
+#include <linux/i2c-smbus.h>
#include "i2c-parport.h"
/* ----- Device list ------------------------------------------------------ */
@@ -38,6 +40,8 @@ struct i2c_par {
struct pardevice *pdev;
struct i2c_adapter adapter;
struct i2c_algo_bit_data algo_data;
+ struct i2c_smbus_alert_setup alert_data;
+ struct i2c_client *ara;
struct i2c_par *next;
};
@@ -143,6 +147,19 @@ static struct i2c_algo_bit_data parport_algo_data = {
/* ----- I2c and parallel port call-back functions and structures --------- */
+void i2c_parport_irq(void *data)
+{
+ struct i2c_par *adapter = data;
+ struct i2c_client *ara = adapter->ara;
+
+ if (ara) {
+ dev_dbg(&ara->dev, "SMBus alert received\n");
+ i2c_handle_smbus_alert(ara);
+ } else
+ dev_dbg(&adapter->adapter.dev,
+ "SMBus alert received but no ARA client!\n");
+}
+
static void i2c_parport_attach (struct parport *port)
{
struct i2c_par *adapter;
@@ -154,8 +171,9 @@ static void i2c_parport_attach (struct parport *port)
}
pr_debug("i2c-parport: attaching to %s\n", port->name);
+ parport_disable_irq(port);
adapter->pdev = parport_register_device(port, "i2c-parport",
- NULL, NULL, NULL, PARPORT_FLAG_EXCL, NULL);
+ NULL, NULL, i2c_parport_irq, PARPORT_FLAG_EXCL, adapter);
if (!adapter->pdev) {
printk(KERN_ERR "i2c-parport: Unable to register with parport\n");
goto ERROR0;
@@ -185,14 +203,29 @@ static void i2c_parport_attach (struct parport *port)
parport_setsda(port, 1);
parport_setscl(port, 1);
/* Other init if needed (power on...) */
- if (adapter_parm[type].init.val)
+ if (adapter_parm[type].init.val) {
line_set(port, 1, &adapter_parm[type].init);
+ /* Give powered devices some time to settle */
+ msleep(100);
+ }
if (i2c_bit_add_bus(&adapter->adapter) < 0) {
printk(KERN_ERR "i2c-parport: Unable to register with I2C\n");
goto ERROR1;
}
+ /* Setup SMBus alert if supported */
+ if (adapter_parm[type].smbus_alert) {
+ adapter->alert_data.alert_edge_triggered = 1;
+ adapter->ara = i2c_setup_smbus_alert(&adapter->adapter,
+ &adapter->alert_data);
+ if (adapter->ara)
+ parport_enable_irq(port);
+ else
+ printk(KERN_WARNING "i2c-parport: Failed to register "
+ "ARA client\n");
+ }
+
/* Add the new adapter to the list */
adapter->next = adapter_list;
adapter_list = adapter;
@@ -213,6 +246,10 @@ static void i2c_parport_detach (struct parport *port)
for (prev = NULL, adapter = adapter_list; adapter;
prev = adapter, adapter = adapter->next) {
if (adapter->pdev->port == port) {
+ if (adapter->ara) {
+ parport_disable_irq(port);
+ i2c_unregister_device(adapter->ara);
+ }
i2c_del_adapter(&adapter->adapter);
/* Un-init if needed (power off...) */
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index ed69d846cb9..a9f66816546 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -1,7 +1,7 @@
/* ------------------------------------------------------------------------ *
* i2c-parport.h I2C bus over parallel port *
* ------------------------------------------------------------------------ *
- Copyright (C) 2003-2004 Jean Delvare <khali@linux-fr.org>
+ Copyright (C) 2003-2010 Jean Delvare <khali@linux-fr.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -38,6 +38,7 @@ struct adapter_parm {
struct lineop getsda;
struct lineop getscl;
struct lineop init;
+ unsigned int smbus_alert:1;
};
static struct adapter_parm adapter_parm[] = {
@@ -73,6 +74,7 @@ static struct adapter_parm adapter_parm[] = {
.setscl = { 0x01, DATA, 1 },
.getsda = { 0x10, STAT, 1 },
.init = { 0xf0, DATA, 0 },
+ .smbus_alert = 1,
},
/* type 5: ADM1025, ADM1030 and ADM1031 evaluation boards */
{
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index adf0fbb902f..0d20ff46a51 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -400,7 +400,7 @@ static void __devexit pasemi_smb_remove(struct pci_dev *dev)
kfree(smbus);
}
-static struct pci_device_id pasemi_smb_ids[] = {
+static const struct pci_device_id pasemi_smb_ids[] = {
{ PCI_DEVICE(0x1959, 0xa003) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index e56e4b6823c..ee9da6fcf69 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -472,7 +472,7 @@ static struct i2c_adapter piix4_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id piix4_ids[] = {
+static const struct pci_device_id piix4_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 844569f7d8b..55a71370c79 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -369,7 +369,7 @@ static struct i2c_adapter sis5595_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis5595_ids[] __devinitdata = {
+static const struct pci_device_id sis5595_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 68cff7af701..2309c7f1bde 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -468,7 +468,7 @@ static struct i2c_adapter sis630_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis630_ids[] __devinitdata = {
+static const struct pci_device_id sis630_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) },
{ 0, }
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 1649963b00d..d43d8f8943d 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -245,7 +245,7 @@ static struct i2c_adapter sis96x_adapter = {
.algo = &smbus_algorithm,
};
-static struct pci_device_id sis96x_ids[] = {
+static const struct pci_device_id sis96x_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c
index e29b6d5ba8e..b5b1bbf37d3 100644
--- a/drivers/i2c/busses/i2c-tiny-usb.c
+++ b/drivers/i2c/busses/i2c-tiny-usb.c
@@ -31,11 +31,13 @@
#define CMD_I2C_IO_BEGIN (1<<0)
#define CMD_I2C_IO_END (1<<1)
-/* i2c bit delay, default is 10us -> 100kHz */
+/* i2c bit delay, default is 10us -> 100kHz max
+ (in practice, due to additional delays in the i2c bitbanging
+ code, this results in an i2c clock of about 50kHz) */
static unsigned short delay = 10;
module_param(delay, ushort, 0);
-MODULE_PARM_DESC(delay, "bit delay in microseconds, "
- "e.g. 10 for 100kHz (default is 100kHz)");
+MODULE_PARM_DESC(delay, "bit delay in microseconds "
+ "(default is 10us for 100kHz max)");
static int usb_read(struct i2c_adapter *adapter, int cmd,
int value, int index, void *data, int len);
@@ -137,7 +139,7 @@ static const struct i2c_algorithm usb_algorithm = {
* Future Technology Devices International Ltd., later a pair was
* bought from EZPrototypes
*/
-static struct usb_device_id i2c_tiny_usb_table [] = {
+static const struct usb_device_id i2c_tiny_usb_table[] = {
{ USB_DEVICE(0x0403, 0xc631) }, /* FTDI */
{ USB_DEVICE(0x1c40, 0x0534) }, /* EZPrototypes */
{ } /* Terminating entry */
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 8b24f192103..de78283bddb 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -89,7 +89,7 @@ static struct i2c_adapter vt586b_adapter = {
};
-static struct pci_device_id vt586b_ids[] __devinitdata = {
+static const struct pci_device_id vt586b_ids[] __devinitconst = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index a84a909e123..d57292e5dae 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -444,7 +444,7 @@ release_region:
return error;
}
-static struct pci_device_id vt596_ids[] = {
+static const struct pci_device_id vt596_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596_3),
.driver_data = SMBBA1 },
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C596B_3),
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 10be7b5fbe9..3202a86f420 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -34,6 +34,7 @@
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/rwsem.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include "i2c-core.h"
@@ -184,6 +185,52 @@ static int i2c_device_pm_resume(struct device *dev)
#define i2c_device_pm_resume NULL
#endif
+#ifdef CONFIG_PM_RUNTIME
+static int i2c_device_runtime_suspend(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->runtime_suspend)
+ return 0;
+ return pm->runtime_suspend(dev);
+}
+
+static int i2c_device_runtime_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm;
+
+ if (!dev->driver)
+ return 0;
+ pm = dev->driver->pm;
+ if (!pm || !pm->runtime_resume)
+ return 0;
+ return pm->runtime_resume(dev);
+}
+
+static int i2c_device_runtime_idle(struct device *dev)
+{
+ const struct dev_pm_ops *pm = NULL;
+ int ret;
+
+ if (dev->driver)
+ pm = dev->driver->pm;
+ if (pm && pm->runtime_idle) {
+ ret = pm->runtime_idle(dev);
+ if (ret)
+ return ret;
+ }
+
+ return pm_runtime_suspend(dev);
+}
+#else
+#define i2c_device_runtime_suspend NULL
+#define i2c_device_runtime_resume NULL
+#define i2c_device_runtime_idle NULL
+#endif
+
static int i2c_device_suspend(struct device *dev, pm_message_t mesg)
{
struct i2c_client *client = i2c_verify_client(dev);
@@ -251,6 +298,9 @@ static const struct attribute_group *i2c_dev_attr_groups[] = {
static const struct dev_pm_ops i2c_device_pm_ops = {
.suspend = i2c_device_pm_suspend,
.resume = i2c_device_pm_resume,
+ .runtime_suspend = i2c_device_runtime_suspend,
+ .runtime_resume = i2c_device_runtime_resume,
+ .runtime_idle = i2c_device_runtime_idle,
};
struct bus_type i2c_bus_type = {
@@ -1133,7 +1183,7 @@ EXPORT_SYMBOL(i2c_transfer);
* i2c_master_send - issue a single I2C message in master transmit mode
* @client: Handle to slave device
* @buf: Data that will be written to the slave
- * @count: How many bytes to write
+ * @count: How many bytes to write, must be less than 64k since msg.len is u16
*
* Returns negative errno, or else the number of bytes written.
*/
@@ -1160,7 +1210,7 @@ EXPORT_SYMBOL(i2c_master_send);
* i2c_master_recv - issue a single I2C message in master receive mode
* @client: Handle to slave device
* @buf: Where to store data read from slave
- * @count: How many bytes to read
+ * @count: How many bytes to read, must be less than 64k since msg.len is u16
*
* Returns negative errno, or else the number of bytes read.
*/
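
With the bus-level runtime PM callbacks above in place, a client driver only has to populate the runtime hooks of its dev_pm_ops; a hedged sketch with hypothetical foo_* names is shown below (such a driver would also typically call pm_runtime_enable() from its probe routine).

/* Hypothetical client driver hooking into the runtime PM forwarding
 * added above; names and behaviour are illustrative only. */
#include <linux/i2c.h>
#include <linux/pm.h>

static int foo_runtime_suspend(struct device *dev)
{
	/* put the chip into its low-power state here */
	return 0;
}

static int foo_runtime_resume(struct device *dev)
{
	/* wake the chip back up here */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.runtime_suspend = foo_runtime_suspend,
	.runtime_resume  = foo_runtime_resume,
};

static struct i2c_driver foo_driver = {
	.driver = {
		.name	= "foo",
		.pm	= &foo_pm_ops,
	},
	/* .probe, .remove and .id_table omitted for brevity */
};
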
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
new file mode 100644
index 00000000000..42127822124
--- /dev/null
+++ b/drivers/i2c/i2c-smbus.c
@@ -0,0 +1,263 @@
+/*
+ * i2c-smbus.c - SMBus extensions to the I2C protocol
+ *
+ * Copyright (C) 2008 David Brownell
+ * Copyright (C) 2010 Jean Delvare <khali@linux-fr.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/semaphore.h>
+#include <linux/interrupt.h>
+#include <linux/workqueue.h>
+#include <linux/i2c.h>
+#include <linux/i2c-smbus.h>
+
+struct i2c_smbus_alert {
+ unsigned int alert_edge_triggered:1;
+ int irq;
+ struct work_struct alert;
+ struct i2c_client *ara; /* Alert response address */
+};
+
+struct alert_data {
+ unsigned short addr;
+ u8 flag:1;
+};
+
+/* If this is the alerting device, notify its driver */
+static int smbus_do_alert(struct device *dev, void *addrp)
+{
+ struct i2c_client *client = i2c_verify_client(dev);
+ struct alert_data *data = addrp;
+
+ if (!client || client->addr != data->addr)
+ return 0;
+ if (client->flags & I2C_CLIENT_TEN)
+ return 0;
+
+ /*
+ * Drivers should either disable alerts, or provide at least
+ * a minimal handler. Lock so client->driver won't change.
+ */
+ down(&dev->sem);
+ if (client->driver) {
+ if (client->driver->alert)
+ client->driver->alert(client, data->flag);
+ else
+ dev_warn(&client->dev, "no driver alert()!\n");
+ } else
+ dev_dbg(&client->dev, "alert with no driver\n");
+ up(&dev->sem);
+
+ /* Stop iterating after we find the device */
+ return -EBUSY;
+}
+
+/*
+ * The alert IRQ handler needs to hand work off to a task which can issue
+ * SMBus calls, because those sleeping calls can't be made in IRQ context.
+ */
+static void smbus_alert(struct work_struct *work)
+{
+ struct i2c_smbus_alert *alert;
+ struct i2c_client *ara;
+ unsigned short prev_addr = 0; /* Not a valid address */
+
+ alert = container_of(work, struct i2c_smbus_alert, alert);
+ ara = alert->ara;
+
+ for (;;) {
+ s32 status;
+ struct alert_data data;
+
+ /*
+ * Devices with pending alerts reply in address order, low
+ * to high, because of slave transmit arbitration. After
+ * responding, an SMBus device stops asserting SMBALERT#.
+ *
+ * Note that SMBus 2.0 reserves 10-bit addresses for future
+ * use. We neither handle them, nor try to use PEC here.
+ */
+ status = i2c_smbus_read_byte(ara);
+ if (status < 0)
+ break;
+
+ data.flag = status & 1;
+ data.addr = status >> 1;
+
+ if (data.addr == prev_addr) {
+ dev_warn(&ara->dev, "Duplicate SMBALERT# from dev "
+ "0x%02x, skipping\n", data.addr);
+ break;
+ }
+ dev_dbg(&ara->dev, "SMBALERT# from dev 0x%02x, flag %d\n",
+ data.addr, data.flag);
+
+ /* Notify driver for the device which issued the alert */
+ device_for_each_child(&ara->adapter->dev, &data,
+ smbus_do_alert);
+ prev_addr = data.addr;
+ }
+
+ /* We handled all alerts; re-enable level-triggered IRQs */
+ if (!alert->alert_edge_triggered)
+ enable_irq(alert->irq);
+}
+
+static irqreturn_t smbalert_irq(int irq, void *d)
+{
+ struct i2c_smbus_alert *alert = d;
+
+ /* Disable level-triggered IRQs until we handle them */
+ if (!alert->alert_edge_triggered)
+ disable_irq_nosync(irq);
+
+ schedule_work(&alert->alert);
+ return IRQ_HANDLED;
+}
+
+/* Setup SMBALERT# infrastructure */
+static int smbalert_probe(struct i2c_client *ara,
+ const struct i2c_device_id *id)
+{
+ struct i2c_smbus_alert_setup *setup = ara->dev.platform_data;
+ struct i2c_smbus_alert *alert;
+ struct i2c_adapter *adapter = ara->adapter;
+ int res;
+
+ alert = kzalloc(sizeof(struct i2c_smbus_alert), GFP_KERNEL);
+ if (!alert)
+ return -ENOMEM;
+
+ alert->alert_edge_triggered = setup->alert_edge_triggered;
+ alert->irq = setup->irq;
+ INIT_WORK(&alert->alert, smbus_alert);
+ alert->ara = ara;
+
+ if (setup->irq > 0) {
+ res = devm_request_irq(&ara->dev, setup->irq, smbalert_irq,
+ 0, "smbus_alert", alert);
+ if (res) {
+ kfree(alert);
+ return res;
+ }
+ }
+
+ i2c_set_clientdata(ara, alert);
+ dev_info(&adapter->dev, "supports SMBALERT#, %s trigger\n",
+ setup->alert_edge_triggered ? "edge" : "level");
+
+ return 0;
+}
+
+/* IRQ resource is managed so it is freed automatically */
+static int smbalert_remove(struct i2c_client *ara)
+{
+ struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
+
+ cancel_work_sync(&alert->alert);
+
+ i2c_set_clientdata(ara, NULL);
+ kfree(alert);
+ return 0;
+}
+
+static const struct i2c_device_id smbalert_ids[] = {
+ { "smbus_alert", 0 },
+ { /* LIST END */ }
+};
+MODULE_DEVICE_TABLE(i2c, smbalert_ids);
+
+static struct i2c_driver smbalert_driver = {
+ .driver = {
+ .name = "smbus_alert",
+ },
+ .probe = smbalert_probe,
+ .remove = smbalert_remove,
+ .id_table = smbalert_ids,
+};
+
+/**
+ * i2c_setup_smbus_alert - Setup SMBus alert support
+ * @adapter: the target adapter
+ * @setup: setup data for the SMBus alert handler
+ * Context: can sleep
+ *
+ * Setup handling of the SMBus alert protocol on a given I2C bus segment.
+ *
+ * Handling can be done either through our IRQ handler, or by the
+ * adapter (from its handler, periodic polling, or whatever).
+ *
+ * NOTE that if we manage the IRQ, we *MUST* know if it's level or
+ * edge triggered in order to hand it to the workqueue correctly.
+ * If triggering the alert seems to wedge the system, you probably
+ * should have said it's level triggered.
+ *
+ * This returns the ara client, which should be saved for later use with
+ * i2c_handle_smbus_alert() and ultimately i2c_unregister_device(); or NULL
+ * to indicate an error.
+ */
+struct i2c_client *i2c_setup_smbus_alert(struct i2c_adapter *adapter,
+ struct i2c_smbus_alert_setup *setup)
+{
+ struct i2c_board_info ara_board_info = {
+ I2C_BOARD_INFO("smbus_alert", 0x0c),
+ .platform_data = setup,
+ };
+
+ return i2c_new_device(adapter, &ara_board_info);
+}
+EXPORT_SYMBOL_GPL(i2c_setup_smbus_alert);
+
+/**
+ * i2c_handle_smbus_alert - Handle an SMBus alert
+ * @ara: the ARA client on the relevant adapter
+ * Context: can't sleep
+ *
+ * Helper function to be called from an I2C bus driver's interrupt
+ * handler. It will schedule the alert work, in turn calling the
+ * corresponding I2C device driver's alert function.
+ *
+ * It is assumed that ara is a valid i2c client previously returned by
+ * i2c_setup_smbus_alert().
+ */
+int i2c_handle_smbus_alert(struct i2c_client *ara)
+{
+ struct i2c_smbus_alert *alert = i2c_get_clientdata(ara);
+
+ return schedule_work(&alert->alert);
+}
+EXPORT_SYMBOL_GPL(i2c_handle_smbus_alert);
+
+static int __init i2c_smbus_init(void)
+{
+ return i2c_add_driver(&smbalert_driver);
+}
+
+static void __exit i2c_smbus_exit(void)
+{
+ i2c_del_driver(&smbalert_driver);
+}
+
+module_init(i2c_smbus_init);
+module_exit(i2c_smbus_exit);
+
+MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>");
+MODULE_DESCRIPTION("SMBus protocol extensions support");
+MODULE_LICENSE("GPL");
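
Taken together, a bus driver that routes an SMBALERT# interrupt through the new helpers would do roughly the following; this is a sketch modelled on the i2c-parport changes in this series, and the foo_* names are illustrative.

/* Sketch of wiring an adapter's SMBALERT# pin to the new i2c-smbus core.
 * Modelled on the i2c-parport usage above; names are illustrative. */
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-smbus.h>

static struct i2c_smbus_alert_setup foo_alert_setup = {
	.alert_edge_triggered = 1,	/* only if the IRQ really is edge triggered */
	.irq = 0,			/* 0: the bus driver delivers alerts itself */
};
static struct i2c_client *foo_ara;

static int foo_setup_alert(struct i2c_adapter *adap)
{
	foo_ara = i2c_setup_smbus_alert(adap, &foo_alert_setup);
	return foo_ara ? 0 : -ENODEV;
}

/* Called from the adapter's own interrupt handler when SMBALERT# fires */
static void foo_alert_isr(void)
{
	if (foo_ara)
		i2c_handle_smbus_alert(foo_ara);
}

static void foo_teardown_alert(void)
{
	if (foo_ara)
		i2c_unregister_device(foo_ara);
}
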
diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
index 878f8ec6dbe..57d00caefc8 100644
--- a/drivers/ide/aec62xx.c
+++ b/drivers/ide/aec62xx.c
@@ -81,15 +81,15 @@ static u8 pci_bus_clock_list_ultra (u8 speed, struct chipset_bus_clock_list_entr
return chipset_table->ultra_settings;
}
-static void aec6210_set_mode(ide_drive_t *drive, const u8 speed)
+static void aec6210_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
u16 d_conf = 0;
u8 ultra = 0, ultra_conf = 0;
u8 tmp0 = 0, tmp1 = 0, tmp2 = 0;
+ const u8 speed = drive->dma_mode;
unsigned long flags;
local_irq_save(flags);
@@ -109,15 +109,15 @@ static void aec6210_set_mode(ide_drive_t *drive, const u8 speed)
local_irq_restore(flags);
}
-static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
+static void aec6260_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
struct chipset_bus_clock_list_entry *bus_clock = host->host_priv;
u8 unit = drive->dn & 1;
u8 tmp1 = 0, tmp2 = 0;
u8 ultra = 0, drive_conf = 0, ultra_conf = 0;
+ const u8 speed = drive->dma_mode;
unsigned long flags;
local_irq_save(flags);
@@ -134,9 +134,10 @@ static void aec6260_set_mode(ide_drive_t *drive, const u8 speed)
local_irq_restore(flags);
}
-static void aec_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void aec_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0);
+ drive->dma_mode = drive->pio_mode;
+ hwif->port_ops->set_dma_mode(hwif, drive);
}
static int init_chipset_aec62xx(struct pci_dev *dev)
diff --git a/drivers/ide/ali14xx.c b/drivers/ide/ali14xx.c
index 90da1f953ed..25b9fe3a9f8 100644
--- a/drivers/ide/ali14xx.c
+++ b/drivers/ide/ali14xx.c
@@ -109,13 +109,14 @@ static DEFINE_SPINLOCK(ali14xx_lock);
* This function computes timing parameters
* and sets controller registers accordingly.
*/
-static void ali14xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void ali14xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int driveNum;
int time1, time2;
u8 param1, param2, param3, param4;
unsigned long flags;
int bus_speed = ide_vlb_clk ? ide_vlb_clk : 50;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
/* calculate timing, according to PIO mode */
diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
index 0abc43f3101..2c8016ad0e2 100644
--- a/drivers/ide/alim15x3.c
+++ b/drivers/ide/alim15x3.c
@@ -8,7 +8,7 @@
* Copyright (C) 2002 Alan Cox
* ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
* Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
*
* (U)DMA capable version of ali 1533/1543(C), 1535(D)
*
@@ -48,61 +48,84 @@ static u8 m5229_revision;
static u8 chip_is_1543c_e;
static struct pci_dev *isa_dev;
+static void ali_fifo_control(ide_hwif_t *hwif, ide_drive_t *drive, int on)
+{
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
+ int pio_fifo = 0x54 + hwif->channel;
+ u8 fifo;
+ int shift = 4 * (drive->dn & 1);
+
+ pci_read_config_byte(pdev, pio_fifo, &fifo);
+ fifo &= ~(0x0F << shift);
+ fifo |= (on << shift);
+ pci_write_config_byte(pdev, pio_fifo, fifo);
+}
+
+static void ali_program_timings(ide_hwif_t *hwif, ide_drive_t *drive,
+ struct ide_timing *t, u8 ultra)
+{
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ int port = hwif->channel ? 0x5c : 0x58;
+ int udmat = 0x56 + hwif->channel;
+ u8 unit = drive->dn & 1, udma;
+ int shift = 4 * unit;
+
+ /* Set up the UDMA */
+ pci_read_config_byte(dev, udmat, &udma);
+ udma &= ~(0x0F << shift);
+ udma |= ultra << shift;
+ pci_write_config_byte(dev, udmat, udma);
+
+ if (t == NULL)
+ return;
+
+ t->setup = clamp_val(t->setup, 1, 8) & 7;
+ t->act8b = clamp_val(t->act8b, 1, 8) & 7;
+ t->rec8b = clamp_val(t->rec8b, 1, 16) & 15;
+ t->active = clamp_val(t->active, 1, 8) & 7;
+ t->recover = clamp_val(t->recover, 1, 16) & 15;
+
+ pci_write_config_byte(dev, port, t->setup);
+ pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b);
+ pci_write_config_byte(dev, port + unit + 2,
+ (t->active << 4) | t->recover);
+}
+
/**
* ali_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Program the controller for the given PIO mode.
*/
-static void ali_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void ali_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- int s_time = t->setup, a_time = t->active, c_time = t->cycle;
- u8 s_clc, a_clc, r_clc;
- unsigned long flags;
+ ide_drive_t *pair = ide_get_pair_dev(drive);
int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
- int port = hwif->channel ? 0x5c : 0x58;
- int portFIFO = hwif->channel ? 0x55 : 0x54;
- u8 cd_dma_fifo = 0, unit = drive->dn & 1;
-
- if ((s_clc = (s_time * bus_speed + 999) / 1000) >= 8)
- s_clc = 0;
- if ((a_clc = (a_time * bus_speed + 999) / 1000) >= 8)
- a_clc = 0;
-
- if (!(r_clc = (c_time * bus_speed + 999) / 1000 - a_clc - s_clc)) {
- r_clc = 1;
- } else {
- if (r_clc >= 16)
- r_clc = 0;
+ unsigned long T = 1000000 / bus_speed; /* PCI clock based */
+ struct ide_timing t;
+
+ ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
+ if (pair) {
+ struct ide_timing p;
+
+ ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
+ ide_timing_merge(&p, &t, &t,
+ IDE_TIMING_SETUP | IDE_TIMING_8BIT);
+ if (pair->dma_mode) {
+ ide_timing_compute(pair, pair->dma_mode, &p, T, 1);
+ ide_timing_merge(&p, &t, &t,
+ IDE_TIMING_SETUP | IDE_TIMING_8BIT);
+ }
}
- local_irq_save(flags);
-
+
/*
* PIO mode => ATA FIFO on, ATAPI FIFO off
*/
- pci_read_config_byte(dev, portFIFO, &cd_dma_fifo);
- if (drive->media==ide_disk) {
- if (unit) {
- pci_write_config_byte(dev, portFIFO, (cd_dma_fifo & 0x0F) | 0x50);
- } else {
- pci_write_config_byte(dev, portFIFO, (cd_dma_fifo & 0xF0) | 0x05);
- }
- } else {
- if (unit) {
- pci_write_config_byte(dev, portFIFO, cd_dma_fifo & 0x0F);
- } else {
- pci_write_config_byte(dev, portFIFO, cd_dma_fifo & 0xF0);
- }
- }
-
- pci_write_config_byte(dev, port, s_clc);
- pci_write_config_byte(dev, port + unit + 2, (a_clc << 4) | r_clc);
- local_irq_restore(flags);
+ ali_fifo_control(hwif, drive, (drive->media == ide_disk) ? 0x05 : 0x00);
+
+ ali_program_timings(hwif, drive, &t, 0);
}
/**
@@ -132,44 +155,42 @@ static u8 ali_udma_filter(ide_drive_t *drive)
/**
* ali_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Configure the hardware for the desired IDE transfer mode.
*/
-static void ali_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void ali_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
+ static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD };
struct pci_dev *dev = to_pci_dev(hwif->dev);
- u8 speed1 = speed;
- u8 unit = drive->dn & 1;
+ ide_drive_t *pair = ide_get_pair_dev(drive);
+ int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
+ unsigned long T = 1000000 / bus_speed; /* PCI clock based */
+ const u8 speed = drive->dma_mode;
u8 tmpbyte = 0x00;
- int m5229_udma = (hwif->channel) ? 0x57 : 0x56;
-
- if (speed == XFER_UDMA_6)
- speed1 = 0x47;
+ struct ide_timing t;
if (speed < XFER_UDMA_0) {
- u8 ultra_enable = (unit) ? 0x7f : 0xf7;
- /*
- * clear "ultra enable" bit
- */
- pci_read_config_byte(dev, m5229_udma, &tmpbyte);
- tmpbyte &= ultra_enable;
- pci_write_config_byte(dev, m5229_udma, tmpbyte);
-
- /*
- * FIXME: Oh, my... DMA timings are never set.
- */
+ ide_timing_compute(drive, drive->dma_mode, &t, T, 1);
+ if (pair) {
+ struct ide_timing p;
+
+ ide_timing_compute(pair, pair->pio_mode, &p, T, 1);
+ ide_timing_merge(&p, &t, &t,
+ IDE_TIMING_SETUP | IDE_TIMING_8BIT);
+ if (pair->dma_mode) {
+ ide_timing_compute(pair, pair->dma_mode,
+ &p, T, 1);
+ ide_timing_merge(&p, &t, &t,
+ IDE_TIMING_SETUP | IDE_TIMING_8BIT);
+ }
+ }
+ ali_program_timings(hwif, drive, &t, 0);
} else {
- pci_read_config_byte(dev, m5229_udma, &tmpbyte);
- tmpbyte &= (0x0f << ((1-unit) << 2));
- /*
- * enable ultra dma and set timing
- */
- tmpbyte |= ((0x08 | ((4-speed1)&0x07)) << (unit << 2));
- pci_write_config_byte(dev, m5229_udma, tmpbyte);
+ ali_program_timings(hwif, drive, NULL,
+ udma_timing[speed - XFER_UDMA_0]);
if (speed >= XFER_UDMA_3) {
pci_read_config_byte(dev, 0x4b, &tmpbyte);
tmpbyte |= 1;
@@ -355,19 +376,13 @@ static int ali_cable_override(struct pci_dev *pdev)
*
* This checks if the controller and the cable are capable
* of UDMA66 transfers. It doesn't check the drives.
- * But see note 2 below!
- *
- * FIXME: frobs bits that are not defined on newer ALi devicea
*/
static u8 ali_cable_detect(ide_hwif_t *hwif)
{
struct pci_dev *dev = to_pci_dev(hwif->dev);
- unsigned long flags;
u8 cbl = ATA_CBL_PATA40, tmpbyte;
- local_irq_save(flags);
-
if (m5229_revision >= 0xC2) {
/*
* m5229 80-pin cable detection (from Host View)
@@ -387,8 +402,6 @@ static u8 ali_cable_detect(ide_hwif_t *hwif)
}
}
- local_irq_restore(flags);
-
return cbl;
}
@@ -584,6 +597,6 @@ static void __exit ali15x3_ide_exit(void)
module_init(ali15x3_ide_init);
module_exit(ali15x3_ide_exit);
-MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox");
+MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE");
MODULE_LICENSE("GPL");
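
ali_program_timings() above packs the computed clock counts into nibble-sized fields: address setup goes into the low bits of the first byte, the 8-bit command active/recover pair into the next, and the per-drive data active/recover pair into the third, with clamp_val() bounding each count and the full-scale values (8 or 16 clocks) encoding as zero. Below is a standalone sketch of that packing for the primary channel, drive 0; the clock counts are made up and clamp() stands in for the kernel's clamp_val().

#include <stdio.h>

static unsigned int clamp(unsigned int v, unsigned int lo, unsigned int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	/* hypothetical counts as ide_timing_compute() might produce them */
	unsigned int setup = 3, act8b = 7, rec8b = 10, active = 4, recover = 9;

	setup   = clamp(setup,   1, 8)  & 7;	/* 8 clocks encodes as 0  */
	act8b   = clamp(act8b,   1, 8)  & 7;
	rec8b   = clamp(rec8b,   1, 16) & 15;	/* 16 clocks encodes as 0 */
	active  = clamp(active,  1, 8)  & 7;
	recover = clamp(recover, 1, 16) & 15;

	printf("0x58 (setup)         : 0x%02x\n", setup);
	printf("0x59 (8-bit act/rec) : 0x%02x\n", (act8b << 4) | rec8b);
	printf("0x5a (data act/rec)  : 0x%02x\n", (active << 4) | recover);
	return 0;
}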
diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
index 628cd2e5fed..3747b2561f0 100644
--- a/drivers/ide/amd74xx.c
+++ b/drivers/ide/amd74xx.c
@@ -3,7 +3,7 @@
* IDE driver for Linux.
*
* Copyright (c) 2000-2002 Vojtech Pavlik
- * Copyright (c) 2007-2008 Bartlomiej Zolnierkiewicz
+ * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
*
* Based on the work of:
* Andre Hedrick
@@ -70,7 +70,8 @@ static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask,
default: return;
}
- pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + (3 - dn), t);
+ if (timing->udma)
+ pci_write_config_byte(dev, AMD_UDMA_TIMING + offset + 3 - dn, t);
}
/*
@@ -78,14 +79,14 @@ static void amd_set_speed(struct pci_dev *dev, u8 dn, u8 udma_mask,
* to a desired transfer mode. It also can be called by upper layers.
*/
-static void amd_set_drive(ide_drive_t *drive, const u8 speed)
+static void amd_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
ide_drive_t *peer = ide_get_pair_dev(drive);
struct ide_timing t, p;
int T, UT;
u8 udma_mask = hwif->ultra_mask;
+ const u8 speed = drive->dma_mode;
T = 1000000000 / amd_clock;
UT = (udma_mask == ATA_UDMA2) ? T : (T / 2);
@@ -93,7 +94,7 @@ static void amd_set_drive(ide_drive_t *drive, const u8 speed)
ide_timing_compute(drive, speed, &t, T, UT);
if (peer) {
- ide_timing_compute(peer, peer->current_speed, &p, T, UT);
+ ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
}
@@ -107,9 +108,10 @@ static void amd_set_drive(ide_drive_t *drive, const u8 speed)
* amd_set_pio_mode() is a callback from upper layers for PIO-only tuning.
*/
-static void amd_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void amd_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- amd_set_drive(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ amd_set_drive(hwif, drive);
}
static void amd7409_cable_detect(struct pci_dev *dev)
@@ -340,6 +342,6 @@ static void __exit amd74xx_ide_exit(void)
module_init(amd74xx_ide_init);
module_exit(amd74xx_ide_exit);
-MODULE_AUTHOR("Vojtech Pavlik");
+MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("AMD PCI IDE driver");
MODULE_LICENSE("GPL");
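
Both the ALi and AMD paths above now fold the companion device's timings in with ide_timing_compute() plus ide_timing_merge(), so phases shared by the two devices on a cable (address setup and the 8-bit command cycle used for taskfile access) satisfy the slower one. The standalone sketch below models that merge as a per-field maximum under a mask; it illustrates the idea rather than reproducing the kernel implementation, and the field values are made up.

#include <stdio.h>

struct timing { unsigned int setup, act8b, rec8b; };

#define MERGE_SETUP	(1 << 0)
#define MERGE_8BIT	(1 << 1)

static unsigned int umax(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

static void merge(const struct timing *a, const struct timing *b,
		  struct timing *m, unsigned int what)
{
	if (what & MERGE_SETUP)
		m->setup = umax(a->setup, b->setup);
	if (what & MERGE_8BIT) {
		m->act8b = umax(a->act8b, b->act8b);
		m->rec8b = umax(a->rec8b, b->rec8b);
	}
}

int main(void)
{
	struct timing drive = { 2, 5, 7 }, peer = { 3, 8, 4 }, res = drive;

	merge(&peer, &drive, &res, MERGE_SETUP | MERGE_8BIT);
	printf("setup=%u act8b=%u rec8b=%u\n", res.setup, res.act8b, res.rec8b);
	return 0;	/* prints setup=3 act8b=8 rec8b=7 */
}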
diff --git a/drivers/ide/at91_ide.c b/drivers/ide/at91_ide.c
index 248219a89a6..000a78e5246 100644
--- a/drivers/ide/at91_ide.c
+++ b/drivers/ide/at91_ide.c
@@ -172,11 +172,12 @@ static void at91_ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
leave_16bit(chipselect, mode);
}
-static void at91_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void at91_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
struct ide_timing *timing;
- u8 chipselect = drive->hwif->select_data;
+ u8 chipselect = hwif->select_data;
int use_iordy = 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
pdbg("chipselect %u pio %u\n", chipselect, pio);
diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
index 837322b10a4..15f0ead89f5 100644
--- a/drivers/ide/atiixp.c
+++ b/drivers/ide/atiixp.c
@@ -42,19 +42,20 @@ static DEFINE_SPINLOCK(atiixp_lock);
/**
* atiixp_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Set the interface PIO mode.
*/
-static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void atiixp_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long flags;
int timing_shift = (drive->dn ^ 1) * 8;
u32 pio_timing_data;
u16 pio_mode_data;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
spin_lock_irqsave(&atiixp_lock, flags);
@@ -74,21 +75,22 @@ static void atiixp_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* atiixp_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Set a ATIIXP host controller to the desired DMA mode. This involves
* programming the right timing data into the PCI configuration space.
*/
-static void atiixp_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void atiixp_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long flags;
int timing_shift = (drive->dn ^ 1) * 8;
u32 tmp32;
u16 tmp16;
u16 udma_ctl = 0;
+ const u8 speed = drive->dma_mode;
spin_lock_irqsave(&atiixp_lock, flags);
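
atiixp keeps one 8-bit timing field per drive inside a 32-bit configuration dword; the shift above, (drive->dn ^ 1) * 8, places drive 0 of each channel in the upper byte of its 16-bit half. Below is a standalone sketch of updating such a field; only the shift expression comes from the code above, the dword layout and the sample values are assumptions.

#include <stdio.h>

int main(void)
{
	unsigned int timing = 0x11223344;	/* hypothetical current dword */
	unsigned int dn, new_byte = 0x5a;	/* made-up per-drive timing   */

	for (dn = 0; dn < 4; dn++) {
		unsigned int shift = (dn ^ 1) * 8;
		unsigned int updated = (timing & ~(0xffu << shift)) |
				       (new_byte << shift);

		printf("dn=%u shift=%2u -> 0x%08x\n", dn, shift, updated);
	}
	return 0;
}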
diff --git a/drivers/ide/au1xxx-ide.c b/drivers/ide/au1xxx-ide.c
index 349a67bf1a3..b26c23416fa 100644
--- a/drivers/ide/au1xxx-ide.c
+++ b/drivers/ide/au1xxx-ide.c
@@ -99,12 +99,11 @@ static void au1xxx_output_data(ide_drive_t *drive, struct ide_cmd *cmd,
}
#endif
-static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void au1xxx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
- /* set pio mode! */
- switch(pio) {
+ switch (drive->pio_mode - XFER_PIO_0) {
case 0:
mem_sttime = SBC_IDE_TIMING(PIO0);
@@ -161,11 +160,11 @@ static void au1xxx_set_pio_mode(ide_drive_t *drive, const u8 pio)
au_writel(mem_stcfg,MEM_STCFG2);
}
-static void auide_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void auide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int mem_sttime = 0, mem_stcfg = au_readl(MEM_STCFG2);
- switch(speed) {
+ switch (drive->dma_mode) {
#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
case XFER_MW_DMA_2:
mem_sttime = SBC_IDE_TIMING(MDMA2);
@@ -297,8 +296,8 @@ static int auide_dma_test_irq(ide_drive_t *drive)
*/
drive->waiting_for_dma++;
if (drive->waiting_for_dma >= DMA_WAIT_TIMEOUT) {
- printk(KERN_WARNING "%s: timeout waiting for ddma to \
- complete\n", drive->name);
+ printk(KERN_WARNING "%s: timeout waiting for ddma to complete\n",
+ drive->name);
return 1;
}
udelay(10);
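
The printk() change above is not purely cosmetic: when a string literal is continued with a trailing backslash, the whitespace that indents the next source line becomes part of the string, so the old message printed a run of stray spaces in the middle. A standalone demonstration:

#include <stdio.h>

int main(void)
{
	/* old style: the spaces indenting the second line are printed */
	printf("timeout waiting for ddma to \
        complete\n");

	/* new style: a single literal, no stray whitespace */
	printf("timeout waiting for ddma to complete\n");
	return 0;
}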
diff --git a/drivers/ide/cmd640.c b/drivers/ide/cmd640.c
index 1a32d62ed86..d2b8b272bc2 100644
--- a/drivers/ide/cmd640.c
+++ b/drivers/ide/cmd640.c
@@ -572,9 +572,10 @@ static void cmd640_set_mode(ide_drive_t *drive, unsigned int index,
program_drive_counts(drive, index);
}
-static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cmd640_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned int index = 0, cycle_time;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 b;
switch (pio) {
@@ -605,7 +606,7 @@ static void cmd640_set_pio_mode(ide_drive_t *drive, const u8 pio)
}
#endif /* CONFIG_BLK_DEV_CMD640_ENHANCED */
-static void cmd640_init_dev(ide_drive_t *drive)
+static void __init cmd640_init_dev(ide_drive_t *drive)
{
unsigned int i = drive->hwif->channel * 2 + (drive->dn & 1);
diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
index f2500c8826b..5f80312e636 100644
--- a/drivers/ide/cmd64x.c
+++ b/drivers/ide/cmd64x.c
@@ -7,6 +7,7 @@
* Copyright (C) 1998 David S. Miller (davem@redhat.com)
*
* Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
* Copyright (C) 2007,2009 MontaVista Software, Inc. <source@mvista.com>
*/
@@ -50,72 +51,42 @@
#define UDIDETCR1 0x7B
#define DTPR1 0x7C
-static u8 quantize_timing(int timing, int quant)
-{
- return (timing + quant - 1) / quant;
-}
-
-/*
- * This routine calculates active/recovery counts and then writes them into
- * the chipset registers.
- */
-static void program_cycle_times (ide_drive_t *drive, int cycle_time, int active_time)
+static void cmd64x_program_timings(ide_drive_t *drive, u8 mode)
{
+ ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- int clock_time = 1000 / (ide_pci_clk ? ide_pci_clk : 33);
- u8 cycle_count, active_count, recovery_count, drwtim;
+ int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
+ const unsigned long T = 1000000 / bus_speed;
static const u8 recovery_values[] =
{15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};
+ static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
+ static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
static const u8 drwtim_regs[4] = {DRWTIM0, DRWTIM1, DRWTIM2, DRWTIM3};
+ struct ide_timing t;
+ u8 arttim = 0;
- cycle_count = quantize_timing( cycle_time, clock_time);
- active_count = quantize_timing(active_time, clock_time);
- recovery_count = cycle_count - active_count;
+ ide_timing_compute(drive, mode, &t, T, 0);
/*
* In case we've got too long recovery phase, try to lengthen
* the active phase
*/
- if (recovery_count > 16) {
- active_count += recovery_count - 16;
- recovery_count = 16;
+ if (t.recover > 16) {
+ t.active += t.recover - 16;
+ t.recover = 16;
}
- if (active_count > 16) /* shouldn't actually happen... */
- active_count = 16;
+ if (t.active > 16) /* shouldn't actually happen... */
+ t.active = 16;
/*
* Convert values to internal chipset representation
*/
- recovery_count = recovery_values[recovery_count];
- active_count &= 0x0f;
+ t.recover = recovery_values[t.recover];
+ t.active &= 0x0f;
/* Program the active/recovery counts into the DRWTIM register */
- drwtim = (active_count << 4) | recovery_count;
- (void) pci_write_config_byte(dev, drwtim_regs[drive->dn], drwtim);
-}
-
-/*
- * This routine writes into the chipset registers
- * PIO setup/active/recovery timings.
- */
-static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct pci_dev *dev = to_pci_dev(hwif->dev);
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- unsigned long setup_count;
- unsigned int cycle_time;
- u8 arttim = 0;
-
- static const u8 setup_values[] = {0x40, 0x40, 0x40, 0x80, 0, 0xc0};
- static const u8 arttim_regs[4] = {ARTTIM0, ARTTIM1, ARTTIM23, ARTTIM23};
-
- cycle_time = ide_pio_cycle_time(drive, pio);
-
- program_cycle_times(drive, cycle_time, t->active);
-
- setup_count = quantize_timing(t->setup,
- 1000 / (ide_pci_clk ? ide_pci_clk : 33));
+ pci_write_config_byte(dev, drwtim_regs[drive->dn],
+ (t.active << 4) | t.recover);
/*
* The primary channel has individual address setup timing registers
@@ -126,15 +97,21 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
if (hwif->channel) {
ide_drive_t *pair = ide_get_pair_dev(drive);
- ide_set_drivedata(drive, (void *)setup_count);
+ if (pair) {
+ struct ide_timing tp;
- if (pair)
- setup_count = max_t(u8, setup_count,
- (unsigned long)ide_get_drivedata(pair));
+ ide_timing_compute(pair, pair->pio_mode, &tp, T, 0);
+ ide_timing_merge(&t, &tp, &t, IDE_TIMING_SETUP);
+ if (pair->dma_mode) {
+ ide_timing_compute(pair, pair->dma_mode,
+ &tp, T, 0);
+ ide_timing_merge(&tp, &t, &t, IDE_TIMING_SETUP);
+ }
+ }
}
- if (setup_count > 5) /* shouldn't actually happen... */
- setup_count = 5;
+ if (t.setup > 5) /* shouldn't actually happen... */
+ t.setup = 5;
/*
* Program the address setup clocks into the ARTTIM registers.
@@ -144,7 +121,7 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
if (hwif->channel)
arttim &= ~ARTTIM23_INTR_CH1;
arttim &= ~0xc0;
- arttim |= setup_values[setup_count];
+ arttim |= setup_values[t.setup];
(void) pci_write_config_byte(dev, arttim_regs[drive->dn], arttim);
}
@@ -153,8 +130,10 @@ static void cmd64x_tune_pio(ide_drive_t *drive, const u8 pio)
* Special cases are 8: prefetch off, 9: prefetch on (both never worked)
*/
-static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cmd64x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
+
/*
* Filter out the prefetch control values
* to prevent PIO5 from being programmed
@@ -162,20 +141,18 @@ static void cmd64x_set_pio_mode(ide_drive_t *drive, const u8 pio)
if (pio == 8 || pio == 9)
return;
- cmd64x_tune_pio(drive, pio);
+ cmd64x_program_timings(drive, XFER_PIO_0 + pio);
}
-static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void cmd64x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 unit = drive->dn & 0x01;
u8 regU = 0, pciU = hwif->channel ? UDIDETCR1 : UDIDETCR0;
+ const u8 speed = drive->dma_mode;
- if (speed >= XFER_SW_DMA_0) {
- (void) pci_read_config_byte(dev, pciU, &regU);
- regU &= ~(unit ? 0xCA : 0x35);
- }
+ pci_read_config_byte(dev, pciU, &regU);
+ regU &= ~(unit ? 0xCA : 0x35);
switch(speed) {
case XFER_UDMA_5:
@@ -197,18 +174,13 @@ static void cmd64x_set_dma_mode(ide_drive_t *drive, const u8 speed)
regU |= unit ? 0xC2 : 0x31;
break;
case XFER_MW_DMA_2:
- program_cycle_times(drive, 120, 70);
- break;
case XFER_MW_DMA_1:
- program_cycle_times(drive, 150, 80);
- break;
case XFER_MW_DMA_0:
- program_cycle_times(drive, 480, 215);
+ cmd64x_program_timings(drive, speed);
break;
}
- if (speed >= XFER_SW_DMA_0)
- (void) pci_write_config_byte(dev, pciU, regU);
+ pci_write_config_byte(dev, pciU, regU);
}
static void cmd648_clear_irq(ide_drive_t *drive)
@@ -471,6 +443,6 @@ static void __exit cmd64x_ide_exit(void)
module_init(cmd64x_ide_init);
module_exit(cmd64x_ide_exit);
-MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick");
+MODULE_AUTHOR("Eddie Dost, David Miller, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for CMD64x IDE");
MODULE_LICENSE("GPL");
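
cmd64x_program_timings() keeps the old driver's trick for an over-long recovery phase, shifting the excess clocks into the active phase, and then encodes the counts through recovery_values[] (where a full 16 clocks is written as 0). A standalone sketch with made-up clock counts:

#include <stdio.h>

static const unsigned char recovery_values[] =
	{15, 15, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 0};

int main(void)
{
	unsigned int active = 5, recover = 19;	/* hypothetical counts */

	if (recover > 16) {			/* recovery too long ...       */
		active += recover - 16;		/* ... lengthen active instead */
		recover = 16;
	}
	if (active > 16)			/* shouldn't actually happen   */
		active = 16;

	recover = recovery_values[recover];
	active &= 0x0f;

	printf("DRWTIM byte: 0x%02x\n", (active << 4) | recover); /* 0x80 */
	return 0;
}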
diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
index 09f98ed0731..2c1e5f7cd26 100644
--- a/drivers/ide/cs5520.c
+++ b/drivers/ide/cs5520.c
@@ -57,11 +57,11 @@ static struct pio_clocks cs5520_pio_clocks[]={
{1, 2, 1}
};
-static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cs5520_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *pdev = to_pci_dev(hwif->dev);
int controller = drive->dn > 1 ? 1 : 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/* 8bit CAT/CRT - 8bit command timing for channel */
pci_write_config_byte(pdev, 0x62 + controller,
@@ -81,11 +81,12 @@ static void cs5520_set_pio_mode(ide_drive_t *drive, const u8 pio)
(cs5520_pio_clocks[pio].assert));
}
-static void cs5520_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void cs5520_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
printk(KERN_ERR "cs55x0: bad ide timing.\n");
- cs5520_set_pio_mode(drive, 0);
+ drive->pio_mode = XFER_PIO_0 + 0;
+ cs5520_set_pio_mode(hwif, drive);
}
static const struct ide_port_ops cs5520_port_ops = {
diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
index 40bf05eddf6..4dc4eb92b07 100644
--- a/drivers/ide/cs5530.c
+++ b/drivers/ide/cs5530.c
@@ -41,8 +41,8 @@ static unsigned int cs5530_pio_timings[2][5] = {
/**
* cs5530_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Handles setting of PIO mode for the chipset.
*
@@ -50,10 +50,11 @@ static unsigned int cs5530_pio_timings[2][5] = {
* will have valid default PIO timings set up before we get here.
*/
-static void cs5530_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cs5530_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- unsigned long basereg = CS5530_BASEREG(drive->hwif);
+ unsigned long basereg = CS5530_BASEREG(hwif);
unsigned int format = (inl(basereg + 4) >> 31) & 1;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
outl(cs5530_pio_timings[format][pio], basereg + ((drive->dn & 1)<<3));
}
@@ -99,12 +100,12 @@ out:
return mask;
}
-static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void cs5530_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long basereg;
unsigned int reg, timings = 0;
- switch (mode) {
+ switch (drive->dma_mode) {
case XFER_UDMA_0: timings = 0x00921250; break;
case XFER_UDMA_1: timings = 0x00911140; break;
case XFER_UDMA_2: timings = 0x00911030; break;
@@ -112,7 +113,7 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode)
case XFER_MW_DMA_1: timings = 0x00012121; break;
case XFER_MW_DMA_2: timings = 0x00002020; break;
}
- basereg = CS5530_BASEREG(drive->hwif);
+ basereg = CS5530_BASEREG(hwif);
reg = inl(basereg + 4); /* get drive0 config register */
timings |= reg & 0x80000000; /* preserve PIO format bit */
if ((drive-> dn & 1) == 0) { /* are we configuring drive0? */
diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
index b883838adc2..5059fafadf2 100644
--- a/drivers/ide/cs5535.c
+++ b/drivers/ide/cs5535.c
@@ -86,7 +86,7 @@ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
cmd = pioa = speed - XFER_PIO_0;
if (pair) {
- u8 piob = ide_get_best_pio_mode(pair, 255, 4);
+ u8 piob = pair->pio_mode - XFER_PIO_0;
if (piob < cmd)
cmd = piob;
@@ -129,28 +129,28 @@ static void cs5535_set_speed(ide_drive_t *drive, const u8 speed)
/**
* cs5535_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Programs the chipset for DMA mode.
*/
-static void cs5535_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void cs5535_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- cs5535_set_speed(drive, speed);
+ cs5535_set_speed(drive, drive->dma_mode);
}
/**
* cs5535_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* A callback from the upper layers for PIO-only tuning.
*/
-static void cs5535_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cs5535_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- cs5535_set_speed(drive, XFER_PIO_0 + pio);
+ cs5535_set_speed(drive, drive->pio_mode);
}
static u8 cs5535_cable_detect(ide_hwif_t *hwif)
diff --git a/drivers/ide/cs5536.c b/drivers/ide/cs5536.c
index 9623b852c61..24214ab60ac 100644
--- a/drivers/ide/cs5536.c
+++ b/drivers/ide/cs5536.c
@@ -125,11 +125,11 @@ static u8 cs5536_cable_detect(ide_hwif_t *hwif)
/**
* cs5536_set_pio_mode - PIO timing setup
+ * @hwif: ATA port
* @drive: ATA device
- * @pio: PIO mode number
*/
-static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cs5536_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 drv_timings[5] = {
0x98, 0x55, 0x32, 0x21, 0x20,
@@ -143,15 +143,16 @@ static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio)
0x99, 0x92, 0x90, 0x22, 0x20,
};
- struct pci_dev *pdev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
ide_drive_t *pair = ide_get_pair_dev(drive);
int cshift = (drive->dn & 1) ? IDE_CAST_D1_SHIFT : IDE_CAST_D0_SHIFT;
unsigned long timings = (unsigned long)ide_get_drivedata(drive);
u32 cast;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 cmd_pio = pio;
if (pair)
- cmd_pio = min(pio, ide_get_best_pio_mode(pair, 255, 4));
+ cmd_pio = min_t(u8, pio, pair->pio_mode - XFER_PIO_0);
timings &= (IDE_DRV_MASK << 8);
timings |= drv_timings[pio];
@@ -172,11 +173,11 @@ static void cs5536_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* cs5536_set_dma_mode - DMA timing setup
+ * @hwif: ATA port
* @drive: ATA device
- * @mode: DMA mode
*/
-static void cs5536_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void cs5536_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 udma_timings[6] = {
0xc2, 0xc1, 0xc0, 0xc4, 0xc5, 0xc6,
@@ -186,10 +187,11 @@ static void cs5536_set_dma_mode(ide_drive_t *drive, const u8 mode)
0x67, 0x21, 0x20,
};
- struct pci_dev *pdev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *pdev = to_pci_dev(hwif->dev);
int dshift = (drive->dn & 1) ? IDE_D1_SHIFT : IDE_D0_SHIFT;
unsigned long timings = (unsigned long)ide_get_drivedata(drive);
u32 etc;
+ const u8 mode = drive->dma_mode;
cs5536_read(pdev, ETC, &etc);
diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
index d6e2cbbc53a..9383f67deae 100644
--- a/drivers/ide/cy82c693.c
+++ b/drivers/ide/cy82c693.c
@@ -1,43 +1,11 @@
/*
* Copyright (C) 1998-2000 Andreas S. Krebs (akrebs@altavista.net), Maintainer
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>, Integrator
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
*
* CYPRESS CY82C693 chipset IDE controller
*
* The CY82C693 chipset is used on Digital's PC-Alpha 164SX boards.
- * Writing the driver was quite simple, since most of the job is
- * done by the generic pci-ide support.
- * The hard part was finding the CY82C693's datasheet on Cypress's
- * web page :-(. But Altavista solved this problem :-).
- *
- *
- * Notes:
- * - I recently got a 16.8G IBM DTTA, so I was able to test it with
- * a large and fast disk - the results look great, so I'd say the
- * driver is working fine :-)
- * hdparm -t reports 8.17 MB/sec at about 6% CPU usage for the DTTA
- * - this is my first linux driver, so there's probably a lot of room
- * for optimizations and bug fixing, so feel free to do it.
- * - if using PIO mode it's a good idea to set the PIO mode and
- * 32-bit I/O support (if possible), e.g. hdparm -p2 -c1 /dev/hda
- * - I had some problems with my IBM DHEA with PIO modes < 2
- * (lost interrupts) ?????
- * - first tests with DMA look okay, they seem to work, but there is a
- * problem with sound - the BusMaster IDE TimeOut should fixed this
- *
- * Ancient History:
- * AMH@1999-08-24: v0.34 init_cy82c693_chip moved to pci_init_cy82c693
- * ASK@1999-01-23: v0.33 made a few minor code clean ups
- * removed DMA clock speed setting by default
- * added boot message
- * ASK@1998-11-01: v0.32 added support to set BusMaster IDE TimeOut
- * added support to set DMA Controller Clock Speed
- * ASK@1998-10-31: v0.31 fixed problem with setting to high DMA modes
- * on some drives.
- * ASK@1998-10-29: v0.3 added support to set DMA modes
- * ASK@1998-10-28: v0.2 added support to set PIO modes
- * ASK@1998-10-27: v0.1 first version - chipset detection
- *
*/
#include <linux/module.h>
@@ -81,87 +49,13 @@
#define CY82_INDEX_CHANNEL1 0x31
#define CY82_INDEX_TIMEOUT 0x32
-/* the min and max PCI bus speed in MHz - from datasheet */
-#define CY82C963_MIN_BUS_SPEED 25
-#define CY82C963_MAX_BUS_SPEED 33
-
-/* the struct for the PIO mode timings */
-typedef struct pio_clocks_s {
- u8 address_time; /* Address setup (clocks) */
- u8 time_16r; /* clocks for 16bit IOR (0xF0=Active/data, 0x0F=Recovery) */
- u8 time_16w; /* clocks for 16bit IOW (0xF0=Active/data, 0x0F=Recovery) */
- u8 time_8; /* clocks for 8bit (0xF0=Active/data, 0x0F=Recovery) */
-} pio_clocks_t;
-
-/*
- * calc clocks using bus_speed
- * returns (rounded up) time in bus clocks for time in ns
- */
-static int calc_clk(int time, int bus_speed)
-{
- int clocks;
-
- clocks = (time*bus_speed+999)/1000 - 1;
-
- if (clocks < 0)
- clocks = 0;
-
- if (clocks > 0x0F)
- clocks = 0x0F;
-
- return clocks;
-}
-
-/*
- * compute the values for the clock registers for PIO
- * mode and pci_clk [MHz] speed
- *
- * NOTE: for mode 0,1 and 2 drives 8-bit IDE command control registers are used
- * for mode 3 and 4 drives 8 and 16-bit timings are the same
- *
- */
-static void compute_clocks(u8 pio, pio_clocks_t *p_pclk)
-{
- struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
- int clk1, clk2;
- int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
-
- /* we don't check against CY82C693's min and max speed,
- * so you can play with the idebus=xx parameter
- */
-
- /* let's calc the address setup time clocks */
- p_pclk->address_time = (u8)calc_clk(t->setup, bus_speed);
-
- /* let's calc the active and recovery time clocks */
- clk1 = calc_clk(t->active, bus_speed);
-
- /* calc recovery timing */
- clk2 = t->cycle - t->active - t->setup;
-
- clk2 = calc_clk(clk2, bus_speed);
-
- clk1 = (clk1<<4)|clk2; /* combine active and recovery clocks */
-
- /* note: we use the same values for 16bit IOR and IOW
- * those are all the same, since I don't have other
- * timings than those from ide-lib.c
- */
-
- p_pclk->time_16r = (u8)clk1;
- p_pclk->time_16w = (u8)clk1;
-
- /* what are good values for 8bit ?? */
- p_pclk->time_8 = (u8)clk1;
-}
-
/*
* set DMA mode a specific channel for CY82C693
*/
-static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void cy82c693_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
+ const u8 mode = drive->dma_mode;
u8 single = (mode & 0x10) >> 4, index = 0, data = 0;
index = hwif->channel ? CY82_INDEX_CHANNEL1 : CY82_INDEX_CHANNEL0;
@@ -186,12 +80,14 @@ static void cy82c693_set_dma_mode(ide_drive_t *drive, const u8 mode)
outb(data, CY82_DATA_PORT);
}
-static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void cy82c693_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
- pio_clocks_t pclk;
+ int bus_speed = ide_pci_clk ? ide_pci_clk : 33;
+ const unsigned long T = 1000000 / bus_speed;
unsigned int addrCtrl;
+ struct ide_timing t;
+ u8 time_16, time_8;
/* select primary or secondary channel */
if (hwif->index > 0) { /* drive is on the secondary channel */
@@ -204,8 +100,12 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
}
}
- /* let's calc the values for this PIO mode */
- compute_clocks(pio, &pclk);
+ ide_timing_compute(drive, drive->pio_mode, &t, T, 1);
+
+ time_16 = clamp_val(t.recover - 1, 0, 15) |
+ (clamp_val(t.active - 1, 0, 15) << 4);
+ time_8 = clamp_val(t.act8b - 1, 0, 15) |
+ (clamp_val(t.rec8b - 1, 0, 15) << 4);
/* now let's write the clocks registers */
if ((drive->dn & 1) == 0) {
@@ -217,13 +117,13 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
addrCtrl &= (~0xF);
- addrCtrl |= (unsigned int)pclk.address_time;
+ addrCtrl |= clamp_val(t.setup - 1, 0, 15);
pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
/* now let's set the remaining registers */
- pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, pclk.time_16r);
- pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, pclk.time_16w);
- pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, pclk.time_8);
+ pci_write_config_byte(dev, CY82_IDE_MASTER_IOR, time_16);
+ pci_write_config_byte(dev, CY82_IDE_MASTER_IOW, time_16);
+ pci_write_config_byte(dev, CY82_IDE_MASTER_8BIT, time_8);
} else {
/*
* set slave drive
@@ -233,13 +133,13 @@ static void cy82c693_set_pio_mode(ide_drive_t *drive, const u8 pio)
pci_read_config_dword(dev, CY82_IDE_ADDRSETUP, &addrCtrl);
addrCtrl &= (~0xF0);
- addrCtrl |= ((unsigned int)pclk.address_time<<4);
+ addrCtrl |= (clamp_val(t.setup - 1, 0, 15) << 4);
pci_write_config_dword(dev, CY82_IDE_ADDRSETUP, addrCtrl);
/* now let's set the remaining registers */
- pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, pclk.time_16r);
- pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, pclk.time_16w);
- pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, pclk.time_8);
+ pci_write_config_byte(dev, CY82_IDE_SLAVE_IOR, time_16);
+ pci_write_config_byte(dev, CY82_IDE_SLAVE_IOW, time_16);
+ pci_write_config_byte(dev, CY82_IDE_SLAVE_8BIT, time_8);
}
}
@@ -325,6 +225,6 @@ static void __exit cy82c693_ide_exit(void)
module_init(cy82c693_ide_init);
module_exit(cy82c693_ide_exit);
-MODULE_AUTHOR("Andreas Krebs, Andre Hedrick");
+MODULE_AUTHOR("Andreas Krebs, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for the Cypress CY82C693 IDE");
MODULE_LICENSE("GPL");
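
The rewritten cy82c693_set_pio_mode() shares one address-setup register between the two drives of a channel: drive 0 owns the low nibble and drive 1 the high nibble, each holding clamp_val(t.setup - 1, 0, 15). A standalone sketch of those two read-modify-write cases with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int addrCtrl = 0x57;	/* hypothetical current register value  */
	unsigned int setup_clks = 3;	/* result of clamp_val(t.setup - 1, 0, 15) */

	/* drive 0 (master): replace the low nibble only */
	unsigned int master = (addrCtrl & ~0xFu) | setup_clks;

	/* drive 1 (slave): replace the high nibble only */
	unsigned int slave = (addrCtrl & ~0xF0u) | (setup_clks << 4);

	printf("master write: 0x%02x, slave write: 0x%02x\n", master, slave);
	return 0;
}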
diff --git a/drivers/ide/dtc2278.c b/drivers/ide/dtc2278.c
index c6b13812298..6929f7fce93 100644
--- a/drivers/ide/dtc2278.c
+++ b/drivers/ide/dtc2278.c
@@ -68,11 +68,11 @@ static void sub22 (char b, char c)
static DEFINE_SPINLOCK(dtc2278_lock);
-static void dtc2278_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void dtc2278_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long flags;
- if (pio >= 3) {
+ if (drive->pio_mode >= XFER_PIO_3) {
spin_lock_irqsave(&dtc2278_lock, flags);
/*
* This enables PIO mode4 (3?) on the first interface
diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
index 4d90ac2dbb1..b885c1d548f 100644
--- a/drivers/ide/hpt366.c
+++ b/drivers/ide/hpt366.c
@@ -627,14 +627,14 @@ static u32 get_speed_setting(u8 speed, struct hpt_info *info)
return info->timings->clock_table[info->clock][i];
}
-static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
+static void hpt3xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct hpt_info *info = hpt3xx_get_info(hwif->dev);
struct hpt_timings *t = info->timings;
u8 itr_addr = 0x40 + (drive->dn * 4);
u32 old_itr = 0;
+ const u8 speed = drive->dma_mode;
u32 new_itr = get_speed_setting(speed, info);
u32 itr_mask = speed < XFER_MW_DMA_0 ? t->pio_mask :
(speed < XFER_UDMA_0 ? t->dma_mask :
@@ -651,9 +651,10 @@ static void hpt3xx_set_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_dword(dev, itr_addr, new_itr);
}
-static void hpt3xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void hpt3xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- hpt3xx_set_mode(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ hpt3xx_set_mode(hwif, drive);
}
static void hpt3xx_maskproc(ide_drive_t *drive, int mask)
diff --git a/drivers/ide/ht6560b.c b/drivers/ide/ht6560b.c
index aafed8060e1..d81e49680c3 100644
--- a/drivers/ide/ht6560b.c
+++ b/drivers/ide/ht6560b.c
@@ -279,9 +279,10 @@ static void ht_set_prefetch(ide_drive_t *drive, u8 state)
#endif
}
-static void ht6560b_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void ht6560b_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long flags, config;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 timing;
switch (pio) {
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 0f67f1abbbd..4a697a238e2 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -65,6 +65,8 @@ static struct cardinfo icside_cardinfo_v6_2 = {
};
struct icside_state {
+ unsigned int channel;
+ unsigned int enabled;
void __iomem *irq_port;
void __iomem *ioc_base;
unsigned int sel;
@@ -114,11 +116,18 @@ static void icside_irqenable_arcin_v6 (struct expansion_card *ec, int irqnr)
struct icside_state *state = ec->irq_data;
void __iomem *base = state->irq_port;
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
- readb(base + ICS_ARCIN_V6_INTROFFSET_2);
+ state->enabled = 1;
- writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
- readb(base + ICS_ARCIN_V6_INTROFFSET_1);
+ switch (state->channel) {
+ case 0:
+ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_1);
+ readb(base + ICS_ARCIN_V6_INTROFFSET_2);
+ break;
+ case 1:
+ writeb(0, base + ICS_ARCIN_V6_INTROFFSET_2);
+ readb(base + ICS_ARCIN_V6_INTROFFSET_1);
+ break;
+ }
}
/* Prototype: icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
@@ -128,6 +137,8 @@ static void icside_irqdisable_arcin_v6 (struct expansion_card *ec, int irqnr)
{
struct icside_state *state = ec->irq_data;
+ state->enabled = 0;
+
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
}
@@ -149,6 +160,44 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
.irqpending = icside_irqpending_arcin_v6,
};
+/*
+ * Handle routing of interrupts. This is called before
+ * we write the command to the drive.
+ */
+static void icside_maskproc(ide_drive_t *drive, int mask)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct expansion_card *ec = ECARD_DEV(hwif->dev);
+ struct icside_state *state = ecard_get_drvdata(ec);
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ state->channel = hwif->channel;
+
+ if (state->enabled && !mask) {
+ switch (hwif->channel) {
+ case 0:
+ writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
+ break;
+ case 1:
+ writeb(0, state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
+ break;
+ }
+ } else {
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_2);
+ readb(state->irq_port + ICS_ARCIN_V6_INTROFFSET_1);
+ }
+
+ local_irq_restore(flags);
+}
+
+static const struct ide_port_ops icside_v6_no_dma_port_ops = {
+ .maskproc = icside_maskproc,
+};
+
#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
/*
* SG-DMA support.
@@ -185,10 +234,11 @@ static const expansioncard_ops_t icside_ops_arcin_v6 = {
* MW1 80 50 50 150 C
* MW2 70 25 25 120 C
*/
-static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
+static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned long cycle_time;
int use_dma_info = 0;
+ const u8 xfer_mode = drive->dma_mode;
switch (xfer_mode) {
case XFER_MW_DMA_2:
@@ -228,6 +278,7 @@ static void icside_set_dma_mode(ide_drive_t *drive, const u8 xfer_mode)
static const struct ide_port_ops icside_v6_port_ops = {
.set_dma_mode = icside_set_dma_mode,
+ .maskproc = icside_maskproc,
};
static void icside_dma_host_set(ide_drive_t *drive, int on)
@@ -272,6 +323,11 @@ static int icside_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
BUG_ON(dma_channel_active(ec->dma));
/*
+ * Ensure that we have the right interrupt routed.
+ */
+ icside_maskproc(drive, 0);
+
+ /*
* Route the DMA signals to the correct interface.
*/
writeb(state->sel | hwif->channel, state->ioc_base);
@@ -399,6 +455,7 @@ err_free:
static const struct ide_port_info icside_v6_port_info __initdata = {
.init_dma = icside_dma_off_init,
+ .port_ops = &icside_v6_no_dma_port_ops,
.dma_ops = &icside_v6_dma_ops,
.host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
.mwdma_mask = ATA_MWDMA2,
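
The icside changes above steer a shared interrupt line: maskproc() records which channel is about to issue a command and unmasks only that channel's interrupt, and only while the card is enabled, while irqenable/irqdisable keep the enabled flag in sync. The standalone model below captures just that decision logic (no MMIO, names made up) to show which channel ends up unmasked:

#include <stdio.h>

struct model {
	int enabled;		/* mirrors state->enabled   */
	int channel;		/* mirrors state->channel   */
	int unmasked[2];	/* which channel's IRQ is on */
};

static void maskproc(struct model *s, int channel, int mask)
{
	s->channel = channel;
	s->unmasked[0] = s->unmasked[1] = 0;	/* other channel always masked */
	if (s->enabled && !mask)
		s->unmasked[channel] = 1;
}

int main(void)
{
	struct model s = { .enabled = 1 };

	maskproc(&s, 0, 0);	/* channel 0 about to issue a command */
	printf("ch0=%d ch1=%d\n", s.unmasked[0], s.unmasked[1]);
	maskproc(&s, 1, 0);	/* switch to channel 1 */
	printf("ch0=%d ch1=%d\n", s.unmasked[0], s.unmasked[1]);
	return 0;
}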
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index dd6396384c2..ab87e4f7cec 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -121,19 +121,11 @@ static int ide_probe(struct pcmcia_device *link)
static void ide_detach(struct pcmcia_device *link)
{
ide_info_t *info = link->priv;
- ide_hwif_t *hwif = info->host->ports[0];
- unsigned long data_addr, ctl_addr;
dev_dbg(&link->dev, "ide_detach(0x%p)\n", link);
- data_addr = hwif->io_ports.data_addr;
- ctl_addr = hwif->io_ports.ctl_addr;
-
ide_release(link);
- release_region(ctl_addr, 1);
- release_region(data_addr, 8);
-
kfree(info);
} /* ide_detach */
@@ -354,12 +346,19 @@ static void ide_release(struct pcmcia_device *link)
dev_dbg(&link->dev, "ide_release(0x%p)\n", link);
- if (info->ndev)
- /* FIXME: if this fails we need to queue the cleanup somehow
- -- need to investigate the required PCMCIA magic */
+ if (info->ndev) {
+ ide_hwif_t *hwif = host->ports[0];
+ unsigned long data_addr, ctl_addr;
+
+ data_addr = hwif->io_ports.data_addr;
+ ctl_addr = hwif->io_ports.ctl_addr;
+
ide_host_remove(host);
+ info->ndev = 0;
- info->ndev = 0;
+ release_region(ctl_addr, 1);
+ release_region(data_addr, 8);
+ }
pcmcia_disable_device(link);
} /* ide_release */
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c
index 1099bf7cf96..c6935c78757 100644
--- a/drivers/ide/ide-devsets.c
+++ b/drivers/ide/ide-devsets.c
@@ -105,15 +105,17 @@ static int set_pio_mode(ide_drive_t *drive, int arg)
return -ENOSYS;
if (set_pio_mode_abuse(drive->hwif, arg)) {
+ drive->pio_mode = arg + XFER_PIO_0;
+
if (arg == 8 || arg == 9) {
unsigned long flags;
/* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */
spin_lock_irqsave(&hwif->lock, flags);
- port_ops->set_pio_mode(drive, arg);
+ port_ops->set_pio_mode(hwif, drive);
spin_unlock_irqrestore(&hwif->lock, flags);
} else
- port_ops->set_pio_mode(drive, arg);
+ port_ops->set_pio_mode(hwif, drive);
} else {
int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 222c1ef65fb..376f2dc410c 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -231,7 +231,7 @@ u8 eighty_ninty_three(ide_drive_t *drive)
u16 *id = drive->id;
int ivb = ide_in_drive_list(id, ivb_list);
- if (hwif->cbl == ATA_CBL_PATA40_SHORT)
+ if (hwif->cbl == ATA_CBL_SATA || hwif->cbl == ATA_CBL_PATA40_SHORT)
return 1;
if (ivb)
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index f8c1ae6ad74..fbedd35feb4 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1042,6 +1042,8 @@ static void ide_port_init_devices(ide_hwif_t *hwif)
if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS)
drive->dev_flags |= IDE_DFLAG_NO_UNMASK;
+ drive->pio_mode = XFER_PIO_0;
+
if (port_ops && port_ops->init_dev)
port_ops->init_dev(drive);
}
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c
index 6a0e6254216..b07232880ec 100644
--- a/drivers/ide/ide-tape.c
+++ b/drivers/ide/ide-tape.c
@@ -1365,7 +1365,7 @@ static int idetape_mtioctop(ide_drive_t *drive, short mt_op, int mt_count)
* supported here, and not in the corresponding block interface. Our own
* ide-tape ioctls are supported on both interfaces.
*/
-static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
+static long do_idetape_chrdev_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct ide_tape_obj *tape = file->private_data;
@@ -1420,6 +1420,16 @@ static int idetape_chrdev_ioctl(struct inode *inode, struct file *file,
}
}
+static long idetape_chrdev_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ long ret;
+ lock_kernel();
+ ret = do_idetape_chrdev_ioctl(file, cmd, arg);
+ unlock_kernel();
+ return ret;
+}
+
/*
* Do a mode sense page 0 with block descriptor and if it succeeds set the tape
* block size with the reported value.
@@ -1888,7 +1898,7 @@ static const struct file_operations idetape_fops = {
.owner = THIS_MODULE,
.read = idetape_chrdev_read,
.write = idetape_chrdev_write,
- .ioctl = idetape_chrdev_ioctl,
+ .unlocked_ioctl = idetape_chrdev_ioctl,
.open = idetape_chrdev_open,
.release = idetape_chrdev_release,
};
diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c
index 001a56365be..0e05f75934c 100644
--- a/drivers/ide/ide-timings.c
+++ b/drivers/ide/ide-timings.c
@@ -166,12 +166,13 @@ int ide_timing_compute(ide_drive_t *drive, u8 speed,
if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
memset(&p, 0, sizeof(p));
- if (speed <= XFER_PIO_2)
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
- else if ((speed <= XFER_PIO_4) ||
- (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
- p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
- else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
+ if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
+ if (speed <= XFER_PIO_2)
+ p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
+ else if ((speed <= XFER_PIO_4) ||
+ (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
+ p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
+ } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
p.cycle = id[ATA_ID_EIDE_DMA_MIN];
ide_timing_merge(&p, t, t, IDE_TIMING_CYCLE | IDE_TIMING_CYC8B);
@@ -185,11 +186,10 @@ int ide_timing_compute(ide_drive_t *drive, u8 speed,
/*
* Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
* S.M.A.R.T and some other commands. We have to ensure that the
- * DMA cycle timing is slower/equal than the fastest PIO timing.
+ * DMA cycle timing is slower than or equal to the current PIO timing.
*/
if (speed >= XFER_SW_DMA_0) {
- u8 pio = ide_get_best_pio_mode(drive, 255, 5);
- ide_timing_compute(drive, XFER_PIO_0 + pio, &p, T, UT);
+ ide_timing_compute(drive, drive->pio_mode, &p, T, UT);
ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
}
diff --git a/drivers/ide/ide-xfer-mode.c b/drivers/ide/ide-xfer-mode.c
index 46d203ce60c..5fc8d5c17de 100644
--- a/drivers/ide/ide-xfer-mode.c
+++ b/drivers/ide/ide-xfer-mode.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(ide_xfer_verbose);
* This is used by most chipset support modules when "auto-tuning".
*/
-u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
+static u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
{
u16 *id = drive->id;
int pio_mode = -1, overridden = 0;
@@ -105,7 +105,6 @@ u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode)
return pio_mode;
}
-EXPORT_SYMBOL_GPL(ide_get_best_pio_mode);
int ide_pio_need_iordy(ide_drive_t *drive, const u8 pio)
{
@@ -135,17 +134,20 @@ int ide_set_pio_mode(ide_drive_t *drive, const u8 mode)
* set transfer mode on the device in ->set_pio_mode method...
*/
if (port_ops->set_dma_mode == NULL) {
- port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ drive->pio_mode = mode;
+ port_ops->set_pio_mode(hwif, drive);
return 0;
}
if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
if (ide_config_drive_speed(drive, mode))
return -1;
- port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ drive->pio_mode = mode;
+ port_ops->set_pio_mode(hwif, drive);
return 0;
} else {
- port_ops->set_pio_mode(drive, mode - XFER_PIO_0);
+ drive->pio_mode = mode;
+ port_ops->set_pio_mode(hwif, drive);
return ide_config_drive_speed(drive, mode);
}
}
@@ -164,10 +166,12 @@ int ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) {
if (ide_config_drive_speed(drive, mode))
return -1;
- port_ops->set_dma_mode(drive, mode);
+ drive->dma_mode = mode;
+ port_ops->set_dma_mode(hwif, drive);
return 0;
} else {
- port_ops->set_dma_mode(drive, mode);
+ drive->dma_mode = mode;
+ port_ops->set_dma_mode(hwif, drive);
return ide_config_drive_speed(drive, mode);
}
}
diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
index 0d266a5b524..560e66d0765 100644
--- a/drivers/ide/it8172.c
+++ b/drivers/ide/it8172.c
@@ -37,12 +37,12 @@
#define DRV_NAME "IT8172"
-static void it8172_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void it8172_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u16 drive_enables;
u32 drive_timing;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/*
* The highest value of DIOR/DIOW pulse width and recovery time
@@ -77,14 +77,14 @@ static void it8172_set_pio_mode(ide_drive_t *drive, const u8 pio)
pci_write_config_dword(dev, 0x44, drive_timing);
}
-static void it8172_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void it8172_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
int a_speed = 3 << (drive->dn * 4);
int u_flag = 1 << drive->dn;
int u_speed = 0;
u8 reg48, reg4a;
+ const u8 speed = drive->dma_mode;
pci_read_config_byte(dev, 0x48, &reg48);
pci_read_config_byte(dev, 0x4a, &reg4a);
@@ -98,14 +98,14 @@ static void it8172_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x4a, reg4a | u_speed);
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
- u8 pio;
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
pci_write_config_byte(dev, 0x4a, reg4a & ~a_speed);
- pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ drive->pio_mode =
+ mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
- it8172_set_pio_mode(drive, pio);
+ it8172_set_pio_mode(hwif, drive);
}
}
diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
index 47976167796..46816ba2641 100644
--- a/drivers/ide/it8213.c
+++ b/drivers/ide/it8213.c
@@ -17,15 +17,14 @@
/**
* it8213_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Set the interface PIO mode.
*/
-static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void it8213_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
int is_slave = drive->dn & 1;
int master_port = 0x40;
@@ -35,6 +34,7 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
u8 slave_data;
static DEFINE_SPINLOCK(tune_lock);
int control = 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
static const u8 timings[][2] = {
{ 0, 0 },
@@ -74,15 +74,14 @@ static void it8213_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* it8213_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Tune the ITE chipset for the DMA mode.
*/
-static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void it8213_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 maslave = 0x40;
int a_speed = 3 << (drive->dn * 4);
@@ -92,6 +91,7 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
int u_speed = 0;
u16 reg4042, reg4a;
u8 reg48, reg54, reg55;
+ const u8 speed = drive->dma_mode;
pci_read_config_word(dev, maslave, &reg4042);
pci_read_config_byte(dev, 0x48, &reg48);
@@ -120,7 +120,6 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
- u8 pio;
if (reg48 & u_flag)
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
@@ -132,11 +131,12 @@ static void it8213_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
if (speed >= XFER_MW_DMA_0)
- pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ drive->pio_mode =
+ mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
- pio = 2; /* only SWDMA2 is allowed */
+ drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
- it8213_set_pio_mode(drive, pio);
+ it8213_set_pio_mode(hwif, drive);
}
}
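
Both IT8172 and IT8213 reuse PIO command timings when an MWDMA mode is selected, mapping MWDMA0/1/2 onto PIO0/3/4 through the small mwdma_to_pio[] table and storing the result in drive->pio_mode before calling the PIO routine. A standalone print-out of that mapping:

#include <stdio.h>

int main(void)
{
	static const unsigned char mwdma_to_pio[] = { 0, 3, 4 };
	unsigned int mwdma;

	for (mwdma = 0; mwdma <= 2; mwdma++)
		printf("XFER_MW_DMA_%u -> PIO %u\n", mwdma, mwdma_to_pio[mwdma]);
	return 0;
}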
diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
index 51aa745246d..b2709c73348 100644
--- a/drivers/ide/it821x.c
+++ b/drivers/ide/it821x.c
@@ -228,18 +228,18 @@ static void it821x_clock_strategy(ide_drive_t *drive)
/**
* it821x_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Tune the host to the desired PIO mode taking into the consideration
* the maximum PIO mode supported by the other device on the cable.
*/
-static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void it821x_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct it821x_dev *itdev = ide_get_hwifdata(hwif);
ide_drive_t *pair = ide_get_pair_dev(drive);
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 unit = drive->dn & 1, set_pio = pio;
/* Spec says 89 ref driver uses 88 */
@@ -252,7 +252,7 @@ static void it821x_set_pio_mode(ide_drive_t *drive, const u8 pio)
* on the cable.
*/
if (pair) {
- u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4);
+ u8 pair_pio = pair->pio_mode - XFER_PIO_0;
/* trim PIO to the slowest of the master/slave */
if (pair_pio < set_pio)
set_pio = pair_pio;
@@ -393,14 +393,16 @@ static int it821x_dma_end(ide_drive_t *drive)
/**
* it821x_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Tune the ITE chipset for the desired DMA mode.
*/
-static void it821x_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void it821x_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
+ const u8 speed = drive->dma_mode;
+
/*
* MWDMA tuning is really hard because our MWDMA and PIO
* timings are kept in the same place. We can switch in the
diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
index bf2be6431b2..74c2c4a6d90 100644
--- a/drivers/ide/jmicron.c
+++ b/drivers/ide/jmicron.c
@@ -80,19 +80,19 @@ static u8 jmicron_cable_detect(ide_hwif_t *hwif)
return ATA_CBL_PATA80;
}
-static void jmicron_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void jmicron_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
}
/**
* jmicron_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @mode: DMA mode
*
* As the JMicron snoops for timings we don't need to do anything here.
*/
-static void jmicron_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void jmicron_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
}
diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
index f1d70d6630f..1a53a4c375e 100644
--- a/drivers/ide/opti621.c
+++ b/drivers/ide/opti621.c
@@ -8,77 +8,6 @@
* Jan Harkes <jaharkes@cwi.nl>,
* Mark Lord <mlord@pobox.com>
* Some parts of code are from ali14xx.c and from rz1000.c.
- *
- * OPTi is trademark of OPTi, Octek is trademark of Octek.
- *
- * I used docs from OPTi databook, from ftp.opti.com, file 9123-0002.ps
- * and disassembled/traced setupvic.exe (DOS program).
- * It increases kernel code about 2 kB.
- * I don't have this card no more, but I hope I can get some in case
- * of needed development.
- * My card is Octek PIDE 1.01 (on card) or OPTiViC (program).
- * It has a place for a secondary connector in circuit, but nothing
- * is there. Also BIOS says no address for
- * secondary controller (see bellow in ide_init_opti621).
- * I've only tested this on my system, which only has one disk.
- * It's Western Digital WDAC2850, with PIO mode 3. The PCI bus
- * is at 20 MHz (I have DX2/80, I tried PCI at 40, but I got random
- * lockups). I tried the OCTEK double speed CD-ROM and
- * it does not work! But I can't boot DOS also, so it's probably
- * hardware fault. I have connected Conner 80MB, the Seagate 850MB (no
- * problems) and Seagate 1GB (as slave, WD as master). My experiences
- * with the third, 1GB drive: I got 3MB/s (hdparm), but sometimes
- * it slows to about 100kB/s! I don't know why and I have
- * not this drive now, so I can't try it again.
- * I write this driver because I lost the paper ("manual") with
- * settings of jumpers on the card and I have to boot Linux with
- * Loadlin except LILO, cause I have to run the setupvic.exe program
- * already or I get disk errors (my test: rpm -Vf
- * /usr/X11R6/bin/XF86_SVGA - or any big file).
- * Some numbers from hdparm -t /dev/hda:
- * Timing buffer-cache reads: 32 MB in 3.02 seconds =10.60 MB/sec
- * Timing buffered disk reads: 16 MB in 5.52 seconds = 2.90 MB/sec
- * I have 4 Megs/s before, but I don't know why (maybe changes
- * in hdparm test).
- * After release of 0.1, I got some successful reports, so it might work.
- *
- * The main problem with OPTi is that some timings for master
- * and slave must be the same. For example, if you have master
- * PIO 3 and slave PIO 0, driver have to set some timings of
- * master for PIO 0. Second problem is that opti621_set_pio_mode
- * got only one drive to set, but have to set both drives.
- * This is solved in compute_pios. If you don't set
- * the second drive, compute_pios use ide_get_best_pio_mode
- * for autoselect mode (you can change it to PIO 0, if you want).
- * If you then set the second drive to another PIO, the old value
- * (automatically selected) will be overrided by yours.
- * There is a 25/33MHz switch in configuration
- * register, but driver is written for use at any frequency.
- *
- * Version 0.1, Nov 8, 1996
- * by Jaromir Koutek, for 2.1.8.
- * Initial version of driver.
- *
- * Version 0.2
- * Number 0.2 skipped.
- *
- * Version 0.3, Nov 29, 1997
- * by Mark Lord (probably), for 2.1.68
- * Updates for use with new IDE block driver.
- *
- * Version 0.4, Dec 14, 1997
- * by Jan Harkes
- * Fixed some errors and cleaned the code.
- *
- * Version 0.5, Jan 2, 1998
- * by Jaromir Koutek
- * Updates for use with (again) new IDE block driver.
- * Update of documentation.
- *
- * Version 0.6, Jan 2, 1999
- * by Jaromir Koutek
- * Reversed to version 0.3 of the driver, because
- * 0.5 doesn't work.
*/
#include <linux/types.h>
@@ -133,12 +62,12 @@ static u8 read_reg(int reg)
return ret;
}
-static void opti621_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void opti621_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
ide_drive_t *pair = ide_get_pair_dev(drive);
unsigned long flags;
- unsigned long mode = XFER_PIO_0 + pio, pair_mode;
+ unsigned long mode = drive->pio_mode, pair_mode;
+ const u8 pio = mode - XFER_PIO_0;
u8 tim, misc, addr_pio = pio, clk;
/* DRDY is default 2 (by OPTi Databook) */
diff --git a/drivers/ide/palm_bk3710.c b/drivers/ide/palm_bk3710.c
index f8eddf05ecb..9e8f4e1b0cc 100644
--- a/drivers/ide/palm_bk3710.c
+++ b/drivers/ide/palm_bk3710.c
@@ -166,7 +166,7 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
writel(val32, base + BK3710_DATRCVR);
if (mate) {
- u8 mode2 = ide_get_best_pio_mode(mate, 255, 4);
+ u8 mode2 = mate->pio_mode - XFER_PIO_0;
if (mode2 < mode)
mode = mode2;
@@ -188,10 +188,11 @@ static void palm_bk3710_setpiomode(void __iomem *base, ide_drive_t *mate,
writel(val32, base + BK3710_REGRCVR);
}
-static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed)
+static void palm_bk3710_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
int is_slave = drive->dn & 1;
- void __iomem *base = (void *)drive->hwif->dma_base;
+ void __iomem *base = (void *)hwif->dma_base;
+ const u8 xferspeed = drive->dma_mode;
if (xferspeed >= XFER_UDMA_0) {
palm_bk3710_setudmamode(base, is_slave,
@@ -203,12 +204,13 @@ static void palm_bk3710_set_dma_mode(ide_drive_t *drive, u8 xferspeed)
}
}
-static void palm_bk3710_set_pio_mode(ide_drive_t *drive, u8 pio)
+static void palm_bk3710_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
unsigned int cycle_time;
int is_slave = drive->dn & 1;
ide_drive_t *mate;
- void __iomem *base = (void *)drive->hwif->dma_base;
+ void __iomem *base = (void *)hwif->dma_base;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/*
* Obtain the drive PIO data for tuning the Palm Chip registers
diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
index 65ba8239e7b..9546fe2a93f 100644
--- a/drivers/ide/pdc202xx_new.c
+++ b/drivers/ide/pdc202xx_new.c
@@ -129,11 +129,11 @@ static struct udma_timing {
{ 0x1a, 0x01, 0xcb }, /* UDMA mode 6 */
};
-static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void pdcnew_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
+ const u8 speed = drive->dma_mode;
/*
* IDE core issues SETFEATURES_XFER to the drive first (thanks to
@@ -167,11 +167,11 @@ static void pdcnew_set_dma_mode(ide_drive_t *drive, const u8 speed)
}
}
-static void pdcnew_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void pdcnew_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 adj = (drive->dn & 1) ? 0x08 : 0x00;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
if (max_dma_rate(dev) == 4) {
set_indexed_reg(hwif, 0x0c + adj, pio_timings[pio].reg0c);
diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
index 35161dd840a..c5f3841af36 100644
--- a/drivers/ide/pdc202xx_old.c
+++ b/drivers/ide/pdc202xx_old.c
@@ -1,7 +1,7 @@
/*
* Copyright (C) 1998-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2006-2007, 2009 MontaVista Software, Inc.
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
*
* Portions Copyright (C) 1999 Promise Technology, Inc.
* Author: Frank Tiernan (frankt@promise.com)
@@ -21,23 +21,15 @@
#define DRV_NAME "pdc202xx_old"
-static void pdc_old_disable_66MHz_clock(ide_hwif_t *);
-
-static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
+static void pdc202xx_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 drive_pci = 0x60 + (drive->dn << 2);
+ const u8 speed = drive->dma_mode;
u8 AP = 0, BP = 0, CP = 0;
u8 TA = 0, TB = 0, TC = 0;
- /*
- * TODO: do this once per channel
- */
- if (dev->device != PCI_DEVICE_ID_PROMISE_20246)
- pdc_old_disable_66MHz_clock(hwif);
-
pci_read_config_byte(dev, drive_pci, &AP);
pci_read_config_byte(dev, drive_pci + 1, &BP);
pci_read_config_byte(dev, drive_pci + 2, &CP);
@@ -84,9 +76,10 @@ static void pdc202xx_set_mode(ide_drive_t *drive, const u8 speed)
}
}
-static void pdc202xx_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void pdc202xx_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- pdc202xx_set_mode(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ pdc202xx_set_mode(hwif, drive);
}
static int pdc202xx_test_irq(ide_hwif_t *hwif)
@@ -100,13 +93,13 @@ static int pdc202xx_test_irq(ide_hwif_t *hwif)
* bit 7: error, bit 6: interrupting,
* bit 5: FIFO full, bit 4: FIFO empty
*/
- return ((sc1d & 0x50) == 0x40) ? 1 : 0;
+ return ((sc1d & 0x50) == 0x50) ? 1 : 0;
} else {
/*
* bit 3: error, bit 2: interrupting,
* bit 1: FIFO full, bit 0: FIFO empty
*/
- return ((sc1d & 0x05) == 0x04) ? 1 : 0;
+ return ((sc1d & 0x05) == 0x05) ? 1 : 0;
}
}
@@ -145,6 +138,11 @@ static void pdc_old_disable_66MHz_clock(ide_hwif_t *hwif)
outb(clock & ~(hwif->channel ? 0x08 : 0x02), clock_reg);
}
+static void pdc2026x_init_hwif(ide_hwif_t *hwif)
+{
+ pdc_old_disable_66MHz_clock(hwif);
+}
+
static void pdc202xx_dma_start(ide_drive_t *drive)
{
if (drive->current_speed > XFER_UDMA_2)
@@ -261,6 +259,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
{ \
.name = DRV_NAME, \
.init_chipset = init_chipset_pdc202xx, \
+ .init_hwif = pdc2026x_init_hwif, \
.port_ops = &pdc2026x_port_ops, \
.dma_ops = &pdc2026x_dma_ops, \
.host_flags = IDE_HFLAGS_PDC202XX, \
@@ -356,6 +355,6 @@ static void __exit pdc202xx_ide_exit(void)
module_init(pdc202xx_ide_init);
module_exit(pdc202xx_ide_exit);
-MODULE_AUTHOR("Andre Hedrick, Frank Tiernan");
+MODULE_AUTHOR("Andre Hedrick, Frank Tiernan, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for older Promise IDE");
MODULE_LICENSE("GPL");
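
Two separate fixes land in pdc202xx_old: the per-channel interrupt test now requires the FIFO-empty status bit alongside the interrupt bit (the compare value changes from 0x40/0x04 to 0x50/0x05 so it matches the full mask), and the one-off 66 MHz clock disable moves out of the per-mode path into an ->init_hwif hook that runs once per probed port. A sketch of wiring such a hook, with hypothetical bar_* names:

static void bar_init_hwif(ide_hwif_t *hwif)
{
        /* one-time per-port setup that used to run on every set_mode() call */
        bar_disable_66mhz_clock(hwif);          /* hypothetical helper */
}

static const struct ide_port_info bar_chipset = {
        .name           = "bar",
        .init_hwif      = bar_init_hwif,        /* invoked once when the port is probed */
        /* .port_ops, .dma_ops, .host_flags as before */
};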
diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
index bf14f39bd3a..1bdca49e5a0 100644
--- a/drivers/ide/piix.c
+++ b/drivers/ide/piix.c
@@ -59,15 +59,14 @@ static int no_piix_dma;
/**
* piix_set_pio_mode - set host controller for PIO mode
+ * @port: port
* @drive: drive
- * @pio: PIO mode number
*
* Set the interface PIO mode based upon the settings done by AMI BIOS.
*/
-static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void piix_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
int is_slave = drive->dn & 1;
int master_port = hwif->channel ? 0x42 : 0x40;
@@ -77,6 +76,7 @@ static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
u8 slave_data;
static DEFINE_SPINLOCK(tune_lock);
int control = 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/* ISP RTC */
static const u8 timings[][2]= {
@@ -127,16 +127,15 @@ static void piix_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* piix_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Set a PIIX host controller to the desired DMA mode. This involves
* programming the right timing data into the PCI configuration space.
*/
-static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void piix_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 maslave = hwif->channel ? 0x42 : 0x40;
int a_speed = 3 << (drive->dn * 4);
@@ -147,6 +146,7 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
int sitre;
u16 reg4042, reg4a;
u8 reg48, reg54, reg55;
+ const u8 speed = drive->dma_mode;
pci_read_config_word(dev, maslave, &reg4042);
sitre = (reg4042 & 0x4000) ? 1 : 0;
@@ -176,7 +176,6 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x54, reg54 & ~v_flag);
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
- u8 pio;
if (reg48 & u_flag)
pci_write_config_byte(dev, 0x48, reg48 & ~u_flag);
@@ -188,11 +187,12 @@ static void piix_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag);
if (speed >= XFER_MW_DMA_0)
- pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ drive->pio_mode =
+ mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
- pio = 2; /* only SWDMA2 is allowed */
+ drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
- piix_set_pio_mode(drive, pio);
+ piix_set_pio_mode(hwif, drive);
}
}
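
Because the requested mode now travels in the drive, the MWDMA/SWDMA branch stores the companion PIO mode in drive->pio_mode (XFER_PIO_*-encoded) and re-enters the PIO path, rather than passing a raw PIO number. Condensed from the hunk above:

/* inside piix_set_dma_mode(), MWDMA/SWDMA branch */
static const u8 mwdma_to_pio[] = { 0, 3, 4 };   /* MWDMA0/1/2 pair with PIO0/3/4 */

if (speed >= XFER_MW_DMA_0)
        drive->pio_mode = mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
        drive->pio_mode = XFER_PIO_2;   /* SWDMA2 is the only SWDMA mode used */

piix_set_pio_mode(hwif, drive);         /* reprogram the matching PIO timings */

slc90e66 below receives the identical treatment.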
diff --git a/drivers/ide/pmac.c b/drivers/ide/pmac.c
index 7a4e788cab2..850ee452e9b 100644
--- a/drivers/ide/pmac.c
+++ b/drivers/ide/pmac.c
@@ -496,12 +496,11 @@ static void pmac_write_devctl(ide_hwif_t *hwif, u8 ctl)
/*
* Old tuning functions (called on hdparm -p), sets up drive PIO timings
*/
-static void
-pmac_ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void pmac_ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
pmac_ide_hwif_t *pmif =
(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
struct ide_timing *tim = ide_timing_find_mode(XFER_PIO_0 + pio);
u32 *timings, t;
unsigned accessTicks, recTicks;
@@ -778,14 +777,14 @@ set_timings_mdma(ide_drive_t *drive, int intf_type, u32 *timings, u32 *timings2,
#endif
}
-static void pmac_ide_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void pmac_ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
pmac_ide_hwif_t *pmif =
(pmac_ide_hwif_t *)dev_get_drvdata(hwif->gendev.parent);
int ret = 0;
u32 *timings, *timings2, tl[2];
u8 unit = drive->dn & 1;
+ const u8 speed = drive->dma_mode;
timings = &pmif->timings[unit];
timings2 = &pmif->timings[unit+2];
@@ -1651,8 +1650,8 @@ pmac_ide_dma_test_irq (ide_drive_t *drive)
if ((status & FLUSH) == 0)
break;
if (++timeout > 100) {
- printk(KERN_WARNING "ide%d, ide_dma_test_irq \
- timeout flushing channel\n", hwif->index);
+ printk(KERN_WARNING "ide%d, ide_dma_test_irq timeout flushing channel\n",
+ hwif->index);
break;
}
}
diff --git a/drivers/ide/qd65xx.c b/drivers/ide/qd65xx.c
index 74696edc8d1..3f0244fd8e6 100644
--- a/drivers/ide/qd65xx.c
+++ b/drivers/ide/qd65xx.c
@@ -189,15 +189,13 @@ static void qd_set_timing (ide_drive_t *drive, u8 timing)
printk(KERN_DEBUG "%s: %#x\n", drive->name, timing);
}
-static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void qd6500_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
u16 *id = drive->id;
int active_time = 175;
int recovery_time = 415; /* worst case values from the dos driver */
- /*
- * FIXME: use "pio" value
- */
+ /* FIXME: use drive->pio_mode value */
if (!qd_find_disk_type(drive, &active_time, &recovery_time) &&
(id[ATA_ID_OLD_PIO_MODES] & 0xff) && (id[ATA_ID_FIELD_VALID] & 2) &&
id[ATA_ID_EIDE_PIO] >= 240) {
@@ -211,9 +209,9 @@ static void qd6500_set_pio_mode(ide_drive_t *drive, const u8 pio)
active_time, recovery_time));
}
-static void qd6580_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void qd6580_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
struct ide_timing *t = ide_timing_find_mode(XFER_PIO_0 + pio);
unsigned int cycle_time;
int active_time = 175;
diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
index d467478d68d..134f1fd1386 100644
--- a/drivers/ide/sc1200.c
+++ b/drivers/ide/sc1200.c
@@ -122,13 +122,13 @@ out:
return mask;
}
-static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void sc1200_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned int reg, timings;
unsigned short pci_clock;
unsigned int basereg = hwif->channel ? 0x50 : 0x40;
+ const u8 mode = drive->dma_mode;
static const u32 udma_timing[3][3] = {
{ 0x00921250, 0x00911140, 0x00911030 },
@@ -193,10 +193,10 @@ static int sc1200_dma_end(ide_drive_t *drive)
* will have valid default PIO timings set up before we get here.
*/
-static void sc1200_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void sc1200_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
int mode = -1;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
/*
* bad abuse of ->set_pio_mode interface
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
index 1104bb301eb..b7f5b0c4310 100644
--- a/drivers/ide/scc_pata.c
+++ b/drivers/ide/scc_pata.c
@@ -199,16 +199,15 @@ scc_ide_outsl(unsigned long port, void *addr, u32 count)
/**
* scc_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Load the timing settings for this device mode into the
* controller.
*/
-static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct scc_ports *ports = ide_get_hwifdata(hwif);
unsigned long ctl_base = ports->ctl;
unsigned long cckctrl_port = ctl_base + 0xff0;
@@ -216,6 +215,7 @@ static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
unsigned long pioct_port = ctl_base + 0x004;
unsigned long reg;
int offset;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
reg = in_be32((void __iomem *)cckctrl_port);
if (reg & CCKCTRL_ATACLKOEN) {
@@ -231,16 +231,15 @@ static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
/**
* scc_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Load the timing settings for this device mode into the
* controller.
*/
-static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct scc_ports *ports = ide_get_hwifdata(hwif);
unsigned long ctl_base = ports->ctl;
unsigned long cckctrl_port = ctl_base + 0xff0;
@@ -254,6 +253,7 @@ static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
int offset, idx;
unsigned long reg;
unsigned long jcactsel;
+ const u8 speed = drive->dma_mode;
reg = in_be32((void __iomem *)cckctrl_port);
if (reg & CCKCTRL_ATACLKOEN) {
@@ -872,20 +872,18 @@ static struct pci_driver scc_pci_driver = {
.remove = __devexit_p(scc_remove),
};
-static int scc_ide_init(void)
+static int __init scc_ide_init(void)
{
return ide_pci_register_driver(&scc_pci_driver);
}
-module_init(scc_ide_init);
-/* -- No exit code?
-static void scc_ide_exit(void)
+static void __exit scc_ide_exit(void)
{
- ide_pci_unregister_driver(&scc_pci_driver);
+ pci_unregister_driver(&scc_pci_driver);
}
-module_exit(scc_ide_exit);
- */
+module_init(scc_ide_init);
+module_exit(scc_ide_exit);
MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
MODULE_LICENSE("GPL");
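
The previously commented-out exit path is reinstated in its canonical form: the init/exit pair is annotated __init/__exit (init code is discarded once loading finishes, and __exit code is dropped entirely for built-in kernels), and unloading the module now actually unregisters the PCI driver. A minimal sketch of the pattern (the example_* names are placeholders):

static int __init example_init(void)
{
        return ide_pci_register_driver(&scc_pci_driver);        /* register at load time */
}

static void __exit example_exit(void)
{
        pci_unregister_driver(&scc_pci_driver);                 /* undone on rmmod */
}

module_init(example_init);
module_exit(example_exit);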
diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
index b6554ef9271..35fb8dabb55 100644
--- a/drivers/ide/serverworks.c
+++ b/drivers/ide/serverworks.c
@@ -2,7 +2,7 @@
* Copyright (C) 1998-2000 Michel Aubry
* Copyright (C) 1998-2000 Andrzej Krzysztofowicz
* Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2007 Bartlomiej Zolnierkiewicz
+ * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz
* Portions copyright (c) 2001 Sun Microsystems
*
*
@@ -52,8 +52,6 @@ static const char *svwks_bad_ata100[] = {
NULL
};
-static struct pci_dev *isa_dev;
-
static int check_in_drive_lists (ide_drive_t *drive, const char **list)
{
char *m = (char *)&drive->id[ATA_ID_PROD];
@@ -67,26 +65,14 @@ static int check_in_drive_lists (ide_drive_t *drive, const char **list)
static u8 svwks_udma_filter(ide_drive_t *drive)
{
struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
- u8 mask = 0;
- if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
+ if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
return 0x1f;
- if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
- u32 reg = 0;
- if (isa_dev)
- pci_read_config_dword(isa_dev, 0x64, &reg);
-
- /*
- * Don't enable UDMA on disk devices for the moment
- */
- if(drive->media == ide_disk)
- return 0;
- /* Check the OSB4 DMA33 enable bit */
- return ((reg & 0x00004000) == 0x00004000) ? 0x07 : 0;
} else if (dev->revision < SVWKS_CSB5_REVISION_NEW) {
return 0x07;
- } else if (dev->revision >= SVWKS_CSB5_REVISION_NEW) {
- u8 btr = 0, mode;
+ } else {
+ u8 btr = 0, mode, mask;
+
pci_read_config_byte(dev, 0x5A, &btr);
mode = btr & 0x3;
@@ -101,13 +87,9 @@ static u8 svwks_udma_filter(ide_drive_t *drive)
case 1: mask = 0x07; break;
default: mask = 0x00; break;
}
- }
- if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
- (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
- (!(PCI_FUNC(dev->devfn) & 1)))
- mask = 0x1f;
- return mask;
+ return mask;
+ }
}
static u8 svwks_csb_check (struct pci_dev *dev)
@@ -124,12 +106,13 @@ static u8 svwks_csb_check (struct pci_dev *dev)
return 0;
}
-static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void svwks_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 pio_modes[] = { 0x5d, 0x47, 0x34, 0x22, 0x20 };
static const u8 drive_pci[] = { 0x41, 0x40, 0x43, 0x42 };
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
pci_write_config_byte(dev, drive_pci[drive->dn], pio_modes[pio]);
@@ -145,14 +128,14 @@ static void svwks_set_pio_mode(ide_drive_t *drive, const u8 pio)
}
}
-static void svwks_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void svwks_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 udma_modes[] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
static const u8 dma_modes[] = { 0x77, 0x21, 0x20 };
static const u8 drive_pci2[] = { 0x45, 0x44, 0x47, 0x46 };
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
+ const u8 speed = drive->dma_mode;
u8 unit = drive->dn & 1;
u8 ultra_enable = 0, ultra_timing = 0, dma_timing = 0;
@@ -185,8 +168,9 @@ static int init_chipset_svwks(struct pci_dev *dev)
/* OSB4 : South Bridge and IDE */
if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
- isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
- PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
+ struct pci_dev *isa_dev =
+ pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
+ PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
if (isa_dev) {
pci_read_config_dword(isa_dev, 0x64, &reg);
reg &= ~0x00002000; /* disable 600ns interrupt mask */
@@ -195,6 +179,7 @@ static int init_chipset_svwks(struct pci_dev *dev)
"enabled.\n", pci_name(dev));
reg |= 0x00004000; /* enable UDMA/33 support */
pci_write_config_dword(isa_dev, 0x64, reg);
+ pci_dev_put(isa_dev);
}
}
@@ -343,7 +328,6 @@ static u8 svwks_cable_detect(ide_hwif_t *hwif)
static const struct ide_port_ops osb4_port_ops = {
.set_pio_mode = svwks_set_pio_mode,
.set_dma_mode = svwks_set_dma_mode,
- .udma_filter = svwks_udma_filter,
};
static const struct ide_port_ops svwks_port_ops = {
@@ -460,6 +444,6 @@ static void __exit svwks_ide_exit(void)
module_init(svwks_ide_init);
module_exit(svwks_ide_exit);
-MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick");
+MODULE_AUTHOR("Michael Aubry. Andrzej Krzysztofowicz, Andre Hedrick, Bartlomiej Zolnierkiewicz");
MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE");
MODULE_LICENSE("GPL");
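
The OSB4 quirk no longer caches the ISA bridge in a file-scope pointer: init_chipset_svwks() looks it up locally and drops the reference with pci_dev_put() once the UDMA/33 enable bit has been written, and svwks_udma_filter() is simplified so the OSB4-specific filtering (and the osb4 ->udma_filter hook) goes away. The lookup/release pairing, condensed from the hunk above:

struct pci_dev *isa_dev = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
                                         PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
if (isa_dev) {
        u32 reg;

        pci_read_config_dword(isa_dev, 0x64, &reg);
        reg &= ~0x00002000;     /* disable 600ns interrupt mask */
        reg |= 0x00004000;      /* enable UDMA/33 support */
        pci_write_config_dword(isa_dev, 0x64, reg);
        pci_dev_put(isa_dev);   /* balance the reference taken by pci_get_device() */
}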
diff --git a/drivers/ide/sgiioc4.c b/drivers/ide/sgiioc4.c
index b7d61dc6409..e3ea591f66d 100644
--- a/drivers/ide/sgiioc4.c
+++ b/drivers/ide/sgiioc4.c
@@ -255,7 +255,7 @@ static int sgiioc4_dma_end(ide_drive_t *drive)
return dma_stat;
}
-static void sgiioc4_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void sgiioc4_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
}
diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
index d95df528562..ddeda444a27 100644
--- a/drivers/ide/siimage.c
+++ b/drivers/ide/siimage.c
@@ -229,19 +229,18 @@ static u8 sil_sata_udma_filter(ide_drive_t *drive)
/**
* sil_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* Load the timing settings for this device mode into the
* controller.
*/
-static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
+static void sil_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u16 tf_speed[] = { 0x328a, 0x2283, 0x1281, 0x10c3, 0x10c1 };
static const u16 data_speed[] = { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 };
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
ide_drive_t *pair = ide_get_pair_dev(drive);
u32 speedt = 0;
@@ -249,6 +248,7 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
unsigned long addr = siimage_seldev(drive, 0x04);
unsigned long tfaddr = siimage_selreg(hwif, 0x02);
unsigned long base = (unsigned long)hwif->hwif_data;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 tf_pio = pio;
u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
u8 addr_mask = hwif->channel ? (mmio ? 0xF4 : 0x84)
@@ -258,7 +258,7 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
/* trim *taskfile* PIO to the slowest of the master/slave */
if (pair) {
- u8 pair_pio = ide_get_best_pio_mode(pair, 255, 4);
+ u8 pair_pio = pair->pio_mode - XFER_PIO_0;
if (pair_pio < tf_pio)
tf_pio = pair_pio;
@@ -289,19 +289,18 @@ static void sil_set_pio_mode(ide_drive_t *drive, u8 pio)
/**
* sil_set_dma_mode - set host controller for DMA mode
+ * @hwif: port
* @drive: drive
- * @speed: DMA mode
*
* Tune the SiI chipset for the desired DMA mode.
*/
-static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void sil_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static const u8 ultra6[] = { 0x0F, 0x0B, 0x07, 0x05, 0x03, 0x02, 0x01 };
static const u8 ultra5[] = { 0x0C, 0x07, 0x05, 0x04, 0x02, 0x01 };
static const u16 dma[] = { 0x2208, 0x10C2, 0x10C1 };
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long base = (unsigned long)hwif->hwif_data;
u16 ultra = 0, multi = 0;
@@ -311,6 +310,7 @@ static void sil_set_dma_mode(ide_drive_t *drive, const u8 speed)
: (mmio ? 0xB4 : 0x80);
unsigned long ma = siimage_seldev(drive, 0x08);
unsigned long ua = siimage_seldev(drive, 0x0C);
+ const u8 speed = drive->dma_mode;
scsc = sil_ioread8 (dev, base + (mmio ? 0x4A : 0x8A));
mode = sil_ioread8 (dev, base + addr_mask);
diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
index 468706082fb..db7f4e761db 100644
--- a/drivers/ide/sis5513.c
+++ b/drivers/ide/sis5513.c
@@ -290,10 +290,10 @@ static void config_drive_art_rwp(ide_drive_t *drive)
pci_write_config_byte(dev, 0x4b, rw_prefetch);
}
-static void sis_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void sis_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
config_drive_art_rwp(drive);
- sis_program_timings(drive, XFER_PIO_0 + pio);
+ sis_program_timings(drive, drive->pio_mode);
}
static void sis_ata133_program_udma_timings(ide_drive_t *drive, const u8 mode)
@@ -340,8 +340,10 @@ static void sis_program_udma_timings(ide_drive_t *drive, const u8 mode)
sis_ata33_program_udma_timings(drive, mode);
}
-static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void sis_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
+ const u8 speed = drive->dma_mode;
+
if (speed >= XFER_UDMA_0)
sis_program_udma_timings(drive, speed);
else
diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
index 3c2bbf0057e..f21dc2ad768 100644
--- a/drivers/ide/sl82c105.c
+++ b/drivers/ide/sl82c105.c
@@ -63,12 +63,13 @@ static unsigned int get_pio_timings(ide_drive_t *drive, u8 pio)
/*
* Configure the chipset for PIO mode.
*/
-static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void sl82c105_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- struct pci_dev *dev = to_pci_dev(drive->hwif->dev);
+ struct pci_dev *dev = to_pci_dev(hwif->dev);
unsigned long timings = (unsigned long)ide_get_drivedata(drive);
int reg = 0x44 + drive->dn * 4;
u16 drv_ctrl;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
drv_ctrl = get_pio_timings(drive, pio);
@@ -91,11 +92,12 @@ static void sl82c105_set_pio_mode(ide_drive_t *drive, const u8 pio)
/*
* Configure the chipset for DMA mode.
*/
-static void sl82c105_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void sl82c105_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
static u16 mwdma_timings[] = {0x0707, 0x0201, 0x0200};
unsigned long timings = (unsigned long)ide_get_drivedata(drive);
u16 drv_ctrl;
+ const u8 speed = drive->dma_mode;
drv_ctrl = mwdma_timings[speed - XFER_MW_DMA_0];
diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
index 1ccfb40e721..864ffe0e26d 100644
--- a/drivers/ide/slc90e66.c
+++ b/drivers/ide/slc90e66.c
@@ -18,9 +18,8 @@
static DEFINE_SPINLOCK(slc90e66_lock);
-static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void slc90e66_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
int is_slave = drive->dn & 1;
int master_port = hwif->channel ? 0x42 : 0x40;
@@ -29,6 +28,8 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
u16 master_data;
u8 slave_data;
int control = 0;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
+
/* ISP RTC */
static const u8 timings[][2] = {
{ 0, 0 },
@@ -71,14 +72,14 @@ static void slc90e66_set_pio_mode(ide_drive_t *drive, const u8 pio)
spin_unlock_irqrestore(&slc90e66_lock, flags);
}
-static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
+static void slc90e66_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u8 maslave = hwif->channel ? 0x42 : 0x40;
int sitre = 0, a_speed = 7 << (drive->dn * 4);
int u_speed = 0, u_flag = 1 << drive->dn;
u16 reg4042, reg44, reg48, reg4a;
+ const u8 speed = drive->dma_mode;
pci_read_config_word(dev, maslave, &reg4042);
sitre = (reg4042 & 0x4000) ? 1 : 0;
@@ -98,7 +99,6 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
}
} else {
const u8 mwdma_to_pio[] = { 0, 3, 4 };
- u8 pio;
if (reg48 & u_flag)
pci_write_config_word(dev, 0x48, reg48 & ~u_flag);
@@ -106,11 +106,12 @@ static void slc90e66_set_dma_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_word(dev, 0x4a, reg4a & ~a_speed);
if (speed >= XFER_MW_DMA_0)
- pio = mwdma_to_pio[speed - XFER_MW_DMA_0];
+ drive->pio_mode =
+ mwdma_to_pio[speed - XFER_MW_DMA_0] + XFER_PIO_0;
else
- pio = 2; /* only SWDMA2 is allowed */
+ drive->pio_mode = XFER_PIO_2; /* for SWDMA2 */
- slc90e66_set_pio_mode(drive, pio);
+ slc90e66_set_pio_mode(hwif, drive);
}
}
diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
index 05a93d6baec..e444d24934b 100644
--- a/drivers/ide/tc86c001.c
+++ b/drivers/ide/tc86c001.c
@@ -13,11 +13,11 @@
#define DRV_NAME "tc86c001"
-static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
+static void tc86c001_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
unsigned long scr_port = hwif->config_data + (drive->dn ? 0x02 : 0x00);
u16 mode, scr = inw(scr_port);
+ const u8 speed = drive->dma_mode;
switch (speed) {
case XFER_UDMA_4: mode = 0x00c0; break;
@@ -41,9 +41,10 @@ static void tc86c001_set_mode(ide_drive_t *drive, const u8 speed)
outw(scr, scr_port);
}
-static void tc86c001_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void tc86c001_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- tc86c001_set_mode(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ tc86c001_set_mode(hwif, drive);
}
/*
diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
index 8773c3ba746..7953447eae0 100644
--- a/drivers/ide/triflex.c
+++ b/drivers/ide/triflex.c
@@ -34,9 +34,8 @@
#define DRV_NAME "triflex"
-static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
+static void triflex_set_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct pci_dev *dev = to_pci_dev(hwif->dev);
u32 triflex_timings = 0;
u16 timing = 0;
@@ -44,7 +43,7 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
pci_read_config_dword(dev, channel_offset, &triflex_timings);
- switch(speed) {
+ switch (drive->dma_mode) {
case XFER_MW_DMA_2:
timing = 0x0103;
break;
@@ -82,9 +81,10 @@ static void triflex_set_mode(ide_drive_t *drive, const u8 speed)
pci_write_config_dword(dev, channel_offset, triflex_timings);
}
-static void triflex_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void triflex_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- triflex_set_mode(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ triflex_set_mode(hwif, drive);
}
static const struct ide_port_ops triflex_port_ops = {
diff --git a/drivers/ide/tx4938ide.c b/drivers/ide/tx4938ide.c
index fd59c0d235b..1d80f1fdbc9 100644
--- a/drivers/ide/tx4938ide.c
+++ b/drivers/ide/tx4938ide.c
@@ -56,16 +56,15 @@ static void tx4938ide_tune_ebusc(unsigned int ebus_ch,
&tx4938_ebuscptr->cr[ebus_ch]);
}
-static void tx4938ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void tx4938ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
struct tx4938ide_platform_info *pdata = hwif->dev->platform_data;
- u8 safe = pio;
+ u8 safe = drive->pio_mode - XFER_PIO_0;
ide_drive_t *pair;
pair = ide_get_pair_dev(drive);
if (pair)
- safe = min(safe, ide_get_best_pio_mode(pair, 255, 5));
+ safe = min(safe, pair->pio_mode - XFER_PIO_0);
tx4938ide_tune_ebusc(pdata->ebus_ch, pdata->gbus_clock, safe);
}
diff --git a/drivers/ide/tx4939ide.c b/drivers/ide/tx4939ide.c
index 64b58ecc3f0..3c736775187 100644
--- a/drivers/ide/tx4939ide.c
+++ b/drivers/ide/tx4939ide.c
@@ -104,17 +104,17 @@ static void tx4939ide_writeb(u8 val, void __iomem *base, u32 reg)
#define TX4939IDE_BASE(hwif) ((void __iomem *)(hwif)->extra_base)
-static void tx4939ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void tx4939ide_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
int is_slave = drive->dn;
u32 mask, val;
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
u8 safe = pio;
ide_drive_t *pair;
pair = ide_get_pair_dev(drive);
if (pair)
- safe = min(safe, ide_get_best_pio_mode(pair, 255, 4));
+ safe = min(safe, pair->pio_mode - XFER_PIO_0);
/*
* Update Command Transfer Mode for master/slave and Data
* Transfer Mode for this drive.
@@ -125,10 +125,10 @@ static void tx4939ide_set_pio_mode(ide_drive_t *drive, const u8 pio)
/* tx4939ide_tf_load_fixup() will set the Sys_Ctl register */
}
-static void tx4939ide_set_dma_mode(ide_drive_t *drive, const u8 mode)
+static void tx4939ide_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
u32 mask, val;
+ const u8 mode = drive->dma_mode;
/* Update Data Transfer Mode for this drive. */
if (mode >= XFER_UDMA_0)
diff --git a/drivers/ide/umc8672.c b/drivers/ide/umc8672.c
index 60f936e2319..47adcd09cb2 100644
--- a/drivers/ide/umc8672.c
+++ b/drivers/ide/umc8672.c
@@ -104,10 +104,11 @@ static void umc_set_speeds(u8 speeds[])
speeds[0], speeds[1], speeds[2], speeds[3]);
}
-static void umc_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void umc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif, *mate = hwif->mate;
+ ide_hwif_t *mate = hwif->mate;
unsigned long uninitialized_var(flags);
+ const u8 pio = drive->pio_mode - XFER_PIO_0;
printk("%s: setting umc8672 to PIO mode%d (speed %d)\n",
drive->name, pio, pio_to_umc[pio]);
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index 028de26a25f..e65d010b708 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -6,7 +6,7 @@
* vt8235, vt8237, vt8237a
*
* Copyright (c) 2000-2002 Vojtech Pavlik
- * Copyright (c) 2007 Bartlomiej Zolnierkiewicz
+ * Copyright (c) 2007-2010 Bartlomiej Zolnierkiewicz
*
* Based on the work of:
* Michel Aubry
@@ -54,6 +54,11 @@
#define VIA_NO_UNMASK 0x08 /* Doesn't work with IRQ unmasking on */
#define VIA_BAD_ID 0x10 /* Has wrong vendor ID (0x1107) */
#define VIA_BAD_AST 0x20 /* Don't touch Address Setup Timing */
+#define VIA_SATA_PATA 0x80 /* SATA/PATA combined configuration */
+
+enum {
+ VIA_IDFLAG_SINGLE = (1 << 1), /* single channel controller */
+};
/*
* VIA SouthBridge chips.
@@ -67,11 +72,13 @@ static struct via_isa_bridge {
u8 udma_mask;
u8 flags;
} via_isa_bridges[] = {
- { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
- { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+ { "vx855", PCI_DEVICE_ID_VIA_VX855, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
+ { "vx800", PCI_DEVICE_ID_VIA_VX800, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
+ { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST | VIA_SATA_PATA },
+ { "vt8261", PCI_DEVICE_ID_VIA_8261, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
+ { "vt6415", PCI_DEVICE_ID_VIA_6410, 0x00, 0xff, ATA_UDMA6, VIA_BAD_AST },
{ "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
@@ -92,6 +99,7 @@ static struct via_isa_bridge {
{ "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO },
{ "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
{ "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
+ { "vtxxxx", PCI_DEVICE_ID_VIA_ANON, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
{ NULL }
};
@@ -102,6 +110,7 @@ struct via82cxxx_dev
{
struct via_isa_bridge *via_config;
unsigned int via_80w;
+ u8 cached_device[2];
};
/**
@@ -137,30 +146,45 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
case ATA_UDMA4: t = timing->udma ? (0xe8 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x0f; break;
case ATA_UDMA5: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
case ATA_UDMA6: t = timing->udma ? (0xe0 | (clamp_val(timing->udma, 2, 9) - 2)) : 0x07; break;
- default: return;
}
- pci_write_config_byte(dev, VIA_UDMA_TIMING + (3 - dn), t);
+ /* Set UDMA unless device is not UDMA capable */
+ if (vdev->via_config->udma_mask) {
+ u8 udma_etc;
+
+ pci_read_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, &udma_etc);
+
+ /* clear transfer mode bit */
+ udma_etc &= ~0x20;
+
+ if (timing->udma) {
+ /* preserve 80-wire cable detection bit */
+ udma_etc &= 0x10;
+ udma_etc |= t;
+ }
+
+ pci_write_config_byte(dev, VIA_UDMA_TIMING + 3 - dn, udma_etc);
+ }
}
/**
* via_set_drive - configure transfer mode
+ * @hwif: port
* @drive: Drive to set up
- * @speed: desired speed
*
* via_set_drive() computes timing values and configures the chipset to
* a desired transfer mode. It can also be called by upper layers.
*/
-static void via_set_drive(ide_drive_t *drive, const u8 speed)
+static void via_set_drive(ide_hwif_t *hwif, ide_drive_t *drive)
{
- ide_hwif_t *hwif = drive->hwif;
ide_drive_t *peer = ide_get_pair_dev(drive);
struct pci_dev *dev = to_pci_dev(hwif->dev);
struct ide_host *host = pci_get_drvdata(dev);
struct via82cxxx_dev *vdev = host->host_priv;
struct ide_timing t, p;
unsigned int T, UT;
+ const u8 speed = drive->dma_mode;
T = 1000000000 / via_clock;
@@ -175,7 +199,7 @@ static void via_set_drive(ide_drive_t *drive, const u8 speed)
ide_timing_compute(drive, speed, &t, T, UT);
if (peer) {
- ide_timing_compute(peer, peer->current_speed, &p, T, UT);
+ ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
ide_timing_merge(&p, &t, &t, IDE_TIMING_8BIT);
}
@@ -184,22 +208,24 @@ static void via_set_drive(ide_drive_t *drive, const u8 speed)
/**
* via_set_pio_mode - set host controller for PIO mode
+ * @hwif: port
* @drive: drive
- * @pio: PIO mode number
*
* A callback from the upper layers for PIO-only tuning.
*/
-static void via_set_pio_mode(ide_drive_t *drive, const u8 pio)
+static void via_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
- via_set_drive(drive, XFER_PIO_0 + pio);
+ drive->dma_mode = drive->pio_mode;
+ via_set_drive(hwif, drive);
}
static struct via_isa_bridge *via_config_find(struct pci_dev **isa)
{
struct via_isa_bridge *via_config;
- for (via_config = via_isa_bridges; via_config->id; via_config++)
+ for (via_config = via_isa_bridges;
+ via_config->id != PCI_DEVICE_ID_VIA_ANON; via_config++)
if ((*isa = pci_get_device(PCI_VENDOR_ID_VIA +
!!(via_config->flags & VIA_BAD_ID),
via_config->id, NULL))) {
@@ -362,6 +388,9 @@ static u8 via82cxxx_cable_detect(ide_hwif_t *hwif)
if (via_cable_override(pdev))
return ATA_CBL_PATA40_SHORT;
+ if ((vdev->via_config->flags & VIA_SATA_PATA) && hwif->channel == 0)
+ return ATA_CBL_SATA;
+
if ((vdev->via_80w >> hwif->channel) & 1)
return ATA_CBL_PATA80;
else
@@ -374,10 +403,66 @@ static const struct ide_port_ops via_port_ops = {
.cable_detect = via82cxxx_cable_detect,
};
+static void via_write_devctl(ide_hwif_t *hwif, u8 ctl)
+{
+ struct via82cxxx_dev *vdev = hwif->host->host_priv;
+
+ outb(ctl, hwif->io_ports.ctl_addr);
+ outb(vdev->cached_device[hwif->channel], hwif->io_ports.device_addr);
+}
+
+static void __via_dev_select(ide_drive_t *drive, u8 select)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct via82cxxx_dev *vdev = hwif->host->host_priv;
+
+ outb(select, hwif->io_ports.device_addr);
+ vdev->cached_device[hwif->channel] = select;
+}
+
+static void via_dev_select(ide_drive_t *drive)
+{
+ __via_dev_select(drive, drive->select | ATA_DEVICE_OBS);
+}
+
+static void via_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct ide_io_ports *io_ports = &hwif->io_ports;
+
+ if (valid & IDE_VALID_FEATURE)
+ outb(tf->feature, io_ports->feature_addr);
+ if (valid & IDE_VALID_NSECT)
+ outb(tf->nsect, io_ports->nsect_addr);
+ if (valid & IDE_VALID_LBAL)
+ outb(tf->lbal, io_ports->lbal_addr);
+ if (valid & IDE_VALID_LBAM)
+ outb(tf->lbam, io_ports->lbam_addr);
+ if (valid & IDE_VALID_LBAH)
+ outb(tf->lbah, io_ports->lbah_addr);
+ if (valid & IDE_VALID_DEVICE)
+ __via_dev_select(drive, tf->device);
+}
+
+const struct ide_tp_ops via_tp_ops = {
+ .exec_command = ide_exec_command,
+ .read_status = ide_read_status,
+ .read_altstatus = ide_read_altstatus,
+ .write_devctl = via_write_devctl,
+
+ .dev_select = via_dev_select,
+ .tf_load = via_tf_load,
+ .tf_read = ide_tf_read,
+
+ .input_data = ide_input_data,
+ .output_data = ide_output_data,
+};
+
static const struct ide_port_info via82cxxx_chipset __devinitdata = {
.name = DRV_NAME,
.init_chipset = init_chipset_via82cxxx,
.enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
+ .tp_ops = &via_tp_ops,
.port_ops = &via_port_ops,
.host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
IDE_HFLAG_POST_SET_MODE |
@@ -402,11 +487,6 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
* Find the ISA bridge and check we know what it is.
*/
via_config = via_config_find(&isa);
- if (!via_config->id) {
- printk(KERN_WARNING DRV_NAME " %s: unknown chipset, skipping\n",
- pci_name(dev));
- return -ENODEV;
- }
/*
* Print the boot message.
@@ -436,10 +516,13 @@ static int __devinit via_init_one(struct pci_dev *dev, const struct pci_device_i
via_clock = 33333;
}
- if (idx == 0)
- d.host_flags |= IDE_HFLAG_NO_AUTODMA;
- else
+ if (idx == 1)
d.enablebits[1].reg = d.enablebits[0].reg = 0;
+ else
+ d.host_flags |= IDE_HFLAG_NO_AUTODMA;
+
+ if (idx == VIA_IDFLAG_SINGLE)
+ d.host_flags |= IDE_HFLAG_SINGLE;
if ((via_config->flags & VIA_NO_UNMASK) == 0)
d.host_flags |= IDE_HFLAG_UNMASK_IRQS;
@@ -475,8 +558,9 @@ static const struct pci_device_id via_pci_tbl[] = {
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C576_1), 0 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C586_1), 0 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_CX700_IDE), 0 },
- { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), 0 },
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_VX855_IDE), VIA_IDFLAG_SINGLE },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6410), 1 },
+ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_6415), 1 },
{ PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_SATA_EIDE), 1 },
{ 0, },
};
@@ -504,6 +588,6 @@ static void __exit via_ide_exit(void)
module_init(via_ide_init);
module_exit(via_ide_exit);
-MODULE_AUTHOR("Vojtech Pavlik, Michel Aubry, Jeff Garzik, Andre Hedrick");
+MODULE_AUTHOR("Vojtech Pavlik, Bartlomiej Zolnierkiewicz, Michel Aubry, Jeff Garzik, Andre Hedrick");
MODULE_DESCRIPTION("PCI driver module for VIA IDE");
MODULE_LICENSE("GPL");
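
The new tp_ops keep a per-channel copy of the last value written to the drive-select register: both ->dev_select and IDE_VALID_DEVICE taskfile loads funnel through one helper that refreshes the cache, and every device-control write replays the cached selection afterwards. Condensed from the hunks above (vdev is the host_priv set up at probe time):

static void via_write_devctl(ide_hwif_t *hwif, u8 ctl)
{
        struct via82cxxx_dev *vdev = hwif->host->host_priv;

        outb(ctl, hwif->io_ports.ctl_addr);
        /* re-assert the last drive selection after touching devctl */
        outb(vdev->cached_device[hwif->channel], hwif->io_ports.device_addr);
}

static void __via_dev_select(ide_drive_t *drive, u8 select)
{
        struct via82cxxx_dev *vdev = drive->hwif->host->host_priv;

        outb(select, drive->hwif->io_ports.device_addr);
        vdev->cached_device[drive->hwif->channel] = select;    /* remember it */
}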
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig
index dd0db67bf8d..975adce5f40 100644
--- a/drivers/infiniband/Kconfig
+++ b/drivers/infiniband/Kconfig
@@ -20,6 +20,7 @@ config INFINIBAND_USER_MAD
config INFINIBAND_USER_ACCESS
tristate "InfiniBand userspace access (verbs and CM)"
+ select ANON_INODES
---help---
Userspace InfiniBand access support. This enables the
kernel side of userspace verbs and the userspace
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 7522008fda8..58463da814d 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1193,10 +1193,7 @@ static int method_in_use(struct ib_mad_mgmt_method_table **method,
{
int i;
- for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS);
- i < IB_MGMT_MAX_METHODS;
- i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
- 1+i)) {
+ for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS) {
if ((*method)->agent[i]) {
printk(KERN_ERR PFX "Method %d already in use\n", i);
return -EINVAL;
@@ -1330,13 +1327,9 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req *mad_reg_req,
goto error3;
/* Finally, add in methods being registered */
- for (i = find_first_bit(mad_reg_req->method_mask,
- IB_MGMT_MAX_METHODS);
- i < IB_MGMT_MAX_METHODS;
- i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
- 1+i)) {
+ for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
(*method)->agent[i] = agent_priv;
- }
+
return 0;
error3:
@@ -1429,13 +1422,9 @@ check_in_use:
goto error4;
/* Finally, add in methods being registered */
- for (i = find_first_bit(mad_reg_req->method_mask,
- IB_MGMT_MAX_METHODS);
- i < IB_MGMT_MAX_METHODS;
- i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS,
- 1+i)) {
+ for_each_set_bit(i, mad_reg_req->method_mask, IB_MGMT_MAX_METHODS)
(*method)->agent[i] = agent_priv;
- }
+
return 0;
error4:
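
The three open-coded find_first_bit()/find_next_bit() loops over the method mask collapse into for_each_set_bit(), which visits only the bits that are set. The idiom, as a minimal sketch (handle_method() is a hypothetical stand-in for the per-method work):

static void walk_methods(unsigned long *method_mask)
{
        int i;

        for_each_set_bit(i, method_mask, IB_MGMT_MAX_METHODS)
                handle_method(i);       /* runs once per set bit, in ascending order */
}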
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index f504c9b00c1..1b09b735c5a 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -1215,15 +1215,18 @@ static void ib_ucm_release_dev(struct device *dev)
ucm_dev = container_of(dev, struct ib_ucm_device, dev);
cdev_del(&ucm_dev->cdev);
- clear_bit(ucm_dev->devnum, dev_map);
+ if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
+ clear_bit(ucm_dev->devnum, dev_map);
+ else
+ clear_bit(ucm_dev->devnum - IB_UCM_MAX_DEVICES, dev_map);
kfree(ucm_dev);
}
static const struct file_operations ucm_fops = {
- .owner = THIS_MODULE,
- .open = ib_ucm_open,
+ .owner = THIS_MODULE,
+ .open = ib_ucm_open,
.release = ib_ucm_close,
- .write = ib_ucm_write,
+ .write = ib_ucm_write,
.poll = ib_ucm_poll,
};
@@ -1237,8 +1240,32 @@ static ssize_t show_ibdev(struct device *dev, struct device_attribute *attr,
}
static DEVICE_ATTR(ibdev, S_IRUGO, show_ibdev, NULL);
+static dev_t overflow_maj;
+static DECLARE_BITMAP(overflow_map, IB_UCM_MAX_DEVICES);
+static int find_overflow_devnum(void)
+{
+ int ret;
+
+ if (!overflow_maj) {
+ ret = alloc_chrdev_region(&overflow_maj, 0, IB_UCM_MAX_DEVICES,
+ "infiniband_cm");
+ if (ret) {
+ printk(KERN_ERR "ucm: couldn't register dynamic device number\n");
+ return ret;
+ }
+ }
+
+ ret = find_first_zero_bit(overflow_map, IB_UCM_MAX_DEVICES);
+ if (ret >= IB_UCM_MAX_DEVICES)
+ return -1;
+
+ return ret;
+}
+
static void ib_ucm_add_one(struct ib_device *device)
{
+ int devnum;
+ dev_t base;
struct ib_ucm_device *ucm_dev;
if (!device->alloc_ucontext ||
@@ -1251,16 +1278,25 @@ static void ib_ucm_add_one(struct ib_device *device)
ucm_dev->ib_dev = device;
- ucm_dev->devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
- if (ucm_dev->devnum >= IB_UCM_MAX_DEVICES)
- goto err;
-
- set_bit(ucm_dev->devnum, dev_map);
+ devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
+ if (devnum >= IB_UCM_MAX_DEVICES) {
+ devnum = find_overflow_devnum();
+ if (devnum < 0)
+ goto err;
+
+ ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;
+ base = devnum + overflow_maj;
+ set_bit(devnum, overflow_map);
+ } else {
+ ucm_dev->devnum = devnum;
+ base = devnum + IB_UCM_BASE_DEV;
+ set_bit(devnum, dev_map);
+ }
cdev_init(&ucm_dev->cdev, &ucm_fops);
ucm_dev->cdev.owner = THIS_MODULE;
kobject_set_name(&ucm_dev->cdev.kobj, "ucm%d", ucm_dev->devnum);
- if (cdev_add(&ucm_dev->cdev, IB_UCM_BASE_DEV + ucm_dev->devnum, 1))
+ if (cdev_add(&ucm_dev->cdev, base, 1))
goto err;
ucm_dev->dev.class = &cm_class;
@@ -1281,7 +1317,10 @@ err_dev:
device_unregister(&ucm_dev->dev);
err_cdev:
cdev_del(&ucm_dev->cdev);
- clear_bit(ucm_dev->devnum, dev_map);
+ if (ucm_dev->devnum < IB_UCM_MAX_DEVICES)
+ clear_bit(devnum, dev_map);
+ else
+ clear_bit(devnum, overflow_map);
err:
kfree(ucm_dev);
return;
@@ -1340,6 +1379,8 @@ static void __exit ib_ucm_cleanup(void)
ib_unregister_client(&ucm_client);
class_remove_file(&cm_class, &class_attr_abi_version);
unregister_chrdev_region(IB_UCM_BASE_DEV, IB_UCM_MAX_DEVICES);
+ if (overflow_maj)
+ unregister_chrdev_region(overflow_maj, IB_UCM_MAX_DEVICES);
idr_destroy(&ctx_id_table);
}
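
ucm (and user_mad below) gain the same overflow scheme: when the statically reserved minor range is exhausted, a second char-dev region is allocated on demand with alloc_chrdev_region() and its minors are tracked in a separate bitmap; device numbers at or above IB_UCM_MAX_DEVICES mark the overflow range so teardown clears the right map. The allocation side, condensed from ib_ucm_add_one():

devnum = find_first_zero_bit(dev_map, IB_UCM_MAX_DEVICES);
if (devnum >= IB_UCM_MAX_DEVICES) {
        devnum = find_overflow_devnum();        /* lazily registers the overflow region */
        if (devnum < 0)
                goto err;
        ucm_dev->devnum = devnum + IB_UCM_MAX_DEVICES;  /* flags it as overflow */
        base = devnum + overflow_maj;
        set_bit(devnum, overflow_map);
} else {
        ucm_dev->devnum = devnum;
        base = devnum + IB_UCM_BASE_DEV;
        set_bit(devnum, dev_map);
}
cdev_init(&ucm_dev->cdev, &ucm_fops);
if (cdev_add(&ucm_dev->cdev, base, 1))
        goto err;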
diff --git a/drivers/infiniband/core/ud_header.c b/drivers/infiniband/core/ud_header.c
index 8ec7876bedc..650b501eb14 100644
--- a/drivers/infiniband/core/ud_header.c
+++ b/drivers/infiniband/core/ud_header.c
@@ -181,6 +181,7 @@ static const struct ib_field deth_table[] = {
* ib_ud_header_init - Initialize UD header structure
* @payload_bytes:Length of packet payload
* @grh_present:GRH flag (if non-zero, GRH will be included)
+ * @immediate_present: specify if immediate data should be used
* @header:Structure to initialize
*
* ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header,
@@ -191,21 +192,13 @@ static const struct ib_field deth_table[] = {
*/
void ib_ud_header_init(int payload_bytes,
int grh_present,
+ int immediate_present,
struct ib_ud_header *header)
{
- int header_len;
u16 packet_length;
memset(header, 0, sizeof *header);
- header_len =
- IB_LRH_BYTES +
- IB_BTH_BYTES +
- IB_DETH_BYTES;
- if (grh_present) {
- header_len += IB_GRH_BYTES;
- }
-
header->lrh.link_version = 0;
header->lrh.link_next_header =
grh_present ? IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL;
@@ -231,7 +224,8 @@ void ib_ud_header_init(int payload_bytes,
header->lrh.packet_length = cpu_to_be16(packet_length);
- if (header->immediate_present)
+ header->immediate_present = immediate_present;
+ if (immediate_present)
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
else
header->bth.opcode = IB_OPCODE_UD_SEND_ONLY;
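
ib_ud_header_init() now receives immediate_present up front and records it before choosing the BTH opcode; the old code tested header->immediate_present on a structure it had just zeroed with memset(), so the WITH_IMMEDIATE opcode could never be selected here. A caller-side sketch with illustrative values (payload_len, grh_present and has_imm are placeholders, not from this patch):

struct ib_ud_header header;

/* payload length, GRH flag, immediate flag, destination struct */
ib_ud_header_init(payload_len, grh_present, has_imm, &header);
/* header.bth.opcode is now SEND_ONLY or SEND_ONLY_WITH_IMMEDIATE accordingly */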
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 6f7c096abf1..4f906f0614f 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -136,7 +136,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
down_write(&current->mm->mmap_sem);
locked = npages + current->mm->locked_vm;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
ret = -ENOMEM;
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 7de02969ed7..02d360cfc2f 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -65,12 +65,9 @@ enum {
};
/*
- * Our lifetime rules for these structs are the following: each time a
- * device special file is opened, we look up the corresponding struct
- * ib_umad_port by minor in the umad_port[] table while holding the
- * port_lock. If this lookup succeeds, we take a reference on the
- * ib_umad_port's struct ib_umad_device while still holding the
- * port_lock; if the lookup fails, we fail the open(). We drop these
+ * Our lifetime rules for these structs are the following:
+ * each time a device special file is opened, we take a reference on the
+ * ib_umad_port's struct ib_umad_device. We drop these
* references in the corresponding close().
*
* In addition to references coming from open character devices, there
@@ -78,19 +75,14 @@ enum {
* module's reference taken when allocating the ib_umad_device in
* ib_umad_add_one().
*
- * When destroying an ib_umad_device, we clear all of its
- * ib_umad_ports from umad_port[] while holding port_lock before
- * dropping the module's reference to the ib_umad_device. This is
- * always safe because any open() calls will either succeed and obtain
- * a reference before we clear the umad_port[] entries, or fail after
- * we clear the umad_port[] entries.
+ * When destroying an ib_umad_device, we drop the module's reference.
*/
struct ib_umad_port {
- struct cdev *cdev;
+ struct cdev cdev;
struct device *dev;
- struct cdev *sm_cdev;
+ struct cdev sm_cdev;
struct device *sm_dev;
struct semaphore sm_sem;
@@ -136,7 +128,6 @@ static struct class *umad_class;
static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static DEFINE_SPINLOCK(port_lock);
-static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
@@ -496,8 +487,8 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
ah_attr.ah_flags = IB_AH_GRH;
memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
- ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
- ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
+ ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
+ ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;
}
@@ -528,9 +519,9 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
goto err_ah;
}
- packet->msg->ah = ah;
+ packet->msg->ah = ah;
packet->msg->timeout_ms = packet->mad.hdr.timeout_ms;
- packet->msg->retries = packet->mad.hdr.retries;
+ packet->msg->retries = packet->mad.hdr.retries;
packet->msg->context[0] = packet;
/* Copy MAD header. Any RMPP header is already in place. */
@@ -779,15 +770,11 @@ static long ib_umad_compat_ioctl(struct file *filp, unsigned int cmd,
/*
* ib_umad_open() does not need the BKL:
*
- * - umad_port[] accesses are protected by port_lock, the
- * ib_umad_port structures are properly reference counted, and
+ * - the ib_umad_port structures are properly reference counted, and
* everything else is purely local to the file being created, so
* races against other open calls are not a problem;
* - the ioctl method does not affect any global state outside of the
* file structure being operated on;
- * - the port is added to umad_port[] as the last part of module
- * initialization so the open method will either immediately run
- * -ENXIO, or all required initialization will be done.
*/
static int ib_umad_open(struct inode *inode, struct file *filp)
{
@@ -795,13 +782,10 @@ static int ib_umad_open(struct inode *inode, struct file *filp)
struct ib_umad_file *file;
int ret = 0;
- spin_lock(&port_lock);
- port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE];
+ port = container_of(inode->i_cdev, struct ib_umad_port, cdev);
if (port)
kref_get(&port->umad_dev->ref);
- spin_unlock(&port_lock);
-
- if (!port)
+ else
return -ENXIO;
mutex_lock(&port->file_mutex);
@@ -872,16 +856,16 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
}
static const struct file_operations umad_fops = {
- .owner = THIS_MODULE,
- .read = ib_umad_read,
- .write = ib_umad_write,
- .poll = ib_umad_poll,
+ .owner = THIS_MODULE,
+ .read = ib_umad_read,
+ .write = ib_umad_write,
+ .poll = ib_umad_poll,
.unlocked_ioctl = ib_umad_ioctl,
#ifdef CONFIG_COMPAT
- .compat_ioctl = ib_umad_compat_ioctl,
+ .compat_ioctl = ib_umad_compat_ioctl,
#endif
- .open = ib_umad_open,
- .release = ib_umad_close
+ .open = ib_umad_open,
+ .release = ib_umad_close
};
static int ib_umad_sm_open(struct inode *inode, struct file *filp)
@@ -892,13 +876,10 @@ static int ib_umad_sm_open(struct inode *inode, struct file *filp)
};
int ret;
- spin_lock(&port_lock);
- port = umad_port[iminor(inode) - IB_UMAD_MINOR_BASE - IB_UMAD_MAX_PORTS];
+ port = container_of(inode->i_cdev, struct ib_umad_port, sm_cdev);
if (port)
kref_get(&port->umad_dev->ref);
- spin_unlock(&port_lock);
-
- if (!port)
+ else
return -ENXIO;
if (filp->f_flags & O_NONBLOCK) {
@@ -949,8 +930,8 @@ static int ib_umad_sm_close(struct inode *inode, struct file *filp)
}
static const struct file_operations umad_sm_fops = {
- .owner = THIS_MODULE,
- .open = ib_umad_sm_open,
+ .owner = THIS_MODULE,
+ .open = ib_umad_sm_open,
.release = ib_umad_sm_close
};
@@ -990,16 +971,51 @@ static ssize_t show_abi_version(struct class *class, char *buf)
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+static dev_t overflow_maj;
+static DECLARE_BITMAP(overflow_map, IB_UMAD_MAX_PORTS);
+static int find_overflow_devnum(void)
+{
+ int ret;
+
+ if (!overflow_maj) {
+ ret = alloc_chrdev_region(&overflow_maj, 0, IB_UMAD_MAX_PORTS * 2,
+ "infiniband_mad");
+ if (ret) {
+ printk(KERN_ERR "user_mad: couldn't register dynamic device number\n");
+ return ret;
+ }
+ }
+
+ ret = find_first_zero_bit(overflow_map, IB_UMAD_MAX_PORTS);
+ if (ret >= IB_UMAD_MAX_PORTS)
+ return -1;
+
+ return ret;
+}
+
static int ib_umad_init_port(struct ib_device *device, int port_num,
struct ib_umad_port *port)
{
+ int devnum;
+ dev_t base;
+
spin_lock(&port_lock);
- port->dev_num = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
- if (port->dev_num >= IB_UMAD_MAX_PORTS) {
+ devnum = find_first_zero_bit(dev_map, IB_UMAD_MAX_PORTS);
+ if (devnum >= IB_UMAD_MAX_PORTS) {
spin_unlock(&port_lock);
- return -1;
+ devnum = find_overflow_devnum();
+ if (devnum < 0)
+ return -1;
+
+ spin_lock(&port_lock);
+ port->dev_num = devnum + IB_UMAD_MAX_PORTS;
+ base = devnum + overflow_maj;
+ set_bit(devnum, overflow_map);
+ } else {
+ port->dev_num = devnum;
+ base = devnum + base_dev;
+ set_bit(devnum, dev_map);
}
- set_bit(port->dev_num, dev_map);
spin_unlock(&port_lock);
port->ib_dev = device;
@@ -1008,17 +1024,14 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
mutex_init(&port->file_mutex);
INIT_LIST_HEAD(&port->file_list);
- port->cdev = cdev_alloc();
- if (!port->cdev)
- return -1;
- port->cdev->owner = THIS_MODULE;
- port->cdev->ops = &umad_fops;
- kobject_set_name(&port->cdev->kobj, "umad%d", port->dev_num);
- if (cdev_add(port->cdev, base_dev + port->dev_num, 1))
+ cdev_init(&port->cdev, &umad_fops);
+ port->cdev.owner = THIS_MODULE;
+ kobject_set_name(&port->cdev.kobj, "umad%d", port->dev_num);
+ if (cdev_add(&port->cdev, base, 1))
goto err_cdev;
port->dev = device_create(umad_class, device->dma_device,
- port->cdev->dev, port,
+ port->cdev.dev, port,
"umad%d", port->dev_num);
if (IS_ERR(port->dev))
goto err_cdev;
@@ -1028,17 +1041,15 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
if (device_create_file(port->dev, &dev_attr_port))
goto err_dev;
- port->sm_cdev = cdev_alloc();
- if (!port->sm_cdev)
- goto err_dev;
- port->sm_cdev->owner = THIS_MODULE;
- port->sm_cdev->ops = &umad_sm_fops;
- kobject_set_name(&port->sm_cdev->kobj, "issm%d", port->dev_num);
- if (cdev_add(port->sm_cdev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
+ base += IB_UMAD_MAX_PORTS;
+ cdev_init(&port->sm_cdev, &umad_sm_fops);
+ port->sm_cdev.owner = THIS_MODULE;
+ kobject_set_name(&port->sm_cdev.kobj, "issm%d", port->dev_num);
+ if (cdev_add(&port->sm_cdev, base, 1))
goto err_sm_cdev;
port->sm_dev = device_create(umad_class, device->dma_device,
- port->sm_cdev->dev, port,
+ port->sm_cdev.dev, port,
"issm%d", port->dev_num);
if (IS_ERR(port->sm_dev))
goto err_sm_cdev;
@@ -1048,24 +1059,23 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
if (device_create_file(port->sm_dev, &dev_attr_port))
goto err_sm_dev;
- spin_lock(&port_lock);
- umad_port[port->dev_num] = port;
- spin_unlock(&port_lock);
-
return 0;
err_sm_dev:
- device_destroy(umad_class, port->sm_cdev->dev);
+ device_destroy(umad_class, port->sm_cdev.dev);
err_sm_cdev:
- cdev_del(port->sm_cdev);
+ cdev_del(&port->sm_cdev);
err_dev:
- device_destroy(umad_class, port->cdev->dev);
+ device_destroy(umad_class, port->cdev.dev);
err_cdev:
- cdev_del(port->cdev);
- clear_bit(port->dev_num, dev_map);
+ cdev_del(&port->cdev);
+ if (port->dev_num < IB_UMAD_MAX_PORTS)
+ clear_bit(devnum, dev_map);
+ else
+ clear_bit(devnum, overflow_map);
return -1;
}
@@ -1079,15 +1089,11 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
dev_set_drvdata(port->dev, NULL);
dev_set_drvdata(port->sm_dev, NULL);
- device_destroy(umad_class, port->cdev->dev);
- device_destroy(umad_class, port->sm_cdev->dev);
+ device_destroy(umad_class, port->cdev.dev);
+ device_destroy(umad_class, port->sm_cdev.dev);
- cdev_del(port->cdev);
- cdev_del(port->sm_cdev);
-
- spin_lock(&port_lock);
- umad_port[port->dev_num] = NULL;
- spin_unlock(&port_lock);
+ cdev_del(&port->cdev);
+ cdev_del(&port->sm_cdev);
mutex_lock(&port->file_mutex);
@@ -1106,7 +1112,10 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
mutex_unlock(&port->file_mutex);
- clear_bit(port->dev_num, dev_map);
+ if (port->dev_num < IB_UMAD_MAX_PORTS)
+ clear_bit(port->dev_num, dev_map);
+ else
+ clear_bit(port->dev_num - IB_UMAD_MAX_PORTS, overflow_map);
}
static void ib_umad_add_one(struct ib_device *device)
@@ -1214,6 +1223,8 @@ static void __exit ib_umad_cleanup(void)
ib_unregister_client(&umad_client);
class_destroy(umad_class);
unregister_chrdev_region(base_dev, IB_UMAD_MAX_PORTS * 2);
+ if (overflow_maj)
+ unregister_chrdev_region(overflow_maj, IB_UMAD_MAX_PORTS * 2);
}
module_init(ib_umad_init);
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index b3ea9587dc8..a078e5624d2 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -41,6 +41,7 @@
#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/completion.h>
+#include <linux/cdev.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
@@ -69,23 +70,23 @@
struct ib_uverbs_device {
struct kref ref;
+ int num_comp_vectors;
struct completion comp;
- int devnum;
- struct cdev *cdev;
struct device *dev;
struct ib_device *ib_dev;
- int num_comp_vectors;
+ int devnum;
+ struct cdev cdev;
};
struct ib_uverbs_event_file {
struct kref ref;
+ int is_async;
struct ib_uverbs_file *uverbs_file;
spinlock_t lock;
+ int is_closed;
wait_queue_head_t poll_wait;
struct fasync_struct *async_queue;
struct list_head event_list;
- int is_async;
- int is_closed;
};
struct ib_uverbs_file {
@@ -145,7 +146,7 @@ extern struct idr ib_uverbs_srq_idr;
void idr_remove_uobj(struct idr *idp, struct ib_uobject *uobj);
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
- int is_async, int *fd);
+ int is_async);
struct ib_uverbs_event_file *ib_uverbs_lookup_comp_file(int fd);
void ib_uverbs_release_ucq(struct ib_uverbs_file *file,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 112d3970222..f71cf138d67 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -301,10 +301,15 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
resp.num_comp_vectors = file->device->num_comp_vectors;
- filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
+ ret = get_unused_fd();
+ if (ret < 0)
+ goto err_free;
+ resp.async_fd = ret;
+
+ filp = ib_uverbs_alloc_event_file(file, 1);
if (IS_ERR(filp)) {
ret = PTR_ERR(filp);
- goto err_free;
+ goto err_fd;
}
if (copy_to_user((void __user *) (unsigned long) cmd.response,
@@ -332,9 +337,11 @@ ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
return in_len;
err_file:
- put_unused_fd(resp.async_fd);
fput(filp);
+err_fd:
+ put_unused_fd(resp.async_fd);
+
err_free:
ibdev->dealloc_ucontext(ucontext);
@@ -715,6 +722,7 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
struct ib_uverbs_create_comp_channel cmd;
struct ib_uverbs_create_comp_channel_resp resp;
struct file *filp;
+ int ret;
if (out_len < sizeof resp)
return -ENOSPC;
@@ -722,9 +730,16 @@ ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
if (copy_from_user(&cmd, buf, sizeof cmd))
return -EFAULT;
- filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
- if (IS_ERR(filp))
+ ret = get_unused_fd();
+ if (ret < 0)
+ return ret;
+ resp.fd = ret;
+
+ filp = ib_uverbs_alloc_event_file(file, 0);
+ if (IS_ERR(filp)) {
+ put_unused_fd(resp.fd);
return PTR_ERR(filp);
+ }
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp)) {
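Both hunks above follow the same ordering now that ib_uverbs_alloc_event_file() no longer installs the descriptor itself: reserve an fd number with get_unused_fd() first, then create the event file, and release the reserved slot with put_unused_fd() on any failure; the fd is only published once nothing else can fail (the fd_install() call sits elsewhere in each function). A minimal sketch of that ordering, with hypothetical foo_* names:

#include <linux/file.h>
#include <linux/fs.h>
#include <linux/err.h>

static int foo_create_channel(struct file *(*make_file)(void))
{
	struct file *filp;
	int fd;

	fd = get_unused_fd();          /* reserve a descriptor number */
	if (fd < 0)
		return fd;

	filp = make_file();            /* may fail; fd not yet visible */
	if (IS_ERR(filp)) {
		put_unused_fd(fd);     /* give the slot back */
		return PTR_ERR(filp);
	}

	/* Nothing can fail past this point, so publish the fd. */
	fd_install(fd, filp);
	return fd;
}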
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 5f284ffd430..4fa2e651644 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -42,8 +42,8 @@
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/file.h>
-#include <linux/mount.h>
#include <linux/cdev.h>
+#include <linux/anon_inodes.h>
#include <asm/uaccess.h>
@@ -53,8 +53,6 @@ MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("InfiniBand userspace verbs access");
MODULE_LICENSE("Dual BSD/GPL");
-#define INFINIBANDEVENTFS_MAGIC 0x49426576 /* "IBev" */
-
enum {
IB_UVERBS_MAJOR = 231,
IB_UVERBS_BASE_MINOR = 192,
@@ -75,44 +73,41 @@ DEFINE_IDR(ib_uverbs_qp_idr);
DEFINE_IDR(ib_uverbs_srq_idr);
static DEFINE_SPINLOCK(map_lock);
-static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
static ssize_t (*uverbs_cmd_table[])(struct ib_uverbs_file *file,
const char __user *buf, int in_len,
int out_len) = {
- [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
- [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
- [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
- [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
- [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
- [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
- [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
+ [IB_USER_VERBS_CMD_GET_CONTEXT] = ib_uverbs_get_context,
+ [IB_USER_VERBS_CMD_QUERY_DEVICE] = ib_uverbs_query_device,
+ [IB_USER_VERBS_CMD_QUERY_PORT] = ib_uverbs_query_port,
+ [IB_USER_VERBS_CMD_ALLOC_PD] = ib_uverbs_alloc_pd,
+ [IB_USER_VERBS_CMD_DEALLOC_PD] = ib_uverbs_dealloc_pd,
+ [IB_USER_VERBS_CMD_REG_MR] = ib_uverbs_reg_mr,
+ [IB_USER_VERBS_CMD_DEREG_MR] = ib_uverbs_dereg_mr,
[IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL] = ib_uverbs_create_comp_channel,
- [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
- [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
- [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
- [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
- [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
- [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
- [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
- [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
- [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
- [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
- [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
- [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
- [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
- [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
- [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
- [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
- [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
- [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
- [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
- [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
+ [IB_USER_VERBS_CMD_CREATE_CQ] = ib_uverbs_create_cq,
+ [IB_USER_VERBS_CMD_RESIZE_CQ] = ib_uverbs_resize_cq,
+ [IB_USER_VERBS_CMD_POLL_CQ] = ib_uverbs_poll_cq,
+ [IB_USER_VERBS_CMD_REQ_NOTIFY_CQ] = ib_uverbs_req_notify_cq,
+ [IB_USER_VERBS_CMD_DESTROY_CQ] = ib_uverbs_destroy_cq,
+ [IB_USER_VERBS_CMD_CREATE_QP] = ib_uverbs_create_qp,
+ [IB_USER_VERBS_CMD_QUERY_QP] = ib_uverbs_query_qp,
+ [IB_USER_VERBS_CMD_MODIFY_QP] = ib_uverbs_modify_qp,
+ [IB_USER_VERBS_CMD_DESTROY_QP] = ib_uverbs_destroy_qp,
+ [IB_USER_VERBS_CMD_POST_SEND] = ib_uverbs_post_send,
+ [IB_USER_VERBS_CMD_POST_RECV] = ib_uverbs_post_recv,
+ [IB_USER_VERBS_CMD_POST_SRQ_RECV] = ib_uverbs_post_srq_recv,
+ [IB_USER_VERBS_CMD_CREATE_AH] = ib_uverbs_create_ah,
+ [IB_USER_VERBS_CMD_DESTROY_AH] = ib_uverbs_destroy_ah,
+ [IB_USER_VERBS_CMD_ATTACH_MCAST] = ib_uverbs_attach_mcast,
+ [IB_USER_VERBS_CMD_DETACH_MCAST] = ib_uverbs_detach_mcast,
+ [IB_USER_VERBS_CMD_CREATE_SRQ] = ib_uverbs_create_srq,
+ [IB_USER_VERBS_CMD_MODIFY_SRQ] = ib_uverbs_modify_srq,
+ [IB_USER_VERBS_CMD_QUERY_SRQ] = ib_uverbs_query_srq,
+ [IB_USER_VERBS_CMD_DESTROY_SRQ] = ib_uverbs_destroy_srq,
};
-static struct vfsmount *uverbs_event_mnt;
-
static void ib_uverbs_add_one(struct ib_device *device);
static void ib_uverbs_remove_one(struct ib_device *device);
@@ -370,7 +365,7 @@ static int ib_uverbs_event_close(struct inode *inode, struct file *filp)
static const struct file_operations uverbs_event_fops = {
.owner = THIS_MODULE,
- .read = ib_uverbs_event_read,
+ .read = ib_uverbs_event_read,
.poll = ib_uverbs_event_poll,
.release = ib_uverbs_event_close,
.fasync = ib_uverbs_event_fasync
@@ -489,12 +484,10 @@ void ib_uverbs_event_handler(struct ib_event_handler *handler,
}
struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
- int is_async, int *fd)
+ int is_async)
{
struct ib_uverbs_event_file *ev_file;
- struct path path;
struct file *filp;
- int ret;
ev_file = kmalloc(sizeof *ev_file, GFP_KERNEL);
if (!ev_file)
@@ -509,38 +502,12 @@ struct file *ib_uverbs_alloc_event_file(struct ib_uverbs_file *uverbs_file,
ev_file->is_async = is_async;
ev_file->is_closed = 0;
- *fd = get_unused_fd();
- if (*fd < 0) {
- ret = *fd;
- goto err;
- }
-
- /*
- * fops_get() can't fail here, because we're coming from a
- * system call on a uverbs file, which will already have a
- * module reference.
- */
- path.mnt = uverbs_event_mnt;
- path.dentry = uverbs_event_mnt->mnt_root;
- path_get(&path);
- filp = alloc_file(&path, FMODE_READ, fops_get(&uverbs_event_fops));
- if (!filp) {
- ret = -ENFILE;
- goto err_fd;
- }
-
- filp->private_data = ev_file;
+ filp = anon_inode_getfile("[infinibandevent]", &uverbs_event_fops,
+ ev_file, O_RDONLY);
+ if (IS_ERR(filp))
+ kfree(ev_file);
return filp;
-
-err_fd:
- fops_put(&uverbs_event_fops);
- path_put(&path);
- put_unused_fd(*fd);
-
-err:
- kfree(ev_file);
- return ERR_PTR(ret);
}
/*
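The hunk above is the heart of the conversion: instead of pinning a private pseudo-filesystem mount and hand-building a struct file with alloc_file(), the event file is now created with anon_inode_getfile(), which returns a ready-to-use file backed by the shared anon_inodes inode. A minimal sketch of the call, with an illustrative event structure:

#include <linux/anon_inodes.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/err.h>

struct foo_event {
	int pending;               /* whatever state the fops need */
};

static const struct file_operations foo_event_fops;   /* read/poll/release */

static struct file *foo_alloc_event_file(void)
{
	struct foo_event *ev;
	struct file *filp;

	ev = kzalloc(sizeof(*ev), GFP_KERNEL);
	if (!ev)
		return ERR_PTR(-ENOMEM);

	/*
	 * One call replaces the old filesystem registration, mount,
	 * path_get() and alloc_file() dance; "[foo_event]" is only the
	 * name that shows up in /proc/<pid>/fd.
	 */
	filp = anon_inode_getfile("[foo_event]", &foo_event_fops, ev, O_RDONLY);
	if (IS_ERR(filp))
		kfree(ev);

	return filp;
}

The later hunks that delete uverbs_event_fs, INFINIBANDEVENTFS_MAGIC and the kern_mount()/mntput() calls are the matching cleanup.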
@@ -617,14 +584,12 @@ static int ib_uverbs_mmap(struct file *filp, struct vm_area_struct *vma)
/*
* ib_uverbs_open() does not need the BKL:
*
- * - dev_table[] accesses are protected by map_lock, the
- * ib_uverbs_device structures are properly reference counted, and
+ * - the ib_uverbs_device structures are properly reference counted and
* everything else is purely local to the file being created, so
* races against other open calls are not a problem;
* - there is no ioctl method to race against;
- * - the device is added to dev_table[] as the last part of module
- * initialization, the open method will either immediately run
- * -ENXIO, or all required initialization will be done.
+ * - the open method will either immediately run -ENXIO, or all
+ * required initialization will be done.
*/
static int ib_uverbs_open(struct inode *inode, struct file *filp)
{
@@ -632,13 +597,10 @@ static int ib_uverbs_open(struct inode *inode, struct file *filp)
struct ib_uverbs_file *file;
int ret;
- spin_lock(&map_lock);
- dev = dev_table[iminor(inode) - IB_UVERBS_BASE_MINOR];
+ dev = container_of(inode->i_cdev, struct ib_uverbs_device, cdev);
if (dev)
kref_get(&dev->ref);
- spin_unlock(&map_lock);
-
- if (!dev)
+ else
return -ENXIO;
if (!try_module_get(dev->ib_dev->owner)) {
@@ -685,17 +647,17 @@ static int ib_uverbs_close(struct inode *inode, struct file *filp)
}
static const struct file_operations uverbs_fops = {
- .owner = THIS_MODULE,
- .write = ib_uverbs_write,
- .open = ib_uverbs_open,
+ .owner = THIS_MODULE,
+ .write = ib_uverbs_write,
+ .open = ib_uverbs_open,
.release = ib_uverbs_close
};
static const struct file_operations uverbs_mmap_fops = {
- .owner = THIS_MODULE,
- .write = ib_uverbs_write,
+ .owner = THIS_MODULE,
+ .write = ib_uverbs_write,
.mmap = ib_uverbs_mmap,
- .open = ib_uverbs_open,
+ .open = ib_uverbs_open,
.release = ib_uverbs_close
};
@@ -735,8 +697,38 @@ static ssize_t show_abi_version(struct class *class, char *buf)
}
static CLASS_ATTR(abi_version, S_IRUGO, show_abi_version, NULL);
+static dev_t overflow_maj;
+static DECLARE_BITMAP(overflow_map, IB_UVERBS_MAX_DEVICES);
+
+/*
+ * If we have more than IB_UVERBS_MAX_DEVICES, dynamically overflow by
+ * requesting a new major number and doubling the number of max devices we
+ * support. It's stupid, but simple.
+ */
+static int find_overflow_devnum(void)
+{
+ int ret;
+
+ if (!overflow_maj) {
+ ret = alloc_chrdev_region(&overflow_maj, 0, IB_UVERBS_MAX_DEVICES,
+ "infiniband_verbs");
+ if (ret) {
+ printk(KERN_ERR "user_verbs: couldn't register dynamic device number\n");
+ return ret;
+ }
+ }
+
+ ret = find_first_zero_bit(overflow_map, IB_UVERBS_MAX_DEVICES);
+ if (ret >= IB_UVERBS_MAX_DEVICES)
+ return -1;
+
+ return ret;
+}
+
static void ib_uverbs_add_one(struct ib_device *device)
{
+ int devnum;
+ dev_t base;
struct ib_uverbs_device *uverbs_dev;
if (!device->alloc_ucontext)
@@ -750,28 +742,36 @@ static void ib_uverbs_add_one(struct ib_device *device)
init_completion(&uverbs_dev->comp);
spin_lock(&map_lock);
- uverbs_dev->devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
- if (uverbs_dev->devnum >= IB_UVERBS_MAX_DEVICES) {
+ devnum = find_first_zero_bit(dev_map, IB_UVERBS_MAX_DEVICES);
+ if (devnum >= IB_UVERBS_MAX_DEVICES) {
spin_unlock(&map_lock);
- goto err;
+ devnum = find_overflow_devnum();
+ if (devnum < 0)
+ goto err;
+
+ spin_lock(&map_lock);
+ uverbs_dev->devnum = devnum + IB_UVERBS_MAX_DEVICES;
+ base = devnum + overflow_maj;
+ set_bit(devnum, overflow_map);
+ } else {
+ uverbs_dev->devnum = devnum;
+ base = devnum + IB_UVERBS_BASE_DEV;
+ set_bit(devnum, dev_map);
}
- set_bit(uverbs_dev->devnum, dev_map);
spin_unlock(&map_lock);
uverbs_dev->ib_dev = device;
uverbs_dev->num_comp_vectors = device->num_comp_vectors;
- uverbs_dev->cdev = cdev_alloc();
- if (!uverbs_dev->cdev)
- goto err;
- uverbs_dev->cdev->owner = THIS_MODULE;
- uverbs_dev->cdev->ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
- kobject_set_name(&uverbs_dev->cdev->kobj, "uverbs%d", uverbs_dev->devnum);
- if (cdev_add(uverbs_dev->cdev, IB_UVERBS_BASE_DEV + uverbs_dev->devnum, 1))
+ cdev_init(&uverbs_dev->cdev, NULL);
+ uverbs_dev->cdev.owner = THIS_MODULE;
+ uverbs_dev->cdev.ops = device->mmap ? &uverbs_mmap_fops : &uverbs_fops;
+ kobject_set_name(&uverbs_dev->cdev.kobj, "uverbs%d", uverbs_dev->devnum);
+ if (cdev_add(&uverbs_dev->cdev, base, 1))
goto err_cdev;
uverbs_dev->dev = device_create(uverbs_class, device->dma_device,
- uverbs_dev->cdev->dev, uverbs_dev,
+ uverbs_dev->cdev.dev, uverbs_dev,
"uverbs%d", uverbs_dev->devnum);
if (IS_ERR(uverbs_dev->dev))
goto err_cdev;
@@ -781,20 +781,19 @@ static void ib_uverbs_add_one(struct ib_device *device)
if (device_create_file(uverbs_dev->dev, &dev_attr_abi_version))
goto err_class;
- spin_lock(&map_lock);
- dev_table[uverbs_dev->devnum] = uverbs_dev;
- spin_unlock(&map_lock);
-
ib_set_client_data(device, &uverbs_client, uverbs_dev);
return;
err_class:
- device_destroy(uverbs_class, uverbs_dev->cdev->dev);
+ device_destroy(uverbs_class, uverbs_dev->cdev.dev);
err_cdev:
- cdev_del(uverbs_dev->cdev);
- clear_bit(uverbs_dev->devnum, dev_map);
+ cdev_del(&uverbs_dev->cdev);
+ if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
+ clear_bit(devnum, dev_map);
+ else
+ clear_bit(devnum, overflow_map);
err:
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
@@ -811,35 +810,19 @@ static void ib_uverbs_remove_one(struct ib_device *device)
return;
dev_set_drvdata(uverbs_dev->dev, NULL);
- device_destroy(uverbs_class, uverbs_dev->cdev->dev);
- cdev_del(uverbs_dev->cdev);
+ device_destroy(uverbs_class, uverbs_dev->cdev.dev);
+ cdev_del(&uverbs_dev->cdev);
- spin_lock(&map_lock);
- dev_table[uverbs_dev->devnum] = NULL;
- spin_unlock(&map_lock);
-
- clear_bit(uverbs_dev->devnum, dev_map);
+ if (uverbs_dev->devnum < IB_UVERBS_MAX_DEVICES)
+ clear_bit(uverbs_dev->devnum, dev_map);
+ else
+ clear_bit(uverbs_dev->devnum - IB_UVERBS_MAX_DEVICES, overflow_map);
kref_put(&uverbs_dev->ref, ib_uverbs_release_dev);
wait_for_completion(&uverbs_dev->comp);
kfree(uverbs_dev);
}
-static int uverbs_event_get_sb(struct file_system_type *fs_type, int flags,
- const char *dev_name, void *data,
- struct vfsmount *mnt)
-{
- return get_sb_pseudo(fs_type, "infinibandevent:", NULL,
- INFINIBANDEVENTFS_MAGIC, mnt);
-}
-
-static struct file_system_type uverbs_event_fs = {
- /* No owner field so module can be unloaded */
- .name = "infinibandeventfs",
- .get_sb = uverbs_event_get_sb,
- .kill_sb = kill_litter_super
-};
-
static int __init ib_uverbs_init(void)
{
int ret;
@@ -864,33 +847,14 @@ static int __init ib_uverbs_init(void)
goto out_class;
}
- ret = register_filesystem(&uverbs_event_fs);
- if (ret) {
- printk(KERN_ERR "user_verbs: couldn't register infinibandeventfs\n");
- goto out_class;
- }
-
- uverbs_event_mnt = kern_mount(&uverbs_event_fs);
- if (IS_ERR(uverbs_event_mnt)) {
- ret = PTR_ERR(uverbs_event_mnt);
- printk(KERN_ERR "user_verbs: couldn't mount infinibandeventfs\n");
- goto out_fs;
- }
-
ret = ib_register_client(&uverbs_client);
if (ret) {
printk(KERN_ERR "user_verbs: couldn't register client\n");
- goto out_mnt;
+ goto out_class;
}
return 0;
-out_mnt:
- mntput(uverbs_event_mnt);
-
-out_fs:
- unregister_filesystem(&uverbs_event_fs);
-
out_class:
class_destroy(uverbs_class);
@@ -904,10 +868,10 @@ out:
static void __exit ib_uverbs_cleanup(void)
{
ib_unregister_client(&uverbs_client);
- mntput(uverbs_event_mnt);
- unregister_filesystem(&uverbs_event_fs);
class_destroy(uverbs_class);
unregister_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES);
+ if (overflow_maj)
+ unregister_chrdev_region(overflow_maj, IB_UVERBS_MAX_DEVICES);
idr_destroy(&ib_uverbs_pd_idr);
idr_destroy(&ib_uverbs_mr_idr);
idr_destroy(&ib_uverbs_mw_idr);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 0677fc7dfd5..a28e862f2d6 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -109,7 +109,6 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
while (!CQ_VLD_ENTRY(rptr, cq->size_log2, cqe)) {
udelay(1);
if (i++ > 1000000) {
- BUG_ON(1);
printk(KERN_ERR "%s: stalled rnic\n",
rdev_p->dev_name);
return -EIO;
@@ -155,7 +154,7 @@ static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
return iwch_cxgb3_ofld_send(rdev_p->t3cdev_p, skb);
}
-int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
+int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq, int kernel)
{
struct rdma_cq_setup setup;
int size = (1UL << (cq->size_log2)) * sizeof(struct t3_cqe);
@@ -163,12 +162,12 @@ int cxio_create_cq(struct cxio_rdev *rdev_p, struct t3_cq *cq)
cq->cqid = cxio_hal_get_cqid(rdev_p->rscp);
if (!cq->cqid)
return -ENOMEM;
- cq->sw_queue = kzalloc(size, GFP_KERNEL);
- if (!cq->sw_queue)
- return -ENOMEM;
- cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev),
- (1UL << (cq->size_log2)) *
- sizeof(struct t3_cqe),
+ if (kernel) {
+ cq->sw_queue = kzalloc(size, GFP_KERNEL);
+ if (!cq->sw_queue)
+ return -ENOMEM;
+ }
+ cq->queue = dma_alloc_coherent(&(rdev_p->rnic_info.pdev->dev), size,
&(cq->dma_addr), GFP_KERNEL);
if (!cq->queue) {
kfree(cq->sw_queue);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.h b/drivers/infiniband/hw/cxgb3/cxio_hal.h
index f3d440cc68f..073373c2c56 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.h
@@ -53,7 +53,7 @@
#define T3_MAX_PBL_SIZE 256
#define T3_MAX_RQ_SIZE 1024
#define T3_MAX_QP_DEPTH (T3_MAX_RQ_SIZE-1)
-#define T3_MAX_CQ_DEPTH 8192
+#define T3_MAX_CQ_DEPTH 262144
#define T3_MAX_NUM_STAG (1<<15)
#define T3_MAX_MR_SIZE 0x100000000ULL
#define T3_PAGESIZE_MASK 0xffff000 /* 4KB-128MB */
@@ -157,7 +157,7 @@ int cxio_rdev_open(struct cxio_rdev *rdev);
void cxio_rdev_close(struct cxio_rdev *rdev);
int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
enum t3_cq_opcode op, u32 credit);
-int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
+int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq, int kernel);
int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
void cxio_release_ucontext(struct cxio_rdev *rdev, struct cxio_ucontext *uctx);
diff --git a/drivers/infiniband/hw/cxgb3/cxio_wr.h b/drivers/infiniband/hw/cxgb3/cxio_wr.h
index a197a5b7ac7..15073b2da1c 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_wr.h
+++ b/drivers/infiniband/hw/cxgb3/cxio_wr.h
@@ -730,7 +730,22 @@ struct t3_cq {
static inline void cxio_set_wq_in_error(struct t3_wq *wq)
{
- wq->queue->wq_in_err.err = 1;
+ wq->queue->wq_in_err.err |= 1;
+}
+
+static inline void cxio_disable_wq_db(struct t3_wq *wq)
+{
+ wq->queue->wq_in_err.err |= 2;
+}
+
+static inline void cxio_enable_wq_db(struct t3_wq *wq)
+{
+ wq->queue->wq_in_err.err &= ~2;
+}
+
+static inline int cxio_wq_db_enabled(struct t3_wq *wq)
+{
+ return !(wq->queue->wq_in_err.err & 2);
}
static inline struct t3_cqe *cxio_next_hw_cqe(struct t3_cq *cq)
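The wq_in_err.err word, previously a plain 0/1 error flag, is now treated as a small bit field: bit 0 still marks the queue as in error, while bit 1 is a "doorbells disabled" latch that the posting paths check before ringing (see the iwch_qp.c hunks further down). Because the error setter now ORs in bit 0 instead of assigning 1, it no longer clobbers that latch. A tiny sketch of the same flag convention, with illustrative names:

#include <linux/types.h>

#define FOO_WQ_IN_ERROR   0x1   /* fatal: queue is in the error state */
#define FOO_WQ_DB_OFF     0x2   /* transient: hold doorbells for now  */

struct foo_wq { u32 flags; };

static inline void foo_wq_set_error(struct foo_wq *wq)  { wq->flags |= FOO_WQ_IN_ERROR; }
static inline void foo_wq_db_off(struct foo_wq *wq)     { wq->flags |= FOO_WQ_DB_OFF; }
static inline void foo_wq_db_on(struct foo_wq *wq)      { wq->flags &= ~FOO_WQ_DB_OFF; }
static inline int  foo_wq_db_enabled(struct foo_wq *wq) { return !(wq->flags & FOO_WQ_DB_OFF); }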
diff --git a/drivers/infiniband/hw/cxgb3/iwch.c b/drivers/infiniband/hw/cxgb3/iwch.c
index b0ea0105ddf..ee1d8b4d454 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.c
+++ b/drivers/infiniband/hw/cxgb3/iwch.c
@@ -65,6 +65,46 @@ struct cxgb3_client t3c_client = {
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(dev_mutex);
+static int disable_qp_db(int id, void *p, void *data)
+{
+ struct iwch_qp *qhp = p;
+
+ cxio_disable_wq_db(&qhp->wq);
+ return 0;
+}
+
+static int enable_qp_db(int id, void *p, void *data)
+{
+ struct iwch_qp *qhp = p;
+
+ if (data)
+ ring_doorbell(qhp->rhp->rdev.ctrl_qp.doorbell, qhp->wq.qpid);
+ cxio_enable_wq_db(&qhp->wq);
+ return 0;
+}
+
+static void disable_dbs(struct iwch_dev *rnicp)
+{
+ spin_lock_irq(&rnicp->lock);
+ idr_for_each(&rnicp->qpidr, disable_qp_db, NULL);
+ spin_unlock_irq(&rnicp->lock);
+}
+
+static void enable_dbs(struct iwch_dev *rnicp, int ring_db)
+{
+ spin_lock_irq(&rnicp->lock);
+ idr_for_each(&rnicp->qpidr, enable_qp_db,
+ (void *)(unsigned long)ring_db);
+ spin_unlock_irq(&rnicp->lock);
+}
+
+static void iwch_db_drop_task(struct work_struct *work)
+{
+ struct iwch_dev *rnicp = container_of(work, struct iwch_dev,
+ db_drop_task.work);
+ enable_dbs(rnicp, 1);
+}
+
static void rnic_init(struct iwch_dev *rnicp)
{
PDBG("%s iwch_dev %p\n", __func__, rnicp);
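disable_dbs()/enable_dbs() above fan the per-QP doorbell toggle out to every QP the device tracks by walking qpidr under the device lock with idr_for_each(); the opaque data argument carries the "ring the doorbell after re-enabling" flag into the callback. A minimal sketch of that iteration pattern, with placeholder structures:

#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct foo_dev {
	spinlock_t lock;
	struct idr qpidr;          /* id -> struct foo_qp */
};

struct foo_qp {
	bool db_disabled;
};

/* Invoked once per object in the IDR; runs under the spinlock, must not sleep. */
static int foo_set_db_state(int id, void *p, void *data)
{
	struct foo_qp *qp = p;
	bool disable = (bool)(unsigned long)data;

	qp->db_disabled = disable;
	return 0;                  /* non-zero would abort the walk */
}

static void foo_set_all_db_state(struct foo_dev *dev, bool disable)
{
	spin_lock_irq(&dev->lock);
	idr_for_each(&dev->qpidr, foo_set_db_state,
		     (void *)(unsigned long)disable);
	spin_unlock_irq(&dev->lock);
}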
@@ -72,6 +112,7 @@ static void rnic_init(struct iwch_dev *rnicp)
idr_init(&rnicp->qpidr);
idr_init(&rnicp->mmidr);
spin_lock_init(&rnicp->lock);
+ INIT_DELAYED_WORK(&rnicp->db_drop_task, iwch_db_drop_task);
rnicp->attr.max_qps = T3_MAX_NUM_QP - 32;
rnicp->attr.max_wrs = T3_MAX_QP_DEPTH;
@@ -147,6 +188,8 @@ static void close_rnic_dev(struct t3cdev *tdev)
mutex_lock(&dev_mutex);
list_for_each_entry_safe(dev, tmp, &dev_list, entry) {
if (dev->rdev.t3cdev_p == tdev) {
+ dev->rdev.flags = CXIO_ERROR_FATAL;
+ cancel_delayed_work_sync(&dev->db_drop_task);
list_del(&dev->entry);
iwch_unregister_device(dev);
cxio_rdev_close(&dev->rdev);
@@ -165,7 +208,8 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
struct cxio_rdev *rdev = tdev->ulp;
struct iwch_dev *rnicp;
struct ib_event event;
- u32 portnum = port_id + 1;
+ u32 portnum = port_id + 1;
+ int dispatch = 0;
if (!rdev)
return;
@@ -174,21 +218,49 @@ static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
case OFFLOAD_STATUS_DOWN: {
rdev->flags = CXIO_ERROR_FATAL;
event.event = IB_EVENT_DEVICE_FATAL;
+ dispatch = 1;
break;
}
case OFFLOAD_PORT_DOWN: {
event.event = IB_EVENT_PORT_ERR;
+ dispatch = 1;
break;
}
case OFFLOAD_PORT_UP: {
event.event = IB_EVENT_PORT_ACTIVE;
+ dispatch = 1;
+ break;
+ }
+ case OFFLOAD_DB_FULL: {
+ disable_dbs(rnicp);
+ break;
+ }
+ case OFFLOAD_DB_EMPTY: {
+ enable_dbs(rnicp, 1);
+ break;
+ }
+ case OFFLOAD_DB_DROP: {
+ unsigned long delay = 1000;
+ unsigned short r;
+
+ disable_dbs(rnicp);
+ get_random_bytes(&r, 2);
+ delay += r & 1023;
+
+ /*
+ * delay is between 1000-2023 usecs.
+ */
+ schedule_delayed_work(&rnicp->db_drop_task,
+ usecs_to_jiffies(delay));
break;
}
}
- event.device = &rnicp->ibdev;
- event.element.port_num = portnum;
- ib_dispatch_event(&event);
+ if (dispatch) {
+ event.device = &rnicp->ibdev;
+ event.element.port_num = portnum;
+ ib_dispatch_event(&event);
+ }
return;
}
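For OFFLOAD_DB_DROP the handler above stops doorbells immediately and re-enables them from a delayed work item after a randomized 1000-2023 us back-off, so that many queues dropped at once do not all hit the hardware again in the same jiffy. A stripped-down sketch of the schedule-with-jitter step, assuming a delayed_work already initialized against the re-enable handler:

#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/jiffies.h>

static void foo_schedule_reenable(struct delayed_work *dwork)
{
	unsigned long delay_us = 1000;
	unsigned short r;

	get_random_bytes(&r, sizeof(r));
	delay_us += r & 1023;                    /* 1000..2023 us of jitter */

	schedule_delayed_work(dwork, usecs_to_jiffies(delay_us));
}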
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index 84735506333..a1c44578e03 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -36,6 +36,7 @@
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
+#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>
@@ -110,6 +111,7 @@ struct iwch_dev {
struct idr mmidr;
spinlock_t lock;
struct list_head entry;
+ struct delayed_work db_drop_task;
};
static inline struct iwch_dev *to_iwch_dev(struct ib_device *ibdev)
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c
index 66b41351910..d94388b81a4 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_cm.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c
@@ -1371,15 +1371,8 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
tim.mac_addr = req->dst_mac;
tim.vlan_tag = ntohs(req->vlan_tag);
if (tdev->ctl(tdev, GET_IFF_FROM_MAC, &tim) < 0 || !tim.dev) {
- printk(KERN_ERR
- "%s bad dst mac %02x %02x %02x %02x %02x %02x\n",
- __func__,
- req->dst_mac[0],
- req->dst_mac[1],
- req->dst_mac[2],
- req->dst_mac[3],
- req->dst_mac[4],
- req->dst_mac[5]);
+ printk(KERN_ERR "%s bad dst mac %pM\n",
+ __func__, req->dst_mac);
goto reject;
}
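The printk above collapses six "%02x" arguments into the kernel's %pM extension, which formats a 6-byte Ethernet address as colon-separated hex straight from the pointer. Equivalent usage in isolation (the helper and its argument are just example data):

#include <linux/kernel.h>

static void foo_log_mac(const unsigned char mac[6])
{
	/* %pM prints "aa:bb:cc:dd:ee:ff" from the 6-byte buffer. */
	printk(KERN_INFO "station MAC is %pM\n", mac);
}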
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index ed7175549eb..47b35c6608d 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -187,7 +187,7 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
entries = roundup_pow_of_two(entries);
chp->cq.size_log2 = ilog2(entries);
- if (cxio_create_cq(&rhp->rdev, &chp->cq)) {
+ if (cxio_create_cq(&rhp->rdev, &chp->cq, !ucontext)) {
kfree(chp);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 3eb8cecf81d..b4d893de365 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -452,7 +452,8 @@ int iwch_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
++(qhp->wq.sq_wptr);
}
spin_unlock_irqrestore(&qhp->lock, flag);
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+ if (cxio_wq_db_enabled(&qhp->wq))
+ ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
out:
if (err)
@@ -514,7 +515,8 @@ int iwch_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
num_wrs--;
}
spin_unlock_irqrestore(&qhp->lock, flag);
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+ if (cxio_wq_db_enabled(&qhp->wq))
+ ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
out:
if (err)
@@ -597,7 +599,8 @@ int iwch_bind_mw(struct ib_qp *qp,
++(qhp->wq.sq_wptr);
spin_unlock_irqrestore(&qhp->lock, flag);
- ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
+ if (cxio_wq_db_enabled(&qhp->wq))
+ ring_doorbell(qhp->wq.doorbell, qhp->wq.qpid);
return err;
}
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index 42be0b15084..b2b6fea2b14 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -548,11 +548,10 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
struct ehca_eq *eq = &shca->eq;
struct ehca_eqe_cache_entry *eqe_cache = eq->eqe_cache;
u64 eqe_value, ret;
- unsigned long flags;
int eqe_cnt, i;
int eq_empty = 0;
- spin_lock_irqsave(&eq->irq_spinlock, flags);
+ spin_lock(&eq->irq_spinlock);
if (is_irq) {
const int max_query_cnt = 100;
int query_cnt = 0;
@@ -643,7 +642,7 @@ void ehca_process_eq(struct ehca_shca *shca, int is_irq)
} while (1);
unlock_irq_spinlock:
- spin_unlock_irqrestore(&eq->irq_spinlock, flags);
+ spin_unlock(&eq->irq_spinlock);
}
void ehca_tasklet_eq(unsigned long data)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c
index 0338f1fabe8..b105f664d3e 100644
--- a/drivers/infiniband/hw/ehca/ehca_qp.c
+++ b/drivers/infiniband/hw/ehca/ehca_qp.c
@@ -55,9 +55,7 @@ static struct kmem_cache *qp_cache;
/*
* attributes not supported by query qp
*/
-#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_MAX_DEST_RD_ATOMIC | \
- IB_QP_MAX_QP_RD_ATOMIC | \
- IB_QP_ACCESS_FLAGS | \
+#define QP_ATTR_QUERY_NOT_SUPPORTED (IB_QP_ACCESS_FLAGS | \
IB_QP_EN_SQD_ASYNC_NOTIFY)
/*
diff --git a/drivers/infiniband/hw/ehca/ehca_sqp.c b/drivers/infiniband/hw/ehca/ehca_sqp.c
index 8c1213f8916..dba8f9f8b99 100644
--- a/drivers/infiniband/hw/ehca/ehca_sqp.c
+++ b/drivers/infiniband/hw/ehca/ehca_sqp.c
@@ -222,7 +222,7 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
{
int ret;
- if (!port_num || port_num > ibdev->phys_port_cnt)
+ if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc)
return IB_MAD_RESULT_FAILURE;
/* accept only pma request */
diff --git a/drivers/infiniband/hw/ipath/ipath_user_pages.c b/drivers/infiniband/hw/ipath/ipath_user_pages.c
index 82878e34862..eb7d59abd12 100644
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c
+++ b/drivers/infiniband/hw/ipath/ipath_user_pages.c
@@ -59,8 +59,7 @@ static int __get_user_pages(unsigned long start_page, size_t num_pages,
size_t got;
int ret;
- lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
- PAGE_SHIFT;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
if (num_pages > lock_limit) {
ret = -ENOMEM;
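The open-coded current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur read is replaced by the rlimit() accessor, which returns the calling task's current (soft) limit; shifting by PAGE_SHIFT converts the byte limit into a pinning budget in pages. A one-function illustration of the same check, with a hypothetical caller name:

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int foo_check_pin_budget(size_t num_pages)
{
	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	return num_pages > lock_limit ? -ENOMEM : 0;
}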
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index 2a97c964b9e..ae75389937d 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1214,7 +1214,7 @@ out:
static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
void *wqe, unsigned *mlx_seg_len)
{
- struct ib_device *ib_dev = &to_mdev(sqp->qp.ibqp.device)->ib_dev;
+ struct ib_device *ib_dev = sqp->qp.ibqp.device;
struct mlx4_wqe_mlx_seg *mlx = wqe;
struct mlx4_wqe_inline_seg *inl = wqe + sizeof *mlx;
struct mlx4_ib_ah *ah = to_mah(wr->wr.ud.ah);
@@ -1228,7 +1228,7 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,
for (i = 0; i < wr->num_sge; ++i)
send_size += wr->sg_list[i].length;
- ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), &sqp->ud_header);
+ ib_ud_header_init(send_size, mlx4_ib_ah_grh_present(ah), 0, &sqp->ud_header);
sqp->ud_header.lrh.service_level =
be32_to_cpu(ah->av.sl_tclass_flowlabel) >> 28;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index c10576fa60c..d2d172e6289 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1494,7 +1494,7 @@ static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
u16 pkey;
ib_ud_header_init(256, /* assume a MAD */
- mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
+ mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
&sqp->ud_header);
err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index b9d09bafd6c..4272c52e38a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -110,6 +110,7 @@ static unsigned int sysfs_idx_addr;
static struct pci_device_id nes_pci_table[] = {
{PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020, PCI_ANY_ID, PCI_ANY_ID},
+ {PCI_VENDOR_ID_NETEFFECT, PCI_DEVICE_ID_NETEFFECT_NE020_KR, PCI_ANY_ID, PCI_ANY_ID},
{0}
};
diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
index 98840564bb2..cc78fee1dd5 100644
--- a/drivers/infiniband/hw/nes/nes.h
+++ b/drivers/infiniband/hw/nes/nes.h
@@ -64,8 +64,9 @@
* NetEffect PCI vendor id and NE010 PCI device id.
*/
#ifndef PCI_VENDOR_ID_NETEFFECT /* not in pci.ids yet */
-#define PCI_VENDOR_ID_NETEFFECT 0x1678
-#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
+#define PCI_VENDOR_ID_NETEFFECT 0x1678
+#define PCI_DEVICE_ID_NETEFFECT_NE020 0x0100
+#define PCI_DEVICE_ID_NETEFFECT_NE020_KR 0x0110
#endif
#define NE020_REV 4
@@ -193,8 +194,8 @@ extern u32 cm_packets_created;
extern u32 cm_packets_received;
extern u32 cm_packets_dropped;
extern u32 cm_packets_retrans;
-extern u32 cm_listens_created;
-extern u32 cm_listens_destroyed;
+extern atomic_t cm_listens_created;
+extern atomic_t cm_listens_destroyed;
extern u32 cm_backlog_drops;
extern atomic_t cm_loopbacks;
extern atomic_t cm_nodes_created;
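cm_listens_created and cm_listens_destroyed move from plain u32 counters bumped with ++ to atomic_t, since they are incremented from paths that can run concurrently without sharing a lock; readers take a snapshot with atomic_read(). A minimal sketch of the pattern, with an illustrative counter name:

#include <asm/atomic.h>   /* <linux/atomic.h> on newer trees */

static atomic_t foo_listens_created = ATOMIC_INIT(0);

static void foo_on_listen_created(void)
{
	atomic_inc(&foo_listens_created);        /* was: foo_listens_created++ */
}

static int foo_listens_created_snapshot(void)
{
	return atomic_read(&foo_listens_created);
}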
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 39468c27703..2a49ee40b52 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -67,8 +67,8 @@ u32 cm_packets_dropped;
u32 cm_packets_retrans;
u32 cm_packets_created;
u32 cm_packets_received;
-u32 cm_listens_created;
-u32 cm_listens_destroyed;
+atomic_t cm_listens_created;
+atomic_t cm_listens_destroyed;
u32 cm_backlog_drops;
atomic_t cm_loopbacks;
atomic_t cm_nodes_created;
@@ -1011,9 +1011,10 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
event.cm_info.loc_port =
loopback->loc_port;
event.cm_info.cm_id = loopback->cm_id;
+ add_ref_cm_node(loopback);
+ loopback->state = NES_CM_STATE_CLOSED;
cm_event_connect_error(&event);
cm_node->state = NES_CM_STATE_LISTENER_DESTROYED;
- loopback->state = NES_CM_STATE_CLOSED;
rem_ref_cm_node(cm_node->cm_core,
cm_node);
@@ -1042,7 +1043,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
kfree(listener);
listener = NULL;
ret = 0;
- cm_listens_destroyed++;
+ atomic_inc(&cm_listens_destroyed);
} else {
spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
}
@@ -3172,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
return err;
}
- cm_listens_created++;
+ atomic_inc(&cm_listens_created);
}
cm_id->add_ref(cm_id);
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c
index b1c2cbb88f0..ce7f5383357 100644
--- a/drivers/infiniband/hw/nes/nes_hw.c
+++ b/drivers/infiniband/hw/nes/nes_hw.c
@@ -748,16 +748,28 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
if (hw_rev != NE020_REV) {
/* init serdes 0 */
- if (wide_ppm_offset && (nesadapter->phy_type[0] == NES_PHY_TYPE_CX4))
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
- else
+ switch (nesadapter->phy_type[0]) {
+ case NES_PHY_TYPE_CX4:
+ if (wide_ppm_offset)
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000FFFAA);
+ else
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ break;
+ case NES_PHY_TYPE_KR:
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP0, 0x00000000);
+ break;
+ case NES_PHY_TYPE_PUMA_1G:
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
-
- if (nesadapter->phy_type[0] == NES_PHY_TYPE_PUMA_1G) {
sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0);
sds |= 0x00000100;
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0, sds);
+ break;
+ default:
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL0, 0x000000FF);
+ break;
}
+
if (!OneG_Mode)
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_HIGHZ_LANE_MODE0, 0x11110000);
@@ -778,6 +790,9 @@ static int nes_init_serdes(struct nes_device *nesdev, u8 hw_rev, u8 port_count,
if (wide_ppm_offset)
nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_CDR_CONTROL1, 0x000FFFAA);
break;
+ case NES_PHY_TYPE_KR:
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_TX_EMP1, 0x00000000);
+ break;
case NES_PHY_TYPE_PUMA_1G:
sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL1);
sds |= 0x000000100;
@@ -1279,115 +1294,115 @@ int nes_destroy_cqp(struct nes_device *nesdev)
/**
- * nes_init_phy
+ * nes_init_1g_phy
*/
-int nes_init_phy(struct nes_device *nesdev)
+int nes_init_1g_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
{
- struct nes_adapter *nesadapter = nesdev->nesadapter;
u32 counter = 0;
- u32 sds;
- u32 mac_index = nesdev->mac_index;
- u32 tx_config = 0;
u16 phy_data;
- u32 temp_phy_data = 0;
- u32 temp_phy_data2 = 0;
- u8 phy_type = nesadapter->phy_type[mac_index];
- u8 phy_index = nesadapter->phy_index[mac_index];
-
- if ((nesadapter->OneG_Mode) &&
- (phy_type != NES_PHY_TYPE_PUMA_1G)) {
- nes_debug(NES_DBG_PHY, "1G PHY, mac_index = %d.\n", mac_index);
- if (phy_type == NES_PHY_TYPE_1G) {
- tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
- tx_config &= 0xFFFFFFE3;
- tx_config |= 0x04;
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
- }
+ int ret = 0;
- nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
+ nes_read_1G_phy_reg(nesdev, 1, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 23, phy_index, 0xb000);
- /* Reset the PHY */
- nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
- udelay(100);
- counter = 0;
- do {
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- if (counter++ > 100)
- break;
- } while (phy_data & 0x8000);
-
- /* Setting no phy loopback */
- phy_data &= 0xbfff;
- phy_data |= 0x1140;
- nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
+ /* Reset the PHY */
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, 0x8000);
+ udelay(100);
+ counter = 0;
+ do {
nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
- nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
-
- /* Setting the interrupt mask */
- nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
- nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
+ if (counter++ > 100) {
+ ret = -1;
+ break;
+ }
+ } while (phy_data & 0x8000);
+
+ /* Setting no phy loopback */
+ phy_data &= 0xbfff;
+ phy_data |= 0x1140;
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data);
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ nes_read_1G_phy_reg(nesdev, 0x17, phy_index, &phy_data);
+ nes_read_1G_phy_reg(nesdev, 0x1e, phy_index, &phy_data);
+
+ /* Setting the interrupt mask */
+ nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 0x19, phy_index, 0xffee);
+ nes_read_1G_phy_reg(nesdev, 0x19, phy_index, &phy_data);
+
+ /* turning on flow control */
+ nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
+ nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
+
+ /* Clear Half duplex */
+ nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
+ nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
+
+ nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
+ nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
+
+ return ret;
+}
- /* turning on flow control */
- nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 4, phy_index, (phy_data & ~(0x03E0)) | 0xc00);
- nes_read_1G_phy_reg(nesdev, 4, phy_index, &phy_data);
- /* Clear Half duplex */
- nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 9, phy_index, phy_data & ~(0x0100));
- nes_read_1G_phy_reg(nesdev, 9, phy_index, &phy_data);
+/**
+ * nes_init_2025_phy
+ */
+int nes_init_2025_phy(struct nes_device *nesdev, u8 phy_type, u8 phy_index)
+{
+ u32 temp_phy_data = 0;
+ u32 temp_phy_data2 = 0;
+ u32 counter = 0;
+ u32 sds;
+ u32 mac_index = nesdev->mac_index;
+ int ret = 0;
+ unsigned int first_attempt = 1;
- nes_read_1G_phy_reg(nesdev, 0, phy_index, &phy_data);
- nes_write_1G_phy_reg(nesdev, 0, phy_index, phy_data | 0x0300);
+ /* Check firmware heartbeat */
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ udelay(1500);
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- return 0;
+ if (temp_phy_data != temp_phy_data2) {
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ if ((temp_phy_data & 0xff) > 0x20)
+ return 0;
+ printk(PFX "Reinitialize external PHY\n");
}
- if ((phy_type == NES_PHY_TYPE_IRIS) ||
- (phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D)) {
- /* setup 10G MDIO operation */
- tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
- tx_config &= 0xFFFFFFE3;
- tx_config |= 0x15;
- nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
- }
- if ((phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D)) {
- u32 first_time = 1;
+ /* no heartbeat, configure the PHY */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
- /* Check firmware heartbeat */
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- udelay(1500);
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ switch (phy_type) {
+ case NES_PHY_TYPE_ARGUS:
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
- if (temp_phy_data != temp_phy_data2) {
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if ((temp_phy_data & 0xff) > 0x20)
- return 0;
- printk(PFX "Reinitializing PHY\n");
- }
+ /* setup LEDs */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
+ break;
- /* no heartbeat, configure the PHY */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0x0000, 0x8000);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0000);
+ case NES_PHY_TYPE_SFP_D:
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
- if (phy_type == NES_PHY_TYPE_ARGUS) {
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0008);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0001);
- } else {
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
- }
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x0004);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0038);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0098);
nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
@@ -1395,71 +1410,136 @@ int nes_init_phy(struct nes_device *nesdev)
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x0007);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x000A);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0009);
+ break;
+
+ case NES_PHY_TYPE_KR:
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc316, 0x000A);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc318, 0x0052);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc302, 0x000C);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc319, 0x0010);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0027, 0x0013);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc31a, 0x0080);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0026, 0x0E00);
+
+ /* setup LEDs */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd006, 0x000B);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd007, 0x0003);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd008, 0x0004);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0022, 0x406D);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0023, 0x0020);
+ break;
+ }
+
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0x0028, 0xA528);
- /* Bring PHY out of reset */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
+ /* Bring PHY out of reset */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc300, 0x0002);
- /* Check for heartbeat */
- counter = 0;
- mdelay(690);
+ /* Check for heartbeat */
+ counter = 0;
+ mdelay(690);
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ do {
+ if (counter++ > 150) {
+ printk(PFX "No PHY heartbeat\n");
+ break;
+ }
+ mdelay(1);
nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
+ temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
+ } while ((temp_phy_data2 == temp_phy_data));
+
+ /* wait for tracking */
+ counter = 0;
+ do {
+ nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- do {
- if (counter++ > 150) {
- printk(PFX "No PHY heartbeat\n");
+ if (counter++ > 300) {
+ if (((temp_phy_data & 0xff) == 0x0) && first_attempt) {
+ first_attempt = 0;
+ counter = 0;
+ /* reset AMCC PHY and try again */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
+ continue;
+ } else {
+ ret = 1;
break;
}
- mdelay(1);
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7ee);
- temp_phy_data2 = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- } while ((temp_phy_data2 == temp_phy_data));
-
- /* wait for tracking */
- counter = 0;
- do {
- nes_read_10G_phy_reg(nesdev, phy_index, 0x3, 0xd7fd);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if (counter++ > 300) {
- if (((temp_phy_data & 0xff) == 0x0) && first_time) {
- first_time = 0;
- counter = 0;
- /* reset AMCC PHY and try again */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x00c0);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x3, 0xe854, 0x0040);
- continue;
- } else {
- printk(PFX "PHY did not track\n");
- break;
- }
- }
- mdelay(10);
- } while ((temp_phy_data & 0xff) < 0x30);
-
- /* setup signal integrity */
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
- nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
+ }
+ mdelay(10);
+ } while ((temp_phy_data & 0xff) < 0x30);
+
+ /* setup signal integrity */
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xd003, 0x0000);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00D, 0x00FE);
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00E, 0x0032);
+ if (phy_type == NES_PHY_TYPE_KR) {
+ nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x000C);
+ } else {
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xF00F, 0x0002);
nes_write_10G_phy_reg(nesdev, phy_index, 0x1, 0xc314, 0x0063);
+ }
+
+ /* reset serdes */
+ sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200);
+ sds |= 0x1;
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
+ sds &= 0xfffffffe;
+ nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 + mac_index * 0x200, sds);
+
+ counter = 0;
+ while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
+ && (counter++ < 5000))
+ ;
+
+ return ret;
+}
+
- /* reset serdes */
- sds = nes_read_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
- mac_index * 0x200);
- sds |= 0x1;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
- mac_index * 0x200, sds);
- sds &= 0xfffffffe;
- nes_write_indexed(nesdev, NES_IDX_ETH_SERDES_COMMON_CONTROL0 +
- mac_index * 0x200, sds);
-
- counter = 0;
- while (((nes_read32(nesdev->regs + NES_SOFTWARE_RESET) & 0x00000040) != 0x00000040)
- && (counter++ < 5000))
- ;
+/**
+ * nes_init_phy
+ */
+int nes_init_phy(struct nes_device *nesdev)
+{
+ struct nes_adapter *nesadapter = nesdev->nesadapter;
+ u32 mac_index = nesdev->mac_index;
+ u32 tx_config = 0;
+ unsigned long flags;
+ u8 phy_type = nesadapter->phy_type[mac_index];
+ u8 phy_index = nesadapter->phy_index[mac_index];
+ int ret = 0;
+
+ tx_config = nes_read_indexed(nesdev, NES_IDX_MAC_TX_CONFIG);
+ if (phy_type == NES_PHY_TYPE_1G) {
+ /* setup 1G MDIO operation */
+ tx_config &= 0xFFFFFFE3;
+ tx_config |= 0x04;
+ } else {
+ /* setup 10G MDIO operation */
+ tx_config &= 0xFFFFFFE3;
+ tx_config |= 0x15;
}
- return 0;
+ nes_write_indexed(nesdev, NES_IDX_MAC_TX_CONFIG, tx_config);
+
+ spin_lock_irqsave(&nesdev->nesadapter->phy_lock, flags);
+
+ switch (phy_type) {
+ case NES_PHY_TYPE_1G:
+ ret = nes_init_1g_phy(nesdev, phy_type, phy_index);
+ break;
+ case NES_PHY_TYPE_ARGUS:
+ case NES_PHY_TYPE_SFP_D:
+ case NES_PHY_TYPE_KR:
+ ret = nes_init_2025_phy(nesdev, phy_type, phy_index);
+ break;
+ }
+
+ spin_unlock_irqrestore(&nesdev->nesadapter->phy_lock, flags);
+
+ return ret;
}
@@ -2460,23 +2540,9 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number)
}
} else {
switch (nesadapter->phy_type[mac_index]) {
- case NES_PHY_TYPE_IRIS:
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
- temp_phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- u32temp = 20;
- do {
- nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 1, 1);
- phy_data = (u16)nes_read_indexed(nesdev, NES_IDX_MAC_MDIO_CONTROL);
- if ((phy_data == temp_phy_data) || (!(--u32temp)))
- break;
- temp_phy_data = phy_data;
- } while (1);
- nes_debug(NES_DBG_PHY, "%s: Phy data = 0x%04X, link was %s.\n",
- __func__, phy_data, nesadapter->mac_link_down[mac_index] ? "DOWN" : "UP");
- break;
-
case NES_PHY_TYPE_ARGUS:
case NES_PHY_TYPE_SFP_D:
+ case NES_PHY_TYPE_KR:
/* clear the alarms */
nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0x0008);
nes_read_10G_phy_reg(nesdev, nesadapter->phy_index[mac_index], 4, 0xc001);
@@ -3352,8 +3418,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
u16 async_event_id;
u8 tcp_state;
u8 iwarp_state;
- int must_disconn = 1;
- int must_terminate = 0;
struct ib_event ibevent;
nes_debug(NES_DBG_AEQ, "\n");
@@ -3367,6 +3431,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
BUG_ON(!context);
}
+ /* context is nesqp unless async_event_id == CQ ERROR */
+ nesqp = (struct nes_qp *)(unsigned long)context;
async_event_id = (u16)aeq_info;
tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
@@ -3378,8 +3444,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
switch (async_event_id) {
case NES_AEQE_AEID_LLP_FIN_RECEIVED:
- nesqp = (struct nes_qp *)(unsigned long)context;
-
if (nesqp->term_flags)
return; /* Ignore it, wait for close complete */
@@ -3394,79 +3458,48 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
async_event_id, nesqp->last_aeq, tcp_state);
}
- if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- (nesqp->ibqp_state != IB_QPS_RTS)) {
- /* FIN Received but tcp state or IB state moved on,
- should expect a close complete */
- return;
- }
-
+ break;
case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
- nesqp = (struct nes_qp *)(unsigned long)context;
if (nesqp->term_flags) {
nes_terminate_done(nesqp, 0);
return;
}
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_CLOSING, 0, 0);
+ nes_cm_disconn(nesqp);
+ break;
- case NES_AEQE_AEID_LLP_CONNECTION_RESET:
case NES_AEQE_AEID_RESET_SENT:
- nesqp = (struct nes_qp *)(unsigned long)context;
- if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
- tcp_state = NES_AEQE_TCP_STATE_CLOSED;
- }
+ tcp_state = NES_AEQE_TCP_STATE_CLOSED;
spin_lock_irqsave(&nesqp->lock, flags);
nesqp->hw_iwarp_state = iwarp_state;
nesqp->hw_tcp_state = tcp_state;
nesqp->last_aeq = async_event_id;
-
- if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
- (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
- nesqp->hte_added = 0;
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
- }
-
- if ((nesqp->ibqp_state == IB_QPS_RTS) &&
- ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
- (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
- switch (nesqp->hw_iwarp_state) {
- case NES_AEQE_IWARP_STATE_RTS:
- next_iwarp_state = NES_CQP_QP_IWARP_STATE_CLOSING;
- nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
- break;
- case NES_AEQE_IWARP_STATE_TERMINATE:
- must_disconn = 0; /* terminate path takes care of disconn */
- if (nesqp->term_flags == 0)
- must_terminate = 1;
- break;
- }
- } else {
- if (async_event_id == NES_AEQE_AEID_LLP_FIN_RECEIVED) {
- /* FIN Received but ib state not RTS,
- close complete will be on its way */
- must_disconn = 0;
- }
- }
+ nesqp->hte_added = 0;
spin_unlock_irqrestore(&nesqp->lock, flags);
+ next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
+ nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+ nes_cm_disconn(nesqp);
+ break;
- if (must_terminate)
- nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
- else if (must_disconn) {
- if (next_iwarp_state) {
- nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
- nesqp->hwqp.qp_id, next_iwarp_state);
- nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
- }
- nes_cm_disconn(nesqp);
- }
+ case NES_AEQE_AEID_LLP_CONNECTION_RESET:
+ if (atomic_read(&nesqp->close_timer_started))
+ return;
+ spin_lock_irqsave(&nesqp->lock, flags);
+ nesqp->hw_iwarp_state = iwarp_state;
+ nesqp->hw_tcp_state = tcp_state;
+ nesqp->last_aeq = async_event_id;
+ spin_unlock_irqrestore(&nesqp->lock, flags);
+ nes_cm_disconn(nesqp);
break;
case NES_AEQE_AEID_TERMINATE_SENT:
- nesqp = (struct nes_qp *)(unsigned long)context;
nes_terminate_send_fin(nesdev, nesqp, aeqe);
break;
case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
- nesqp = (struct nes_qp *)(unsigned long)context;
nes_terminate_received(nesdev, nesqp, aeqe);
break;
@@ -3480,7 +3513,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
case NES_AEQE_AEID_AMP_TO_WRAP:
- nesqp = (struct nes_qp *)(unsigned long)context;
+ printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_ACCESS_ERR\n",
+ nesqp->hwqp.qp_id, async_event_id);
nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
break;
@@ -3488,7 +3522,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
- nesqp = (struct nes_qp *)(unsigned long)context;
if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
aeq_info &= 0xffff0000;
aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
@@ -3530,7 +3563,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
case NES_AEQE_AEID_STAG_ZERO_INVALID:
case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
- nesqp = (struct nes_qp *)(unsigned long)context;
+ printk(KERN_ERR PFX "QP[%u] async_event_id=0x%04X IB_EVENT_QP_FATAL\n",
+ nesqp->hwqp.qp_id, async_event_id);
nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
break;
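The hunks above hoist the nesqp lookup out of every case label and resolve it once from the AEQE context. A minimal standalone sketch of that shape, with hypothetical names (not the driver's own types):

#include <linux/types.h>

/* Sketch only: the QP pointer is recovered from the event context once,
 * before the switch, instead of being re-cast in each case label. */
struct sketch_qp {
	int term_flags;
};

static void sketch_process_aeqe(void *context, u16 async_event_id)
{
	struct sketch_qp *qp = (struct sketch_qp *)(unsigned long)context;

	switch (async_event_id) {
	case 0x0102:		/* e.g. LLP FIN received */
		if (qp->term_flags)
			return;	/* terminate path owns the teardown */
		break;
	default:
		break;
	}
}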
diff --git a/drivers/infiniband/hw/nes/nes_hw.h b/drivers/infiniband/hw/nes/nes_hw.h
index 084be0ee689..9b1e7f869d8 100644
--- a/drivers/infiniband/hw/nes/nes_hw.h
+++ b/drivers/infiniband/hw/nes/nes_hw.h
@@ -37,12 +37,12 @@
#define NES_PHY_TYPE_CX4 1
#define NES_PHY_TYPE_1G 2
-#define NES_PHY_TYPE_IRIS 3
#define NES_PHY_TYPE_ARGUS 4
#define NES_PHY_TYPE_PUMA_1G 5
#define NES_PHY_TYPE_PUMA_10G 6
#define NES_PHY_TYPE_GLADIUS 7
#define NES_PHY_TYPE_SFP_D 8
+#define NES_PHY_TYPE_KR 9
#define NES_MULTICAST_PF_MAX 8
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index ab110278018..a1d79b6856a 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -810,6 +810,20 @@ static int nes_netdev_set_mac_address(struct net_device *netdev, void *p)
}
+static void set_allmulti(struct nes_device *nesdev, u32 nic_active_bit)
+{
+ u32 nic_active;
+
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
+ nic_active |= nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
+ nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
+ nic_active &= ~nic_active_bit;
+ nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+}
+
+#define get_addr(addrs, index) ((addrs) + (index) * ETH_ALEN)
+
/**
* nes_netdev_set_multicast_list
*/
@@ -818,7 +832,6 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
struct nes_vnic *nesvnic = netdev_priv(netdev);
struct nes_device *nesdev = nesvnic->nesdev;
struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter;
- struct dev_mc_list *multicast_addr;
u32 nic_active_bit;
u32 nic_active;
u32 perfect_filter_register_address;
@@ -831,6 +844,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
nics_per_function, 4);
u8 max_pft_entries_avaiable = NES_PFT_SIZE - pft_entries_preallocated;
unsigned long flags;
+ int mc_count = netdev_mc_count(netdev);
spin_lock_irqsave(&nesadapter->resource_lock, flags);
nic_active_bit = 1 << nesvnic->nic_index;
@@ -845,12 +859,7 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
mc_all_on = 1;
} else if ((netdev->flags & IFF_ALLMULTI) ||
(nesvnic->nic_index > 3)) {
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
- nic_active |= nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL, nic_active);
- nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL);
- nic_active &= ~nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL, nic_active);
+ set_allmulti(nesdev, nic_active_bit);
mc_all_on = 1;
} else {
nic_active = nes_read_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL);
@@ -862,19 +871,30 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
}
nes_debug(NES_DBG_NIC_RX, "Number of MC entries = %d, Promiscuous = %d, All Multicast = %d.\n",
- netdev->mc_count, !!(netdev->flags & IFF_PROMISC),
+ mc_count, !!(netdev->flags & IFF_PROMISC),
!!(netdev->flags & IFF_ALLMULTI));
if (!mc_all_on) {
- multicast_addr = netdev->mc_list;
+ char *addrs;
+ int i;
+ struct dev_mc_list *mcaddr;
+
+ addrs = kmalloc(ETH_ALEN * mc_count, GFP_ATOMIC);
+ if (!addrs) {
+ set_allmulti(nesdev, nic_active_bit);
+ goto unlock;
+ }
+ i = 0;
+ netdev_for_each_mc_addr(mcaddr, netdev)
+ memcpy(get_addr(addrs, i++),
+ mcaddr->dmi_addr, ETH_ALEN);
+
perfect_filter_register_address = NES_IDX_PERFECT_FILTER_LOW +
pft_entries_preallocated * 0x8;
- for (mc_index = 0; mc_index < max_pft_entries_avaiable;
- mc_index++) {
- while (multicast_addr && nesvnic->mcrq_mcast_filter &&
+ for (i = 0, mc_index = 0; mc_index < max_pft_entries_avaiable;
+ mc_index++) {
+ while (i < mc_count && nesvnic->mcrq_mcast_filter &&
((mc_nic_index = nesvnic->mcrq_mcast_filter(nesvnic,
- multicast_addr->dmi_addr)) == 0)) {
- multicast_addr = multicast_addr->next;
- }
+ get_addr(addrs, i++))) == 0));
if (mc_nic_index < 0)
mc_nic_index = nesvnic->nic_index;
while (nesadapter->pft_mcast_map[mc_index] < 16 &&
@@ -890,17 +910,19 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
}
if (mc_index >= max_pft_entries_avaiable)
break;
- if (multicast_addr) {
+ if (i < mc_count) {
+ char *addr = get_addr(addrs, i++);
+
nes_debug(NES_DBG_NIC_RX, "Assigning MC Address %pM to register 0x%04X nic_idx=%d\n",
- multicast_addr->dmi_addr,
+ addr,
perfect_filter_register_address+(mc_index * 8),
mc_nic_index);
- macaddr_high = ((u16)multicast_addr->dmi_addr[0]) << 8;
- macaddr_high += (u16)multicast_addr->dmi_addr[1];
- macaddr_low = ((u32)multicast_addr->dmi_addr[2]) << 24;
- macaddr_low += ((u32)multicast_addr->dmi_addr[3]) << 16;
- macaddr_low += ((u32)multicast_addr->dmi_addr[4]) << 8;
- macaddr_low += (u32)multicast_addr->dmi_addr[5];
+ macaddr_high = ((u16) addr[0]) << 8;
+ macaddr_high += (u16) addr[1];
+ macaddr_low = ((u32) addr[2]) << 24;
+ macaddr_low += ((u32) addr[3]) << 16;
+ macaddr_low += ((u32) addr[4]) << 8;
+ macaddr_low += (u32) addr[5];
nes_write_indexed(nesdev,
perfect_filter_register_address+(mc_index * 8),
macaddr_low);
@@ -908,7 +930,6 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
perfect_filter_register_address+4+(mc_index * 8),
(u32)macaddr_high | NES_MAC_ADDR_VALID |
((((u32)(1<<mc_nic_index)) << 16)));
- multicast_addr = multicast_addr->next;
nesadapter->pft_mcast_map[mc_index] =
nesvnic->nic_index;
} else {
@@ -920,21 +941,13 @@ static void nes_netdev_set_multicast_list(struct net_device *netdev)
nesadapter->pft_mcast_map[mc_index] = 255;
}
}
+ kfree(addrs);
/* PFT is not large enough */
- if (multicast_addr && multicast_addr->next) {
- nic_active = nes_read_indexed(nesdev,
- NES_IDX_NIC_MULTICAST_ALL);
- nic_active |= nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_MULTICAST_ALL,
- nic_active);
- nic_active = nes_read_indexed(nesdev,
- NES_IDX_NIC_UNICAST_ALL);
- nic_active &= ~nic_active_bit;
- nes_write_indexed(nesdev, NES_IDX_NIC_UNICAST_ALL,
- nic_active);
- }
+ if (i < mc_count)
+ set_allmulti(nesdev, nic_active_bit);
}
+unlock:
spin_unlock_irqrestore(&nesadapter->resource_lock, flags);
}
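The rewritten multicast path snapshots the list into a flat buffer before walking the perfect-filter registers. A sketch of that pattern, reusing the get_addr helper defined above and assuming the caller holds a spinlock (hence GFP_ATOMIC); the wrapper name is hypothetical:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <linux/string.h>

#define get_addr(addrs, index) ((addrs) + (index) * ETH_ALEN)

/* Copy the device's multicast list into an indexable buffer; the caller
 * kfree()s it after programming the hardware filters. */
static char *sketch_snapshot_mc_list(struct net_device *netdev, int *count)
{
	struct dev_mc_list *mcaddr;
	char *addrs;
	int i = 0;

	*count = netdev_mc_count(netdev);
	addrs = kmalloc(ETH_ALEN * *count, GFP_ATOMIC);
	if (!addrs)
		return NULL;

	netdev_for_each_mc_addr(mcaddr, netdev)
		memcpy(get_addr(addrs, i++), mcaddr->dmi_addr, ETH_ALEN);

	return addrs;
}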
@@ -1230,8 +1243,8 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
target_stat_values[++index] = cm_packets_received;
target_stat_values[++index] = cm_packets_dropped;
target_stat_values[++index] = cm_packets_retrans;
- target_stat_values[++index] = cm_listens_created;
- target_stat_values[++index] = cm_listens_destroyed;
+ target_stat_values[++index] = atomic_read(&cm_listens_created);
+ target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
target_stat_values[++index] = cm_backlog_drops;
target_stat_values[++index] = atomic_read(&cm_loopbacks);
target_stat_values[++index] = atomic_read(&cm_nodes_created);
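cm_listens_created and cm_listens_destroyed are now read with atomic_read() like the counters around them, so the ethtool snapshot no longer races with the connection-management path. A sketch of the pattern with a hypothetical counter:

#include <linux/types.h>
#include <asm/atomic.h>

static atomic_t sketch_listens_created = ATOMIC_INIT(0);

/* Counters bumped from several contexts stay atomic_t and are only
 * sampled when the ethtool buffer is filled. */
static void sketch_fill_stat(u64 *stats, int *index)
{
	stats[(*index)++] = (u64)atomic_read(&sketch_listens_created);
}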
@@ -1461,9 +1474,9 @@ static int nes_netdev_get_settings(struct net_device *netdev, struct ethtool_cmd
}
return 0;
}
- if ((phy_type == NES_PHY_TYPE_IRIS) ||
- (phy_type == NES_PHY_TYPE_ARGUS) ||
- (phy_type == NES_PHY_TYPE_SFP_D)) {
+ if ((phy_type == NES_PHY_TYPE_ARGUS) ||
+ (phy_type == NES_PHY_TYPE_SFP_D) ||
+ (phy_type == NES_PHY_TYPE_KR)) {
et_cmd->transceiver = XCVR_EXTERNAL;
et_cmd->port = PORT_FIBRE;
et_cmd->supported = SUPPORTED_FIBRE;
@@ -1583,8 +1596,7 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
struct net_device *netdev;
struct nic_qp_map *curr_qp_map;
u32 u32temp;
- u16 phy_data;
- u16 temp_phy_data;
+ u8 phy_type = nesdev->nesadapter->phy_type[nesdev->mac_index];
netdev = alloc_etherdev(sizeof(struct nes_vnic));
if (!netdev) {
@@ -1692,65 +1704,23 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
if ((nesdev->netdev_count == 0) &&
((PCI_FUNC(nesdev->pcidev->devfn) == nesdev->mac_index) ||
- ((nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) &&
+ ((phy_type == NES_PHY_TYPE_PUMA_1G) &&
(((PCI_FUNC(nesdev->pcidev->devfn) == 1) && (nesdev->mac_index == 2)) ||
((PCI_FUNC(nesdev->pcidev->devfn) == 2) && (nesdev->mac_index == 1)))))) {
- /*
- * nes_debug(NES_DBG_INIT, "Setting up PHY interrupt mask. Using register index 0x%04X\n",
- * NES_IDX_PHY_PCS_CONTROL_STATUS0 + (0x200 * (nesvnic->logical_port & 1)));
- */
u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)));
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_PUMA_1G) {
+ if (phy_type != NES_PHY_TYPE_PUMA_1G) {
u32temp |= 0x00200000;
nes_write_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
(0x200 * (nesdev->mac_index & 1)), u32temp);
}
- u32temp = nes_read_indexed(nesdev, NES_IDX_PHY_PCS_CONTROL_STATUS0 +
- (0x200 * (nesdev->mac_index & 1)));
-
- if ((u32temp&0x0f1f0000) == 0x0f0f0000) {
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_IRIS) {
- nes_init_phy(nesdev);
- nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
- temp_phy_data = (u16)nes_read_indexed(nesdev,
- NES_IDX_MAC_MDIO_CONTROL);
- u32temp = 20;
- do {
- nes_read_10G_phy_reg(nesdev, nesdev->nesadapter->phy_index[nesdev->mac_index], 1, 1);
- phy_data = (u16)nes_read_indexed(nesdev,
- NES_IDX_MAC_MDIO_CONTROL);
- if ((phy_data == temp_phy_data) || (!(--u32temp)))
- break;
- temp_phy_data = phy_data;
- } while (1);
- if (phy_data & 4) {
- nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
- nesvnic->linkup = 1;
- } else {
- nes_debug(NES_DBG_INIT, "The Link is DOWN!!.\n");
- }
- } else {
- nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
- nesvnic->linkup = 1;
- }
- } else if (nesdev->nesadapter->phy_type[nesdev->mac_index] == NES_PHY_TYPE_PUMA_1G) {
- nes_debug(NES_DBG_INIT, "mac_index=%d, logical_port=%d, u32temp=0x%04X, PCI_FUNC=%d\n",
- nesdev->mac_index, nesvnic->logical_port, u32temp, PCI_FUNC(nesdev->pcidev->devfn));
- if (((nesdev->mac_index < 2) && ((u32temp&0x01010000) == 0x01010000)) ||
- ((nesdev->mac_index > 1) && ((u32temp&0x02020000) == 0x02020000))) {
- nes_debug(NES_DBG_INIT, "The Link is UP!!.\n");
- nesvnic->linkup = 1;
- }
- }
/* clear the MAC interrupt status, assumes direct logical to physical mapping */
u32temp = nes_read_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index));
nes_debug(NES_DBG_INIT, "Phy interrupt status = 0x%X.\n", u32temp);
nes_write_indexed(nesdev, NES_IDX_MAC_INT_STATUS + (0x200 * nesdev->mac_index), u32temp);
- if (nesdev->nesadapter->phy_type[nesdev->mac_index] != NES_PHY_TYPE_IRIS)
- nes_init_phy(nesdev);
+ nes_init_phy(nesdev);
}
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 64d3136e374..815725f886c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -228,7 +228,7 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw *ibmw,
/* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
spin_unlock_irqrestore(&nesqp->lock, flags);
- return -EINVAL;
+ return -ENOMEM;
}
wqe = &nesqp->hwqp.sq_vbase[head];
@@ -3294,7 +3294,7 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
/* Check for SQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
- err = -EINVAL;
+ err = -ENOMEM;
break;
}
@@ -3577,7 +3577,7 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
}
/* Check for RQ overflow */
if (((head + (2 * qsize) - nesqp->hwqp.rq_tail) % qsize) == (qsize - 1)) {
- err = -EINVAL;
+ err = -ENOMEM;
break;
}
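Only the errno changes in these hunks (-EINVAL becomes -ENOMEM); the ring-fullness test itself is untouched. A self-contained sketch of that test:

#include <linux/errno.h>
#include <linux/types.h>

/* With head/tail indices into a ring of qsize entries, the queue is
 * treated as full when exactly one free slot remains; posting in that
 * state now fails with -ENOMEM rather than -EINVAL. */
static int sketch_wq_room(u32 head, u32 tail, u32 qsize)
{
	if (((head + 2 * qsize - tail) % qsize) == (qsize - 1))
		return -ENOMEM;		/* SQ/RQ overflow */
	return 0;
}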
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 30bdf427ee6..83a7751c38d 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1374,7 +1374,7 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, priv->dev);
+ icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
dev_kfree_skb_any(skb);
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
index e9795f60e5d..d10b4ec68d2 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ethtool.c
@@ -55,9 +55,7 @@ static int ipoib_get_coalesce(struct net_device *dev,
struct ipoib_dev_priv *priv = netdev_priv(dev);
coal->rx_coalesce_usecs = priv->ethtool.coalesce_usecs;
- coal->tx_coalesce_usecs = priv->ethtool.coalesce_usecs;
coal->rx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
- coal->tx_max_coalesced_frames = priv->ethtool.max_coalesced_frames;
return 0;
}
@@ -69,10 +67,8 @@ static int ipoib_set_coalesce(struct net_device *dev,
int ret;
/*
- * Since IPoIB uses a single CQ for both rx and tx, we assume
- * that rx params dictate the configuration. These values are
- * saved in the private data and returned when ipoib_get_coalesce()
- * is called.
+ * These values are saved in the private data and returned
+ * when ipoib_get_coalesce() is called
*/
if (coal->rx_coalesce_usecs > 0xffff ||
coal->rx_max_coalesced_frames > 0xffff)
@@ -85,8 +81,6 @@ static int ipoib_set_coalesce(struct net_device *dev,
return ret;
}
- coal->tx_coalesce_usecs = coal->rx_coalesce_usecs;
- coal->tx_max_coalesced_frames = coal->rx_max_coalesced_frames;
priv->ethtool.coalesce_usecs = coal->rx_coalesce_usecs;
priv->ethtool.max_coalesced_frames = coal->rx_max_coalesced_frames;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 8763c1ea5eb..d41ea27be5e 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -811,7 +811,7 @@ void ipoib_mcast_restart_task(struct work_struct *work)
clear_bit(IPOIB_MCAST_FLAG_FOUND, &mcast->flags);
/* Mark all of the entries that are found or don't exist */
- for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
union ib_gid mgid;
if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 5f7a6fca0a4..71237f8f78f 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -128,6 +128,28 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
return 0;
}
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc)
+{
+ struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
+ struct iser_device *device = iser_conn->ib_conn->device;
+ struct iscsi_iser_task *iser_task = task->dd_data;
+ u64 dma_addr;
+
+ dma_addr = ib_dma_map_single(device->ib_device, (void *)tx_desc,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ return -ENOMEM;
+
+ tx_desc->dma_addr = dma_addr;
+ tx_desc->tx_sg[0].addr = tx_desc->dma_addr;
+ tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
+
+ iser_task->headers_initialized = 1;
+ iser_task->iser_conn = iser_conn;
+ return 0;
+}
/**
* iscsi_iser_task_init - Initialize task
* @task: iscsi task
@@ -137,17 +159,17 @@ static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
static int
iscsi_iser_task_init(struct iscsi_task *task)
{
- struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
+ if (!iser_task->headers_initialized)
+ if (iser_initialize_task_headers(task, &iser_task->desc))
+ return -ENOMEM;
+
/* mgmt task */
- if (!task->sc) {
- iser_task->desc.data = task->data;
+ if (!task->sc)
return 0;
- }
iser_task->command_sent = 0;
- iser_task->iser_conn = iser_conn;
iser_task_rdma_init(iser_task);
return 0;
}
@@ -168,7 +190,7 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
{
int error = 0;
- iser_dbg("task deq [cid %d itt 0x%x]\n", conn->id, task->itt);
+ iser_dbg("mtask xmit [cid %d itt 0x%x]\n", conn->id, task->itt);
error = iser_send_control(conn, task);
@@ -178,9 +200,6 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
* - if yes, the task is recycled at iscsi_complete_pdu
* - if no, the task is recycled at iser_snd_completion
*/
- if (error && error != -ENOBUFS)
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-
return error;
}
@@ -232,7 +251,7 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
task->imm_count, task->unsol_r2t.data_length);
}
- iser_dbg("task deq [cid %d itt 0x%x]\n",
+ iser_dbg("ctask xmit [cid %d itt 0x%x]\n",
conn->id, task->itt);
/* Send the cmd PDU */
@@ -248,8 +267,6 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
error = iscsi_iser_task_xmit_unsol_data(conn, task);
iscsi_iser_task_xmit_exit:
- if (error && error != -ENOBUFS)
- iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
return error;
}
@@ -283,7 +300,7 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, uint32_t conn_idx)
* due to issues with the login code re iser semantics
* this is not set in iscsi_conn_setup - FIXME
*/
- conn->max_recv_dlength = 128;
+ conn->max_recv_dlength = ISER_RECV_DATA_SEG_LEN;
iser_conn = conn->dd_data;
conn->dd_data = iser_conn;
@@ -401,7 +418,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct Scsi_Host *shost;
struct iser_conn *ib_conn;
- shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 1);
+ shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost)
return NULL;
shost->transportt = iscsi_iser_scsi_transport;
@@ -675,7 +692,7 @@ static int __init iser_init(void)
memset(&ig, 0, sizeof(struct iser_global));
ig.desc_cache = kmem_cache_create("iser_descriptors",
- sizeof (struct iser_desc),
+ sizeof(struct iser_tx_desc),
0, SLAB_HWCACHE_ALIGN,
NULL);
if (ig.desc_cache == NULL)
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 9d529cae1f0..036934cdcb9 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -102,9 +102,9 @@
#define ISER_MAX_TX_MISC_PDUS 6 /* NOOP_OUT(2), TEXT(1), *
* SCSI_TMFUNC(2), LOGOUT(1) */
-#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX + \
- ISER_MAX_RX_MISC_PDUS + \
- ISER_MAX_TX_MISC_PDUS)
+#define ISER_QP_MAX_RECV_DTOS (ISCSI_DEF_XMIT_CMDS_MAX)
+
+#define ISER_MIN_POSTED_RX (ISCSI_DEF_XMIT_CMDS_MAX >> 2)
/* the max TX (send) WR supported by the iSER QP is defined by *
* max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect *
@@ -132,6 +132,12 @@ struct iser_hdr {
__be64 read_va;
} __attribute__((packed));
+/* Constant PDU length calculations */
+#define ISER_HEADERS_LEN (sizeof(struct iser_hdr) + sizeof(struct iscsi_hdr))
+
+#define ISER_RECV_DATA_SEG_LEN 128
+#define ISER_RX_PAYLOAD_SIZE (ISER_HEADERS_LEN + ISER_RECV_DATA_SEG_LEN)
+#define ISER_RX_LOGIN_SIZE (ISER_HEADERS_LEN + ISCSI_DEF_MAX_RECV_SEG_LEN)
/* Length of an object name string */
#define ISER_OBJECT_NAME_SIZE 64
@@ -187,51 +193,43 @@ struct iser_regd_buf {
struct iser_mem_reg reg; /* memory registration info */
void *virt_addr;
struct iser_device *device; /* device->device for dma_unmap */
- u64 dma_addr; /* if non zero, addr for dma_unmap */
enum dma_data_direction direction; /* direction for dma_unmap */
unsigned int data_size;
- atomic_t ref_count; /* refcount, freed when dec to 0 */
-};
-
-#define MAX_REGD_BUF_VECTOR_LEN 2
-
-struct iser_dto {
- struct iscsi_iser_task *task;
- struct iser_conn *ib_conn;
- int notify_enable;
-
- /* vector of registered buffers */
- unsigned int regd_vector_len;
- struct iser_regd_buf *regd[MAX_REGD_BUF_VECTOR_LEN];
-
- /* offset into the registered buffer may be specified */
- unsigned int offset[MAX_REGD_BUF_VECTOR_LEN];
-
- /* a smaller size may be specified, if 0, then full size is used */
- unsigned int used_sz[MAX_REGD_BUF_VECTOR_LEN];
};
enum iser_desc_type {
- ISCSI_RX,
ISCSI_TX_CONTROL ,
ISCSI_TX_SCSI_COMMAND,
ISCSI_TX_DATAOUT
};
-struct iser_desc {
+struct iser_tx_desc {
struct iser_hdr iser_header;
struct iscsi_hdr iscsi_header;
- struct iser_regd_buf hdr_regd_buf;
- void *data; /* used by RX & TX_CONTROL */
- struct iser_regd_buf data_regd_buf; /* used by RX & TX_CONTROL */
enum iser_desc_type type;
- struct iser_dto dto;
+ u64 dma_addr;
+	/* sg[0] points to iser/iscsi headers, sg[1] optionally points to either
+	   immediate data, unsolicited data-out, or control (login/text) data */
+ struct ib_sge tx_sg[2];
+ int num_sge;
};
+#define ISER_RX_PAD_SIZE (256 - (ISER_RX_PAYLOAD_SIZE + \
+ sizeof(u64) + sizeof(struct ib_sge)))
+struct iser_rx_desc {
+ struct iser_hdr iser_header;
+ struct iscsi_hdr iscsi_header;
+ char data[ISER_RECV_DATA_SEG_LEN];
+ u64 dma_addr;
+ struct ib_sge rx_sg;
+ char pad[ISER_RX_PAD_SIZE];
+} __attribute__((packed));
+
struct iser_device {
struct ib_device *ib_device;
struct ib_pd *pd;
- struct ib_cq *cq;
+ struct ib_cq *rx_cq;
+ struct ib_cq *tx_cq;
struct ib_mr *mr;
struct tasklet_struct cq_tasklet;
struct list_head ig_list; /* entry in ig devices list */
@@ -250,15 +248,18 @@ struct iser_conn {
struct ib_fmr_pool *fmr_pool; /* pool of IB FMRs */
int disc_evt_flag; /* disconn event delivered */
wait_queue_head_t wait; /* waitq for conn/disconn */
- atomic_t post_recv_buf_count; /* posted rx count */
+ int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
- atomic_t unexpected_pdu_count;/* count of received *
- * unexpected pdus *
- * not yet retired */
char name[ISER_OBJECT_NAME_SIZE];
struct iser_page_vec *page_vec; /* represents SG to fmr maps*
* maps serialized as tx is*/
struct list_head conn_list; /* entry in ig conn list */
+
+ char *login_buf;
+ u64 login_dma;
+ unsigned int rx_desc_head;
+ struct iser_rx_desc *rx_descs;
+ struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
};
struct iscsi_iser_conn {
@@ -267,7 +268,7 @@ struct iscsi_iser_conn {
};
struct iscsi_iser_task {
- struct iser_desc desc;
+ struct iser_tx_desc desc;
struct iscsi_iser_conn *iser_conn;
enum iser_task_status status;
int command_sent; /* set if command sent */
@@ -275,6 +276,7 @@ struct iscsi_iser_task {
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
+ int headers_initialized;
};
struct iser_page_vec {
@@ -322,22 +324,17 @@ void iser_conn_put(struct iser_conn *ib_conn);
void iser_conn_terminate(struct iser_conn *ib_conn);
-void iser_rcv_completion(struct iser_desc *desc,
- unsigned long dto_xfer_len);
+void iser_rcv_completion(struct iser_rx_desc *desc,
+ unsigned long dto_xfer_len,
+ struct iser_conn *ib_conn);
-void iser_snd_completion(struct iser_desc *desc);
+void iser_snd_completion(struct iser_tx_desc *desc, struct iser_conn *ib_conn);
void iser_task_rdma_init(struct iscsi_iser_task *task);
void iser_task_rdma_finalize(struct iscsi_iser_task *task);
-void iser_dto_buffs_release(struct iser_dto *dto);
-
-int iser_regd_buff_release(struct iser_regd_buf *regd_buf);
-
-void iser_reg_single(struct iser_device *device,
- struct iser_regd_buf *regd_buf,
- enum dma_data_direction direction);
+void iser_free_rx_descriptors(struct iser_conn *ib_conn);
void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *task,
enum iser_data_dir cmd_dir);
@@ -356,11 +353,9 @@ int iser_reg_page_vec(struct iser_conn *ib_conn,
void iser_unreg_mem(struct iser_mem_reg *mem_reg);
-int iser_post_recv(struct iser_desc *rx_desc);
-int iser_post_send(struct iser_desc *tx_desc);
-
-int iser_conn_state_comp(struct iser_conn *ib_conn,
- enum iser_ib_conn_state comp);
+int iser_post_recvl(struct iser_conn *ib_conn);
+int iser_post_recvm(struct iser_conn *ib_conn, int count);
+int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data,
@@ -368,4 +363,6 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
enum dma_data_direction dma_dir);
void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
+int iser_initialize_task_headers(struct iscsi_task *task,
+ struct iser_tx_desc *tx_desc);
#endif
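The pad member rounds the packed struct iser_rx_desc up to a fixed 256-byte stride, so rx_descs can be indexed as a plain array and each descriptor maps to a single receive SGE. A sketch of a compile-time check for that assumption (the check itself is not part of this patch):

#include <linux/kernel.h>

/* Sketch only: ISER_RX_PAD_SIZE is chosen so the packed descriptor is
 * exactly 256 bytes. */
static inline void sketch_rx_desc_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct iser_rx_desc) != 256);
}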
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 9de640200ad..0b9ef071658 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -39,29 +39,6 @@
#include "iscsi_iser.h"
-/* Constant PDU lengths calculations */
-#define ISER_TOTAL_HEADERS_LEN (sizeof (struct iser_hdr) + \
- sizeof (struct iscsi_hdr))
-
-/* iser_dto_add_regd_buff - increments the reference count for *
- * the registered buffer & adds it to the DTO object */
-static void iser_dto_add_regd_buff(struct iser_dto *dto,
- struct iser_regd_buf *regd_buf,
- unsigned long use_offset,
- unsigned long use_size)
-{
- int add_idx;
-
- atomic_inc(&regd_buf->ref_count);
-
- add_idx = dto->regd_vector_len;
- dto->regd[add_idx] = regd_buf;
- dto->used_sz[add_idx] = use_size;
- dto->offset[add_idx] = use_offset;
-
- dto->regd_vector_len++;
-}
-
/* Register user buffer memory and initialize passive rdma
* dto descriptor. Total data size is stored in
* iser_task->data[ISER_DIR_IN].data_len
@@ -122,9 +99,9 @@ iser_prepare_write_cmd(struct iscsi_task *task,
struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_regd_buf *regd_buf;
int err;
- struct iser_dto *send_dto = &iser_task->desc.dto;
struct iser_hdr *hdr = &iser_task->desc.iser_header;
struct iser_data_buf *buf_out = &iser_task->data[ISER_DIR_OUT];
+ struct ib_sge *tx_dsg = &iser_task->desc.tx_sg[1];
err = iser_dma_map_task_data(iser_task,
buf_out,
@@ -163,135 +140,100 @@ iser_prepare_write_cmd(struct iscsi_task *task,
if (imm_sz > 0) {
iser_dbg("Cmd itt:%d, WRITE, adding imm.data sz: %d\n",
task->itt, imm_sz);
- iser_dto_add_regd_buff(send_dto,
- regd_buf,
- 0,
- imm_sz);
+ tx_dsg->addr = regd_buf->reg.va;
+ tx_dsg->length = imm_sz;
+ tx_dsg->lkey = regd_buf->reg.lkey;
+ iser_task->desc.num_sge = 2;
}
return 0;
}
-/**
- * iser_post_receive_control - allocates, initializes and posts receive DTO.
- */
-static int iser_post_receive_control(struct iscsi_conn *conn)
+/* creates a new tx descriptor and adds header regd buffer */
+static void iser_create_send_desc(struct iser_conn *ib_conn,
+ struct iser_tx_desc *tx_desc)
{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
- struct iser_desc *rx_desc;
- struct iser_regd_buf *regd_hdr;
- struct iser_regd_buf *regd_data;
- struct iser_dto *recv_dto = NULL;
- struct iser_device *device = iser_conn->ib_conn->device;
- int rx_data_size, err;
- int posts, outstanding_unexp_pdus;
-
- /* for the login sequence we must support rx of upto 8K; login is done
- * after conn create/bind (connect) and conn stop/bind (reconnect),
- * what's common for both schemes is that the connection is not started
- */
- if (conn->c_stage != ISCSI_CONN_STARTED)
- rx_data_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
- else /* FIXME till user space sets conn->max_recv_dlength correctly */
- rx_data_size = 128;
-
- outstanding_unexp_pdus =
- atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0);
-
- /*
- * in addition to the response buffer, replace those consumed by
- * unexpected pdus.
- */
- for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) {
- rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
- if (rx_desc == NULL) {
- iser_err("Failed to alloc desc for post recv %d\n",
- posts);
- err = -ENOMEM;
- goto post_rx_cache_alloc_failure;
- }
- rx_desc->type = ISCSI_RX;
- rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
- if (rx_desc->data == NULL) {
- iser_err("Failed to alloc data buf for post recv %d\n",
- posts);
- err = -ENOMEM;
- goto post_rx_kmalloc_failure;
- }
-
- recv_dto = &rx_desc->dto;
- recv_dto->ib_conn = iser_conn->ib_conn;
- recv_dto->regd_vector_len = 0;
+ struct iser_device *device = ib_conn->device;
- regd_hdr = &rx_desc->hdr_regd_buf;
- memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
- regd_hdr->device = device;
- regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
- regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
+ ib_dma_sync_single_for_cpu(device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
- iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE);
-
- iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);
+ memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
+ tx_desc->iser_header.flags = ISER_VER;
- regd_data = &rx_desc->data_regd_buf;
- memset(regd_data, 0, sizeof(struct iser_regd_buf));
- regd_data->device = device;
- regd_data->virt_addr = rx_desc->data;
- regd_data->data_size = rx_data_size;
+ tx_desc->num_sge = 1;
- iser_reg_single(device, regd_data, DMA_FROM_DEVICE);
+ if (tx_desc->tx_sg[0].lkey != device->mr->lkey) {
+ tx_desc->tx_sg[0].lkey = device->mr->lkey;
+ iser_dbg("sdesc %p lkey mismatch, fixing\n", tx_desc);
+ }
+}
- iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);
- err = iser_post_recv(rx_desc);
- if (err) {
- iser_err("Failed iser_post_recv for post %d\n", posts);
- goto post_rx_post_recv_failure;
- }
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+{
+ int i, j;
+ u64 dma_addr;
+ struct iser_rx_desc *rx_desc;
+ struct ib_sge *rx_sg;
+ struct iser_device *device = ib_conn->device;
+
+ ib_conn->rx_descs = kmalloc(ISER_QP_MAX_RECV_DTOS *
+ sizeof(struct iser_rx_desc), GFP_KERNEL);
+ if (!ib_conn->rx_descs)
+ goto rx_desc_alloc_fail;
+
+ rx_desc = ib_conn->rx_descs;
+
+ for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++) {
+ dma_addr = ib_dma_map_single(device->ib_device, (void *)rx_desc,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ if (ib_dma_mapping_error(device->ib_device, dma_addr))
+ goto rx_desc_dma_map_failed;
+
+ rx_desc->dma_addr = dma_addr;
+
+ rx_sg = &rx_desc->rx_sg;
+ rx_sg->addr = rx_desc->dma_addr;
+ rx_sg->length = ISER_RX_PAYLOAD_SIZE;
+ rx_sg->lkey = device->mr->lkey;
}
- /* all posts successful */
- return 0;
-post_rx_post_recv_failure:
- iser_dto_buffs_release(recv_dto);
- kfree(rx_desc->data);
-post_rx_kmalloc_failure:
- kmem_cache_free(ig.desc_cache, rx_desc);
-post_rx_cache_alloc_failure:
- if (posts > 0) {
- /*
- * response buffer posted, but did not replace all unexpected
- * pdu recv bufs. Ignore error, retry occurs next send
- */
- outstanding_unexp_pdus -= (posts - 1);
- err = 0;
- }
- atomic_add(outstanding_unexp_pdus,
- &iser_conn->ib_conn->unexpected_pdu_count);
+ ib_conn->rx_desc_head = 0;
+ return 0;
- return err;
+rx_desc_dma_map_failed:
+ rx_desc = ib_conn->rx_descs;
+ for (j = 0; j < i; j++, rx_desc++)
+ ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ kfree(ib_conn->rx_descs);
+ ib_conn->rx_descs = NULL;
+rx_desc_alloc_fail:
+ iser_err("failed allocating rx descriptors / data buffers\n");
+ return -ENOMEM;
}
-/* creates a new tx descriptor and adds header regd buffer */
-static void iser_create_send_desc(struct iscsi_iser_conn *iser_conn,
- struct iser_desc *tx_desc)
+void iser_free_rx_descriptors(struct iser_conn *ib_conn)
{
- struct iser_regd_buf *regd_hdr = &tx_desc->hdr_regd_buf;
- struct iser_dto *send_dto = &tx_desc->dto;
+ int i;
+ struct iser_rx_desc *rx_desc;
+ struct iser_device *device = ib_conn->device;
- memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
- regd_hdr->device = iser_conn->ib_conn->device;
- regd_hdr->virt_addr = tx_desc; /* == &tx_desc->iser_header */
- regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
+ if (ib_conn->login_buf) {
+ ib_dma_unmap_single(device->ib_device, ib_conn->login_dma,
+ ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+ kfree(ib_conn->login_buf);
+ }
- send_dto->ib_conn = iser_conn->ib_conn;
- send_dto->notify_enable = 1;
- send_dto->regd_vector_len = 0;
+ if (!ib_conn->rx_descs)
+ return;
- memset(&tx_desc->iser_header, 0, sizeof(struct iser_hdr));
- tx_desc->iser_header.flags = ISER_VER;
-
- iser_dto_add_regd_buff(send_dto, regd_hdr, 0, 0);
+ rx_desc = ib_conn->rx_descs;
+ for (i = 0; i < ISER_QP_MAX_RECV_DTOS; i++, rx_desc++)
+ ib_dma_unmap_single(device->ib_device, rx_desc->dma_addr,
+ ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+ kfree(ib_conn->rx_descs);
}
/**
@@ -301,46 +243,23 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
- int i;
- /*
- * FIXME this value should be declared to the target during login with
- * the MaxOutstandingUnexpectedPDUs key when supported
- */
- int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;
-
- iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);
+ iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
/* Check that there are no posted recv or send buffers left - */
/* they must be consumed during the login phase */
- BUG_ON(atomic_read(&iser_conn->ib_conn->post_recv_buf_count) != 0);
+ BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
- /* Initial post receive buffers */
- for (i = 0; i < initial_post_recv_bufs_num; i++) {
- if (iser_post_receive_control(conn) != 0) {
- iser_err("Failed to post recv bufs at:%d conn:0x%p\n",
- i, conn);
- return -ENOMEM;
- }
- }
- iser_dbg("Posted %d post recv bufs, conn:0x%p\n", i, conn);
- return 0;
-}
+ if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
+ return -ENOMEM;
-static int
-iser_check_xmit(struct iscsi_conn *conn, void *task)
-{
- struct iscsi_iser_conn *iser_conn = conn->dd_data;
+ /* Initial post receive buffers */
+ if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
+ return -ENOMEM;
- if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
- ISER_QP_MAX_REQ_DTOS) {
- iser_dbg("%ld can't xmit task %p\n",jiffies,task);
- return -ENOBUFS;
- }
return 0;
}
-
/**
* iser_send_command - send command PDU
*/
@@ -349,27 +268,18 @@ int iser_send_command(struct iscsi_conn *conn,
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_dto *send_dto = NULL;
unsigned long edtl;
- int err = 0;
+ int err;
struct iser_data_buf *data_buf;
struct iscsi_cmd *hdr = (struct iscsi_cmd *)task->hdr;
struct scsi_cmnd *sc = task->sc;
-
- if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
- iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
- return -EPERM;
- }
- if (iser_check_xmit(conn, task))
- return -ENOBUFS;
+ struct iser_tx_desc *tx_desc = &iser_task->desc;
edtl = ntohl(hdr->data_length);
/* build the tx desc regd header and add it to the tx desc dto */
- iser_task->desc.type = ISCSI_TX_SCSI_COMMAND;
- send_dto = &iser_task->desc.dto;
- send_dto->task = iser_task;
- iser_create_send_desc(iser_conn, &iser_task->desc);
+ tx_desc->type = ISCSI_TX_SCSI_COMMAND;
+ iser_create_send_desc(iser_conn->ib_conn, tx_desc);
if (hdr->flags & ISCSI_FLAG_CMD_READ)
data_buf = &iser_task->data[ISER_DIR_IN];
@@ -398,23 +308,13 @@ int iser_send_command(struct iscsi_conn *conn,
goto send_command_error;
}
- iser_reg_single(iser_conn->ib_conn->device,
- send_dto->regd[0], DMA_TO_DEVICE);
-
- if (iser_post_receive_control(conn) != 0) {
- iser_err("post_recv failed!\n");
- err = -ENOMEM;
- goto send_command_error;
- }
-
iser_task->status = ISER_TASK_STATUS_STARTED;
- err = iser_post_send(&iser_task->desc);
+ err = iser_post_send(iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_command_error:
- iser_dto_buffs_release(send_dto);
iser_err("conn %p failed task->itt %d err %d\n",conn, task->itt, err);
return err;
}
@@ -428,20 +328,13 @@ int iser_send_data_out(struct iscsi_conn *conn,
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_desc *tx_desc = NULL;
- struct iser_dto *send_dto = NULL;
+ struct iser_tx_desc *tx_desc = NULL;
+ struct iser_regd_buf *regd_buf;
unsigned long buf_offset;
unsigned long data_seg_len;
uint32_t itt;
int err = 0;
-
- if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
- iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
- return -EPERM;
- }
-
- if (iser_check_xmit(conn, task))
- return -ENOBUFS;
+ struct ib_sge *tx_dsg;
itt = (__force uint32_t)hdr->itt;
data_seg_len = ntoh24(hdr->dlength);
@@ -450,28 +343,25 @@ int iser_send_data_out(struct iscsi_conn *conn,
iser_dbg("%s itt %d dseg_len %d offset %d\n",
__func__,(int)itt,(int)data_seg_len,(int)buf_offset);
- tx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
+ tx_desc = kmem_cache_zalloc(ig.desc_cache, GFP_ATOMIC);
if (tx_desc == NULL) {
iser_err("Failed to alloc desc for post dataout\n");
return -ENOMEM;
}
tx_desc->type = ISCSI_TX_DATAOUT;
+ tx_desc->iser_header.flags = ISER_VER;
memcpy(&tx_desc->iscsi_header, hdr, sizeof(struct iscsi_hdr));
- /* build the tx desc regd header and add it to the tx desc dto */
- send_dto = &tx_desc->dto;
- send_dto->task = iser_task;
- iser_create_send_desc(iser_conn, tx_desc);
-
- iser_reg_single(iser_conn->ib_conn->device,
- send_dto->regd[0], DMA_TO_DEVICE);
+ /* build the tx desc */
+ iser_initialize_task_headers(task, tx_desc);
- /* all data was registered for RDMA, we can use the lkey */
- iser_dto_add_regd_buff(send_dto,
- &iser_task->rdma_regd[ISER_DIR_OUT],
- buf_offset,
- data_seg_len);
+ regd_buf = &iser_task->rdma_regd[ISER_DIR_OUT];
+ tx_dsg = &tx_desc->tx_sg[1];
+ tx_dsg->addr = regd_buf->reg.va + buf_offset;
+ tx_dsg->length = data_seg_len;
+ tx_dsg->lkey = regd_buf->reg.lkey;
+ tx_desc->num_sge = 2;
if (buf_offset + data_seg_len > iser_task->data[ISER_DIR_OUT].data_len) {
iser_err("Offset:%ld & DSL:%ld in Data-Out "
@@ -485,12 +375,11 @@ int iser_send_data_out(struct iscsi_conn *conn,
itt, buf_offset, data_seg_len);
- err = iser_post_send(tx_desc);
+ err = iser_post_send(iser_conn->ib_conn, tx_desc);
if (!err)
return 0;
send_data_out_error:
- iser_dto_buffs_release(send_dto);
kmem_cache_free(ig.desc_cache, tx_desc);
iser_err("conn %p failed err %d\n",conn, err);
return err;
@@ -501,64 +390,44 @@ int iser_send_control(struct iscsi_conn *conn,
{
struct iscsi_iser_conn *iser_conn = conn->dd_data;
struct iscsi_iser_task *iser_task = task->dd_data;
- struct iser_desc *mdesc = &iser_task->desc;
- struct iser_dto *send_dto = NULL;
+ struct iser_tx_desc *mdesc = &iser_task->desc;
unsigned long data_seg_len;
int err = 0;
- struct iser_regd_buf *regd_buf;
struct iser_device *device;
- unsigned char opcode;
-
- if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
- iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
- return -EPERM;
- }
-
- if (iser_check_xmit(conn, task))
- return -ENOBUFS;
/* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL;
- send_dto = &mdesc->dto;
- send_dto->task = NULL;
- iser_create_send_desc(iser_conn, mdesc);
+ iser_create_send_desc(iser_conn->ib_conn, mdesc);
device = iser_conn->ib_conn->device;
- iser_reg_single(device, send_dto->regd[0], DMA_TO_DEVICE);
-
data_seg_len = ntoh24(task->hdr->dlength);
if (data_seg_len > 0) {
- regd_buf = &mdesc->data_regd_buf;
- memset(regd_buf, 0, sizeof(struct iser_regd_buf));
- regd_buf->device = device;
- regd_buf->virt_addr = task->data;
- regd_buf->data_size = task->data_count;
- iser_reg_single(device, regd_buf,
- DMA_TO_DEVICE);
- iser_dto_add_regd_buff(send_dto, regd_buf,
- 0,
- data_seg_len);
+ struct ib_sge *tx_dsg = &mdesc->tx_sg[1];
+ if (task != conn->login_task) {
+ iser_err("data present on non login task!!!\n");
+ goto send_control_error;
+ }
+ memcpy(iser_conn->ib_conn->login_buf, task->data,
+ task->data_count);
+ tx_dsg->addr = iser_conn->ib_conn->login_dma;
+ tx_dsg->length = data_seg_len;
+ tx_dsg->lkey = device->mr->lkey;
+ mdesc->num_sge = 2;
}
- opcode = task->hdr->opcode & ISCSI_OPCODE_MASK;
-
- /* post recv buffer for response if one is expected */
- if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) {
- if (iser_post_receive_control(conn) != 0) {
- iser_err("post_rcv_buff failed!\n");
- err = -ENOMEM;
+ if (task == conn->login_task) {
+ err = iser_post_recvl(iser_conn->ib_conn);
+ if (err)
goto send_control_error;
- }
}
- err = iser_post_send(mdesc);
+ err = iser_post_send(iser_conn->ib_conn, mdesc);
if (!err)
return 0;
send_control_error:
- iser_dto_buffs_release(send_dto);
iser_err("conn %p failed err %d\n",conn, err);
return err;
}
@@ -566,104 +435,71 @@ send_control_error:
/**
* iser_rcv_dto_completion - recv DTO completion
*/
-void iser_rcv_completion(struct iser_desc *rx_desc,
- unsigned long dto_xfer_len)
+void iser_rcv_completion(struct iser_rx_desc *rx_desc,
+ unsigned long rx_xfer_len,
+ struct iser_conn *ib_conn)
{
- struct iser_dto *dto = &rx_desc->dto;
- struct iscsi_iser_conn *conn = dto->ib_conn->iser_conn;
- struct iscsi_task *task;
- struct iscsi_iser_task *iser_task;
+ struct iscsi_iser_conn *conn = ib_conn->iser_conn;
struct iscsi_hdr *hdr;
- char *rx_data = NULL;
- int rx_data_len = 0;
- unsigned char opcode;
-
- hdr = &rx_desc->iscsi_header;
+ u64 rx_dma;
+ int rx_buflen, outstanding, count, err;
+
+	/* differentiate the login buffer from all other PDUs */
+ if ((char *)rx_desc == ib_conn->login_buf) {
+ rx_dma = ib_conn->login_dma;
+ rx_buflen = ISER_RX_LOGIN_SIZE;
+ } else {
+ rx_dma = rx_desc->dma_addr;
+ rx_buflen = ISER_RX_PAYLOAD_SIZE;
+ }
- iser_dbg("op 0x%x itt 0x%x\n", hdr->opcode,hdr->itt);
+ ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
+ rx_buflen, DMA_FROM_DEVICE);
- if (dto_xfer_len > ISER_TOTAL_HEADERS_LEN) { /* we have data */
- rx_data_len = dto_xfer_len - ISER_TOTAL_HEADERS_LEN;
- rx_data = dto->regd[1]->virt_addr;
- rx_data += dto->offset[1];
- }
+ hdr = &rx_desc->iscsi_header;
- opcode = hdr->opcode & ISCSI_OPCODE_MASK;
-
- if (opcode == ISCSI_OP_SCSI_CMD_RSP) {
- spin_lock(&conn->iscsi_conn->session->lock);
- task = iscsi_itt_to_ctask(conn->iscsi_conn, hdr->itt);
- if (task)
- __iscsi_get_task(task);
- spin_unlock(&conn->iscsi_conn->session->lock);
-
- if (!task)
- iser_err("itt can't be matched to task!!! "
- "conn %p opcode %d itt %d\n",
- conn->iscsi_conn, opcode, hdr->itt);
- else {
- iser_task = task->dd_data;
- iser_dbg("itt %d task %p\n",hdr->itt, task);
- iser_task->status = ISER_TASK_STATUS_COMPLETED;
- iser_task_rdma_finalize(iser_task);
- iscsi_put_task(task);
- }
- }
- iser_dto_buffs_release(dto);
+ iser_dbg("op 0x%x itt 0x%x dlen %d\n", hdr->opcode,
+ hdr->itt, (int)(rx_xfer_len - ISER_HEADERS_LEN));
- iscsi_iser_recv(conn->iscsi_conn, hdr, rx_data, rx_data_len);
+ iscsi_iser_recv(conn->iscsi_conn, hdr,
+ rx_desc->data, rx_xfer_len - ISER_HEADERS_LEN);
- kfree(rx_desc->data);
- kmem_cache_free(ig.desc_cache, rx_desc);
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
+ rx_buflen, DMA_FROM_DEVICE);
/* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry on tasks which are completed in *
* parallel to the execution of iser_conn_term. So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */
- atomic_dec(&conn->ib_conn->post_recv_buf_count);
+ conn->ib_conn->post_recv_buf_count--;
- /*
- * if an unexpected PDU was received then the recv wr consumed must
- * be replaced, this is done in the next send of a control-type PDU
- */
- if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) {
- /* nop-in with itt = 0xffffffff */
- atomic_inc(&conn->ib_conn->unexpected_pdu_count);
- }
- else if (opcode == ISCSI_OP_ASYNC_EVENT) {
- /* asyncronous message */
- atomic_inc(&conn->ib_conn->unexpected_pdu_count);
+ if (rx_dma == ib_conn->login_dma)
+ return;
+
+ outstanding = ib_conn->post_recv_buf_count;
+ if (outstanding + ISER_MIN_POSTED_RX <= ISER_QP_MAX_RECV_DTOS) {
+ count = min(ISER_QP_MAX_RECV_DTOS - outstanding,
+ ISER_MIN_POSTED_RX);
+ err = iser_post_recvm(ib_conn, count);
+ if (err)
+ iser_err("posting %d rx bufs err %d\n", count, err);
}
- /* a reject PDU consumes the recv buf posted for the response */
}
-void iser_snd_completion(struct iser_desc *tx_desc)
+void iser_snd_completion(struct iser_tx_desc *tx_desc,
+ struct iser_conn *ib_conn)
{
- struct iser_dto *dto = &tx_desc->dto;
- struct iser_conn *ib_conn = dto->ib_conn;
- struct iscsi_iser_conn *iser_conn = ib_conn->iser_conn;
- struct iscsi_conn *conn = iser_conn->iscsi_conn;
struct iscsi_task *task;
- int resume_tx = 0;
-
- iser_dbg("Initiator, Data sent dto=0x%p\n", dto);
-
- iser_dto_buffs_release(dto);
+ struct iser_device *device = ib_conn->device;
- if (tx_desc->type == ISCSI_TX_DATAOUT)
+ if (tx_desc->type == ISCSI_TX_DATAOUT) {
+ ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
+ ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
-
- if (atomic_read(&iser_conn->ib_conn->post_send_buf_count) ==
- ISER_QP_MAX_REQ_DTOS)
- resume_tx = 1;
+ }
atomic_dec(&ib_conn->post_send_buf_count);
- if (resume_tx) {
- iser_dbg("%ld resuming tx\n",jiffies);
- iscsi_conn_queue_work(conn);
- }
-
if (tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
@@ -692,7 +528,6 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{
- int deferred;
int is_rdma_aligned = 1;
struct iser_regd_buf *regd;
@@ -710,32 +545,17 @@ void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
if (iser_task->dir[ISER_DIR_IN]) {
regd = &iser_task->rdma_regd[ISER_DIR_IN];
- deferred = iser_regd_buff_release(regd);
- if (deferred) {
- iser_err("%d references remain for BUF-IN rdma reg\n",
- atomic_read(&regd->ref_count));
- }
+ if (regd->reg.is_fmr)
+ iser_unreg_mem(&regd->reg);
}
if (iser_task->dir[ISER_DIR_OUT]) {
regd = &iser_task->rdma_regd[ISER_DIR_OUT];
- deferred = iser_regd_buff_release(regd);
- if (deferred) {
- iser_err("%d references remain for BUF-OUT rdma reg\n",
- atomic_read(&regd->ref_count));
- }
+ if (regd->reg.is_fmr)
+ iser_unreg_mem(&regd->reg);
}
/* if the data was unaligned, it was already unmapped and then copied */
if (is_rdma_aligned)
iser_dma_unmap_task_data(iser_task);
}
-
-void iser_dto_buffs_release(struct iser_dto *dto)
-{
- int i;
-
- for (i = 0; i < dto->regd_vector_len; i++)
- iser_regd_buff_release(dto->regd[i]);
-}
-
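iser_rcv_completion now replenishes the receive queue itself: once the number of posted buffers drops far enough, it reposts a batch of up to ISER_MIN_POSTED_RX. A sketch of that policy with hypothetical names:

#include <linux/kernel.h>

/* 'posted' is the number of outstanding receive buffers, 'max_dtos' the
 * queue depth and 'min_batch' the repost batch size; returns how many
 * buffers the caller should post now. */
static int sketch_rx_refill_count(int posted, int max_dtos, int min_batch)
{
	if (posted + min_batch <= max_dtos)
		return min(max_dtos - posted, min_batch);
	return 0;
}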
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index 274c883ef3e..fb88d6896b6 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -41,62 +41,6 @@
#define ISER_KMALLOC_THRESHOLD 0x20000 /* 128K - kmalloc limit */
/**
- * Decrements the reference count for the
- * registered buffer & releases it
- *
- * returns 0 if released, 1 if deferred
- */
-int iser_regd_buff_release(struct iser_regd_buf *regd_buf)
-{
- struct ib_device *dev;
-
- if ((atomic_read(&regd_buf->ref_count) == 0) ||
- atomic_dec_and_test(&regd_buf->ref_count)) {
- /* if we used the dma mr, unreg is just NOP */
- if (regd_buf->reg.is_fmr)
- iser_unreg_mem(&regd_buf->reg);
-
- if (regd_buf->dma_addr) {
- dev = regd_buf->device->ib_device;
- ib_dma_unmap_single(dev,
- regd_buf->dma_addr,
- regd_buf->data_size,
- regd_buf->direction);
- }
- /* else this regd buf is associated with task which we */
- /* dma_unmap_single/sg later */
- return 0;
- } else {
- iser_dbg("Release deferred, regd.buff: 0x%p\n", regd_buf);
- return 1;
- }
-}
-
-/**
- * iser_reg_single - fills registered buffer descriptor with
- * registration information
- */
-void iser_reg_single(struct iser_device *device,
- struct iser_regd_buf *regd_buf,
- enum dma_data_direction direction)
-{
- u64 dma_addr;
-
- dma_addr = ib_dma_map_single(device->ib_device,
- regd_buf->virt_addr,
- regd_buf->data_size, direction);
- BUG_ON(ib_dma_mapping_error(device->ib_device, dma_addr));
-
- regd_buf->reg.lkey = device->mr->lkey;
- regd_buf->reg.len = regd_buf->data_size;
- regd_buf->reg.va = dma_addr;
- regd_buf->reg.is_fmr = 0;
-
- regd_buf->dma_addr = dma_addr;
- regd_buf->direction = direction;
-}
-
-/**
* iser_start_rdma_unaligned_sg
*/
static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
@@ -109,10 +53,10 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
unsigned long cmd_data_len = data->data_len;
if (cmd_data_len > ISER_KMALLOC_THRESHOLD)
- mem = (void *)__get_free_pages(GFP_NOIO,
+ mem = (void *)__get_free_pages(GFP_ATOMIC,
ilog2(roundup_pow_of_two(cmd_data_len)) - PAGE_SHIFT);
else
- mem = kmalloc(cmd_data_len, GFP_NOIO);
+ mem = kmalloc(cmd_data_len, GFP_ATOMIC);
if (mem == NULL) {
iser_err("Failed to allocate mem size %d %d for copying sglist\n",
@@ -474,9 +418,5 @@ int iser_reg_rdma_mem(struct iscsi_iser_task *iser_task,
return err;
}
}
-
- /* take a reference on this regd buf such that it will not be released *
- * (eg in send dto completion) before we get the scsi response */
- atomic_inc(&regd_buf->ref_count);
return 0;
}
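The bounce buffer for unaligned SG lists is still sized the same way; only the GFP flags move to GFP_ATOMIC, presumably because the allocation can now happen in a context that must not sleep. A sketch of the size branch, assuming the same 128K threshold as the file above:

#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/mm.h>

#define SKETCH_KMALLOC_THRESHOLD 0x20000	/* 128K, as above */

/* Large copies come from whole pages, small ones from kmalloc; the GFP
 * flag has to match the calling context (atomic here). */
static void *sketch_alloc_bounce(unsigned long len)
{
	if (len > SKETCH_KMALLOC_THRESHOLD)
		return (void *)__get_free_pages(GFP_ATOMIC,
				ilog2(roundup_pow_of_two(len)) - PAGE_SHIFT);
	return kmalloc(len, GFP_ATOMIC);
}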
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8579f32ce38..308d17bb514 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -37,9 +37,8 @@
#include "iscsi_iser.h"
#define ISCSI_ISER_MAX_CONN 8
-#define ISER_MAX_CQ_LEN ((ISER_QP_MAX_RECV_DTOS + \
- ISER_QP_MAX_REQ_DTOS) * \
- ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_RX_CQ_LEN (ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
+#define ISER_MAX_TX_CQ_LEN (ISER_QP_MAX_REQ_DTOS * ISCSI_ISER_MAX_CONN)
static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);
@@ -67,15 +66,23 @@ static int iser_create_device_ib_res(struct iser_device *device)
if (IS_ERR(device->pd))
goto pd_err;
- device->cq = ib_create_cq(device->ib_device,
+ device->rx_cq = ib_create_cq(device->ib_device,
iser_cq_callback,
iser_cq_event_callback,
(void *)device,
- ISER_MAX_CQ_LEN, 0);
- if (IS_ERR(device->cq))
- goto cq_err;
+ ISER_MAX_RX_CQ_LEN, 0);
+ if (IS_ERR(device->rx_cq))
+ goto rx_cq_err;
- if (ib_req_notify_cq(device->cq, IB_CQ_NEXT_COMP))
+ device->tx_cq = ib_create_cq(device->ib_device,
+ NULL, iser_cq_event_callback,
+ (void *)device,
+ ISER_MAX_TX_CQ_LEN, 0);
+
+ if (IS_ERR(device->tx_cq))
+ goto tx_cq_err;
+
+ if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
goto cq_arm_err;
tasklet_init(&device->cq_tasklet,
@@ -93,8 +100,10 @@ static int iser_create_device_ib_res(struct iser_device *device)
dma_mr_err:
tasklet_kill(&device->cq_tasklet);
cq_arm_err:
- ib_destroy_cq(device->cq);
-cq_err:
+ ib_destroy_cq(device->tx_cq);
+tx_cq_err:
+ ib_destroy_cq(device->rx_cq);
+rx_cq_err:
ib_dealloc_pd(device->pd);
pd_err:
iser_err("failed to allocate an IB resource\n");
@@ -112,11 +121,13 @@ static void iser_free_device_ib_res(struct iser_device *device)
tasklet_kill(&device->cq_tasklet);
(void)ib_dereg_mr(device->mr);
- (void)ib_destroy_cq(device->cq);
+ (void)ib_destroy_cq(device->tx_cq);
+ (void)ib_destroy_cq(device->rx_cq);
(void)ib_dealloc_pd(device->pd);
device->mr = NULL;
- device->cq = NULL;
+ device->tx_cq = NULL;
+ device->rx_cq = NULL;
device->pd = NULL;
}
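The single completion queue becomes an rx_cq with a completion callback plus a tx_cq that is only polled from the tasklet, each sized for its own side. A sketch of that split with an unwind path mirroring the new rx_cq_err/tx_cq_err labels (wrapper name hypothetical):

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int sketch_create_cqs(struct ib_device *ib_dev, void *ctx,
			     ib_comp_handler rx_done,
			     void (*event)(struct ib_event *, void *),
			     int rx_len, int tx_len,
			     struct ib_cq **rx_cq, struct ib_cq **tx_cq)
{
	/* receive CQ keeps the completion handler and is re-armed */
	*rx_cq = ib_create_cq(ib_dev, rx_done, event, ctx, rx_len, 0);
	if (IS_ERR(*rx_cq))
		return PTR_ERR(*rx_cq);

	/* send CQ has no handler; it is drained from the tasklet */
	*tx_cq = ib_create_cq(ib_dev, NULL, event, ctx, tx_len, 0);
	if (IS_ERR(*tx_cq)) {
		ib_destroy_cq(*rx_cq);
		return PTR_ERR(*tx_cq);
	}
	return 0;
}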
@@ -129,13 +140,23 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
struct iser_device *device;
struct ib_qp_init_attr init_attr;
- int ret;
+ int ret = -ENOMEM;
struct ib_fmr_pool_param params;
BUG_ON(ib_conn->device == NULL);
device = ib_conn->device;
+ ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
+ if (!ib_conn->login_buf) {
+		ret = -ENOMEM;
+		goto alloc_err;
+ }
+
+ ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
+ (void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE,
+ DMA_FROM_DEVICE);
+
ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
GFP_KERNEL);
@@ -169,12 +190,12 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
init_attr.event_handler = iser_qp_event_callback;
init_attr.qp_context = (void *)ib_conn;
- init_attr.send_cq = device->cq;
- init_attr.recv_cq = device->cq;
+ init_attr.send_cq = device->tx_cq;
+ init_attr.recv_cq = device->rx_cq;
init_attr.cap.max_send_wr = ISER_QP_MAX_REQ_DTOS;
init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS;
- init_attr.cap.max_send_sge = MAX_REGD_BUF_VECTOR_LEN;
- init_attr.cap.max_recv_sge = 2;
+ init_attr.cap.max_send_sge = 2;
+ init_attr.cap.max_recv_sge = 1;
init_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
init_attr.qp_type = IB_QPT_RC;
@@ -192,6 +213,7 @@ qp_err:
(void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
fmr_pool_err:
kfree(ib_conn->page_vec);
+ kfree(ib_conn->login_buf);
alloc_err:
iser_err("unable to alloc mem or create resource, err %d\n", ret);
return ret;
@@ -278,17 +300,6 @@ static void iser_device_try_release(struct iser_device *device)
mutex_unlock(&ig.device_list_mutex);
}
-int iser_conn_state_comp(struct iser_conn *ib_conn,
- enum iser_ib_conn_state comp)
-{
- int ret;
-
- spin_lock_bh(&ib_conn->lock);
- ret = (ib_conn->state == comp);
- spin_unlock_bh(&ib_conn->lock);
- return ret;
-}
-
static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
enum iser_ib_conn_state comp,
enum iser_ib_conn_state exch)
@@ -314,7 +325,7 @@ static void iser_conn_release(struct iser_conn *ib_conn)
mutex_lock(&ig.connlist_mutex);
list_del(&ib_conn->conn_list);
mutex_unlock(&ig.connlist_mutex);
-
+ iser_free_rx_descriptors(ib_conn);
iser_free_ib_conn_res(ib_conn);
ib_conn->device = NULL;
/* on EVENT_ADDR_ERROR there's no device yet for this conn */
@@ -442,7 +453,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
ISCSI_ERR_CONN_FAILED);
/* Complete the termination process if no posts are pending */
- if ((atomic_read(&ib_conn->post_recv_buf_count) == 0) &&
+ if (ib_conn->post_recv_buf_count == 0 &&
(atomic_read(&ib_conn->post_send_buf_count) == 0)) {
ib_conn->state = ISER_CONN_DOWN;
wake_up_interruptible(&ib_conn->wait);
@@ -489,9 +500,8 @@ void iser_conn_init(struct iser_conn *ib_conn)
{
ib_conn->state = ISER_CONN_INIT;
init_waitqueue_head(&ib_conn->wait);
- atomic_set(&ib_conn->post_recv_buf_count, 0);
+ ib_conn->post_recv_buf_count = 0;
atomic_set(&ib_conn->post_send_buf_count, 0);
- atomic_set(&ib_conn->unexpected_pdu_count, 0);
atomic_set(&ib_conn->refcount, 1);
INIT_LIST_HEAD(&ib_conn->conn_list);
spin_lock_init(&ib_conn->lock);
@@ -626,136 +636,97 @@ void iser_unreg_mem(struct iser_mem_reg *reg)
reg->mem_h = NULL;
}
-/**
- * iser_dto_to_iov - builds IOV from a dto descriptor
- */
-static void iser_dto_to_iov(struct iser_dto *dto, struct ib_sge *iov, int iov_len)
+int iser_post_recvl(struct iser_conn *ib_conn)
{
- int i;
- struct ib_sge *sge;
- struct iser_regd_buf *regd_buf;
-
- if (dto->regd_vector_len > iov_len) {
- iser_err("iov size %d too small for posting dto of len %d\n",
- iov_len, dto->regd_vector_len);
- BUG();
- }
+ struct ib_recv_wr rx_wr, *rx_wr_failed;
+ struct ib_sge sge;
+ int ib_ret;
- for (i = 0; i < dto->regd_vector_len; i++) {
- sge = &iov[i];
- regd_buf = dto->regd[i];
-
- sge->addr = regd_buf->reg.va;
- sge->length = regd_buf->reg.len;
- sge->lkey = regd_buf->reg.lkey;
-
- if (dto->used_sz[i] > 0) /* Adjust size */
- sge->length = dto->used_sz[i];
-
- /* offset and length should not exceed the regd buf length */
- if (sge->length + dto->offset[i] > regd_buf->reg.len) {
- iser_err("Used len:%ld + offset:%d, exceed reg.buf.len:"
- "%ld in dto:0x%p [%d], va:0x%08lX\n",
- (unsigned long)sge->length, dto->offset[i],
- (unsigned long)regd_buf->reg.len, dto, i,
- (unsigned long)sge->addr);
- BUG();
- }
+ sge.addr = ib_conn->login_dma;
+ sge.length = ISER_RX_LOGIN_SIZE;
+ sge.lkey = ib_conn->device->mr->lkey;
- sge->addr += dto->offset[i]; /* Adjust offset */
+ rx_wr.wr_id = (unsigned long)ib_conn->login_buf;
+ rx_wr.sg_list = &sge;
+ rx_wr.num_sge = 1;
+ rx_wr.next = NULL;
+
+ ib_conn->post_recv_buf_count++;
+ ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
+ if (ib_ret) {
+ iser_err("ib_post_recv failed ret=%d\n", ib_ret);
+ ib_conn->post_recv_buf_count--;
}
+ return ib_ret;
}
-/**
- * iser_post_recv - Posts a receive buffer.
- *
- * returns 0 on success, -1 on failure
- */
-int iser_post_recv(struct iser_desc *rx_desc)
+int iser_post_recvm(struct iser_conn *ib_conn, int count)
{
- int ib_ret, ret_val = 0;
- struct ib_recv_wr recv_wr, *recv_wr_failed;
- struct ib_sge iov[2];
- struct iser_conn *ib_conn;
- struct iser_dto *recv_dto = &rx_desc->dto;
-
- /* Retrieve conn */
- ib_conn = recv_dto->ib_conn;
-
- iser_dto_to_iov(recv_dto, iov, 2);
+ struct ib_recv_wr *rx_wr, *rx_wr_failed;
+ int i, ib_ret;
+ unsigned int my_rx_head = ib_conn->rx_desc_head;
+ struct iser_rx_desc *rx_desc;
+
+ for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
+ rx_desc = &ib_conn->rx_descs[my_rx_head];
+ rx_wr->wr_id = (unsigned long)rx_desc;
+ rx_wr->sg_list = &rx_desc->rx_sg;
+ rx_wr->num_sge = 1;
+ rx_wr->next = rx_wr + 1;
+ my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
+ }
- recv_wr.next = NULL;
- recv_wr.sg_list = iov;
- recv_wr.num_sge = recv_dto->regd_vector_len;
- recv_wr.wr_id = (unsigned long)rx_desc;
+ rx_wr--;
+ rx_wr->next = NULL; /* mark end of work requests list */
- atomic_inc(&ib_conn->post_recv_buf_count);
- ib_ret = ib_post_recv(ib_conn->qp, &recv_wr, &recv_wr_failed);
+ ib_conn->post_recv_buf_count += count;
+ ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
if (ib_ret) {
iser_err("ib_post_recv failed ret=%d\n", ib_ret);
- atomic_dec(&ib_conn->post_recv_buf_count);
- ret_val = -1;
- }
-
- return ret_val;
+ ib_conn->post_recv_buf_count -= count;
+ } else
+ ib_conn->rx_desc_head = my_rx_head;
+ return ib_ret;
}
+
/**
* iser_start_send - Initiate a Send DTO operation
*
* returns 0 on success, -1 on failure
*/
-int iser_post_send(struct iser_desc *tx_desc)
+int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
- int ib_ret, ret_val = 0;
+ int ib_ret;
struct ib_send_wr send_wr, *send_wr_failed;
- struct ib_sge iov[MAX_REGD_BUF_VECTOR_LEN];
- struct iser_conn *ib_conn;
- struct iser_dto *dto = &tx_desc->dto;
- ib_conn = dto->ib_conn;
-
- iser_dto_to_iov(dto, iov, MAX_REGD_BUF_VECTOR_LEN);
+ ib_dma_sync_single_for_device(ib_conn->device->ib_device,
+ tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
send_wr.next = NULL;
send_wr.wr_id = (unsigned long)tx_desc;
- send_wr.sg_list = iov;
- send_wr.num_sge = dto->regd_vector_len;
+ send_wr.sg_list = tx_desc->tx_sg;
+ send_wr.num_sge = tx_desc->num_sge;
send_wr.opcode = IB_WR_SEND;
- send_wr.send_flags = dto->notify_enable ? IB_SEND_SIGNALED : 0;
+ send_wr.send_flags = IB_SEND_SIGNALED;
atomic_inc(&ib_conn->post_send_buf_count);
ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
if (ib_ret) {
- iser_err("Failed to start SEND DTO, dto: 0x%p, IOV len: %d\n",
- dto, dto->regd_vector_len);
iser_err("ib_post_send failed, ret:%d\n", ib_ret);
atomic_dec(&ib_conn->post_send_buf_count);
- ret_val = -1;
}
-
- return ret_val;
+ return ib_ret;
}
-static void iser_handle_comp_error(struct iser_desc *desc)
+static void iser_handle_comp_error(struct iser_tx_desc *desc,
+ struct iser_conn *ib_conn)
{
- struct iser_dto *dto = &desc->dto;
- struct iser_conn *ib_conn = dto->ib_conn;
-
- iser_dto_buffs_release(dto);
-
- if (desc->type == ISCSI_RX) {
- kfree(desc->data);
+ if (desc && desc->type == ISCSI_TX_DATAOUT)
kmem_cache_free(ig.desc_cache, desc);
- atomic_dec(&ib_conn->post_recv_buf_count);
- } else { /* type is TX control/command/dataout */
- if (desc->type == ISCSI_TX_DATAOUT)
- kmem_cache_free(ig.desc_cache, desc);
- atomic_dec(&ib_conn->post_send_buf_count);
- }
- if (atomic_read(&ib_conn->post_recv_buf_count) == 0 &&
+ if (ib_conn->post_recv_buf_count == 0 &&
atomic_read(&ib_conn->post_send_buf_count) == 0) {
/* getting here when the state is UP means that the conn is *
* being terminated asynchronously from the iSCSI layer's *
@@ -774,32 +745,74 @@ static void iser_handle_comp_error(struct iser_desc *desc)
}
}
+static int iser_drain_tx_cq(struct iser_device *device)
+{
+ struct ib_cq *cq = device->tx_cq;
+ struct ib_wc wc;
+ struct iser_tx_desc *tx_desc;
+ struct iser_conn *ib_conn;
+ int completed_tx = 0;
+
+ while (ib_poll_cq(cq, 1, &wc) == 1) {
+ tx_desc = (struct iser_tx_desc *) (unsigned long) wc.wr_id;
+ ib_conn = wc.qp->qp_context;
+ if (wc.status == IB_WC_SUCCESS) {
+ if (wc.opcode == IB_WC_SEND)
+ iser_snd_completion(tx_desc, ib_conn);
+ else
+ iser_err("expected opcode %d got %d\n",
+ IB_WC_SEND, wc.opcode);
+ } else {
+ iser_err("tx id %llx status %d vend_err %x\n",
+ wc.wr_id, wc.status, wc.vendor_err);
+ atomic_dec(&ib_conn->post_send_buf_count);
+ iser_handle_comp_error(tx_desc, ib_conn);
+ }
+ completed_tx++;
+ }
+ return completed_tx;
+}
+
+
static void iser_cq_tasklet_fn(unsigned long data)
{
struct iser_device *device = (struct iser_device *)data;
- struct ib_cq *cq = device->cq;
+ struct ib_cq *cq = device->rx_cq;
struct ib_wc wc;
- struct iser_desc *desc;
+ struct iser_rx_desc *desc;
unsigned long xfer_len;
+ struct iser_conn *ib_conn;
+ int completed_tx, completed_rx;
+ completed_tx = completed_rx = 0;
while (ib_poll_cq(cq, 1, &wc) == 1) {
- desc = (struct iser_desc *) (unsigned long) wc.wr_id;
+ desc = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
BUG_ON(desc == NULL);
-
+ ib_conn = wc.qp->qp_context;
if (wc.status == IB_WC_SUCCESS) {
- if (desc->type == ISCSI_RX) {
+ if (wc.opcode == IB_WC_RECV) {
xfer_len = (unsigned long)wc.byte_len;
- iser_rcv_completion(desc, xfer_len);
- } else /* type == ISCSI_TX_CONTROL/SCSI_CMD/DOUT */
- iser_snd_completion(desc);
+ iser_rcv_completion(desc, xfer_len, ib_conn);
+ } else
+ iser_err("expected opcode %d got %d\n",
+ IB_WC_RECV, wc.opcode);
} else {
- iser_err("comp w. error op %d status %d\n",desc->type,wc.status);
- iser_handle_comp_error(desc);
+ if (wc.status != IB_WC_WR_FLUSH_ERR)
+ iser_err("rx id %llx status %d vend_err %x\n",
+ wc.wr_id, wc.status, wc.vendor_err);
+ ib_conn->post_recv_buf_count--;
+ iser_handle_comp_error(NULL, ib_conn);
}
+ completed_rx++;
+ if (!(completed_rx & 63))
+ completed_tx += iser_drain_tx_cq(device);
}
/* #warning "it is assumed here that arming CQ only once its empty" *
* " would not cause interrupts to be missed" */
ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+
+ completed_tx += iser_drain_tx_cq(device);
+ iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
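
The new iser_post_recvm() above fills an array of receive work requests, links them through their ->next pointers and hands the whole chain to a single ib_post_recv() call, advancing the ring head only when the post succeeded. A minimal sketch of that chaining pattern follows; the descriptor type and the RX_RING_SIZE/my_rx_desc names are illustrative placeholders rather than identifiers from the patch, and each descriptor's SGE (addr/length/lkey) is assumed to have been filled in when the ring was set up.

#include <linux/kernel.h>
#include <rdma/ib_verbs.h>

#define RX_RING_SIZE 64			/* must be a power of two for the mask below */

struct my_rx_desc {
	struct ib_sge	rx_sg;		/* addr/length/lkey set up once at init time */
};

static int post_rx_batch(struct ib_qp *qp, struct my_rx_desc *ring,
			 unsigned int *head, int count)
{
	struct ib_recv_wr wrs[16], *bad_wr;
	unsigned int idx = *head;
	int i, ret;

	if (count < 1 || count > ARRAY_SIZE(wrs))
		return -EINVAL;

	for (i = 0; i < count; i++) {
		struct my_rx_desc *desc = &ring[idx];

		wrs[i].wr_id   = (unsigned long)desc;	/* recover the desc on completion */
		wrs[i].sg_list = &desc->rx_sg;
		wrs[i].num_sge = 1;
		wrs[i].next    = &wrs[i + 1];		/* chain to the following WR */
		idx = (idx + 1) & (RX_RING_SIZE - 1);
	}
	wrs[count - 1].next = NULL;			/* terminate the chain */

	ret = ib_post_recv(qp, wrs, &bad_wr);		/* one verbs call for the whole batch */
	if (!ret)
		*head = idx;				/* advance the ring head only on success */
	return ret;
}
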
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 54c8fe25c42..ed3f9ebae88 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -80,7 +80,8 @@ MODULE_PARM_DESC(mellanox_workarounds,
static void srp_add_one(struct ib_device *device);
static void srp_remove_one(struct ib_device *device);
-static void srp_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr);
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr);
static int srp_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event);
static struct scsi_transport_template *ib_srp_transport_template;
@@ -227,14 +228,21 @@ static int srp_create_target_ib(struct srp_target_port *target)
if (!init_attr)
return -ENOMEM;
- target->cq = ib_create_cq(target->srp_host->srp_dev->dev,
- srp_completion, NULL, target, SRP_CQ_SIZE, 0);
- if (IS_ERR(target->cq)) {
- ret = PTR_ERR(target->cq);
- goto out;
+ target->recv_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_recv_completion, NULL, target, SRP_RQ_SIZE, 0);
+ if (IS_ERR(target->recv_cq)) {
+ ret = PTR_ERR(target->recv_cq);
+ goto err;
}
- ib_req_notify_cq(target->cq, IB_CQ_NEXT_COMP);
+ target->send_cq = ib_create_cq(target->srp_host->srp_dev->dev,
+ srp_send_completion, NULL, target, SRP_SQ_SIZE, 0);
+ if (IS_ERR(target->send_cq)) {
+ ret = PTR_ERR(target->send_cq);
+ goto err_recv_cq;
+ }
+
+ ib_req_notify_cq(target->recv_cq, IB_CQ_NEXT_COMP);
init_attr->event_handler = srp_qp_event;
init_attr->cap.max_send_wr = SRP_SQ_SIZE;
@@ -243,24 +251,32 @@ static int srp_create_target_ib(struct srp_target_port *target)
init_attr->cap.max_send_sge = 1;
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
init_attr->qp_type = IB_QPT_RC;
- init_attr->send_cq = target->cq;
- init_attr->recv_cq = target->cq;
+ init_attr->send_cq = target->send_cq;
+ init_attr->recv_cq = target->recv_cq;
target->qp = ib_create_qp(target->srp_host->srp_dev->pd, init_attr);
if (IS_ERR(target->qp)) {
ret = PTR_ERR(target->qp);
- ib_destroy_cq(target->cq);
- goto out;
+ goto err_send_cq;
}
ret = srp_init_qp(target, target->qp);
- if (ret) {
- ib_destroy_qp(target->qp);
- ib_destroy_cq(target->cq);
- goto out;
- }
+ if (ret)
+ goto err_qp;
-out:
+ kfree(init_attr);
+ return 0;
+
+err_qp:
+ ib_destroy_qp(target->qp);
+
+err_send_cq:
+ ib_destroy_cq(target->send_cq);
+
+err_recv_cq:
+ ib_destroy_cq(target->recv_cq);
+
+err:
kfree(init_attr);
return ret;
}
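
The rewritten srp_create_target_ib() creates the receive CQ, then the send CQ, then the QP, and on failure unwinds through labels in exactly the reverse order, so each error path releases only what already exists. A condensed sketch of that create/unwind shape, assuming the same six-argument ib_create_cq() form used in this driver; the function name and parameters here are placeholders, not the driver's.

#include <linux/err.h>
#include <rdma/ib_verbs.h>

static int create_cq_pair_and_qp(struct ib_device *ibdev, struct ib_pd *pd,
				 ib_comp_handler rx_handler,
				 ib_comp_handler tx_handler,
				 void *ctx, int rx_entries, int tx_entries,
				 struct ib_cq **rx_cq, struct ib_cq **tx_cq,
				 struct ib_qp **qp)
{
	struct ib_qp_init_attr init_attr = {
		.cap.max_send_wr  = tx_entries,
		.cap.max_recv_wr  = rx_entries,
		.cap.max_send_sge = 1,
		.cap.max_recv_sge = 1,
		.sq_sig_type	  = IB_SIGNAL_ALL_WR,
		.qp_type	  = IB_QPT_RC,
	};
	int ret;

	*rx_cq = ib_create_cq(ibdev, rx_handler, NULL, ctx, rx_entries, 0);
	if (IS_ERR(*rx_cq))
		return PTR_ERR(*rx_cq);

	*tx_cq = ib_create_cq(ibdev, tx_handler, NULL, ctx, tx_entries, 0);
	if (IS_ERR(*tx_cq)) {
		ret = PTR_ERR(*tx_cq);
		goto err_rx_cq;
	}

	init_attr.send_cq = *tx_cq;
	init_attr.recv_cq = *rx_cq;
	*qp = ib_create_qp(pd, &init_attr);
	if (IS_ERR(*qp)) {
		ret = PTR_ERR(*qp);
		goto err_tx_cq;
	}
	return 0;

err_tx_cq:
	ib_destroy_cq(*tx_cq);		/* undo step 2 */
err_rx_cq:
	ib_destroy_cq(*rx_cq);		/* undo step 1 */
	return ret;
}
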
@@ -270,7 +286,8 @@ static void srp_free_target_ib(struct srp_target_port *target)
int i;
ib_destroy_qp(target->qp);
- ib_destroy_cq(target->cq);
+ ib_destroy_cq(target->send_cq);
+ ib_destroy_cq(target->recv_cq);
for (i = 0; i < SRP_RQ_SIZE; ++i)
srp_free_iu(target->srp_host, target->rx_ring[i]);
@@ -568,7 +585,9 @@ static int srp_reconnect_target(struct srp_target_port *target)
if (ret)
goto err;
- while (ib_poll_cq(target->cq, 1, &wc) > 0)
+ while (ib_poll_cq(target->recv_cq, 1, &wc) > 0)
+ ; /* nothing */
+ while (ib_poll_cq(target->send_cq, 1, &wc) > 0)
; /* nothing */
spin_lock_irq(target->scsi_host->host_lock);
@@ -851,7 +870,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
struct srp_iu *iu;
u8 opcode;
- iu = target->rx_ring[wc->wr_id & ~SRP_OP_RECV];
+ iu = target->rx_ring[wc->wr_id];
dev = target->srp_host->srp_dev->dev;
ib_dma_sync_single_for_cpu(dev, iu->dma, target->max_ti_iu_len,
@@ -898,7 +917,7 @@ static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc)
DMA_FROM_DEVICE);
}
-static void srp_completion(struct ib_cq *cq, void *target_ptr)
+static void srp_recv_completion(struct ib_cq *cq, void *target_ptr)
{
struct srp_target_port *target = target_ptr;
struct ib_wc wc;
@@ -907,17 +926,31 @@ static void srp_completion(struct ib_cq *cq, void *target_ptr)
while (ib_poll_cq(cq, 1, &wc) > 0) {
if (wc.status) {
shost_printk(KERN_ERR, target->scsi_host,
- PFX "failed %s status %d\n",
- wc.wr_id & SRP_OP_RECV ? "receive" : "send",
+ PFX "failed receive status %d\n",
wc.status);
target->qp_in_error = 1;
break;
}
- if (wc.wr_id & SRP_OP_RECV)
- srp_handle_recv(target, &wc);
- else
- ++target->tx_tail;
+ srp_handle_recv(target, &wc);
+ }
+}
+
+static void srp_send_completion(struct ib_cq *cq, void *target_ptr)
+{
+ struct srp_target_port *target = target_ptr;
+ struct ib_wc wc;
+
+ while (ib_poll_cq(cq, 1, &wc) > 0) {
+ if (wc.status) {
+ shost_printk(KERN_ERR, target->scsi_host,
+ PFX "failed send status %d\n",
+ wc.status);
+ target->qp_in_error = 1;
+ break;
+ }
+
+ ++target->tx_tail;
}
}
@@ -930,7 +963,7 @@ static int __srp_post_recv(struct srp_target_port *target)
int ret;
next = target->rx_head & (SRP_RQ_SIZE - 1);
- wr.wr_id = next | SRP_OP_RECV;
+ wr.wr_id = next;
iu = target->rx_ring[next];
list.addr = iu->dma;
@@ -970,6 +1003,8 @@ static struct srp_iu *__srp_get_tx_iu(struct srp_target_port *target,
{
s32 min = (req_type == SRP_REQ_TASK_MGMT) ? 1 : 2;
+ srp_send_completion(target->send_cq, target);
+
if (target->tx_head - target->tx_tail >= SRP_SQ_SIZE)
return NULL;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.h b/drivers/infiniband/ulp/srp/ib_srp.h
index e185b907fc1..5a80eac6fda 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.h
+++ b/drivers/infiniband/ulp/srp/ib_srp.h
@@ -60,7 +60,6 @@ enum {
SRP_RQ_SHIFT = 6,
SRP_RQ_SIZE = 1 << SRP_RQ_SHIFT,
SRP_SQ_SIZE = SRP_RQ_SIZE - 1,
- SRP_CQ_SIZE = SRP_SQ_SIZE + SRP_RQ_SIZE,
SRP_TAG_TSK_MGMT = 1 << (SRP_RQ_SHIFT + 1),
@@ -69,8 +68,6 @@ enum {
SRP_FMR_DIRTY_SIZE = SRP_FMR_POOL_SIZE / 4
};
-#define SRP_OP_RECV (1 << 31)
-
enum srp_target_state {
SRP_TARGET_LIVE,
SRP_TARGET_CONNECTING,
@@ -133,7 +130,8 @@ struct srp_target_port {
int path_query_id;
struct ib_cm_id *cm_id;
- struct ib_cq *cq;
+ struct ib_cq *recv_cq;
+ struct ib_cq *send_cq;
struct ib_qp *qp;
int max_ti_iu_len;
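
Splitting the single CQ into recv_cq and send_cq is also what lets the SRP_OP_RECV tag bit go away: the completion callback itself identifies the direction, so wr_id can carry a plain ring index. A hedged sketch of a receive-side callback in that style; handle_rx() is an illustrative stand-in, not the driver's srp_handle_recv().

#include <rdma/ib_verbs.h>

static void handle_rx(void *ctx, u64 ring_index)
{
	/* look up the receive IU at ring_index and process it */
}

static void rx_cq_callback(struct ib_cq *cq, void *ctx)
{
	struct ib_wc wc;

	/* re-arm so the next completion raises another callback,
	 * then drain whatever is already queued */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	while (ib_poll_cq(cq, 1, &wc) > 0) {
		if (wc.status != IB_WC_SUCCESS)
			break;			/* connection is now in error */
		handle_rx(ctx, wc.wr_id);	/* wr_id is just the rx ring index */
	}
}
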
diff --git a/drivers/input/touchscreen/mc13783_ts.c b/drivers/input/touchscreen/mc13783_ts.c
index be115b3b65e..be54fd639ac 100644
--- a/drivers/input/touchscreen/mc13783_ts.c
+++ b/drivers/input/touchscreen/mc13783_ts.c
@@ -44,7 +44,7 @@ static irqreturn_t mc13783_ts_handler(int irq, void *data)
{
struct mc13783_ts_priv *priv = data;
- mc13783_ackirq(priv->mc13783, irq);
+ mc13783_irq_ack(priv->mc13783, irq);
/*
* Kick off reading coordinates. Note that if work happens already
@@ -135,7 +135,7 @@ static int mc13783_ts_open(struct input_dev *dev)
mc13783_lock(priv->mc13783);
- mc13783_ackirq(priv->mc13783, MC13783_IRQ_TS);
+ mc13783_irq_ack(priv->mc13783, MC13783_IRQ_TS);
ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_TS,
mc13783_ts_handler, MC13783_TS_NAME, priv);
diff --git a/drivers/isdn/Kconfig b/drivers/isdn/Kconfig
index 022a1945295..4fb601670de 100644
--- a/drivers/isdn/Kconfig
+++ b/drivers/isdn/Kconfig
@@ -7,15 +7,14 @@ menuconfig ISDN
depends on NET
depends on !S390
---help---
- ISDN ("Integrated Services Digital Networks", called RNIS in France)
- is a special type of fully digital telephone service; it's mostly
- used to connect to your Internet service provider (with SLIP or
- PPP). The main advantage is that the speed is higher than ordinary
- modem/telephone connections, and that you can have voice
- conversations while downloading stuff. It only works if your
- computer is equipped with an ISDN card and both you and your service
- provider purchased an ISDN line from the phone company. For
- details, read <http://www.alumni.caltech.edu/~dank/isdn/> on the WWW.
+ ISDN ("Integrated Services Digital Network", called RNIS in France)
+ is a fully digital telephone service that can be used for voice and
+ data connections. If your computer is equipped with an ISDN
+ adapter, you can use it to connect to your Internet service provider
+ (with SLIP or PPP) faster than via a conventional telephone modem
+ (though still much slower than with DSL) or to make and accept
+ voice calls (e.g. turning your PC into a software answering machine
+ or PABX).
Select this option if you want your kernel to support ISDN.
@@ -39,17 +38,22 @@ menuconfig ISDN_I4L
It is still available, though, for use with adapters that are not
supported by the new CAPI subsystem yet.
-source "drivers/isdn/mISDN/Kconfig"
-
source "drivers/isdn/i4l/Kconfig"
menuconfig ISDN_CAPI
tristate "CAPI 2.0 subsystem"
help
- This provides the CAPI (Common ISDN Application Programming
- Interface, a standard making it easy for programs to access ISDN
- hardware, see <http://www.capi.org/>. This is needed for AVM's set
- of active ISDN controllers like B1, T1, M1.
+ This provides CAPI (the Common ISDN Application Programming
+ Interface) Version 2.0, a standard making it easy for programs to
+ access ISDN hardware in a device independent way. (For details see
+ <http://www.capi.org/>.) CAPI supports making and accepting voice
+ and data connections, controlling call options and protocols,
+ as well as ISDN supplementary services like call forwarding or
+ three-party conferences (if supported by the specific hardware
+ driver).
+
+ Select this option and the appropriate hardware driver below if
+ you have an ISDN adapter supported by the CAPI subsystem.
if ISDN_CAPI
@@ -61,4 +65,13 @@ endif # ISDN_CAPI
source "drivers/isdn/gigaset/Kconfig"
+source "drivers/isdn/hysdn/Kconfig"
+
+source "drivers/isdn/mISDN/Kconfig"
+
+config ISDN_HDLC
+ tristate
+ select CRC_CCITT
+ select BITREVERSE
+
endif # ISDN
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index b2a04755c96..a168e8a891b 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -17,8 +17,7 @@ config CAPI_TRACE
If unsure, say Y.
config ISDN_CAPI_MIDDLEWARE
- bool "CAPI2.0 Middleware support (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "CAPI2.0 Middleware support"
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
@@ -35,18 +34,19 @@ config ISDN_CAPI_CAPI20
Y/M here.
config ISDN_CAPI_CAPIFS_BOOL
- bool "CAPI2.0 filesystem support"
+ bool "CAPI2.0 filesystem support (DEPRECATED)"
depends on ISDN_CAPI_MIDDLEWARE && ISDN_CAPI_CAPI20
+ help
+ This option provides a special file system, similar to /dev/pts with
+ device nodes for the special ttys established by using the
+ middleware extension above.
+ You no longer need this; udev fully replaces it. This feature is
+ scheduled for removal.
config ISDN_CAPI_CAPIFS
tristate
depends on ISDN_CAPI_CAPIFS_BOOL
default ISDN_CAPI_CAPI20
- help
- This option provides a special file system, similar to /dev/pts with
- device nodes for the special ttys established by using the
- middleware extension above. If you want to use pppd with
- pppdcapiplugin to dial up to your ISP, say Y here.
config ISDN_CAPI_CAPIDRV
tristate "CAPI2.0 capidrv interface support"
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
index 65bf91e16a4..ee5837522f5 100644
--- a/drivers/isdn/capi/capi.c
+++ b/drivers/isdn/capi/capi.c
@@ -23,16 +23,13 @@
#include <linux/smp_lock.h>
#include <linux/timer.h>
#include <linux/wait.h>
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
#include <linux/tty.h>
-#ifdef CONFIG_PPP
#include <linux/netdevice.h>
#include <linux/ppp_defs.h>
#include <linux/if_ppp.h>
-#endif /* CONFIG_PPP */
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/capi.h>
#include <linux/kernelcapi.h>
@@ -41,35 +38,29 @@
#include <linux/moduleparam.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capicmd.h>
-#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
-#include "capifs.h"
-#endif
-static char *revision = "$Revision: 1.1.2.7 $";
+#include "capifs.h"
MODULE_DESCRIPTION("CAPI4Linux: Userspace /dev/capi20 interface");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
-#undef _DEBUG_REFCOUNT /* alloc/free and open/close debug */
#undef _DEBUG_TTYFUNCS /* call to tty_driver */
#undef _DEBUG_DATAFLOW /* data flow */
/* -------- driver information -------------------------------------- */
static struct class *capi_class;
-
static int capi_major = 68; /* allocated */
+
+module_param_named(major, capi_major, uint, 0);
+
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-#define CAPINC_NR_PORTS 32
+#define CAPINC_NR_PORTS 32
#define CAPINC_MAX_PORTS 256
-static int capi_ttymajor = 191;
+
static int capi_ttyminors = CAPINC_NR_PORTS;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
-module_param_named(major, capi_major, uint, 0);
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-module_param_named(ttymajor, capi_ttymajor, uint, 0);
module_param_named(ttyminors, capi_ttyminors, uint, 0);
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
@@ -83,53 +74,43 @@ module_param_named(ttyminors, capi_ttyminors, uint, 0);
struct capidev;
struct capincci;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
struct capiminor;
-struct datahandle_queue {
+struct ackqueue_entry {
struct list_head list;
u16 datahandle;
};
struct capiminor {
- struct list_head list;
- struct capincci *nccip;
+ struct kref kref;
+
unsigned int minor;
+ struct dentry *capifs_dentry;
- struct capi20_appl *ap;
- u32 ncci;
- u16 datahandle;
- u16 msgid;
+ struct capi20_appl *ap;
+ u32 ncci;
+ atomic_t datahandle;
+ atomic_t msgid;
- struct tty_struct *tty;
+ struct tty_port port;
int ttyinstop;
int ttyoutstop;
- struct sk_buff *ttyskb;
- atomic_t ttyopencount;
- struct sk_buff_head inqueue;
- int inbytes;
- struct sk_buff_head outqueue;
- int outbytes;
+ struct sk_buff_head inqueue;
+
+ struct sk_buff_head outqueue;
+ int outbytes;
+ struct sk_buff *outskb;
+ spinlock_t outlock;
/* transmit path */
struct list_head ackqueue;
int nack;
spinlock_t ackqlock;
};
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
-
-/* FIXME: The following lock is a sledgehammer-workaround to a
- * locking issue with the capiminor (and maybe other) data structure(s).
- * Access to this data is done in a racy way and crashes the machine with
- * a FritzCard DSL driver; sooner or later. This is a workaround
- * which trades scalability vs stability, so it doesn't crash the kernel anymore.
- * The correct (and scalable) fix for the issue seems to require
- * an API change to the drivers... . */
-static DEFINE_SPINLOCK(workaround_lock);
struct capincci {
- struct capincci *next;
+ struct list_head list;
u32 ncci;
struct capidev *cdev;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
@@ -146,28 +127,28 @@ struct capidev {
struct sk_buff_head recvqueue;
wait_queue_head_t recvwait;
- struct capincci *nccis;
+ struct list_head nccis;
- struct mutex ncci_list_mtx;
+ struct mutex lock;
};
/* -------- global variables ---------------------------------------- */
-static DEFINE_RWLOCK(capidev_list_lock);
+static DEFINE_MUTEX(capidev_list_lock);
static LIST_HEAD(capidev_list);
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
-static DEFINE_RWLOCK(capiminor_list_lock);
-static LIST_HEAD(capiminor_list);
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
+static DEFINE_SPINLOCK(capiminors_lock);
+static struct capiminor **capiminors;
+
+static struct tty_driver *capinc_tty_driver;
+
/* -------- datahandles --------------------------------------------- */
-static int capincci_add_ack(struct capiminor *mp, u16 datahandle)
+static int capiminor_add_ack(struct capiminor *mp, u16 datahandle)
{
- struct datahandle_queue *n;
- unsigned long flags;
+ struct ackqueue_entry *n;
n = kmalloc(sizeof(*n), GFP_ATOMIC);
if (unlikely(!n)) {
@@ -176,253 +157,246 @@ static int capincci_add_ack(struct capiminor *mp, u16 datahandle)
}
n->datahandle = datahandle;
INIT_LIST_HEAD(&n->list);
- spin_lock_irqsave(&mp->ackqlock, flags);
+ spin_lock_bh(&mp->ackqlock);
list_add_tail(&n->list, &mp->ackqueue);
mp->nack++;
- spin_unlock_irqrestore(&mp->ackqlock, flags);
+ spin_unlock_bh(&mp->ackqlock);
return 0;
}
static int capiminor_del_ack(struct capiminor *mp, u16 datahandle)
{
- struct datahandle_queue *p, *tmp;
- unsigned long flags;
+ struct ackqueue_entry *p, *tmp;
- spin_lock_irqsave(&mp->ackqlock, flags);
+ spin_lock_bh(&mp->ackqlock);
list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
if (p->datahandle == datahandle) {
list_del(&p->list);
- kfree(p);
mp->nack--;
- spin_unlock_irqrestore(&mp->ackqlock, flags);
+ spin_unlock_bh(&mp->ackqlock);
+ kfree(p);
return 0;
}
}
- spin_unlock_irqrestore(&mp->ackqlock, flags);
+ spin_unlock_bh(&mp->ackqlock);
return -1;
}
static void capiminor_del_all_ack(struct capiminor *mp)
{
- struct datahandle_queue *p, *tmp;
- unsigned long flags;
+ struct ackqueue_entry *p, *tmp;
- spin_lock_irqsave(&mp->ackqlock, flags);
list_for_each_entry_safe(p, tmp, &mp->ackqueue, list) {
list_del(&p->list);
kfree(p);
mp->nack--;
}
- spin_unlock_irqrestore(&mp->ackqlock, flags);
}
/* -------- struct capiminor ---------------------------------------- */
+static const struct tty_port_operations capiminor_port_ops; /* we have none */
+
static struct capiminor *capiminor_alloc(struct capi20_appl *ap, u32 ncci)
{
- struct capiminor *mp, *p;
- unsigned int minor = 0;
- unsigned long flags;
+ struct capiminor *mp;
+ struct device *dev;
+ unsigned int minor;
- mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
+ mp = kzalloc(sizeof(*mp), GFP_KERNEL);
if (!mp) {
printk(KERN_ERR "capi: can't alloc capiminor\n");
return NULL;
}
+ kref_init(&mp->kref);
+
mp->ap = ap;
mp->ncci = ncci;
- mp->msgid = 0;
- atomic_set(&mp->ttyopencount,0);
INIT_LIST_HEAD(&mp->ackqueue);
spin_lock_init(&mp->ackqlock);
skb_queue_head_init(&mp->inqueue);
skb_queue_head_init(&mp->outqueue);
+ spin_lock_init(&mp->outlock);
- /* Allocate the least unused minor number.
- */
- write_lock_irqsave(&capiminor_list_lock, flags);
- if (list_empty(&capiminor_list))
- list_add(&mp->list, &capiminor_list);
- else {
- list_for_each_entry(p, &capiminor_list, list) {
- if (p->minor > minor)
- break;
- minor++;
- }
-
- if (minor < capi_ttyminors) {
- mp->minor = minor;
- list_add(&mp->list, p->list.prev);
+ tty_port_init(&mp->port);
+ mp->port.ops = &capiminor_port_ops;
+
+ /* Allocate the least unused minor number. */
+ spin_lock(&capiminors_lock);
+ for (minor = 0; minor < capi_ttyminors; minor++)
+ if (!capiminors[minor]) {
+ capiminors[minor] = mp;
+ break;
}
- }
- write_unlock_irqrestore(&capiminor_list_lock, flags);
+ spin_unlock(&capiminors_lock);
- if (!(minor < capi_ttyminors)) {
+ if (minor == capi_ttyminors) {
printk(KERN_NOTICE "capi: out of minors\n");
- kfree(mp);
- return NULL;
+ goto err_out1;
}
+ mp->minor = minor;
+
+ dev = tty_register_device(capinc_tty_driver, minor, NULL);
+ if (IS_ERR(dev))
+ goto err_out2;
+
return mp;
+
+err_out2:
+ spin_lock(&capiminors_lock);
+ capiminors[minor] = NULL;
+ spin_unlock(&capiminors_lock);
+
+err_out1:
+ kfree(mp);
+ return NULL;
}
-static void capiminor_free(struct capiminor *mp)
+static void capiminor_destroy(struct kref *kref)
{
- unsigned long flags;
-
- write_lock_irqsave(&capiminor_list_lock, flags);
- list_del(&mp->list);
- write_unlock_irqrestore(&capiminor_list_lock, flags);
+ struct capiminor *mp = container_of(kref, struct capiminor, kref);
- kfree_skb(mp->ttyskb);
- mp->ttyskb = NULL;
+ kfree_skb(mp->outskb);
skb_queue_purge(&mp->inqueue);
skb_queue_purge(&mp->outqueue);
capiminor_del_all_ack(mp);
kfree(mp);
}
-static struct capiminor *capiminor_find(unsigned int minor)
+static struct capiminor *capiminor_get(unsigned int minor)
{
- struct list_head *l;
- struct capiminor *p = NULL;
+ struct capiminor *mp;
- read_lock(&capiminor_list_lock);
- list_for_each(l, &capiminor_list) {
- p = list_entry(l, struct capiminor, list);
- if (p->minor == minor)
- break;
- }
- read_unlock(&capiminor_list_lock);
- if (l == &capiminor_list)
- return NULL;
+ spin_lock(&capiminors_lock);
+ mp = capiminors[minor];
+ if (mp)
+ kref_get(&mp->kref);
+ spin_unlock(&capiminors_lock);
- return p;
+ return mp;
+}
+
+static inline void capiminor_put(struct capiminor *mp)
+{
+ kref_put(&mp->kref, capiminor_destroy);
+}
+
+static void capiminor_free(struct capiminor *mp)
+{
+ tty_unregister_device(capinc_tty_driver, mp->minor);
+
+ spin_lock(&capiminors_lock);
+ capiminors[mp->minor] = NULL;
+ spin_unlock(&capiminors_lock);
+
+ capiminor_put(mp);
}
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
/* -------- struct capincci ----------------------------------------- */
-static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
+static void capincci_alloc_minor(struct capidev *cdev, struct capincci *np)
{
- struct capincci *np, **pp;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- struct capiminor *mp = NULL;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+ struct capiminor *mp;
+ dev_t device;
- np = kzalloc(sizeof(*np), GFP_ATOMIC);
- if (!np)
- return NULL;
- np->ncci = ncci;
- np->cdev = cdev;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- mp = NULL;
- if (cdev->userflags & CAPIFLAG_HIGHJACKING)
- mp = np->minorp = capiminor_alloc(&cdev->ap, ncci);
+ if (!(cdev->userflags & CAPIFLAG_HIGHJACKING))
+ return;
+
+ mp = np->minorp = capiminor_alloc(&cdev->ap, np->ncci);
if (mp) {
- mp->nccip = np;
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "set mp->nccip\n");
-#endif
-#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
- capifs_new_ncci(mp->minor, MKDEV(capi_ttymajor, mp->minor));
-#endif
+ device = MKDEV(capinc_tty_driver->major, mp->minor);
+ mp->capifs_dentry = capifs_new_ncci(mp->minor, device);
}
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- for (pp=&cdev->nccis; *pp; pp = &(*pp)->next)
- ;
- *pp = np;
- return np;
}
-static void capincci_free(struct capidev *cdev, u32 ncci)
+static void capincci_free_minor(struct capincci *np)
{
- struct capincci *np, **pp;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- struct capiminor *mp;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+ struct capiminor *mp = np->minorp;
+ struct tty_struct *tty;
- pp=&cdev->nccis;
- while (*pp) {
- np = *pp;
- if (ncci == 0xffffffff || np->ncci == ncci) {
- *pp = (*pp)->next;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- if ((mp = np->minorp) != NULL) {
-#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
- capifs_free_ncci(mp->minor);
-#endif
- if (mp->tty) {
- mp->nccip = NULL;
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "reset mp->nccip\n");
-#endif
- tty_hangup(mp->tty);
- } else {
- capiminor_free(mp);
- }
- }
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- kfree(np);
- if (*pp == NULL) return;
- } else {
- pp = &(*pp)->next;
+ if (mp) {
+ capifs_free_ncci(mp->capifs_dentry);
+
+ tty = tty_port_tty_get(&mp->port);
+ if (tty) {
+ tty_vhangup(tty);
+ tty_kref_put(tty);
}
+
+ capiminor_free(mp);
}
}
-static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
+static inline unsigned int capincci_minor_opencount(struct capincci *np)
{
- struct capincci *p;
+ struct capiminor *mp = np->minorp;
+ unsigned int count = 0;
+ struct tty_struct *tty;
- for (p=cdev->nccis; p ; p = p->next) {
- if (p->ncci == ncci)
- break;
+ if (mp) {
+ tty = tty_port_tty_get(&mp->port);
+ if (tty) {
+ count = tty->count;
+ tty_kref_put(tty);
+ }
}
- return p;
+ return count;
}
-/* -------- struct capidev ------------------------------------------ */
+#else /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
+
+static inline void
+capincci_alloc_minor(struct capidev *cdev, struct capincci *np) { }
+static inline void capincci_free_minor(struct capincci *np) { }
-static struct capidev *capidev_alloc(void)
+static inline unsigned int capincci_minor_opencount(struct capincci *np)
{
- struct capidev *cdev;
- unsigned long flags;
+ return 0;
+}
- cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
- if (!cdev)
+#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
+
+static struct capincci *capincci_alloc(struct capidev *cdev, u32 ncci)
+{
+ struct capincci *np;
+
+ np = kzalloc(sizeof(*np), GFP_KERNEL);
+ if (!np)
return NULL;
+ np->ncci = ncci;
+ np->cdev = cdev;
- mutex_init(&cdev->ncci_list_mtx);
- skb_queue_head_init(&cdev->recvqueue);
- init_waitqueue_head(&cdev->recvwait);
- write_lock_irqsave(&capidev_list_lock, flags);
- list_add_tail(&cdev->list, &capidev_list);
- write_unlock_irqrestore(&capidev_list_lock, flags);
- return cdev;
+ capincci_alloc_minor(cdev, np);
+
+ list_add_tail(&np->list, &cdev->nccis);
+
+ return np;
}
-static void capidev_free(struct capidev *cdev)
+static void capincci_free(struct capidev *cdev, u32 ncci)
{
- unsigned long flags;
+ struct capincci *np, *tmp;
- if (cdev->ap.applid) {
- capi20_release(&cdev->ap);
- cdev->ap.applid = 0;
- }
- skb_queue_purge(&cdev->recvqueue);
+ list_for_each_entry_safe(np, tmp, &cdev->nccis, list)
+ if (ncci == 0xffffffff || np->ncci == ncci) {
+ capincci_free_minor(np);
+ list_del(&np->list);
+ kfree(np);
+ }
+}
- mutex_lock(&cdev->ncci_list_mtx);
- capincci_free(cdev, 0xffffffff);
- mutex_unlock(&cdev->ncci_list_mtx);
+static struct capincci *capincci_find(struct capidev *cdev, u32 ncci)
+{
+ struct capincci *np;
- write_lock_irqsave(&capidev_list_lock, flags);
- list_del(&cdev->list);
- write_unlock_irqrestore(&capidev_list_lock, flags);
- kfree(cdev);
+ list_for_each_entry(np, &cdev->nccis, list)
+ if (np->ncci == ncci)
+ return np;
+ return NULL;
}
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
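
The capiminor rework above replaces the global list walk with a fixed table of pointers guarded by capiminors_lock and ties object lifetime to a kref, so a minor looked up in the table stays valid until its last user drops the reference. The same scheme in isolation, with placeholder names (struct obj, NR_SLOTS) instead of the capi ones:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define NR_SLOTS 32

struct obj {
	struct kref	kref;
	unsigned int	slot;
};

static struct obj *slots[NR_SLOTS];
static DEFINE_SPINLOCK(slots_lock);

static void obj_destroy(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, kref);

	kfree(o);
}

static struct obj *obj_get(unsigned int slot)	/* like capiminor_get() */
{
	struct obj *o;

	spin_lock(&slots_lock);
	o = slots[slot];
	if (o)
		kref_get(&o->kref);		/* caller now owns a reference */
	spin_unlock(&slots_lock);
	return o;
}

static void obj_put(struct obj *o)		/* like capiminor_put() */
{
	kref_put(&o->kref, obj_destroy);	/* frees once the last reference drops */
}

static void obj_unpublish(struct obj *o)	/* like capiminor_free() */
{
	spin_lock(&slots_lock);
	slots[o->slot] = NULL;			/* no new lookups can find it */
	spin_unlock(&slots_lock);
	obj_put(o);				/* drop the table's own reference */
}
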
@@ -432,7 +406,7 @@ static struct sk_buff *
gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
{
struct sk_buff *nskb;
- nskb = alloc_skb(CAPI_DATA_B3_RESP_LEN, GFP_ATOMIC);
+ nskb = alloc_skb(CAPI_DATA_B3_RESP_LEN, GFP_KERNEL);
if (nskb) {
u16 datahandle = CAPIMSG_U16(skb->data,CAPIMSG_BASELEN+4+4+2);
unsigned char *s = skb_put(nskb, CAPI_DATA_B3_RESP_LEN);
@@ -440,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
capimsg_setu16(s, 2, mp->ap->applid);
capimsg_setu8 (s, 4, CAPI_DATA_B3);
capimsg_setu8 (s, 5, CAPI_RESP);
- capimsg_setu16(s, 6, mp->msgid++);
+ capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
capimsg_setu32(s, 8, mp->ncci);
capimsg_setu16(s, 12, datahandle);
}
@@ -449,122 +423,156 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
static int handle_recv_skb(struct capiminor *mp, struct sk_buff *skb)
{
+ unsigned int datalen = skb->len - CAPIMSG_LEN(skb->data);
+ struct tty_struct *tty;
struct sk_buff *nskb;
- int datalen;
u16 errcode, datahandle;
struct tty_ldisc *ld;
-
- datalen = skb->len - CAPIMSG_LEN(skb->data);
- if (mp->tty == NULL)
- {
+ int ret = -1;
+
+ tty = tty_port_tty_get(&mp->port);
+ if (!tty) {
#ifdef _DEBUG_DATAFLOW
printk(KERN_DEBUG "capi: currently no receiver\n");
#endif
return -1;
}
- ld = tty_ldisc_ref(mp->tty);
- if (ld == NULL)
- return -1;
+ ld = tty_ldisc_ref(tty);
+ if (!ld) {
+ /* fatal error, do not requeue */
+ ret = 0;
+ kfree_skb(skb);
+ goto deref_tty;
+ }
+
if (ld->ops->receive_buf == NULL) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
printk(KERN_DEBUG "capi: ldisc has no receive_buf function\n");
#endif
- goto bad;
+ /* fatal error, do not requeue */
+ goto free_skb;
}
if (mp->ttyinstop) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
printk(KERN_DEBUG "capi: recv tty throttled\n");
#endif
- goto bad;
+ goto deref_ldisc;
}
- if (mp->tty->receive_room < datalen) {
+
+ if (tty->receive_room < datalen) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
printk(KERN_DEBUG "capi: no room in tty\n");
#endif
- goto bad;
+ goto deref_ldisc;
}
- if ((nskb = gen_data_b3_resp_for(mp, skb)) == NULL) {
+
+ nskb = gen_data_b3_resp_for(mp, skb);
+ if (!nskb) {
printk(KERN_ERR "capi: gen_data_b3_resp failed\n");
- goto bad;
+ goto deref_ldisc;
}
- datahandle = CAPIMSG_U16(skb->data,CAPIMSG_BASELEN+4);
+
+ datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN + 4);
+
errcode = capi20_put_message(mp->ap, nskb);
- if (errcode != CAPI_NOERROR) {
+
+ if (errcode == CAPI_NOERROR) {
+ skb_pull(skb, CAPIMSG_LEN(skb->data));
+#ifdef _DEBUG_DATAFLOW
+ printk(KERN_DEBUG "capi: DATA_B3_RESP %u len=%d => ldisc\n",
+ datahandle, skb->len);
+#endif
+ ld->ops->receive_buf(tty, skb->data, NULL, skb->len);
+ } else {
printk(KERN_ERR "capi: send DATA_B3_RESP failed=%x\n",
errcode);
kfree_skb(nskb);
- goto bad;
+
+ if (errcode == CAPI_SENDQUEUEFULL)
+ goto deref_ldisc;
}
- (void)skb_pull(skb, CAPIMSG_LEN(skb->data));
-#ifdef _DEBUG_DATAFLOW
- printk(KERN_DEBUG "capi: DATA_B3_RESP %u len=%d => ldisc\n",
- datahandle, skb->len);
-#endif
- ld->ops->receive_buf(mp->tty, skb->data, NULL, skb->len);
+
+free_skb:
+ ret = 0;
kfree_skb(skb);
+
+deref_ldisc:
tty_ldisc_deref(ld);
- return 0;
-bad:
- tty_ldisc_deref(ld);
- return -1;
+
+deref_tty:
+ tty_kref_put(tty);
+ return ret;
}
static void handle_minor_recv(struct capiminor *mp)
{
struct sk_buff *skb;
- while ((skb = skb_dequeue(&mp->inqueue)) != NULL) {
- unsigned int len = skb->len;
- mp->inbytes -= len;
+
+ while ((skb = skb_dequeue(&mp->inqueue)) != NULL)
if (handle_recv_skb(mp, skb) < 0) {
skb_queue_head(&mp->inqueue, skb);
- mp->inbytes += len;
return;
}
- }
}
-static int handle_minor_send(struct capiminor *mp)
+static void handle_minor_send(struct capiminor *mp)
{
+ struct tty_struct *tty;
struct sk_buff *skb;
u16 len;
- int count = 0;
u16 errcode;
u16 datahandle;
- if (mp->tty && mp->ttyoutstop) {
+ tty = tty_port_tty_get(&mp->port);
+ if (!tty)
+ return;
+
+ if (mp->ttyoutstop) {
#if defined(_DEBUG_DATAFLOW) || defined(_DEBUG_TTYFUNCS)
printk(KERN_DEBUG "capi: send: tty stopped\n");
#endif
- return 0;
+ tty_kref_put(tty);
+ return;
}
- while ((skb = skb_dequeue(&mp->outqueue)) != NULL) {
- datahandle = mp->datahandle;
+ while (1) {
+ spin_lock_bh(&mp->outlock);
+ skb = __skb_dequeue(&mp->outqueue);
+ if (!skb) {
+ spin_unlock_bh(&mp->outlock);
+ break;
+ }
len = (u16)skb->len;
+ mp->outbytes -= len;
+ spin_unlock_bh(&mp->outlock);
+
+ datahandle = atomic_inc_return(&mp->datahandle);
skb_push(skb, CAPI_DATA_B3_REQ_LEN);
memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
capimsg_setu16(skb->data, 2, mp->ap->applid);
capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
capimsg_setu8 (skb->data, 5, CAPI_REQ);
- capimsg_setu16(skb->data, 6, mp->msgid++);
+ capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
capimsg_setu16(skb->data, 16, len); /* Data length */
capimsg_setu16(skb->data, 18, datahandle);
capimsg_setu16(skb->data, 20, 0); /* Flags */
- if (capincci_add_ack(mp, datahandle) < 0) {
+ if (capiminor_add_ack(mp, datahandle) < 0) {
skb_pull(skb, CAPI_DATA_B3_REQ_LEN);
- skb_queue_head(&mp->outqueue, skb);
- return count;
+
+ spin_lock_bh(&mp->outlock);
+ __skb_queue_head(&mp->outqueue, skb);
+ mp->outbytes += len;
+ spin_unlock_bh(&mp->outlock);
+
+ break;
}
errcode = capi20_put_message(mp->ap, skb);
if (errcode == CAPI_NOERROR) {
- mp->datahandle++;
- count++;
- mp->outbytes -= len;
#ifdef _DEBUG_DATAFLOW
printk(KERN_DEBUG "capi: DATA_B3_REQ %u len=%u\n",
datahandle, len);
@@ -575,16 +583,20 @@ static int handle_minor_send(struct capiminor *mp)
if (errcode == CAPI_SENDQUEUEFULL) {
skb_pull(skb, CAPI_DATA_B3_REQ_LEN);
- skb_queue_head(&mp->outqueue, skb);
+
+ spin_lock_bh(&mp->outlock);
+ __skb_queue_head(&mp->outqueue, skb);
+ mp->outbytes += len;
+ spin_unlock_bh(&mp->outlock);
+
break;
}
/* ups, drop packet */
printk(KERN_ERR "capi: put_message = %x\n", errcode);
- mp->outbytes -= len;
kfree_skb(skb);
}
- return count;
+ tty_kref_put(tty);
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
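
handle_minor_send() and capinc_tty_write() now guard the private outqueue with their own _bh spinlock and use the unlocked __skb_queue helpers inside it, so the queue and the outbytes counter always change together. A small sketch of that enqueue/dequeue discipline, with struct out_path standing in for the relevant capiminor fields:

#include <linux/skbuff.h>
#include <linux/spinlock.h>

struct out_path {
	spinlock_t		lock;
	struct sk_buff_head	queue;
	int			bytes;
};

static void out_path_init(struct out_path *op)
{
	spin_lock_init(&op->lock);
	skb_queue_head_init(&op->queue);
	op->bytes = 0;
}

static void out_enqueue(struct out_path *op, struct sk_buff *skb)
{
	spin_lock_bh(&op->lock);
	__skb_queue_tail(&op->queue, skb);	/* our lock already protects the list */
	op->bytes += skb->len;
	spin_unlock_bh(&op->lock);
}

static struct sk_buff *out_dequeue(struct out_path *op)
{
	struct sk_buff *skb;

	spin_lock_bh(&op->lock);
	skb = __skb_dequeue(&op->queue);
	if (skb)
		op->bytes -= skb->len;		/* counter and queue stay in step */
	spin_unlock_bh(&op->lock);
	return skb;
}
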
@@ -594,65 +606,56 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
{
struct capidev *cdev = ap->private;
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
+ struct tty_struct *tty;
struct capiminor *mp;
u16 datahandle;
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
struct capincci *np;
- u32 ncci;
- unsigned long flags;
+
+ mutex_lock(&cdev->lock);
if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_CONF) {
u16 info = CAPIMSG_U16(skb->data, 12); // Info field
- if ((info & 0xff00) == 0) {
- mutex_lock(&cdev->ncci_list_mtx);
+ if ((info & 0xff00) == 0)
capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
- mutex_unlock(&cdev->ncci_list_mtx);
- }
}
- if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_IND) {
- mutex_lock(&cdev->ncci_list_mtx);
+ if (CAPIMSG_CMD(skb->data) == CAPI_CONNECT_B3_IND)
capincci_alloc(cdev, CAPIMSG_NCCI(skb->data));
- mutex_unlock(&cdev->ncci_list_mtx);
- }
- spin_lock_irqsave(&workaround_lock, flags);
+
if (CAPIMSG_COMMAND(skb->data) != CAPI_DATA_B3) {
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
- spin_unlock_irqrestore(&workaround_lock, flags);
- return;
+ goto unlock_out;
}
- ncci = CAPIMSG_CONTROL(skb->data);
- for (np = cdev->nccis; np && np->ncci != ncci; np = np->next)
- ;
+
+ np = capincci_find(cdev, CAPIMSG_CONTROL(skb->data));
if (!np) {
printk(KERN_ERR "BUG: capi_signal: ncci not found\n");
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
- spin_unlock_irqrestore(&workaround_lock, flags);
- return;
+ goto unlock_out;
}
+
#ifndef CONFIG_ISDN_CAPI_MIDDLEWARE
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
+
#else /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+
mp = np->minorp;
if (!mp) {
skb_queue_tail(&cdev->recvqueue, skb);
wake_up_interruptible(&cdev->recvwait);
- spin_unlock_irqrestore(&workaround_lock, flags);
- return;
+ goto unlock_out;
}
-
-
if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_IND) {
-
datahandle = CAPIMSG_U16(skb->data, CAPIMSG_BASELEN+4+4+2);
#ifdef _DEBUG_DATAFLOW
printk(KERN_DEBUG "capi_signal: DATA_B3_IND %u len=%d\n",
datahandle, skb->len-CAPIMSG_LEN(skb->data));
#endif
skb_queue_tail(&mp->inqueue, skb);
- mp->inbytes += skb->len;
+
handle_minor_recv(mp);
} else if (CAPIMSG_SUBCOMMAND(skb->data) == CAPI_CONF) {
@@ -664,10 +667,13 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
CAPIMSG_U16(skb->data, CAPIMSG_BASELEN+4+2));
#endif
kfree_skb(skb);
- (void)capiminor_del_ack(mp, datahandle);
- if (mp->tty)
- tty_wakeup(mp->tty);
- (void)handle_minor_send(mp);
+ capiminor_del_ack(mp, datahandle);
+ tty = tty_port_tty_get(&mp->port);
+ if (tty) {
+ tty_wakeup(tty);
+ tty_kref_put(tty);
+ }
+ handle_minor_send(mp);
} else {
/* ups, let capi application handle it :-) */
@@ -675,7 +681,9 @@ static void capi_recv_message(struct capi20_appl *ap, struct sk_buff *skb)
wake_up_interruptible(&cdev->recvwait);
}
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- spin_unlock_irqrestore(&workaround_lock, flags);
+
+unlock_out:
+ mutex_unlock(&cdev->lock);
}
/* -------- file_operations for capidev ----------------------------- */
@@ -686,24 +694,19 @@ capi_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
struct capidev *cdev = (struct capidev *)file->private_data;
struct sk_buff *skb;
size_t copied;
+ int err;
if (!cdev->ap.applid)
return -ENODEV;
- if ((skb = skb_dequeue(&cdev->recvqueue)) == NULL) {
-
+ skb = skb_dequeue(&cdev->recvqueue);
+ if (!skb) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
-
- for (;;) {
- interruptible_sleep_on(&cdev->recvwait);
- if ((skb = skb_dequeue(&cdev->recvqueue)) != NULL)
- break;
- if (signal_pending(current))
- break;
- }
- if (skb == NULL)
- return -ERESTARTNOHAND;
+ err = wait_event_interruptible(cdev->recvwait,
+ (skb = skb_dequeue(&cdev->recvqueue)));
+ if (err)
+ return err;
}
if (skb->len > count) {
skb_queue_head(&cdev->recvqueue, skb);
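
The read path above swaps the old interruptible_sleep_on() loop for wait_event_interruptible(): dequeue first, fail with -EAGAIN for non-blocking readers, and otherwise sleep until either an skb arrives or a signal ends the wait. A sketch of that pattern, with rxq/rxwait standing in for cdev->recvqueue and cdev->recvwait:

#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/wait.h>

static struct sk_buff *get_next_skb(struct sk_buff_head *rxq,
				    wait_queue_head_t *rxwait,
				    struct file *file, int *err)
{
	struct sk_buff *skb;

	*err = 0;
	skb = skb_dequeue(rxq);
	if (skb)
		return skb;

	if (file->f_flags & O_NONBLOCK) {
		*err = -EAGAIN;		/* never block an O_NONBLOCK reader */
		return NULL;
	}

	/* Sleeps until the condition becomes true or a signal arrives;
	 * returns -ERESTARTSYS in the signal case, leaving skb NULL. */
	*err = wait_event_interruptible(*rxwait, (skb = skb_dequeue(rxq)));
	return skb;
}
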
@@ -753,9 +756,9 @@ capi_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos
CAPIMSG_SETAPPID(skb->data, cdev->ap.applid);
if (CAPIMSG_CMD(skb->data) == CAPI_DISCONNECT_B3_RESP) {
- mutex_lock(&cdev->ncci_list_mtx);
+ mutex_lock(&cdev->lock);
capincci_free(cdev, CAPIMSG_NCCI(skb->data));
- mutex_unlock(&cdev->ncci_list_mtx);
+ mutex_unlock(&cdev->lock);
}
cdev->errcode = capi20_put_message(&cdev->ap, skb);
@@ -788,30 +791,35 @@ capi_ioctl(struct inode *inode, struct file *file,
unsigned int cmd, unsigned long arg)
{
struct capidev *cdev = file->private_data;
- struct capi20_appl *ap = &cdev->ap;
capi_ioctl_struct data;
int retval = -EINVAL;
void __user *argp = (void __user *)arg;
switch (cmd) {
case CAPI_REGISTER:
- {
- if (ap->applid)
- return -EEXIST;
+ mutex_lock(&cdev->lock);
- if (copy_from_user(&cdev->ap.rparam, argp,
- sizeof(struct capi_register_params)))
- return -EFAULT;
-
- cdev->ap.private = cdev;
- cdev->ap.recv_message = capi_recv_message;
- cdev->errcode = capi20_register(ap);
- if (cdev->errcode) {
- ap->applid = 0;
- return -EIO;
- }
+ if (cdev->ap.applid) {
+ retval = -EEXIST;
+ goto register_out;
+ }
+ if (copy_from_user(&cdev->ap.rparam, argp,
+ sizeof(struct capi_register_params))) {
+ retval = -EFAULT;
+ goto register_out;
+ }
+ cdev->ap.private = cdev;
+ cdev->ap.recv_message = capi_recv_message;
+ cdev->errcode = capi20_register(&cdev->ap);
+ retval = (int)cdev->ap.applid;
+ if (cdev->errcode) {
+ cdev->ap.applid = 0;
+ retval = -EIO;
}
- return (int)ap->applid;
+
+register_out:
+ mutex_unlock(&cdev->lock);
+ return retval;
case CAPI_GET_VERSION:
{
@@ -910,101 +918,104 @@ capi_ioctl(struct inode *inode, struct file *file,
return 0;
case CAPI_SET_FLAGS:
- case CAPI_CLR_FLAGS:
- {
- unsigned userflags;
- if (copy_from_user(&userflags, argp,
- sizeof(userflags)))
- return -EFAULT;
- if (cmd == CAPI_SET_FLAGS)
- cdev->userflags |= userflags;
- else
- cdev->userflags &= ~userflags;
- }
- return 0;
+ case CAPI_CLR_FLAGS: {
+ unsigned userflags;
+
+ if (copy_from_user(&userflags, argp, sizeof(userflags)))
+ return -EFAULT;
+ mutex_lock(&cdev->lock);
+ if (cmd == CAPI_SET_FLAGS)
+ cdev->userflags |= userflags;
+ else
+ cdev->userflags &= ~userflags;
+ mutex_unlock(&cdev->lock);
+ return 0;
+ }
case CAPI_GET_FLAGS:
if (copy_to_user(argp, &cdev->userflags,
sizeof(cdev->userflags)))
return -EFAULT;
return 0;
- case CAPI_NCCI_OPENCOUNT:
- {
- struct capincci *nccip;
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- struct capiminor *mp;
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- unsigned ncci;
- int count = 0;
- if (copy_from_user(&ncci, argp, sizeof(ncci)))
- return -EFAULT;
+ case CAPI_NCCI_OPENCOUNT: {
+ struct capincci *nccip;
+ unsigned ncci;
+ int count = 0;
- mutex_lock(&cdev->ncci_list_mtx);
- if ((nccip = capincci_find(cdev, (u32) ncci)) == NULL) {
- mutex_unlock(&cdev->ncci_list_mtx);
- return 0;
- }
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- if ((mp = nccip->minorp) != NULL) {
- count += atomic_read(&mp->ttyopencount);
- }
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
- mutex_unlock(&cdev->ncci_list_mtx);
- return count;
- }
- return 0;
+ if (copy_from_user(&ncci, argp, sizeof(ncci)))
+ return -EFAULT;
+
+ mutex_lock(&cdev->lock);
+ nccip = capincci_find(cdev, (u32)ncci);
+ if (nccip)
+ count = capincci_minor_opencount(nccip);
+ mutex_unlock(&cdev->lock);
+ return count;
+ }
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
- case CAPI_NCCI_GETUNIT:
- {
- struct capincci *nccip;
- struct capiminor *mp;
- unsigned ncci;
- int unit = 0;
- if (copy_from_user(&ncci, argp,
- sizeof(ncci)))
- return -EFAULT;
- mutex_lock(&cdev->ncci_list_mtx);
- nccip = capincci_find(cdev, (u32) ncci);
- if (!nccip || (mp = nccip->minorp) == NULL) {
- mutex_unlock(&cdev->ncci_list_mtx);
- return -ESRCH;
- }
- unit = mp->minor;
- mutex_unlock(&cdev->ncci_list_mtx);
- return unit;
+ case CAPI_NCCI_GETUNIT: {
+ struct capincci *nccip;
+ struct capiminor *mp;
+ unsigned ncci;
+ int unit = -ESRCH;
+
+ if (copy_from_user(&ncci, argp, sizeof(ncci)))
+ return -EFAULT;
+
+ mutex_lock(&cdev->lock);
+ nccip = capincci_find(cdev, (u32)ncci);
+ if (nccip) {
+ mp = nccip->minorp;
+ if (mp)
+ unit = mp->minor;
}
- return 0;
+ mutex_unlock(&cdev->lock);
+ return unit;
+ }
#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+
+ default:
+ return -EINVAL;
}
- return -EINVAL;
}
-static int
-capi_open(struct inode *inode, struct file *file)
+static int capi_open(struct inode *inode, struct file *file)
{
- int ret;
-
- lock_kernel();
- if (file->private_data)
- ret = -EEXIST;
- else if ((file->private_data = capidev_alloc()) == NULL)
- ret = -ENOMEM;
- else
- ret = nonseekable_open(inode, file);
- unlock_kernel();
- return ret;
+ struct capidev *cdev;
+
+ cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
+ if (!cdev)
+ return -ENOMEM;
+
+ mutex_init(&cdev->lock);
+ skb_queue_head_init(&cdev->recvqueue);
+ init_waitqueue_head(&cdev->recvwait);
+ INIT_LIST_HEAD(&cdev->nccis);
+ file->private_data = cdev;
+
+ mutex_lock(&capidev_list_lock);
+ list_add_tail(&cdev->list, &capidev_list);
+ mutex_unlock(&capidev_list_lock);
+
+ return nonseekable_open(inode, file);
}
-static int
-capi_release(struct inode *inode, struct file *file)
+static int capi_release(struct inode *inode, struct file *file)
{
- struct capidev *cdev = (struct capidev *)file->private_data;
+ struct capidev *cdev = file->private_data;
- capidev_free(cdev);
- file->private_data = NULL;
-
+ mutex_lock(&capidev_list_lock);
+ list_del(&cdev->list);
+ mutex_unlock(&capidev_list_lock);
+
+ if (cdev->ap.applid)
+ capi20_release(&cdev->ap);
+ skb_queue_purge(&cdev->recvqueue);
+ capincci_free(cdev, 0xffffffff);
+
+ kfree(cdev);
return 0;
}
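
capi_open()/capi_release() now keep everything belonging to one file descriptor in file->private_data and touch the global device list only under a mutex, which is why the lock_kernel() pair could be dropped. The shape of that open/release pair, with mydev/mydev_list as illustrative names:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct mydev {
	struct list_head list;
	/* ... per-open state ... */
};

static LIST_HEAD(mydev_list);
static DEFINE_MUTEX(mydev_list_lock);

static int mydev_open(struct inode *inode, struct file *file)
{
	struct mydev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return -ENOMEM;
	file->private_data = dev;		/* all state rides on the descriptor */

	mutex_lock(&mydev_list_lock);
	list_add_tail(&dev->list, &mydev_list);
	mutex_unlock(&mydev_list_lock);

	return nonseekable_open(inode, file);
}

static int mydev_release(struct inode *inode, struct file *file)
{
	struct mydev *dev = file->private_data;

	mutex_lock(&mydev_list_lock);
	list_del(&dev->list);			/* no longer reachable via the list */
	mutex_unlock(&mydev_list_lock);

	kfree(dev);				/* last user of the per-open state */
	return 0;
}
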
@@ -1023,182 +1034,159 @@ static const struct file_operations capi_fops =
#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
/* -------- tty_operations for capincci ----------------------------- */
-static int capinc_tty_open(struct tty_struct * tty, struct file * file)
+static int
+capinc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
{
- struct capiminor *mp;
- unsigned long flags;
+ int idx = tty->index;
+ struct capiminor *mp = capiminor_get(idx);
+ int ret = tty_init_termios(tty);
+
+ if (ret == 0) {
+ tty_driver_kref_get(driver);
+ tty->count++;
+ tty->driver_data = mp;
+ driver->ttys[idx] = tty;
+ } else
+ capiminor_put(mp);
+ return ret;
+}
- if ((mp = capiminor_find(iminor(file->f_path.dentry->d_inode))) == NULL)
- return -ENXIO;
- if (mp->nccip == NULL)
- return -ENXIO;
+static void capinc_tty_cleanup(struct tty_struct *tty)
+{
+ struct capiminor *mp = tty->driver_data;
+ tty->driver_data = NULL;
+ capiminor_put(mp);
+}
- tty->driver_data = (void *)mp;
+static int capinc_tty_open(struct tty_struct *tty, struct file *filp)
+{
+ struct capiminor *mp = tty->driver_data;
+ int err;
+
+ err = tty_port_open(&mp->port, tty, filp);
+ if (err)
+ return err;
- spin_lock_irqsave(&workaround_lock, flags);
- if (atomic_read(&mp->ttyopencount) == 0)
- mp->tty = tty;
- atomic_inc(&mp->ttyopencount);
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capinc_tty_open ocount=%d\n", atomic_read(&mp->ttyopencount));
-#endif
handle_minor_recv(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
return 0;
}
-static void capinc_tty_close(struct tty_struct * tty, struct file * file)
+static void capinc_tty_close(struct tty_struct *tty, struct file *filp)
{
- struct capiminor *mp;
-
- mp = (struct capiminor *)tty->driver_data;
- if (mp) {
- if (atomic_dec_and_test(&mp->ttyopencount)) {
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capinc_tty_close lastclose\n");
-#endif
- tty->driver_data = NULL;
- mp->tty = NULL;
- }
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capinc_tty_close ocount=%d\n", atomic_read(&mp->ttyopencount));
-#endif
- if (mp->nccip == NULL)
- capiminor_free(mp);
- }
+ struct capiminor *mp = tty->driver_data;
-#ifdef _DEBUG_REFCOUNT
- printk(KERN_DEBUG "capinc_tty_close\n");
-#endif
+ tty_port_close(&mp->port, tty, filp);
}
-static int capinc_tty_write(struct tty_struct * tty,
+static int capinc_tty_write(struct tty_struct *tty,
const unsigned char *buf, int count)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
struct sk_buff *skb;
- unsigned long flags;
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_write(count=%d)\n", count);
#endif
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_write: mp or mp->ncci NULL\n");
-#endif
- return 0;
- }
-
- spin_lock_irqsave(&workaround_lock, flags);
- skb = mp->ttyskb;
+ spin_lock_bh(&mp->outlock);
+ skb = mp->outskb;
if (skb) {
- mp->ttyskb = NULL;
- skb_queue_tail(&mp->outqueue, skb);
+ mp->outskb = NULL;
+ __skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
}
skb = alloc_skb(CAPI_DATA_B3_REQ_LEN+count, GFP_ATOMIC);
if (!skb) {
printk(KERN_ERR "capinc_tty_write: alloc_skb failed\n");
- spin_unlock_irqrestore(&workaround_lock, flags);
+ spin_unlock_bh(&mp->outlock);
return -ENOMEM;
}
skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
memcpy(skb_put(skb, count), buf, count);
- skb_queue_tail(&mp->outqueue, skb);
+ __skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
- (void)handle_minor_send(mp);
- (void)handle_minor_recv(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
+ spin_unlock_bh(&mp->outlock);
+
+ handle_minor_send(mp);
+
return count;
}
static int capinc_tty_put_char(struct tty_struct *tty, unsigned char ch)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
+ bool invoke_send = false;
struct sk_buff *skb;
- unsigned long flags;
int ret = 1;
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_put_char(%u)\n", ch);
#endif
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_put_char: mp or mp->ncci NULL\n");
-#endif
- return 0;
- }
-
- spin_lock_irqsave(&workaround_lock, flags);
- skb = mp->ttyskb;
+ spin_lock_bh(&mp->outlock);
+ skb = mp->outskb;
if (skb) {
if (skb_tailroom(skb) > 0) {
*(skb_put(skb, 1)) = ch;
- spin_unlock_irqrestore(&workaround_lock, flags);
- return 1;
+ goto unlock_out;
}
- mp->ttyskb = NULL;
- skb_queue_tail(&mp->outqueue, skb);
+ mp->outskb = NULL;
+ __skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
- (void)handle_minor_send(mp);
+ invoke_send = true;
}
+
skb = alloc_skb(CAPI_DATA_B3_REQ_LEN+CAPI_MAX_BLKSIZE, GFP_ATOMIC);
if (skb) {
skb_reserve(skb, CAPI_DATA_B3_REQ_LEN);
*(skb_put(skb, 1)) = ch;
- mp->ttyskb = skb;
+ mp->outskb = skb;
} else {
printk(KERN_ERR "capinc_put_char: char %u lost\n", ch);
ret = 0;
}
- spin_unlock_irqrestore(&workaround_lock, flags);
+
+unlock_out:
+ spin_unlock_bh(&mp->outlock);
+
+ if (invoke_send)
+ handle_minor_send(mp);
+
return ret;
}
static void capinc_tty_flush_chars(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
struct sk_buff *skb;
- unsigned long flags;
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_flush_chars\n");
#endif
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_flush_chars: mp or mp->ncci NULL\n");
-#endif
- return;
- }
-
- spin_lock_irqsave(&workaround_lock, flags);
- skb = mp->ttyskb;
+ spin_lock_bh(&mp->outlock);
+ skb = mp->outskb;
if (skb) {
- mp->ttyskb = NULL;
- skb_queue_tail(&mp->outqueue, skb);
+ mp->outskb = NULL;
+ __skb_queue_tail(&mp->outqueue, skb);
mp->outbytes += skb->len;
- (void)handle_minor_send(mp);
- }
- (void)handle_minor_recv(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
+ spin_unlock_bh(&mp->outlock);
+
+ handle_minor_send(mp);
+ } else
+ spin_unlock_bh(&mp->outlock);
+
+ handle_minor_recv(mp);
}
static int capinc_tty_write_room(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
int room;
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_write_room: mp or mp->ncci NULL\n");
-#endif
- return 0;
- }
+
room = CAPINC_MAX_SENDQUEUE-skb_queue_len(&mp->outqueue);
room *= CAPI_MAX_BLKSIZE;
#ifdef _DEBUG_TTYFUNCS
@@ -1209,13 +1197,8 @@ static int capinc_tty_write_room(struct tty_struct *tty)
static int capinc_tty_chars_in_buffer(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
- if (!mp || !mp->nccip) {
-#ifdef _DEBUG_TTYFUNCS
- printk(KERN_DEBUG "capinc_tty_chars_in_buffer: mp or mp->ncci NULL\n");
-#endif
- return 0;
- }
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_chars_in_buffer = %d nack=%d sq=%d rq=%d\n",
mp->outbytes, mp->nack,
@@ -1244,62 +1227,55 @@ static void capinc_tty_set_termios(struct tty_struct *tty, struct ktermios * old
#endif
}
-static void capinc_tty_throttle(struct tty_struct * tty)
+static void capinc_tty_throttle(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_throttle\n");
#endif
- if (mp)
- mp->ttyinstop = 1;
+ mp->ttyinstop = 1;
}
-static void capinc_tty_unthrottle(struct tty_struct * tty)
+static void capinc_tty_unthrottle(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
- unsigned long flags;
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_unthrottle\n");
#endif
- if (mp) {
- spin_lock_irqsave(&workaround_lock, flags);
- mp->ttyinstop = 0;
- handle_minor_recv(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
- }
+ mp->ttyinstop = 0;
+ handle_minor_recv(mp);
}
static void capinc_tty_stop(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_stop\n");
#endif
- if (mp) {
- mp->ttyoutstop = 1;
- }
+ mp->ttyoutstop = 1;
}
static void capinc_tty_start(struct tty_struct *tty)
{
- struct capiminor *mp = (struct capiminor *)tty->driver_data;
- unsigned long flags;
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_start\n");
#endif
- if (mp) {
- spin_lock_irqsave(&workaround_lock, flags);
- mp->ttyoutstop = 0;
- (void)handle_minor_send(mp);
- spin_unlock_irqrestore(&workaround_lock, flags);
- }
+ mp->ttyoutstop = 0;
+ handle_minor_send(mp);
}
static void capinc_tty_hangup(struct tty_struct *tty)
{
+ struct capiminor *mp = tty->driver_data;
+
#ifdef _DEBUG_TTYFUNCS
printk(KERN_DEBUG "capinc_tty_hangup\n");
#endif
+ tty_port_hangup(&mp->port);
}
static int capinc_tty_break_ctl(struct tty_struct *tty, int state)
@@ -1331,8 +1307,6 @@ static void capinc_tty_send_xchar(struct tty_struct *tty, char ch)
#endif
}
-static struct tty_driver *capinc_tty_driver;
-
static const struct tty_operations capinc_ops = {
.open = capinc_tty_open,
.close = capinc_tty_close,
@@ -1352,25 +1326,34 @@ static const struct tty_operations capinc_ops = {
.flush_buffer = capinc_tty_flush_buffer,
.set_ldisc = capinc_tty_set_ldisc,
.send_xchar = capinc_tty_send_xchar,
+ .install = capinc_tty_install,
+ .cleanup = capinc_tty_cleanup,
};
-static int capinc_tty_init(void)
+static int __init capinc_tty_init(void)
{
struct tty_driver *drv;
-
+ int err;
+
if (capi_ttyminors > CAPINC_MAX_PORTS)
capi_ttyminors = CAPINC_MAX_PORTS;
if (capi_ttyminors <= 0)
capi_ttyminors = CAPINC_NR_PORTS;
- drv = alloc_tty_driver(capi_ttyminors);
- if (!drv)
+	capiminors = kzalloc(sizeof(struct capiminor *) * capi_ttyminors,
+ GFP_KERNEL);
+ if (!capiminors)
return -ENOMEM;
+ drv = alloc_tty_driver(capi_ttyminors);
+ if (!drv) {
+ kfree(capiminors);
+ return -ENOMEM;
+ }
drv->owner = THIS_MODULE;
drv->driver_name = "capi_nc";
drv->name = "capi";
- drv->major = capi_ttymajor;
+ drv->major = 0;
drv->minor_start = 0;
drv->type = TTY_DRIVER_TYPE_SERIAL;
drv->subtype = SERIAL_TYPE_NORMAL;
@@ -1379,27 +1362,39 @@ static int capinc_tty_init(void)
drv->init_termios.c_oflag = OPOST | ONLCR;
drv->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
drv->init_termios.c_lflag = 0;
- drv->flags = TTY_DRIVER_REAL_RAW|TTY_DRIVER_RESET_TERMIOS;
+ drv->flags =
+ TTY_DRIVER_REAL_RAW | TTY_DRIVER_RESET_TERMIOS |
+ TTY_DRIVER_DYNAMIC_DEV;
tty_set_operations(drv, &capinc_ops);
- if (tty_register_driver(drv)) {
+
+ err = tty_register_driver(drv);
+ if (err) {
put_tty_driver(drv);
+ kfree(capiminors);
printk(KERN_ERR "Couldn't register capi_nc driver\n");
- return -1;
+ return err;
}
capinc_tty_driver = drv;
return 0;
}
-static void capinc_tty_exit(void)
+static void __exit capinc_tty_exit(void)
{
- struct tty_driver *drv = capinc_tty_driver;
- int retval;
- if ((retval = tty_unregister_driver(drv)))
- printk(KERN_ERR "capi: failed to unregister capi_nc driver (%d)\n", retval);
- put_tty_driver(drv);
+ tty_unregister_driver(capinc_tty_driver);
+ put_tty_driver(capinc_tty_driver);
+ kfree(capiminors);
}
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
+#else /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
+
+static inline int capinc_tty_init(void)
+{
+ return 0;
+}
+
+static inline void capinc_tty_exit(void) { }
+
+#endif /* !CONFIG_ISDN_CAPI_MIDDLEWARE */
/* -------- /proc functions ----------------------------------------- */
@@ -1407,134 +1402,91 @@ static void capinc_tty_exit(void)
* /proc/capi/capi20:
* minor applid nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int proc_capidev_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int capi20_proc_show(struct seq_file *m, void *v)
{
struct capidev *cdev;
struct list_head *l;
- int len = 0;
- read_lock(&capidev_list_lock);
+ mutex_lock(&capidev_list_lock);
list_for_each(l, &capidev_list) {
cdev = list_entry(l, struct capidev, list);
- len += sprintf(page+len, "0 %d %lu %lu %lu %lu\n",
+ seq_printf(m, "0 %d %lu %lu %lu %lu\n",
cdev->ap.applid,
cdev->ap.nrecvctlpkt,
cdev->ap.nrecvdatapkt,
cdev->ap.nsentctlpkt,
cdev->ap.nsentdatapkt);
- if (len <= off) {
- off -= len;
- len = 0;
- } else {
- if (len-off > count)
- goto endloop;
- }
}
+ mutex_unlock(&capidev_list_lock);
+ return 0;
+}
-endloop:
- read_unlock(&capidev_list_lock);
- if (len < count)
- *eof = 1;
- if (len > count) len = count;
- if (len < 0) len = 0;
- return len;
+static int capi20_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, capi20_proc_show, NULL);
}
+static const struct file_operations capi20_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = capi20_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* /proc/capi/capi20ncci:
* applid ncci
*/
-static int proc_capincci_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int capi20ncci_proc_show(struct seq_file *m, void *v)
{
- struct capidev *cdev;
- struct capincci *np;
- struct list_head *l;
- int len = 0;
+ struct capidev *cdev;
+ struct capincci *np;
- read_lock(&capidev_list_lock);
- list_for_each(l, &capidev_list) {
- cdev = list_entry(l, struct capidev, list);
- for (np=cdev->nccis; np; np = np->next) {
- len += sprintf(page+len, "%d 0x%x\n",
- cdev->ap.applid,
- np->ncci);
- if (len <= off) {
- off -= len;
- len = 0;
- } else {
- if (len-off > count)
- goto endloop;
- }
- }
+ mutex_lock(&capidev_list_lock);
+ list_for_each_entry(cdev, &capidev_list, list) {
+ mutex_lock(&cdev->lock);
+ list_for_each_entry(np, &cdev->nccis, list)
+ seq_printf(m, "%d 0x%x\n", cdev->ap.applid, np->ncci);
+ mutex_unlock(&cdev->lock);
}
-endloop:
- read_unlock(&capidev_list_lock);
- *start = page+off;
- if (len < count)
- *eof = 1;
- if (len>count) len = count;
- if (len<0) len = 0;
- return len;
+ mutex_unlock(&capidev_list_lock);
+ return 0;
+}
+
+static int capi20ncci_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, capi20ncci_proc_show, NULL);
}
-static struct procfsentries {
- char *name;
- mode_t mode;
- int (*read_proc)(char *page, char **start, off_t off,
- int count, int *eof, void *data);
- struct proc_dir_entry *procent;
-} procfsentries[] = {
- /* { "capi", S_IFDIR, 0 }, */
- { "capi/capi20", 0 , proc_capidev_read_proc },
- { "capi/capi20ncci", 0 , proc_capincci_read_proc },
+static const struct file_operations capi20ncci_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = capi20ncci_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void __init proc_init(void)
{
- int nelem = ARRAY_SIZE(procfsentries);
- int i;
-
- for (i=0; i < nelem; i++) {
- struct procfsentries *p = procfsentries + i;
- p->procent = create_proc_entry(p->name, p->mode, NULL);
- if (p->procent) p->procent->read_proc = p->read_proc;
- }
+ proc_create("capi/capi20", 0, NULL, &capi20_proc_fops);
+ proc_create("capi/capi20ncci", 0, NULL, &capi20ncci_proc_fops);
}
static void __exit proc_exit(void)
{
- int nelem = ARRAY_SIZE(procfsentries);
- int i;
-
- for (i=nelem-1; i >= 0; i--) {
- struct procfsentries *p = procfsentries + i;
- if (p->procent) {
- remove_proc_entry(p->name, NULL);
- p->procent = NULL;
- }
- }
+ remove_proc_entry("capi/capi20", NULL);
+ remove_proc_entry("capi/capi20ncci", NULL);
}
/* -------- init function and module interface ---------------------- */
-static char rev[32];
-
static int __init capi_init(void)
{
- char *p;
- char *compileinfo;
+ const char *compileinfo;
int major_ret;
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p-1) = 0;
- } else
- strcpy(rev, "1.0");
-
major_ret = register_chrdev(capi_major, "capi20", &capi_fops);
if (major_ret < 0) {
printk(KERN_ERR "capi20: unable to get major %d\n", capi_major);
@@ -1548,28 +1500,24 @@ static int __init capi_init(void)
device_create(capi_class, NULL, MKDEV(capi_major, 0), NULL, "capi");
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
if (capinc_tty_init() < 0) {
device_destroy(capi_class, MKDEV(capi_major, 0));
class_destroy(capi_class);
unregister_chrdev(capi_major, "capi20");
return -ENOMEM;
}
-#endif /* CONFIG_ISDN_CAPI_MIDDLEWARE */
proc_init();
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
compileinfo = " (middleware+capifs)";
-#else
+#elif defined(CONFIG_ISDN_CAPI_MIDDLEWARE)
compileinfo = " (no capifs)";
-#endif
#else
compileinfo = " (no middleware)";
#endif
- printk(KERN_NOTICE "capi20: Rev %s: started up with major %d%s\n",
- rev, capi_major, compileinfo);
+ printk(KERN_NOTICE "CAPI 2.0 started up with major %d%s\n",
+ capi_major, compileinfo);
return 0;
}
@@ -1582,10 +1530,7 @@ static void __exit capi_exit(void)
class_destroy(capi_class);
unregister_chrdev(capi_major, "capi20");
-#ifdef CONFIG_ISDN_CAPI_MIDDLEWARE
capinc_tty_exit();
-#endif
- printk(KERN_NOTICE "capi: Rev %s: unloaded\n", rev);
}
module_init(capi_init);
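
The /proc handlers above are converted from the old read_proc callbacks to the seq_file single_open() pattern. A minimal, self-contained sketch of that pattern follows; the names (example_*) and the /proc path are hypothetical and not part of this patch.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* hypothetical counter exposed through /proc/example_stats */
static unsigned long example_events;

static int example_proc_show(struct seq_file *m, void *v)
{
	/* single_open() buffers the whole output; just print everything */
	seq_printf(m, "%lu\n", example_events);
	return 0;
}

static int example_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_proc_show, NULL);
}

static const struct file_operations example_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= example_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	proc_create("example_stats", 0, NULL, &example_proc_fops);
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example_stats", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
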
diff --git a/drivers/isdn/capi/capidrv.c b/drivers/isdn/capi/capidrv.c
index 66b7d7a8647..bf55ed5f38e 100644
--- a/drivers/isdn/capi/capidrv.c
+++ b/drivers/isdn/capi/capidrv.c
@@ -24,6 +24,7 @@
#include <linux/isdn.h>
#include <linux/isdnif.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/capi.h>
#include <linux/kernelcapi.h>
#include <linux/ctype.h>
@@ -34,7 +35,6 @@
#include <linux/isdn/capicmd.h>
#include "capidrv.h"
-static char *revision = "$Revision: 1.1.2.2 $";
static int debugmode = 0;
MODULE_DESCRIPTION("CAPI4Linux: Interface to ISDN4Linux");
@@ -2210,96 +2210,73 @@ static int capidrv_delcontr(u16 contr)
}
-static void lower_callback(unsigned int cmd, u32 contr, void *data)
+static int
+lower_callback(struct notifier_block *nb, unsigned long val, void *v)
{
+ capi_profile profile;
+ u32 contr = (long)v;
- switch (cmd) {
- case KCI_CONTRUP:
+ switch (val) {
+ case CAPICTR_UP:
printk(KERN_INFO "capidrv: controller %hu up\n", contr);
- (void) capidrv_addcontr(contr, (capi_profile *) data);
+ if (capi20_get_profile(contr, &profile) == CAPI_NOERROR)
+ (void) capidrv_addcontr(contr, &profile);
break;
- case KCI_CONTRDOWN:
+ case CAPICTR_DOWN:
printk(KERN_INFO "capidrv: controller %hu down\n", contr);
(void) capidrv_delcontr(contr);
break;
}
+ return NOTIFY_OK;
}
/*
* /proc/capi/capidrv:
* nrecvctlpkt nrecvdatapkt nsendctlpkt nsenddatapkt
*/
-static int proc_capidrv_read_proc(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int capidrv_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
-
- len += sprintf(page+len, "%lu %lu %lu %lu\n",
+ seq_printf(m, "%lu %lu %lu %lu\n",
global.ap.nrecvctlpkt,
global.ap.nrecvdatapkt,
global.ap.nsentctlpkt,
global.ap.nsentdatapkt);
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ return 0;
}
-static struct procfsentries {
- char *name;
- mode_t mode;
- int (*read_proc)(char *page, char **start, off_t off,
- int count, int *eof, void *data);
- struct proc_dir_entry *procent;
-} procfsentries[] = {
- /* { "capi", S_IFDIR, 0 }, */
- { "capi/capidrv", 0 , proc_capidrv_read_proc },
+static int capidrv_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, capidrv_proc_show, NULL);
+}
+
+static const struct file_operations capidrv_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = capidrv_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static void __init proc_init(void)
{
- int nelem = ARRAY_SIZE(procfsentries);
- int i;
-
- for (i=0; i < nelem; i++) {
- struct procfsentries *p = procfsentries + i;
- p->procent = create_proc_entry(p->name, p->mode, NULL);
- if (p->procent) p->procent->read_proc = p->read_proc;
- }
+ proc_create("capi/capidrv", 0, NULL, &capidrv_proc_fops);
}
static void __exit proc_exit(void)
{
- int nelem = ARRAY_SIZE(procfsentries);
- int i;
-
- for (i=nelem-1; i >= 0; i--) {
- struct procfsentries *p = procfsentries + i;
- if (p->procent) {
- remove_proc_entry(p->name, NULL);
- p->procent = NULL;
- }
- }
+ remove_proc_entry("capi/capidrv", NULL);
}
+static struct notifier_block capictr_nb = {
+ .notifier_call = lower_callback,
+};
+
static int __init capidrv_init(void)
{
capi_profile profile;
- char rev[32];
- char *p;
u32 ncontr, contr;
u16 errcode;
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strncpy(rev, p + 2, sizeof(rev));
- rev[sizeof(rev)-1] = 0;
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p-1) = 0;
- } else
- strcpy(rev, "1.0");
-
global.ap.rparam.level3cnt = -2; /* number of bchannels twice */
global.ap.rparam.datablkcnt = 16;
global.ap.rparam.datablklen = 2048;
@@ -2310,7 +2287,7 @@ static int __init capidrv_init(void)
return -EIO;
}
- capi20_set_callback(&global.ap, lower_callback);
+ register_capictr_notifier(&capictr_nb);
errcode = capi20_get_profile(0, &profile);
if (errcode != CAPI_NOERROR) {
@@ -2327,29 +2304,15 @@ static int __init capidrv_init(void)
}
proc_init();
- printk(KERN_NOTICE "capidrv: Rev %s: loaded\n", rev);
return 0;
}
static void __exit capidrv_exit(void)
{
- char rev[32];
- char *p;
-
- if ((p = strchr(revision, ':')) != NULL) {
- strncpy(rev, p + 1, sizeof(rev));
- rev[sizeof(rev)-1] = 0;
- if ((p = strchr(rev, '$')) != NULL)
- *p = 0;
- } else {
- strcpy(rev, " ??? ");
- }
-
+ unregister_capictr_notifier(&capictr_nb);
capi20_release(&global.ap);
proc_exit();
-
- printk(KERN_NOTICE "capidrv: Rev%s: unloaded\n", rev);
}
module_init(capidrv_init);
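
capidrv is now a client of the controller state notifier chain instead of the per-application callback. A standalone sketch of such a client is shown below; the module and symbol names are hypothetical, and the header carrying CAPICTR_UP/CAPICTR_DOWN and register_capictr_notifier() is assumed to be <linux/kernelcapi.h>, the header capidrv already includes.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/kernelcapi.h>	/* assumed to declare CAPICTR_* and the notifier API */

static int example_capictr_notify(struct notifier_block *nb,
				  unsigned long val, void *v)
{
	u32 contr = (long)v;

	switch (val) {
	case CAPICTR_UP:
		pr_info("example: controller %u is up\n", contr);
		break;
	case CAPICTR_DOWN:
		pr_info("example: controller %u is down\n", contr);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_capictr_nb = {
	.notifier_call = example_capictr_notify,
};

static int __init example_init(void)
{
	return register_capictr_notifier(&example_capictr_nb);
}

static void __exit example_exit(void)
{
	unregister_capictr_notifier(&example_capictr_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
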
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c
index 9f8f67b6c07..8596bd1a4d2 100644
--- a/drivers/isdn/capi/capifs.c
+++ b/drivers/isdn/capi/capifs.c
@@ -25,14 +25,10 @@ MODULE_LICENSE("GPL");
/* ------------------------------------------------------------------ */
-static char *revision = "$Revision: 1.1.2.3 $";
-
-/* ------------------------------------------------------------------ */
-
#define CAPIFS_SUPER_MAGIC (('C'<<8)|'N')
static struct vfsmount *capifs_mnt;
-static struct dentry *capifs_root;
+static int capifs_mnt_count;
static struct {
int setuid;
@@ -118,7 +114,7 @@ capifs_fill_super(struct super_block *s, void *data, int silent)
inode->i_fop = &simple_dir_operations;
inode->i_nlink = 2;
- capifs_root = s->s_root = d_alloc_root(inode);
+ s->s_root = d_alloc_root(inode);
if (s->s_root)
return 0;
@@ -141,82 +137,98 @@ static struct file_system_type capifs_fs_type = {
.kill_sb = kill_anon_super,
};
-static struct dentry *get_node(int num)
+static struct dentry *new_ncci(unsigned int number, dev_t device)
{
- char s[10];
- struct dentry *root = capifs_root;
+ struct super_block *s = capifs_mnt->mnt_sb;
+ struct dentry *root = s->s_root;
+ struct dentry *dentry;
+ struct inode *inode;
+ char name[10];
+ int namelen;
+
mutex_lock(&root->d_inode->i_mutex);
- return lookup_one_len(s, root, sprintf(s, "%d", num));
-}
-void capifs_new_ncci(unsigned int number, dev_t device)
-{
- struct dentry *dentry;
- struct inode *inode = new_inode(capifs_mnt->mnt_sb);
- if (!inode)
- return;
- inode->i_ino = number+2;
+ namelen = sprintf(name, "%d", number);
+ dentry = lookup_one_len(name, root, namelen);
+ if (IS_ERR(dentry)) {
+ dentry = NULL;
+ goto unlock_out;
+ }
- dentry = get_node(number);
+ if (dentry->d_inode) {
+ dput(dentry);
+ dentry = NULL;
+ goto unlock_out;
+ }
+
+ inode = new_inode(s);
+ if (!inode) {
+ dput(dentry);
+ dentry = NULL;
+ goto unlock_out;
+ }
	/* config contents are protected by root's i_mutex */
inode->i_uid = config.setuid ? config.uid : current_fsuid();
inode->i_gid = config.setgid ? config.gid : current_fsgid();
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+ inode->i_ino = number + 2;
init_special_inode(inode, S_IFCHR|config.mode, device);
- //inode->i_op = &capifs_file_inode_operations;
- if (!IS_ERR(dentry) && !dentry->d_inode)
- d_instantiate(dentry, inode);
- mutex_unlock(&capifs_root->d_inode->i_mutex);
+ d_instantiate(dentry, inode);
+ dget(dentry);
+
+unlock_out:
+ mutex_unlock(&root->d_inode->i_mutex);
+
+ return dentry;
}
-void capifs_free_ncci(unsigned int number)
+struct dentry *capifs_new_ncci(unsigned int number, dev_t device)
{
- struct dentry *dentry = get_node(number);
-
- if (!IS_ERR(dentry)) {
- struct inode *inode = dentry->d_inode;
- if (inode) {
- inode->i_nlink--;
- d_delete(dentry);
- dput(dentry);
- }
+ struct dentry *dentry;
+
+ if (simple_pin_fs(&capifs_fs_type, &capifs_mnt, &capifs_mnt_count) < 0)
+ return NULL;
+
+ dentry = new_ncci(number, device);
+ if (!dentry)
+ simple_release_fs(&capifs_mnt, &capifs_mnt_count);
+
+ return dentry;
+}
+
+void capifs_free_ncci(struct dentry *dentry)
+{
+ struct dentry *root = capifs_mnt->mnt_sb->s_root;
+ struct inode *inode;
+
+ if (!dentry)
+ return;
+
+ mutex_lock(&root->d_inode->i_mutex);
+
+ inode = dentry->d_inode;
+ if (inode) {
+ drop_nlink(inode);
+ d_delete(dentry);
dput(dentry);
}
- mutex_unlock(&capifs_root->d_inode->i_mutex);
+ dput(dentry);
+
+ mutex_unlock(&root->d_inode->i_mutex);
+
+ simple_release_fs(&capifs_mnt, &capifs_mnt_count);
}
static int __init capifs_init(void)
{
- char rev[32];
- char *p;
- int err;
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p-1) = 0;
- } else
- strcpy(rev, "1.0");
-
- err = register_filesystem(&capifs_fs_type);
- if (!err) {
- capifs_mnt = kern_mount(&capifs_fs_type);
- if (IS_ERR(capifs_mnt)) {
- err = PTR_ERR(capifs_mnt);
- unregister_filesystem(&capifs_fs_type);
- }
- }
- if (!err)
- printk(KERN_NOTICE "capifs: Rev %s\n", rev);
- return err;
+ return register_filesystem(&capifs_fs_type);
}
static void __exit capifs_exit(void)
{
unregister_filesystem(&capifs_fs_type);
- mntput(capifs_mnt);
}
EXPORT_SYMBOL(capifs_new_ncci);
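
new_ncci() and capifs_free_ncci() above lean on simple_pin_fs()/simple_release_fs() so the capifs superblock only stays mounted while device nodes exist. A minimal sketch of that libfs pairing, with a hypothetical filesystem type registered elsewhere, could look like this:

#include <linux/fs.h>
#include <linux/mount.h>

static struct vfsmount *example_mnt;
static int example_mnt_count;
extern struct file_system_type example_fs_type;	/* registered elsewhere */

static int example_use_fs(void)
{
	int err;

	/* mounts the filesystem on first use, otherwise just takes a ref */
	err = simple_pin_fs(&example_fs_type, &example_mnt,
			    &example_mnt_count);
	if (err < 0)
		return err;

	/* ... create or look up objects on example_mnt->mnt_sb ... */

	return 0;
}

static void example_unuse_fs(void)
{
	/* drops the ref; the mount goes away when the count hits zero */
	simple_release_fs(&example_mnt, &example_mnt_count);
}
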
diff --git a/drivers/isdn/capi/capifs.h b/drivers/isdn/capi/capifs.h
index d0bd4c3c430..e193d118953 100644
--- a/drivers/isdn/capi/capifs.h
+++ b/drivers/isdn/capi/capifs.h
@@ -7,5 +7,22 @@
*
*/
-void capifs_new_ncci(unsigned int num, dev_t device);
-void capifs_free_ncci(unsigned int num);
+#include <linux/dcache.h>
+
+#if defined(CONFIG_ISDN_CAPI_CAPIFS) || defined(CONFIG_ISDN_CAPI_CAPIFS_MODULE)
+
+struct dentry *capifs_new_ncci(unsigned int num, dev_t device);
+void capifs_free_ncci(struct dentry *dentry);
+
+#else
+
+static inline struct dentry *capifs_new_ncci(unsigned int num, dev_t device)
+{
+ return NULL;
+}
+
+static inline void capifs_free_ncci(struct dentry *dentry)
+{
+}
+
+#endif
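
With the reworked interface a caller keeps the dentry returned by capifs_new_ncci() and hands it back when the NCCI disappears. A hypothetical caller (not shown in this patch) might look like:

#include <linux/dcache.h>
#include <linux/types.h>
#include "capifs.h"

struct example_ncci {
	unsigned int	minor;
	struct dentry	*capifs_dentry;
};

static void example_ncci_up(struct example_ncci *np, dev_t device)
{
	/* may return NULL when capifs is unavailable; NULL is safe to keep */
	np->capifs_dentry = capifs_new_ncci(np->minor, device);
}

static void example_ncci_down(struct example_ncci *np)
{
	/* capifs_free_ncci() accepts NULL, so no extra check is needed */
	capifs_free_ncci(np->capifs_dentry);
	np->capifs_dentry = NULL;
}
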
diff --git a/drivers/isdn/capi/kcapi.c b/drivers/isdn/capi/kcapi.c
index dc506ab99ca..ce9b05b9e93 100644
--- a/drivers/isdn/capi/kcapi.c
+++ b/drivers/isdn/capi/kcapi.c
@@ -34,10 +34,7 @@
#include <linux/b1lli.h>
#endif
#include <linux/mutex.h>
-
-static char *revision = "$Revision: 1.1.2.8 $";
-
-/* ------------------------------------------------------------- */
+#include <linux/rcupdate.h>
static int showcapimsgs = 0;
@@ -48,12 +45,10 @@ module_param(showcapimsgs, uint, 0);
/* ------------------------------------------------------------- */
-struct capi_notifier {
+struct capictr_event {
struct work_struct work;
- unsigned int cmd;
+ unsigned int type;
u32 controller;
- u16 applid;
- u32 ncci;
};
/* ------------------------------------------------------------- */
@@ -65,30 +60,31 @@ static char capi_manufakturer[64] = "AVM Berlin";
#define NCCI2CTRL(ncci) (((ncci) >> 24) & 0x7f)
LIST_HEAD(capi_drivers);
-DEFINE_RWLOCK(capi_drivers_list_lock);
+DEFINE_MUTEX(capi_drivers_lock);
-static DEFINE_RWLOCK(application_lock);
-static DEFINE_MUTEX(controller_mutex);
+struct capi_ctr *capi_controller[CAPI_MAXCONTR];
+DEFINE_MUTEX(capi_controller_lock);
struct capi20_appl *capi_applications[CAPI_MAXAPPL];
-struct capi_ctr *capi_cards[CAPI_MAXCONTR];
-static int ncards;
+static int ncontrollers;
+
+static BLOCKING_NOTIFIER_HEAD(ctr_notifier_list);
/* -------- controller ref counting -------------------------------------- */
static inline struct capi_ctr *
-capi_ctr_get(struct capi_ctr *card)
+capi_ctr_get(struct capi_ctr *ctr)
{
- if (!try_module_get(card->owner))
+ if (!try_module_get(ctr->owner))
return NULL;
- return card;
+ return ctr;
}
static inline void
-capi_ctr_put(struct capi_ctr *card)
+capi_ctr_put(struct capi_ctr *ctr)
{
- module_put(card->owner);
+ module_put(ctr->owner);
}
/* ------------------------------------------------------------- */
@@ -98,7 +94,7 @@ static inline struct capi_ctr *get_capi_ctr_by_nr(u16 contr)
if (contr - 1 >= CAPI_MAXCONTR)
return NULL;
- return capi_cards[contr - 1];
+ return capi_controller[contr - 1];
}
static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
@@ -106,7 +102,7 @@ static inline struct capi20_appl *get_capi_appl_by_nr(u16 applid)
if (applid - 1 >= CAPI_MAXAPPL)
return NULL;
- return capi_applications[applid - 1];
+ return rcu_dereference(capi_applications[applid - 1]);
}
/* -------- util functions ------------------------------------ */
@@ -148,106 +144,159 @@ static inline int capi_subcmd_valid(u8 subcmd)
/* ------------------------------------------------------------ */
-static void register_appl(struct capi_ctr *card, u16 applid, capi_register_params *rparam)
+static void
+register_appl(struct capi_ctr *ctr, u16 applid, capi_register_params *rparam)
{
- card = capi_ctr_get(card);
+ ctr = capi_ctr_get(ctr);
- if (card)
- card->register_appl(card, applid, rparam);
+ if (ctr)
+ ctr->register_appl(ctr, applid, rparam);
else
- printk(KERN_WARNING "%s: cannot get card resources\n", __func__);
+ printk(KERN_WARNING "%s: cannot get controller resources\n",
+ __func__);
}
-static void release_appl(struct capi_ctr *card, u16 applid)
+static void release_appl(struct capi_ctr *ctr, u16 applid)
{
DBG("applid %#x", applid);
- card->release_appl(card, applid);
- capi_ctr_put(card);
+ ctr->release_appl(ctr, applid);
+ capi_ctr_put(ctr);
}
-/* -------- KCI_CONTRUP --------------------------------------- */
-
static void notify_up(u32 contr)
{
- struct capi_ctr *card = get_capi_ctr_by_nr(contr);
struct capi20_appl *ap;
+ struct capi_ctr *ctr;
u16 applid;
- if (showcapimsgs & 1) {
+ mutex_lock(&capi_controller_lock);
+
+ if (showcapimsgs & 1)
printk(KERN_DEBUG "kcapi: notify up contr %d\n", contr);
- }
- if (!card) {
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr) {
+ if (ctr->state == CAPI_CTR_RUNNING)
+ goto unlock_out;
+
+ ctr->state = CAPI_CTR_RUNNING;
+
+ for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
+ ap = get_capi_appl_by_nr(applid);
+ if (!ap)
+ continue;
+ register_appl(ctr, applid, &ap->rparam);
+ }
+
+ wake_up_interruptible_all(&ctr->state_wait_queue);
+ } else
printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
- return;
- }
- for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
- ap = get_capi_appl_by_nr(applid);
- if (!ap || ap->release_in_progress) continue;
- register_appl(card, applid, &ap->rparam);
- if (ap->callback && !ap->release_in_progress)
- ap->callback(KCI_CONTRUP, contr, &card->profile);
- }
-}
-/* -------- KCI_CONTRDOWN ------------------------------------- */
+unlock_out:
+ mutex_unlock(&capi_controller_lock);
+}
-static void notify_down(u32 contr)
+static void ctr_down(struct capi_ctr *ctr, int new_state)
{
struct capi20_appl *ap;
u16 applid;
- if (showcapimsgs & 1) {
- printk(KERN_DEBUG "kcapi: notify down contr %d\n", contr);
- }
+ if (ctr->state == CAPI_CTR_DETECTED || ctr->state == CAPI_CTR_DETACHED)
+ return;
+
+ ctr->state = new_state;
+
+ memset(ctr->manu, 0, sizeof(ctr->manu));
+ memset(&ctr->version, 0, sizeof(ctr->version));
+ memset(&ctr->profile, 0, sizeof(ctr->profile));
+ memset(ctr->serial, 0, sizeof(ctr->serial));
for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
ap = get_capi_appl_by_nr(applid);
- if (ap && ap->callback && !ap->release_in_progress)
- ap->callback(KCI_CONTRDOWN, contr, NULL);
+ if (ap)
+ capi_ctr_put(ctr);
}
+
+ wake_up_interruptible_all(&ctr->state_wait_queue);
}
-static void notify_handler(struct work_struct *work)
+static void notify_down(u32 contr)
{
- struct capi_notifier *np =
- container_of(work, struct capi_notifier, work);
+ struct capi_ctr *ctr;
- switch (np->cmd) {
- case KCI_CONTRUP:
- notify_up(np->controller);
+ mutex_lock(&capi_controller_lock);
+
+ if (showcapimsgs & 1)
+ printk(KERN_DEBUG "kcapi: notify down contr %d\n", contr);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr)
+ ctr_down(ctr, CAPI_CTR_DETECTED);
+ else
+ printk(KERN_WARNING "%s: invalid contr %d\n", __func__, contr);
+
+ mutex_unlock(&capi_controller_lock);
+}
+
+static int
+notify_handler(struct notifier_block *nb, unsigned long val, void *v)
+{
+ u32 contr = (long)v;
+
+ switch (val) {
+ case CAPICTR_UP:
+ notify_up(contr);
break;
- case KCI_CONTRDOWN:
- notify_down(np->controller);
+ case CAPICTR_DOWN:
+ notify_down(contr);
break;
}
+ return NOTIFY_OK;
+}
+
+static void do_notify_work(struct work_struct *work)
+{
+ struct capictr_event *event =
+ container_of(work, struct capictr_event, work);
- kfree(np);
+ blocking_notifier_call_chain(&ctr_notifier_list, event->type,
+ (void *)(long)event->controller);
+ kfree(event);
}
/*
 * The notifier will result in adding/deleting of devices. Devices can
 * only be removed in process context, not in bh.
*/
-static int notify_push(unsigned int cmd, u32 controller, u16 applid, u32 ncci)
+static int notify_push(unsigned int event_type, u32 controller)
{
- struct capi_notifier *np = kmalloc(sizeof(*np), GFP_ATOMIC);
+ struct capictr_event *event = kmalloc(sizeof(*event), GFP_ATOMIC);
- if (!np)
+ if (!event)
return -ENOMEM;
- INIT_WORK(&np->work, notify_handler);
- np->cmd = cmd;
- np->controller = controller;
- np->applid = applid;
- np->ncci = ncci;
+ INIT_WORK(&event->work, do_notify_work);
+ event->type = event_type;
+ event->controller = controller;
- schedule_work(&np->work);
+ schedule_work(&event->work);
return 0;
}
-
+int register_capictr_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_register(&ctr_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(register_capictr_notifier);
+
+int unregister_capictr_notifier(struct notifier_block *nb)
+{
+ return blocking_notifier_chain_unregister(&ctr_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_capictr_notifier);
+
/* -------- Receiver ------------------------------------------ */
static void recv_handler(struct work_struct *work)
@@ -273,68 +322,70 @@ static void recv_handler(struct work_struct *work)
/**
* capi_ctr_handle_message() - handle incoming CAPI message
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
* @appl: application ID.
* @skb: message.
*
* Called by hardware driver to pass a CAPI message to the application.
*/
-void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb)
+void capi_ctr_handle_message(struct capi_ctr *ctr, u16 appl,
+ struct sk_buff *skb)
{
struct capi20_appl *ap;
int showctl = 0;
u8 cmd, subcmd;
- unsigned long flags;
_cdebbuf *cdb;
- if (card->cardstate != CARD_RUNNING) {
+ if (ctr->state != CAPI_CTR_RUNNING) {
cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_INFO "kcapi: controller [%03d] not active, got: %s",
- card->cnr, cdb->buf);
+ ctr->cnr, cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_INFO "kcapi: controller [%03d] not active, cannot trace\n",
- card->cnr);
+ ctr->cnr);
goto error;
}
cmd = CAPIMSG_COMMAND(skb->data);
subcmd = CAPIMSG_SUBCOMMAND(skb->data);
if (cmd == CAPI_DATA_B3 && subcmd == CAPI_IND) {
- card->nrecvdatapkt++;
- if (card->traceflag > 2) showctl |= 2;
+ ctr->nrecvdatapkt++;
+ if (ctr->traceflag > 2)
+ showctl |= 2;
} else {
- card->nrecvctlpkt++;
- if (card->traceflag) showctl |= 2;
+ ctr->nrecvctlpkt++;
+ if (ctr->traceflag)
+ showctl |= 2;
}
- showctl |= (card->traceflag & 1);
+ showctl |= (ctr->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u\n",
- card->cnr, CAPIMSG_APPID(skb->data),
+ ctr->cnr, CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
} else {
cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_DEBUG "kcapi: got [%03d] %s\n",
- card->cnr, cdb->buf);
+ ctr->cnr, cdb->buf);
cdebbuf_free(cdb);
} else
printk(KERN_DEBUG "kcapi: got [%03d] id#%d %s len=%u, cannot trace\n",
- card->cnr, CAPIMSG_APPID(skb->data),
+ ctr->cnr, CAPIMSG_APPID(skb->data),
capi_cmd2str(cmd, subcmd),
CAPIMSG_LEN(skb->data));
}
}
- read_lock_irqsave(&application_lock, flags);
+ rcu_read_lock();
ap = get_capi_appl_by_nr(CAPIMSG_APPID(skb->data));
- if ((!ap) || (ap->release_in_progress)) {
- read_unlock_irqrestore(&application_lock, flags);
+ if (!ap) {
+ rcu_read_unlock();
cdb = capi_message2str(skb->data);
if (cdb) {
printk(KERN_ERR "kcapi: handle_message: applid %d state released (%s)\n",
@@ -348,7 +399,7 @@ void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *s
}
skb_queue_tail(&ap->recv_queue, skb);
schedule_work(&ap->recv_work);
- read_unlock_irqrestore(&application_lock, flags);
+ rcu_read_unlock();
return;
@@ -360,74 +411,54 @@ EXPORT_SYMBOL(capi_ctr_handle_message);
/**
* capi_ctr_ready() - signal CAPI controller ready
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to signal that the controller is up and running.
*/
-void capi_ctr_ready(struct capi_ctr * card)
+void capi_ctr_ready(struct capi_ctr *ctr)
{
- card->cardstate = CARD_RUNNING;
-
- printk(KERN_NOTICE "kcapi: card [%03d] \"%s\" ready.\n",
- card->cnr, card->name);
+ printk(KERN_NOTICE "kcapi: controller [%03d] \"%s\" ready.\n",
+ ctr->cnr, ctr->name);
- notify_push(KCI_CONTRUP, card->cnr, 0, 0);
+ notify_push(CAPICTR_UP, ctr->cnr);
}
EXPORT_SYMBOL(capi_ctr_ready);
/**
* capi_ctr_down() - signal CAPI controller not ready
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to signal that the controller is down and
* unavailable for use.
*/
-void capi_ctr_down(struct capi_ctr * card)
+void capi_ctr_down(struct capi_ctr *ctr)
{
- u16 appl;
-
- DBG("");
-
- if (card->cardstate == CARD_DETECTED)
- return;
-
- card->cardstate = CARD_DETECTED;
-
- memset(card->manu, 0, sizeof(card->manu));
- memset(&card->version, 0, sizeof(card->version));
- memset(&card->profile, 0, sizeof(card->profile));
- memset(card->serial, 0, sizeof(card->serial));
-
- for (appl = 1; appl <= CAPI_MAXAPPL; appl++) {
- struct capi20_appl *ap = get_capi_appl_by_nr(appl);
- if (!ap || ap->release_in_progress)
- continue;
-
- capi_ctr_put(card);
- }
-
- printk(KERN_NOTICE "kcapi: card [%03d] down.\n", card->cnr);
+ printk(KERN_NOTICE "kcapi: controller [%03d] down.\n", ctr->cnr);
- notify_push(KCI_CONTRDOWN, card->cnr, 0, 0);
+ notify_push(CAPICTR_DOWN, ctr->cnr);
}
EXPORT_SYMBOL(capi_ctr_down);
/**
* capi_ctr_suspend_output() - suspend controller
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to stop data flow.
+ *
+ * Note: The caller is responsible for synchronizing concurrent state changes
+ * as well as invocations of capi_ctr_handle_message.
*/
-void capi_ctr_suspend_output(struct capi_ctr *card)
+void capi_ctr_suspend_output(struct capi_ctr *ctr)
{
- if (!card->blocked) {
- printk(KERN_DEBUG "kcapi: card [%03d] suspend\n", card->cnr);
- card->blocked = 1;
+ if (!ctr->blocked) {
+ printk(KERN_DEBUG "kcapi: controller [%03d] suspend\n",
+ ctr->cnr);
+ ctr->blocked = 1;
}
}
@@ -435,16 +466,20 @@ EXPORT_SYMBOL(capi_ctr_suspend_output);
/**
* capi_ctr_resume_output() - resume controller
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to resume data flow.
+ *
+ * Note: The caller is responsible for synchronizing concurrent state changes
+ * as well as invocations of capi_ctr_handle_message.
*/
-void capi_ctr_resume_output(struct capi_ctr *card)
+void capi_ctr_resume_output(struct capi_ctr *ctr)
{
- if (card->blocked) {
- printk(KERN_DEBUG "kcapi: card [%03d] resume\n", card->cnr);
- card->blocked = 0;
+ if (ctr->blocked) {
+ printk(KERN_DEBUG "kcapi: controller [%03d] resumed\n",
+ ctr->cnr);
+ ctr->blocked = 0;
}
}
@@ -454,53 +489,48 @@ EXPORT_SYMBOL(capi_ctr_resume_output);
/**
* attach_capi_ctr() - register CAPI controller
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to register a controller with the CAPI subsystem.
* Return value: 0 on success, error code < 0 on error
*/
-int
-attach_capi_ctr(struct capi_ctr *card)
+int attach_capi_ctr(struct capi_ctr *ctr)
{
int i;
- mutex_lock(&controller_mutex);
+ mutex_lock(&capi_controller_lock);
for (i = 0; i < CAPI_MAXCONTR; i++) {
- if (capi_cards[i] == NULL)
+ if (!capi_controller[i])
break;
}
if (i == CAPI_MAXCONTR) {
- mutex_unlock(&controller_mutex);
+ mutex_unlock(&capi_controller_lock);
printk(KERN_ERR "kcapi: out of controller slots\n");
return -EBUSY;
}
- capi_cards[i] = card;
-
- mutex_unlock(&controller_mutex);
-
- card->nrecvctlpkt = 0;
- card->nrecvdatapkt = 0;
- card->nsentctlpkt = 0;
- card->nsentdatapkt = 0;
- card->cnr = i + 1;
- card->cardstate = CARD_DETECTED;
- card->blocked = 0;
- card->traceflag = showcapimsgs;
-
- sprintf(card->procfn, "capi/controllers/%d", card->cnr);
- card->procent = create_proc_entry(card->procfn, 0, NULL);
- if (card->procent) {
- card->procent->read_proc =
- (int (*)(char *,char **,off_t,int,int *,void *))
- card->ctr_read_proc;
- card->procent->data = card;
- }
+ capi_controller[i] = ctr;
+
+ ctr->nrecvctlpkt = 0;
+ ctr->nrecvdatapkt = 0;
+ ctr->nsentctlpkt = 0;
+ ctr->nsentdatapkt = 0;
+ ctr->cnr = i + 1;
+ ctr->state = CAPI_CTR_DETECTED;
+ ctr->blocked = 0;
+ ctr->traceflag = showcapimsgs;
+ init_waitqueue_head(&ctr->state_wait_queue);
- ncards++;
- printk(KERN_NOTICE "kcapi: Controller [%03d]: %s attached\n",
- card->cnr, card->name);
+ sprintf(ctr->procfn, "capi/controllers/%d", ctr->cnr);
+ ctr->procent = proc_create_data(ctr->procfn, 0, NULL, ctr->proc_fops, ctr);
+
+ ncontrollers++;
+
+ mutex_unlock(&capi_controller_lock);
+
+ printk(KERN_NOTICE "kcapi: controller [%03d]: %s attached\n",
+ ctr->cnr, ctr->name);
return 0;
}
@@ -508,29 +538,38 @@ EXPORT_SYMBOL(attach_capi_ctr);
/**
* detach_capi_ctr() - unregister CAPI controller
- * @card: controller descriptor structure.
+ * @ctr: controller descriptor structure.
*
* Called by hardware driver to remove the registration of a controller
* with the CAPI subsystem.
* Return value: 0 on success, error code < 0 on error
*/
-int detach_capi_ctr(struct capi_ctr *card)
+int detach_capi_ctr(struct capi_ctr *ctr)
{
- if (card->cardstate != CARD_DETECTED)
- capi_ctr_down(card);
+ int err = 0;
- ncards--;
+ mutex_lock(&capi_controller_lock);
- if (card->procent) {
- remove_proc_entry(card->procfn, NULL);
- card->procent = NULL;
+ ctr_down(ctr, CAPI_CTR_DETACHED);
+
+ if (capi_controller[ctr->cnr - 1] != ctr) {
+ err = -EINVAL;
+ goto unlock_out;
}
- capi_cards[card->cnr - 1] = NULL;
- printk(KERN_NOTICE "kcapi: Controller [%03d]: %s unregistered\n",
- card->cnr, card->name);
+ capi_controller[ctr->cnr - 1] = NULL;
+ ncontrollers--;
- return 0;
+ if (ctr->procent)
+ remove_proc_entry(ctr->procfn, NULL);
+
+ printk(KERN_NOTICE "kcapi: controller [%03d]: %s unregistered\n",
+ ctr->cnr, ctr->name);
+
+unlock_out:
+ mutex_unlock(&capi_controller_lock);
+
+ return err;
}
EXPORT_SYMBOL(detach_capi_ctr);
@@ -544,11 +583,9 @@ EXPORT_SYMBOL(detach_capi_ctr);
void register_capi_driver(struct capi_driver *driver)
{
- unsigned long flags;
-
- write_lock_irqsave(&capi_drivers_list_lock, flags);
+ mutex_lock(&capi_drivers_lock);
list_add_tail(&driver->list, &capi_drivers);
- write_unlock_irqrestore(&capi_drivers_list_lock, flags);
+ mutex_unlock(&capi_drivers_lock);
}
EXPORT_SYMBOL(register_capi_driver);
@@ -562,11 +599,9 @@ EXPORT_SYMBOL(register_capi_driver);
void unregister_capi_driver(struct capi_driver *driver)
{
- unsigned long flags;
-
- write_lock_irqsave(&capi_drivers_list_lock, flags);
+ mutex_lock(&capi_drivers_lock);
list_del(&driver->list);
- write_unlock_irqrestore(&capi_drivers_list_lock, flags);
+ mutex_unlock(&capi_drivers_lock);
}
EXPORT_SYMBOL(unregister_capi_driver);
@@ -584,12 +619,21 @@ EXPORT_SYMBOL(unregister_capi_driver);
u16 capi20_isinstalled(void)
{
+ u16 ret = CAPI_REGNOTINSTALLED;
int i;
- for (i = 0; i < CAPI_MAXCONTR; i++) {
- if (capi_cards[i] && capi_cards[i]->cardstate == CARD_RUNNING)
- return CAPI_NOERROR;
- }
- return CAPI_REGNOTINSTALLED;
+
+ mutex_lock(&capi_controller_lock);
+
+ for (i = 0; i < CAPI_MAXCONTR; i++)
+ if (capi_controller[i] &&
+ capi_controller[i]->state == CAPI_CTR_RUNNING) {
+ ret = CAPI_NOERROR;
+ break;
+ }
+
+ mutex_unlock(&capi_controller_lock);
+
+ return ret;
}
EXPORT_SYMBOL(capi20_isinstalled);
@@ -610,46 +654,43 @@ u16 capi20_register(struct capi20_appl *ap)
{
int i;
u16 applid;
- unsigned long flags;
DBG("");
if (ap->rparam.datablklen < 128)
return CAPI_LOGBLKSIZETOSMALL;
- write_lock_irqsave(&application_lock, flags);
+ ap->nrecvctlpkt = 0;
+ ap->nrecvdatapkt = 0;
+ ap->nsentctlpkt = 0;
+ ap->nsentdatapkt = 0;
+ mutex_init(&ap->recv_mtx);
+ skb_queue_head_init(&ap->recv_queue);
+ INIT_WORK(&ap->recv_work, recv_handler);
+ ap->release_in_progress = 0;
+
+ mutex_lock(&capi_controller_lock);
for (applid = 1; applid <= CAPI_MAXAPPL; applid++) {
if (capi_applications[applid - 1] == NULL)
break;
}
if (applid > CAPI_MAXAPPL) {
- write_unlock_irqrestore(&application_lock, flags);
+ mutex_unlock(&capi_controller_lock);
return CAPI_TOOMANYAPPLS;
}
ap->applid = applid;
capi_applications[applid - 1] = ap;
- ap->nrecvctlpkt = 0;
- ap->nrecvdatapkt = 0;
- ap->nsentctlpkt = 0;
- ap->nsentdatapkt = 0;
- ap->callback = NULL;
- mutex_init(&ap->recv_mtx);
- skb_queue_head_init(&ap->recv_queue);
- INIT_WORK(&ap->recv_work, recv_handler);
- ap->release_in_progress = 0;
-
- write_unlock_irqrestore(&application_lock, flags);
-
- mutex_lock(&controller_mutex);
for (i = 0; i < CAPI_MAXCONTR; i++) {
- if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING)
+ if (!capi_controller[i] ||
+ capi_controller[i]->state != CAPI_CTR_RUNNING)
continue;
- register_appl(capi_cards[i], applid, &ap->rparam);
+ register_appl(capi_controller[i], applid, &ap->rparam);
}
- mutex_unlock(&controller_mutex);
+
+ mutex_unlock(&capi_controller_lock);
if (showcapimsgs & 1) {
printk(KERN_DEBUG "kcapi: appl %d up\n", applid);
@@ -673,22 +714,24 @@ EXPORT_SYMBOL(capi20_register);
u16 capi20_release(struct capi20_appl *ap)
{
int i;
- unsigned long flags;
DBG("applid %#x", ap->applid);
- write_lock_irqsave(&application_lock, flags);
+ mutex_lock(&capi_controller_lock);
+
ap->release_in_progress = 1;
capi_applications[ap->applid - 1] = NULL;
- write_unlock_irqrestore(&application_lock, flags);
- mutex_lock(&controller_mutex);
+ synchronize_rcu();
+
for (i = 0; i < CAPI_MAXCONTR; i++) {
- if (!capi_cards[i] || capi_cards[i]->cardstate != CARD_RUNNING)
+ if (!capi_controller[i] ||
+ capi_controller[i]->state != CAPI_CTR_RUNNING)
continue;
- release_appl(capi_cards[i], ap->applid);
+ release_appl(capi_controller[i], ap->applid);
}
- mutex_unlock(&controller_mutex);
+
+ mutex_unlock(&capi_controller_lock);
flush_scheduled_work();
skb_queue_purge(&ap->recv_queue);
@@ -713,13 +756,13 @@ EXPORT_SYMBOL(capi20_release);
u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
int showctl = 0;
u8 cmd, subcmd;
DBG("applid %#x", ap->applid);
- if (ncards == 0)
+ if (ncontrollers == 0)
return CAPI_REGNOTINSTALLED;
if ((ap->applid == 0) || ap->release_in_progress)
return CAPI_ILLAPPNR;
@@ -727,28 +770,33 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
|| !capi_cmd_valid(CAPIMSG_COMMAND(skb->data))
|| !capi_subcmd_valid(CAPIMSG_SUBCOMMAND(skb->data)))
return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
- card = get_capi_ctr_by_nr(CAPIMSG_CONTROLLER(skb->data));
- if (!card || card->cardstate != CARD_RUNNING) {
- card = get_capi_ctr_by_nr(1); // XXX why?
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- }
- if (card->blocked)
+
+ /*
+ * The controller reference is protected by the existence of the
+ * application passed to us. We assume that the caller properly
+ * synchronizes this service with capi20_release.
+ */
+ ctr = get_capi_ctr_by_nr(CAPIMSG_CONTROLLER(skb->data));
+ if (!ctr || ctr->state != CAPI_CTR_RUNNING)
+ return CAPI_REGNOTINSTALLED;
+ if (ctr->blocked)
return CAPI_SENDQUEUEFULL;
cmd = CAPIMSG_COMMAND(skb->data);
subcmd = CAPIMSG_SUBCOMMAND(skb->data);
if (cmd == CAPI_DATA_B3 && subcmd== CAPI_REQ) {
- card->nsentdatapkt++;
+ ctr->nsentdatapkt++;
ap->nsentdatapkt++;
- if (card->traceflag > 2) showctl |= 2;
+ if (ctr->traceflag > 2)
+ showctl |= 2;
} else {
- card->nsentctlpkt++;
+ ctr->nsentctlpkt++;
ap->nsentctlpkt++;
- if (card->traceflag) showctl |= 2;
+ if (ctr->traceflag)
+ showctl |= 2;
}
- showctl |= (card->traceflag & 1);
+ showctl |= (ctr->traceflag & 1);
if (showctl & 2) {
if (showctl & 1) {
printk(KERN_DEBUG "kcapi: put [%03d] id#%d %s len=%u\n",
@@ -771,7 +819,7 @@ u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb)
CAPIMSG_LEN(skb->data));
}
}
- return card->send_message(card, skb);
+ return ctr->send_message(ctr, skb);
}
EXPORT_SYMBOL(capi20_put_message);
@@ -788,17 +836,25 @@ EXPORT_SYMBOL(capi20_put_message);
u16 capi20_get_manufacturer(u32 contr, u8 *buf)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ u16 ret;
if (contr == 0) {
strlcpy(buf, capi_manufakturer, CAPI_MANUFACTURER_LEN);
return CAPI_NOERROR;
}
- card = get_capi_ctr_by_nr(contr);
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- strlcpy(buf, card->manu, CAPI_MANUFACTURER_LEN);
- return CAPI_NOERROR;
+
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+ strlcpy(buf, ctr->manu, CAPI_MANUFACTURER_LEN);
+ ret = CAPI_NOERROR;
+ } else
+ ret = CAPI_REGNOTINSTALLED;
+
+ mutex_unlock(&capi_controller_lock);
+ return ret;
}
EXPORT_SYMBOL(capi20_get_manufacturer);
@@ -815,18 +871,25 @@ EXPORT_SYMBOL(capi20_get_manufacturer);
u16 capi20_get_version(u32 contr, struct capi_version *verp)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ u16 ret;
if (contr == 0) {
*verp = driver_version;
return CAPI_NOERROR;
}
- card = get_capi_ctr_by_nr(contr);
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- memcpy((void *) verp, &card->version, sizeof(capi_version));
- return CAPI_NOERROR;
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+ memcpy(verp, &ctr->version, sizeof(capi_version));
+ ret = CAPI_NOERROR;
+ } else
+ ret = CAPI_REGNOTINSTALLED;
+
+ mutex_unlock(&capi_controller_lock);
+ return ret;
}
EXPORT_SYMBOL(capi20_get_version);
@@ -843,18 +906,25 @@ EXPORT_SYMBOL(capi20_get_version);
u16 capi20_get_serial(u32 contr, u8 *serial)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ u16 ret;
if (contr == 0) {
strlcpy(serial, driver_serial, CAPI_SERIAL_LEN);
return CAPI_NOERROR;
}
- card = get_capi_ctr_by_nr(contr);
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- strlcpy((void *) serial, card->serial, CAPI_SERIAL_LEN);
- return CAPI_NOERROR;
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+ strlcpy(serial, ctr->serial, CAPI_SERIAL_LEN);
+ ret = CAPI_NOERROR;
+ } else
+ ret = CAPI_REGNOTINSTALLED;
+
+ mutex_unlock(&capi_controller_lock);
+ return ret;
}
EXPORT_SYMBOL(capi20_get_serial);
@@ -871,23 +941,65 @@ EXPORT_SYMBOL(capi20_get_serial);
u16 capi20_get_profile(u32 contr, struct capi_profile *profp)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ u16 ret;
if (contr == 0) {
- profp->ncontroller = ncards;
+ profp->ncontroller = ncontrollers;
return CAPI_NOERROR;
}
- card = get_capi_ctr_by_nr(contr);
- if (!card || card->cardstate != CARD_RUNNING)
- return CAPI_REGNOTINSTALLED;
- memcpy((void *) profp, &card->profile,
- sizeof(struct capi_profile));
- return CAPI_NOERROR;
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(contr);
+ if (ctr && ctr->state == CAPI_CTR_RUNNING) {
+ memcpy(profp, &ctr->profile, sizeof(struct capi_profile));
+ ret = CAPI_NOERROR;
+ } else
+ ret = CAPI_REGNOTINSTALLED;
+
+ mutex_unlock(&capi_controller_lock);
+ return ret;
}
EXPORT_SYMBOL(capi20_get_profile);
+/* Must be called with capi_controller_lock held. */
+static int wait_on_ctr_state(struct capi_ctr *ctr, unsigned int state)
+{
+ DEFINE_WAIT(wait);
+ int retval = 0;
+
+ ctr = capi_ctr_get(ctr);
+ if (!ctr)
+ return -ESRCH;
+
+ for (;;) {
+ prepare_to_wait(&ctr->state_wait_queue, &wait,
+ TASK_INTERRUPTIBLE);
+
+ if (ctr->state == state)
+ break;
+ if (ctr->state == CAPI_CTR_DETACHED) {
+ retval = -ESRCH;
+ break;
+ }
+ if (signal_pending(current)) {
+ retval = -EINTR;
+ break;
+ }
+
+ mutex_unlock(&capi_controller_lock);
+ schedule();
+ mutex_lock(&capi_controller_lock);
+ }
+ finish_wait(&ctr->state_wait_queue, &wait);
+
+ capi_ctr_put(ctr);
+
+ return retval;
+}
+
#ifdef AVMB1_COMPAT
static int old_capi_manufacturer(unsigned int cmd, void __user *data)
{
@@ -895,11 +1007,10 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
avmb1_extcarddef cdef;
avmb1_resetdef rdef;
capicardparams cparams;
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
struct capi_driver *driver = NULL;
capiloaddata ldata;
struct list_head *l;
- unsigned long flags;
int retval;
switch (cmd) {
@@ -919,7 +1030,8 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
cparams.irq = cdef.irq;
cparams.cardnr = cdef.cardnr;
- read_lock_irqsave(&capi_drivers_list_lock, flags);
+ mutex_lock(&capi_drivers_lock);
+
switch (cdef.cardtype) {
case AVM_CARDTYPE_B1:
list_for_each(l, &capi_drivers) {
@@ -940,18 +1052,15 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
break;
}
if (!driver) {
- read_unlock_irqrestore(&capi_drivers_list_lock, flags);
printk(KERN_ERR "kcapi: driver not loaded.\n");
- return -EIO;
- }
- if (!driver->add_card) {
- read_unlock_irqrestore(&capi_drivers_list_lock, flags);
+ retval = -EIO;
+ } else if (!driver->add_card) {
printk(KERN_ERR "kcapi: driver has no add card function.\n");
- return -EIO;
- }
+ retval = -EIO;
+ } else
+ retval = driver->add_card(driver, &cparams);
- retval = driver->add_card(driver, &cparams);
- read_unlock_irqrestore(&capi_drivers_list_lock, flags);
+ mutex_unlock(&capi_drivers_lock);
return retval;
case AVMB1_LOAD:
@@ -968,27 +1077,30 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
sizeof(avmb1_loadandconfigdef)))
return -EFAULT;
}
- card = get_capi_ctr_by_nr(ldef.contr);
- if (!card)
- return -EINVAL;
- card = capi_ctr_get(card);
- if (!card)
- return -ESRCH;
- if (card->load_firmware == NULL) {
+
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(ldef.contr);
+ if (!ctr) {
+ retval = -EINVAL;
+ goto load_unlock_out;
+ }
+
+ if (ctr->load_firmware == NULL) {
printk(KERN_DEBUG "kcapi: load: no load function\n");
- capi_ctr_put(card);
- return -ESRCH;
+ retval = -ESRCH;
+ goto load_unlock_out;
}
if (ldef.t4file.len <= 0) {
printk(KERN_DEBUG "kcapi: load: invalid parameter: length of t4file is %d ?\n", ldef.t4file.len);
- capi_ctr_put(card);
- return -EINVAL;
+ retval = -EINVAL;
+ goto load_unlock_out;
}
if (ldef.t4file.data == NULL) {
printk(KERN_DEBUG "kcapi: load: invalid parameter: dataptr is 0\n");
- capi_ctr_put(card);
- return -EINVAL;
+ retval = -EINVAL;
+ goto load_unlock_out;
}
ldata.firmware.user = 1;
@@ -998,54 +1110,49 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
ldata.configuration.data = ldef.t4config.data;
ldata.configuration.len = ldef.t4config.len;
- if (card->cardstate != CARD_DETECTED) {
+ if (ctr->state != CAPI_CTR_DETECTED) {
printk(KERN_INFO "kcapi: load: contr=%d not in detect state\n", ldef.contr);
- capi_ctr_put(card);
- return -EBUSY;
+ retval = -EBUSY;
+ goto load_unlock_out;
}
- card->cardstate = CARD_LOADING;
-
- retval = card->load_firmware(card, &ldata);
+ ctr->state = CAPI_CTR_LOADING;
+ retval = ctr->load_firmware(ctr, &ldata);
if (retval) {
- card->cardstate = CARD_DETECTED;
- capi_ctr_put(card);
- return retval;
+ ctr->state = CAPI_CTR_DETECTED;
+ goto load_unlock_out;
}
- while (card->cardstate != CARD_RUNNING) {
-
- msleep_interruptible(100); /* 0.1 sec */
+ retval = wait_on_ctr_state(ctr, CAPI_CTR_RUNNING);
- if (signal_pending(current)) {
- capi_ctr_put(card);
- return -EINTR;
- }
- }
- capi_ctr_put(card);
- return 0;
+load_unlock_out:
+ mutex_unlock(&capi_controller_lock);
+ return retval;
case AVMB1_RESETCARD:
if (copy_from_user(&rdef, data, sizeof(avmb1_resetdef)))
return -EFAULT;
- card = get_capi_ctr_by_nr(rdef.contr);
- if (!card)
- return -ESRCH;
- if (card->cardstate == CARD_DETECTED)
- return 0;
+ retval = 0;
- card->reset_ctr(card);
+ mutex_lock(&capi_controller_lock);
- while (card->cardstate > CARD_DETECTED) {
+ ctr = get_capi_ctr_by_nr(rdef.contr);
+ if (!ctr) {
+ retval = -ESRCH;
+ goto reset_unlock_out;
+ }
- msleep_interruptible(100); /* 0.1 sec */
+ if (ctr->state == CAPI_CTR_DETECTED)
+ goto reset_unlock_out;
- if (signal_pending(current))
- return -EINTR;
- }
- return 0;
+ ctr->reset_ctr(ctr);
+
+ retval = wait_on_ctr_state(ctr, CAPI_CTR_DETECTED);
+reset_unlock_out:
+ mutex_unlock(&capi_controller_lock);
+ return retval;
}
return -EINVAL;
}
@@ -1062,7 +1169,8 @@ static int old_capi_manufacturer(unsigned int cmd, void __user *data)
int capi20_manufacturer(unsigned int cmd, void __user *data)
{
- struct capi_ctr *card;
+ struct capi_ctr *ctr;
+ int retval;
switch (cmd) {
#ifdef AVMB1_COMPAT
@@ -1080,14 +1188,20 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
if (copy_from_user(&fdef, data, sizeof(kcapi_flagdef)))
return -EFAULT;
- card = get_capi_ctr_by_nr(fdef.contr);
- if (!card)
- return -ESRCH;
+ mutex_lock(&capi_controller_lock);
+
+ ctr = get_capi_ctr_by_nr(fdef.contr);
+ if (ctr) {
+ ctr->traceflag = fdef.flag;
+ printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n",
+ ctr->cnr, ctr->traceflag);
+ retval = 0;
+ } else
+ retval = -ESRCH;
+
+ mutex_unlock(&capi_controller_lock);
- card->traceflag = fdef.flag;
- printk(KERN_INFO "kcapi: contr [%03d] set trace=%d\n",
- card->cnr, card->traceflag);
- return 0;
+ return retval;
}
case KCAPI_CMD_ADDCARD:
{
@@ -1095,7 +1209,6 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
struct capi_driver *driver = NULL;
capicardparams cparams;
kcapi_carddef cdef;
- int retval;
if ((retval = copy_from_user(&cdef, data, sizeof(cdef))))
return retval;
@@ -1107,6 +1220,8 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
cparams.cardtype = 0;
cdef.driver[sizeof(cdef.driver)-1] = 0;
+ mutex_lock(&capi_drivers_lock);
+
list_for_each(l, &capi_drivers) {
driver = list_entry(l, struct capi_driver, list);
if (strcmp(driver->name, cdef.driver) == 0)
@@ -1115,15 +1230,15 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
if (driver == NULL) {
printk(KERN_ERR "kcapi: driver \"%s\" not loaded.\n",
cdef.driver);
- return -ESRCH;
- }
-
- if (!driver->add_card) {
+ retval = -ESRCH;
+ } else if (!driver->add_card) {
printk(KERN_ERR "kcapi: driver \"%s\" has no add card function.\n", cdef.driver);
- return -EIO;
- }
+ retval = -EIO;
+ } else
+ retval = driver->add_card(driver, &cparams);
- return driver->add_card(driver, &cparams);
+ mutex_unlock(&capi_drivers_lock);
+ return retval;
}
default:
@@ -1137,30 +1252,6 @@ int capi20_manufacturer(unsigned int cmd, void __user *data)
EXPORT_SYMBOL(capi20_manufacturer);
-/* temporary hack */
-
-/**
- * capi20_set_callback() - set CAPI application notification callback function
- * @ap: CAPI application descriptor structure.
- * @callback: callback function (NULL to remove).
- *
- * If not NULL, the callback function will be called to notify the
- * application of the addition or removal of a controller.
- * The first argument (cmd) will tell whether the controller was added
- * (KCI_CONTRUP) or removed (KCI_CONTRDOWN).
- * The second argument (contr) will be the controller number.
- * For cmd==KCI_CONTRUP the third argument (data) will be a pointer to the
- * new controller's capability profile structure.
- */
-
-void capi20_set_callback(struct capi20_appl *ap,
- void (*callback) (unsigned int cmd, __u32 contr, void *data))
-{
- ap->callback = callback;
-}
-
-EXPORT_SYMBOL(capi20_set_callback);
-
/* ------------------------------------------------------------- */
/* -------- Init & Cleanup ------------------------------------- */
/* ------------------------------------------------------------- */
@@ -1169,27 +1260,21 @@ EXPORT_SYMBOL(capi20_set_callback);
* init / exit functions
*/
+static struct notifier_block capictr_nb = {
+ .notifier_call = notify_handler,
+ .priority = INT_MAX,
+};
+
static int __init kcapi_init(void)
{
- char *p;
- char rev[32];
- int ret;
-
- ret = cdebug_init();
- if (ret)
- return ret;
- kcapi_proc_init();
-
- if ((p = strchr(revision, ':')) != NULL && p[1]) {
- strlcpy(rev, p + 2, sizeof(rev));
- if ((p = strchr(rev, '$')) != NULL && p > rev)
- *(p-1) = 0;
- } else
- strcpy(rev, "1.0");
+ int err;
- printk(KERN_NOTICE "CAPI Subsystem Rev %s\n", rev);
+ register_capictr_notifier(&capictr_nb);
- return 0;
+ err = cdebug_init();
+ if (!err)
+ kcapi_proc_init();
+ return err;
}
static void __exit kcapi_exit(void)
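
The application table is now read under RCU (rcu_read_lock()/rcu_dereference() in capi_ctr_handle_message()) while capi20_release() clears the slot and calls synchronize_rcu() before tearing the application down. A minimal standalone sketch of that reader/updater split follows; the names are hypothetical, and where this sketch serializes updaters with a spinlock the patch uses capi_controller_lock instead.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#define EXAMPLE_MAX_APPL 16

struct example_appl {
	unsigned int id;
};

static struct example_appl *example_appls[EXAMPLE_MAX_APPL];
static DEFINE_SPINLOCK(example_update_lock);

/* reader side: may run in atomic context and never blocks */
static void example_deliver(unsigned int slot)
{
	struct example_appl *ap;

	rcu_read_lock();
	ap = rcu_dereference(example_appls[slot]);
	if (ap) {
		/* ... hand the message to ap ... */
	}
	rcu_read_unlock();
}

/* updater side: runs in process context */
static void example_release(unsigned int slot)
{
	struct example_appl *ap;

	spin_lock(&example_update_lock);
	ap = example_appls[slot];
	rcu_assign_pointer(example_appls[slot], NULL);
	spin_unlock(&example_update_lock);

	synchronize_rcu();	/* wait until no reader can still see ap */
	kfree(ap);
}
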
diff --git a/drivers/isdn/capi/kcapi.h b/drivers/isdn/capi/kcapi.h
index 244711f7f83..f4620b38ec5 100644
--- a/drivers/isdn/capi/kcapi.h
+++ b/drivers/isdn/capi/kcapi.h
@@ -24,16 +24,19 @@ printk(KERN_DEBUG "%s: " format "\n" , __func__ , ## arg); \
#endif
enum {
- CARD_DETECTED = 1,
- CARD_LOADING = 2,
- CARD_RUNNING = 3,
+ CAPI_CTR_DETACHED = 0,
+ CAPI_CTR_DETECTED = 1,
+ CAPI_CTR_LOADING = 2,
+ CAPI_CTR_RUNNING = 3,
};
extern struct list_head capi_drivers;
-extern rwlock_t capi_drivers_list_lock;
+extern struct mutex capi_drivers_lock;
+
+extern struct capi_ctr *capi_controller[CAPI_MAXCONTR];
+extern struct mutex capi_controller_lock;
extern struct capi20_appl *capi_applications[CAPI_MAXAPPL];
-extern struct capi_ctr *capi_cards[CAPI_MAXCONTR];
#ifdef CONFIG_PROC_FS
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c
index 09d4db764d2..ea2dff602e4 100644
--- a/drivers/isdn/capi/kcapi_proc.c
+++ b/drivers/isdn/capi/kcapi_proc.c
@@ -15,13 +15,12 @@
#include <linux/seq_file.h>
#include <linux/init.h>
-static char *
-cardstate2str(unsigned short cardstate)
+static char *state2str(unsigned short state)
{
- switch (cardstate) {
- case CARD_DETECTED: return "detected";
- case CARD_LOADING: return "loading";
- case CARD_RUNNING: return "running";
+ switch (state) {
+ case CAPI_CTR_DETECTED: return "detected";
+ case CAPI_CTR_LOADING: return "loading";
+ case CAPI_CTR_RUNNING: return "running";
default: return "???";
}
}
@@ -36,9 +35,12 @@ cardstate2str(unsigned short cardstate)
// ---------------------------------------------------------------------------
static void *controller_start(struct seq_file *seq, loff_t *pos)
+ __acquires(capi_controller_lock)
{
+ mutex_lock(&capi_controller_lock);
+
if (*pos < CAPI_MAXCONTR)
- return &capi_cards[*pos];
+ return &capi_controller[*pos];
return NULL;
}
@@ -47,13 +49,15 @@ static void *controller_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
if (*pos < CAPI_MAXCONTR)
- return &capi_cards[*pos];
+ return &capi_controller[*pos];
return NULL;
}
static void controller_stop(struct seq_file *seq, void *v)
+ __releases(capi_controller_lock)
{
+ mutex_unlock(&capi_controller_lock);
}
static int controller_show(struct seq_file *seq, void *v)
@@ -65,7 +69,7 @@ static int controller_show(struct seq_file *seq, void *v)
seq_printf(seq, "%d %-10s %-8s %-16s %s\n",
ctr->cnr, ctr->driver_name,
- cardstate2str(ctr->cardstate),
+ state2str(ctr->state),
ctr->name,
ctr->procinfo ? ctr->procinfo(ctr) : "");
@@ -135,9 +139,11 @@ static const struct file_operations proc_contrstats_ops = {
// applid nrecvctlpkt nrecvdatapkt nsentctlpkt nsentdatapkt
// ---------------------------------------------------------------------------
-static void *
-applications_start(struct seq_file *seq, loff_t *pos)
+static void *applications_start(struct seq_file *seq, loff_t *pos)
+ __acquires(capi_controller_lock)
{
+ mutex_lock(&capi_controller_lock);
+
if (*pos < CAPI_MAXAPPL)
return &capi_applications[*pos];
@@ -154,9 +160,10 @@ applications_next(struct seq_file *seq, void *v, loff_t *pos)
return NULL;
}
-static void
-applications_stop(struct seq_file *seq, void *v)
+static void applications_stop(struct seq_file *seq, void *v)
+ __releases(capi_controller_lock)
{
+ mutex_unlock(&capi_controller_lock);
}
static int
@@ -239,9 +246,9 @@ static const struct file_operations proc_applstats_ops = {
// ---------------------------------------------------------------------------
static void *capi_driver_start(struct seq_file *seq, loff_t *pos)
- __acquires(&capi_drivers_list_lock)
+ __acquires(&capi_drivers_lock)
{
- read_lock(&capi_drivers_list_lock);
+ mutex_lock(&capi_drivers_lock);
return seq_list_start(&capi_drivers, *pos);
}
@@ -251,9 +258,9 @@ static void *capi_driver_next(struct seq_file *seq, void *v, loff_t *pos)
}
static void capi_driver_stop(struct seq_file *seq, void *v)
- __releases(&capi_drivers_list_lock)
+ __releases(&capi_drivers_lock)
{
- read_unlock(&capi_drivers_list_lock);
+ mutex_unlock(&capi_drivers_lock);
}
static int capi_driver_show(struct seq_file *seq, void *v)
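
controller_start()/controller_stop() and applications_start()/applications_stop() above use the seq_file iterator form, with the lock taken in start() and dropped in stop(). A self-contained sketch of that form over a hypothetical fixed-size table:

#include <linux/mutex.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#define EXAMPLE_MAX 8

static void *example_slots[EXAMPLE_MAX];
static DEFINE_MUTEX(example_lock);

static void *example_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(example_lock)
{
	mutex_lock(&example_lock);
	return *pos < EXAMPLE_MAX ? &example_slots[*pos] : NULL;
}

static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return *pos < EXAMPLE_MAX ? &example_slots[*pos] : NULL;
}

static void example_seq_stop(struct seq_file *seq, void *v)
	__releases(example_lock)
{
	mutex_unlock(&example_lock);
}

static int example_seq_show(struct seq_file *seq, void *v)
{
	void *slot = *(void **)v;

	if (slot)
		seq_printf(seq, "%p\n", slot);
	return 0;
}

static const struct seq_operations example_seq_ops = {
	.start	= example_seq_start,
	.next	= example_seq_next,
	.stop	= example_seq_stop,
	.show	= example_seq_show,
};
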
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index ccb2a7b7c41..c5016bd2d94 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -40,6 +40,8 @@ static inline int muststuff(unsigned char c)
* Append received bytes to the command response buffer and forward them
* line by line to the response handler. Exit whenever a mode/state change
* might have occurred.
+ * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
+ * removed before passing the line to the response handler.
* Return value:
* number of processed bytes
*/
@@ -65,14 +67,14 @@ static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
/* --v-- fall through --v-- */
case '\r':
/* end of message line, pass to response handler */
- gig_dbg(DEBUG_TRANSCMD, "%s: End of Message (%d Bytes)",
- __func__, cbytes);
if (cbytes >= MAX_RESP_SIZE) {
dev_warn(cs->dev, "response too large (%d)\n",
cbytes);
cbytes = MAX_RESP_SIZE;
}
cs->cbytes = cbytes;
+ gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
+ cbytes, cs->respdata);
gigaset_handle_modem_response(cs);
cbytes = 0;
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 95ebc512989..0be15c70c16 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -347,12 +347,7 @@ static inline void error_hangup(struct bc_state *bcs)
{
struct cardstate *cs = bcs->cs;
- gig_dbg(DEBUG_ANY, "%s: scheduling HUP for channel %d",
- __func__, bcs->channel);
-
- if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL))
- dev_err(cs->dev, "event queue full\n");
-
+ gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL);
gigaset_schedule_event(cs);
}
@@ -1706,8 +1701,7 @@ static void complete_cb(struct cardstate *cs)
/* unqueue completed buffer */
cs->cmdbytes -= cs->curlen;
- gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD,
- "write_command: sent %u bytes, %u left",
+ gig_dbg(DEBUG_OUTPUT, "write_command: sent %u bytes, %u left",
cs->curlen, cs->cmdbytes);
if (cb->next != NULL) {
cs->cmdbuf = cb->next;
@@ -1881,13 +1875,13 @@ static int start_cbsend(struct cardstate *cs)
/* check if suspend requested */
if (ucs->basstate & BS_SUSPEND) {
- gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD, "suspending");
+ gig_dbg(DEBUG_OUTPUT, "suspending");
return -EHOSTUNREACH;
}
/* check if AT channel is open */
if (!(ucs->basstate & BS_ATOPEN)) {
- gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD, "AT channel not open");
+ gig_dbg(DEBUG_OUTPUT, "AT channel not open");
rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT);
if (rc < 0) {
/* flush command queue */
@@ -2251,7 +2245,7 @@ static int gigaset_probe(struct usb_interface *interface,
int i, j;
int rc;
- gig_dbg(DEBUG_ANY,
+ gig_dbg(DEBUG_INIT,
"%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)",
__func__, le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
@@ -2259,7 +2253,7 @@ static int gigaset_probe(struct usb_interface *interface,
/* set required alternate setting */
hostif = interface->cur_altsetting;
if (hostif->desc.bAlternateSetting != 3) {
- gig_dbg(DEBUG_ANY,
+ gig_dbg(DEBUG_INIT,
"%s: wrong alternate setting %d - trying to switch",
__func__, hostif->desc.bAlternateSetting);
if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
index 3f5cd06af10..6643d6533cc 100644
--- a/drivers/isdn/gigaset/capi.c
+++ b/drivers/isdn/gigaset/capi.c
@@ -13,6 +13,8 @@
#include "gigaset.h"
#include <linux/ctype.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/isdn/capilli.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
@@ -169,20 +171,6 @@ static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
}
/*
- * check for legal hex digit
- */
-static inline int ishexdigit(char c)
-{
- if (c >= '0' && c <= '9')
- return 1;
- if (c >= 'A' && c <= 'F')
- return 1;
- if (c >= 'a' && c <= 'f')
- return 1;
- return 0;
-}
-
-/*
* convert hex to binary
*/
static inline u8 hex2bin(char c)
@@ -202,7 +190,7 @@ static int encode_ie(char *in, u8 *out, int maxlen)
{
int l = 0;
while (*in) {
- if (!ishexdigit(in[0]) || !ishexdigit(in[1]) || l >= maxlen)
+ if (!isxdigit(in[0]) || !isxdigit(in[1]) || l >= maxlen)
return -1;
out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]);
in += 2;
@@ -1425,9 +1413,10 @@ static void do_connect_req(struct gigaset_capi_ctr *iif,
/* queue & schedule EV_DIAL event */
if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
- bcs->at_state.seq_index, NULL))
- goto oom;
- gig_dbg(DEBUG_CMD, "scheduling DIAL");
+ bcs->at_state.seq_index, NULL)) {
+ info = CAPI_MSGOSRESOURCEERR;
+ goto error;
+ }
gigaset_schedule_event(cs);
ap->connected = APCONN_SETUP;
send_conf(iif, ap, skb, CapiSuccess);
@@ -1541,7 +1530,6 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
EV_ACCEPT, NULL, 0, NULL))
return;
- gig_dbg(DEBUG_CMD, "scheduling ACCEPT");
gigaset_schedule_event(cs);
return;
@@ -1582,7 +1570,6 @@ static void do_connect_resp(struct gigaset_capi_ctr *iif,
if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
EV_HUP, NULL, 0, NULL))
return;
- gig_dbg(DEBUG_CMD, "scheduling HUP");
gigaset_schedule_event(cs);
return;
}
@@ -1665,11 +1652,9 @@ static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
/* trigger hangup, causing eventual DISCONNECT_IND */
if (!gigaset_add_event(cs, &bcs->at_state,
EV_HUP, NULL, 0, NULL)) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
dev_kfree_skb_any(skb);
return;
}
- gig_dbg(DEBUG_CMD, "scheduling HUP");
gigaset_schedule_event(cs);
/* emit DISCONNECT_B3_IND */
@@ -1768,11 +1753,9 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
/* trigger hangup, causing eventual DISCONNECT_IND */
if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
return;
}
- gig_dbg(DEBUG_CMD, "scheduling HUP");
gigaset_schedule_event(cs);
/* emit reply */
@@ -1815,11 +1798,9 @@ static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
/* trigger hangup, causing eventual DISCONNECT_B3_IND */
if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
EV_HUP, NULL, 0, NULL)) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
return;
}
- gig_dbg(DEBUG_CMD, "scheduling HUP");
gigaset_schedule_event(cs);
/* NCPI parameter: not applicable for B3 Transparent */
@@ -2106,35 +2087,22 @@ static char *gigaset_procinfo(struct capi_ctr *ctr)
return ctr->name; /* ToDo: more? */
}
-/**
- * gigaset_ctr_read_proc() - build controller proc file entry
- * @page: buffer of PAGE_SIZE bytes for receiving the entry.
- * @start: unused.
- * @off: unused.
- * @count: unused.
- * @eof: unused.
- * @ctr: controller descriptor structure.
- *
- * Return value: length of generated entry
- */
-static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctr)
+static int gigaset_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctr = m->private;
struct cardstate *cs = ctr->driverdata;
char *s;
int i;
- int len = 0;
- len += sprintf(page+len, "%-16s %s\n", "name", ctr->name);
- len += sprintf(page+len, "%-16s %s %s\n", "dev",
+
+ seq_printf(m, "%-16s %s\n", "name", ctr->name);
+ seq_printf(m, "%-16s %s %s\n", "dev",
dev_driver_string(cs->dev), dev_name(cs->dev));
- len += sprintf(page+len, "%-16s %d\n", "id", cs->myid);
+ seq_printf(m, "%-16s %d\n", "id", cs->myid);
if (cs->gotfwver)
- len += sprintf(page+len, "%-16s %d.%d.%d.%d\n", "firmware",
+ seq_printf(m, "%-16s %d.%d.%d.%d\n", "firmware",
cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
- len += sprintf(page+len, "%-16s %d\n", "channels",
- cs->channels);
- len += sprintf(page+len, "%-16s %s\n", "onechannel",
- cs->onechannel ? "yes" : "no");
+ seq_printf(m, "%-16s %d\n", "channels", cs->channels);
+ seq_printf(m, "%-16s %s\n", "onechannel", cs->onechannel ? "yes" : "no");
switch (cs->mode) {
case M_UNKNOWN:
@@ -2152,7 +2120,7 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
default:
s = "??";
}
- len += sprintf(page+len, "%-16s %s\n", "mode", s);
+ seq_printf(m, "%-16s %s\n", "mode", s);
switch (cs->mstate) {
case MS_UNINITIALIZED:
@@ -2176,25 +2144,21 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
default:
s = "??";
}
- len += sprintf(page+len, "%-16s %s\n", "mstate", s);
+ seq_printf(m, "%-16s %s\n", "mstate", s);
- len += sprintf(page+len, "%-16s %s\n", "running",
- cs->running ? "yes" : "no");
- len += sprintf(page+len, "%-16s %s\n", "connected",
- cs->connected ? "yes" : "no");
- len += sprintf(page+len, "%-16s %s\n", "isdn_up",
- cs->isdn_up ? "yes" : "no");
- len += sprintf(page+len, "%-16s %s\n", "cidmode",
- cs->cidmode ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "running", cs->running ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "connected", cs->connected ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "isdn_up", cs->isdn_up ? "yes" : "no");
+ seq_printf(m, "%-16s %s\n", "cidmode", cs->cidmode ? "yes" : "no");
for (i = 0; i < cs->channels; i++) {
- len += sprintf(page+len, "[%d]%-13s %d\n", i, "corrupted",
+ seq_printf(m, "[%d]%-13s %d\n", i, "corrupted",
cs->bcs[i].corrupted);
- len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_down",
+ seq_printf(m, "[%d]%-13s %d\n", i, "trans_down",
cs->bcs[i].trans_down);
- len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_up",
+ seq_printf(m, "[%d]%-13s %d\n", i, "trans_up",
cs->bcs[i].trans_up);
- len += sprintf(page+len, "[%d]%-13s %d\n", i, "chstate",
+ seq_printf(m, "[%d]%-13s %d\n", i, "chstate",
cs->bcs[i].chstate);
switch (cs->bcs[i].proto2) {
case L2_BITSYNC:
@@ -2209,11 +2173,23 @@ static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
default:
s = "??";
}
- len += sprintf(page+len, "[%d]%-13s %s\n", i, "proto2", s);
+ seq_printf(m, "[%d]%-13s %s\n", i, "proto2", s);
}
- return len;
+ return 0;
}
+static int gigaset_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, gigaset_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations gigaset_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = gigaset_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
static struct capi_driver capi_driver_gigaset = {
.name = "gigaset",
@@ -2256,7 +2232,7 @@ int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
iif->ctr.release_appl = gigaset_release_appl;
iif->ctr.send_message = gigaset_send_message;
iif->ctr.procinfo = gigaset_procinfo;
- iif->ctr.ctr_read_proc = gigaset_ctr_read_proc;
+ iif->ctr.proc_fops = &gigaset_proc_fops;
INIT_LIST_HEAD(&iif->appls);
skb_queue_head_init(&iif->sendqueue);
atomic_set(&iif->sendqlen, 0);
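The capi.c hunk above is the core of the conversion: the page-buffer ctr_read_proc callback is replaced by a proc_fops pointer, and single_open() binds a seq_file show routine to the controller recovered from the proc entry's data pointer. A compact sketch of the same single_open()/PDE(inode)->data wiring, using hypothetical demo_* names and a made-up per-device struct; it assumes the entry is created with proc_create_data() so the private pointer is attached at creation time:

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* hypothetical per-device structure, not part of the gigaset patch */
struct demo_dev {
	const char *name;
	int channels;
};

static int demo_proc_show(struct seq_file *m, void *v)
{
	struct demo_dev *dev = m->private;	/* set by single_open() */

	seq_printf(m, "%-16s %s\n", "name", dev->name);
	seq_printf(m, "%-16s %d\n", "channels", dev->channels);
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is the pointer passed to proc_create_data() */
	return single_open(file, demo_proc_show, PDE(inode)->data);
}

static const struct file_operations demo_proc_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};

static struct demo_dev demo = { .name = "demo0", .channels = 2 };

static int __init demo_init(void)
{
	if (!proc_create_data("demo_ctrl", S_IRUGO, NULL,
			      &demo_proc_fops, &demo))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_ctrl", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");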
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 664b0c519c3..85de3399a2f 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -149,10 +149,8 @@ static int test_timeout(struct at_state_t *at_state)
return 0;
}
- if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
- at_state->timer_index, NULL))
- dev_err(at_state->cs->dev, "%s: out of memory\n",
- __func__);
+ gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
+ at_state->timer_index, NULL);
return 1;
}
@@ -180,7 +178,7 @@ static void timer_tick(unsigned long data)
if (cs->running) {
mod_timer(&cs->timer, jiffies + msecs_to_jiffies(GIG_TICK));
if (timeout) {
- gig_dbg(DEBUG_CMD, "scheduling timeout");
+ gig_dbg(DEBUG_EVENT, "scheduling timeout");
tasklet_schedule(&cs->event_tasklet);
}
}
@@ -194,14 +192,14 @@ int gigaset_get_channel(struct bc_state *bcs)
spin_lock_irqsave(&bcs->cs->lock, flags);
if (bcs->use_count || !try_module_get(bcs->cs->driver->owner)) {
- gig_dbg(DEBUG_ANY, "could not allocate channel %d",
+ gig_dbg(DEBUG_CHANNEL, "could not allocate channel %d",
bcs->channel);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
return 0;
}
++bcs->use_count;
bcs->busy = 1;
- gig_dbg(DEBUG_ANY, "allocated channel %d", bcs->channel);
+ gig_dbg(DEBUG_CHANNEL, "allocated channel %d", bcs->channel);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
return 1;
}
@@ -213,7 +211,7 @@ struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
spin_lock_irqsave(&cs->lock, flags);
if (!try_module_get(cs->driver->owner)) {
- gig_dbg(DEBUG_ANY,
+ gig_dbg(DEBUG_CHANNEL,
"could not get module for allocating channel");
spin_unlock_irqrestore(&cs->lock, flags);
return NULL;
@@ -223,12 +221,12 @@ struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
++cs->bcs[i].use_count;
cs->bcs[i].busy = 1;
spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_ANY, "allocated channel %d", i);
+ gig_dbg(DEBUG_CHANNEL, "allocated channel %d", i);
return cs->bcs + i;
}
module_put(cs->driver->owner);
spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_ANY, "no free channel");
+ gig_dbg(DEBUG_CHANNEL, "no free channel");
return NULL;
}
@@ -238,14 +236,15 @@ void gigaset_free_channel(struct bc_state *bcs)
spin_lock_irqsave(&bcs->cs->lock, flags);
if (!bcs->busy) {
- gig_dbg(DEBUG_ANY, "could not free channel %d", bcs->channel);
+ gig_dbg(DEBUG_CHANNEL, "could not free channel %d",
+ bcs->channel);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
return;
}
--bcs->use_count;
bcs->busy = 0;
module_put(bcs->cs->driver->owner);
- gig_dbg(DEBUG_ANY, "freed channel %d", bcs->channel);
+ gig_dbg(DEBUG_CHANNEL, "freed channel %d", bcs->channel);
spin_unlock_irqrestore(&bcs->cs->lock, flags);
}
@@ -258,14 +257,15 @@ int gigaset_get_channels(struct cardstate *cs)
for (i = 0; i < cs->channels; ++i)
if (cs->bcs[i].use_count) {
spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_ANY, "could not allocate all channels");
+ gig_dbg(DEBUG_CHANNEL,
+ "could not allocate all channels");
return 0;
}
for (i = 0; i < cs->channels; ++i)
++cs->bcs[i].use_count;
spin_unlock_irqrestore(&cs->lock, flags);
- gig_dbg(DEBUG_ANY, "allocated all channels");
+ gig_dbg(DEBUG_CHANNEL, "allocated all channels");
return 1;
}
@@ -275,7 +275,7 @@ void gigaset_free_channels(struct cardstate *cs)
unsigned long flags;
int i;
- gig_dbg(DEBUG_ANY, "unblocking all channels");
+ gig_dbg(DEBUG_CHANNEL, "unblocking all channels");
spin_lock_irqsave(&cs->lock, flags);
for (i = 0; i < cs->channels; ++i)
--cs->bcs[i].use_count;
@@ -287,7 +287,7 @@ void gigaset_block_channels(struct cardstate *cs)
unsigned long flags;
int i;
- gig_dbg(DEBUG_ANY, "blocking all channels");
+ gig_dbg(DEBUG_CHANNEL, "blocking all channels");
spin_lock_irqsave(&cs->lock, flags);
for (i = 0; i < cs->channels; ++i)
++cs->bcs[i].use_count;
@@ -338,6 +338,8 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
unsigned next, tail;
struct event_t *event = NULL;
+ gig_dbg(DEBUG_EVENT, "queueing event %d", type);
+
spin_lock_irqsave(&cs->ev_lock, flags);
tail = cs->ev_tail;
@@ -934,11 +936,8 @@ int gigaset_start(struct cardstate *cs)
if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
cs->waiting = 0;
- dev_err(cs->dev, "%s: out of memory\n", __func__);
goto error;
}
-
- gig_dbg(DEBUG_CMD, "scheduling START");
gigaset_schedule_event(cs);
wait_event(cs->waitqueue, !cs->waiting);
@@ -973,12 +972,8 @@ int gigaset_shutdown(struct cardstate *cs)
cs->waiting = 1;
- if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
+ if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL))
goto exit;
- }
-
- gig_dbg(DEBUG_CMD, "scheduling SHUTDOWN");
gigaset_schedule_event(cs);
wait_event(cs->waitqueue, !cs->waiting);
@@ -1004,12 +999,8 @@ void gigaset_stop(struct cardstate *cs)
cs->waiting = 1;
- if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) {
- dev_err(cs->dev, "%s: out of memory\n", __func__);
+ if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL))
goto exit;
- }
-
- gig_dbg(DEBUG_CMD, "scheduling STOP");
gigaset_schedule_event(cs);
wait_event(cs->waitqueue, !cs->waiting);
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index ddeb0456d20..c8f89b78b23 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -427,7 +427,7 @@ static int isdn_getnum(char *p)
{
int v = -1;
- gig_dbg(DEBUG_TRANSCMD, "string: %s", p);
+ gig_dbg(DEBUG_EVENT, "string: %s", p);
while (*p >= '0' && *p <= '9')
v = ((v < 0) ? 0 : (v * 10)) + (int) ((*p++) - '0');
@@ -444,7 +444,7 @@ static int isdn_gethex(char *p)
int v = 0;
int c;
- gig_dbg(DEBUG_TRANSCMD, "string: %s", p);
+ gig_dbg(DEBUG_EVENT, "string: %s", p);
if (!*p)
return -1;
@@ -517,7 +517,6 @@ void gigaset_handle_modem_response(struct cardstate *cs)
return;
}
cs->respdata[len] = 0;
- gig_dbg(DEBUG_TRANSCMD, "raw string: '%s'", cs->respdata);
argv[0] = cs->respdata;
params = 1;
if (cs->at_state.getstring) {
@@ -552,14 +551,14 @@ void gigaset_handle_modem_response(struct cardstate *cs)
for (j = 1; j < params; ++j)
argv[j][-1] = 0;
- gig_dbg(DEBUG_TRANSCMD, "CMD received: %s", argv[0]);
+ gig_dbg(DEBUG_EVENT, "CMD received: %s", argv[0]);
if (cid) {
--params;
- gig_dbg(DEBUG_TRANSCMD, "CID: %s", argv[params]);
+ gig_dbg(DEBUG_EVENT, "CID: %s", argv[params]);
}
- gig_dbg(DEBUG_TRANSCMD, "available params: %d", params - 1);
+ gig_dbg(DEBUG_EVENT, "available params: %d", params - 1);
for (j = 1; j < params; j++)
- gig_dbg(DEBUG_TRANSCMD, "param %d: %s", j, argv[j]);
+ gig_dbg(DEBUG_EVENT, "param %d: %s", j, argv[j]);
}
spin_lock_irqsave(&cs->ev_lock, flags);
@@ -642,7 +641,7 @@ void gigaset_handle_modem_response(struct cardstate *cs)
dev_err(cs->dev, "out of memory\n");
++curarg;
}
- gig_dbg(DEBUG_CMD, "string==%s",
+ gig_dbg(DEBUG_EVENT, "string==%s",
event->ptr ? (char *) event->ptr : "NULL");
break;
case RT_ZCAU:
@@ -669,7 +668,7 @@ void gigaset_handle_modem_response(struct cardstate *cs)
++curarg;
} else
event->parameter = -1;
- gig_dbg(DEBUG_CMD, "parameter==%d", event->parameter);
+ gig_dbg(DEBUG_EVENT, "parameter==%d", event->parameter);
break;
}
@@ -684,7 +683,7 @@ void gigaset_handle_modem_response(struct cardstate *cs)
spin_unlock_irqrestore(&cs->ev_lock, flags);
if (curarg != params)
- gig_dbg(DEBUG_ANY,
+ gig_dbg(DEBUG_EVENT,
"invalid number of processed parameters: %d/%d",
curarg, params);
}
@@ -705,8 +704,8 @@ static void disconnect(struct at_state_t **at_state_p)
/* revert to selected idle mode */
if (!cs->cidmode) {
cs->at_state.pending_commands |= PC_UMMODE;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
cs->commands_pending = 1;
- gig_dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
}
spin_unlock_irqrestore(&cs->lock, flags);
@@ -784,15 +783,15 @@ static void init_failed(struct cardstate *cs, int mode)
static void schedule_init(struct cardstate *cs, int state)
{
if (cs->at_state.pending_commands & PC_INIT) {
- gig_dbg(DEBUG_CMD, "not scheduling PC_INIT again");
+ gig_dbg(DEBUG_EVENT, "not scheduling PC_INIT again");
return;
}
cs->mstate = state;
cs->mode = M_UNKNOWN;
gigaset_block_channels(cs);
cs->at_state.pending_commands |= PC_INIT;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_INIT");
cs->commands_pending = 1;
- gig_dbg(DEBUG_CMD, "Scheduling PC_INIT");
}
/* Add "AT" to a command, add the cid, dle encode it, send the result to the
@@ -923,7 +922,7 @@ static void start_dial(struct at_state_t *at_state, void *data,
}
at_state->pending_commands |= PC_CID;
- gig_dbg(DEBUG_CMD, "Scheduling PC_CID");
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_CID");
cs->commands_pending = 1;
return;
@@ -933,7 +932,7 @@ error:
commands[i] = NULL;
}
at_state->pending_commands |= PC_NOCID;
- gig_dbg(DEBUG_CMD, "Scheduling PC_NOCID");
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_NOCID");
cs->commands_pending = 1;
return;
}
@@ -955,7 +954,7 @@ static void start_accept(struct at_state_t *at_state)
dev_err(at_state->cs->dev, "out of memory\n");
/* error reset */
at_state->pending_commands |= PC_HUP;
- gig_dbg(DEBUG_CMD, "Scheduling PC_HUP");
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
cs->commands_pending = 1;
return;
}
@@ -964,7 +963,7 @@ static void start_accept(struct at_state_t *at_state)
snprintf(bcs->commands[AT_ISO], 9, "^SISO=%u\r", bcs->channel + 1);
at_state->pending_commands |= PC_ACCEPT;
- gig_dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_ACCEPT");
cs->commands_pending = 1;
}
@@ -1009,8 +1008,8 @@ static void do_shutdown(struct cardstate *cs)
if (cs->mstate == MS_READY) {
cs->mstate = MS_SHUTDOWN;
cs->at_state.pending_commands |= PC_SHUTDOWN;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_SHUTDOWN");
cs->commands_pending = 1;
- gig_dbg(DEBUG_CMD, "Scheduling PC_SHUTDOWN");
} else
finish_shutdown(cs);
}
@@ -1191,8 +1190,8 @@ static void do_action(int action, struct cardstate *cs,
}
spin_unlock_irqrestore(&cs->lock, flags);
cs->at_state.pending_commands |= PC_CIDMODE;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
cs->commands_pending = 1;
- gig_dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
break;
case ACT_FAILINIT:
dev_warn(cs->dev, "Could not initialize the device.\n");
@@ -1443,7 +1442,7 @@ static void do_action(int action, struct cardstate *cs,
case ACT_GOTVER:
if (cs->gotfwver == 0) {
cs->gotfwver = 1;
- gig_dbg(DEBUG_ANY,
+ gig_dbg(DEBUG_EVENT,
"firmware version %02d.%03d.%02d.%02d",
cs->fwver[0], cs->fwver[1],
cs->fwver[2], cs->fwver[3]);
@@ -1481,8 +1480,8 @@ static void do_action(int action, struct cardstate *cs,
break;
case ACT_HUP:
at_state->pending_commands |= PC_HUP;
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_HUP");
cs->commands_pending = 1;
- gig_dbg(DEBUG_CMD, "Scheduling PC_HUP");
break;
/* hotplug events */
@@ -1519,10 +1518,10 @@ static void do_action(int action, struct cardstate *cs,
cs->cidmode = ev->parameter;
if (ev->parameter) {
cs->at_state.pending_commands |= PC_CIDMODE;
- gig_dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
} else {
cs->at_state.pending_commands |= PC_UMMODE;
- gig_dbg(DEBUG_CMD, "Scheduling PC_UMMODE");
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_UMMODE");
}
cs->commands_pending = 1;
}
@@ -1573,6 +1572,8 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
if (ev->cid >= 0) {
at_state = at_state_from_cid(cs, ev->cid);
if (!at_state) {
+ gig_dbg(DEBUG_EVENT, "event %d for invalid cid %d",
+ ev->type, ev->cid);
gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID,
NULL, 0, NULL);
return;
@@ -1580,13 +1581,13 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
} else {
at_state = ev->at_state;
if (at_state_invalid(cs, at_state)) {
- gig_dbg(DEBUG_ANY, "event for invalid at_state %p",
+ gig_dbg(DEBUG_EVENT, "event for invalid at_state %p",
at_state);
return;
}
}
- gig_dbg(DEBUG_CMD, "connection state %d, event %d",
+ gig_dbg(DEBUG_EVENT, "connection state %d, event %d",
at_state->ConState, ev->type);
bcs = at_state->bcs;
@@ -1600,11 +1601,11 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
if (ev->parameter != at_state->timer_index
|| !at_state->timer_active) {
ev->type = RSP_NONE; /* old timeout */
- gig_dbg(DEBUG_ANY, "old timeout");
+ gig_dbg(DEBUG_EVENT, "old timeout");
} else if (!at_state->waiting)
- gig_dbg(DEBUG_ANY, "timeout occurred");
+ gig_dbg(DEBUG_EVENT, "timeout occurred");
else
- gig_dbg(DEBUG_ANY, "stopped waiting");
+ gig_dbg(DEBUG_EVENT, "stopped waiting");
}
spin_unlock_irqrestore(&cs->lock, flags);
@@ -1712,11 +1713,11 @@ static void process_command_flags(struct cardstate *cs)
cs->commands_pending = 0;
if (cs->cur_at_seq) {
- gig_dbg(DEBUG_CMD, "not searching scheduled commands: busy");
+ gig_dbg(DEBUG_EVENT, "not searching scheduled commands: busy");
return;
}
- gig_dbg(DEBUG_CMD, "searching scheduled commands");
+ gig_dbg(DEBUG_EVENT, "searching scheduled commands");
sequence = SEQ_NONE;
@@ -1857,7 +1858,7 @@ static void process_command_flags(struct cardstate *cs)
switch (cs->mode) {
case M_UNIMODEM:
cs->at_state.pending_commands |= PC_CIDMODE;
- gig_dbg(DEBUG_CMD, "Scheduling PC_CIDMODE");
+ gig_dbg(DEBUG_EVENT, "Scheduling PC_CIDMODE");
cs->commands_pending = 1;
return;
#ifdef GIG_MAYINITONDIAL
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index e963a6c2e86..1875ab80b33 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -38,7 +38,7 @@
#define GIG_COMPAT {0, 4, 0, 0}
#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
-#define MAX_RESP_SIZE 512 /* Max. size of a response string */
+#define MAX_RESP_SIZE 511 /* Max. size of a response string */
#define MAX_EVENTS 64 /* size of event queue */
@@ -78,9 +78,10 @@ enum debuglevel {
DEBUG_STREAM = 0x00040, /* application data stream I/O events */
DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */
DEBUG_LLDATA = 0x00100, /* sent/received LL data */
+ DEBUG_EVENT = 0x00200, /* event processing */
DEBUG_DRIVER = 0x00400, /* driver structure */
DEBUG_HDLC = 0x00800, /* M10x HDLC processing */
- DEBUG_WRITE = 0x01000, /* M105 data write */
+ DEBUG_CHANNEL = 0x01000, /* channel allocation/deallocation */
DEBUG_TRANSCMD = 0x02000, /* AT-COMMANDS+RESPONSES */
DEBUG_MCMD = 0x04000, /* COMMANDS THAT ARE SENT VERY OFTEN */
DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data
@@ -498,7 +499,7 @@ struct cardstate {
spinlock_t ev_lock;
/* current modem response */
- unsigned char respdata[MAX_RESP_SIZE];
+ unsigned char respdata[MAX_RESP_SIZE+1];
unsigned cbytes;
/* private data of hardware drivers */
@@ -785,8 +786,6 @@ static inline void gigaset_schedule_event(struct cardstate *cs)
static inline void gigaset_bchannel_down(struct bc_state *bcs)
{
gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL);
-
- gig_dbg(DEBUG_CMD, "scheduling BC_CLOSED");
gigaset_schedule_event(bcs->cs);
}
@@ -795,8 +794,6 @@ static inline void gigaset_bchannel_down(struct bc_state *bcs)
static inline void gigaset_bchannel_up(struct bc_state *bcs)
{
gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL);
-
- gig_dbg(DEBUG_CMD, "scheduling BC_OPEN");
gigaset_schedule_event(bcs->cs);
}
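The gigaset.h hunk above repurposes two bits of the debuglevel bitmask (DEBUG_EVENT takes a previously unused value, DEBUG_CHANNEL replaces DEBUG_WRITE), which is why so many gig_dbg() call sites change category throughout this patch. As a rough sketch of how such a category mask is typically consumed (hypothetical demo_* names only, not the actual gig_dbg() implementation):

#include <linux/kernel.h>
#include <linux/module.h>

enum demo_debuglevel {
	DEMO_DEBUG_EVENT   = 0x0200,	/* event processing */
	DEMO_DEBUG_CHANNEL = 0x1000,	/* channel allocation/deallocation */
};

static unsigned int demo_debug;		/* bitmask of enabled categories */
module_param(demo_debug, uint, S_IRUGO | S_IWUSR);

#define demo_dbg(level, fmt, ...)					\
	do {								\
		if (demo_debug & (level))				\
			printk(KERN_DEBUG "demo: " fmt "\n",		\
			       ##__VA_ARGS__);				\
	} while (0)

/* usage: demo_dbg(DEMO_DEBUG_CHANNEL, "allocated channel %d", ch); */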
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index c129ee47a8f..f0acb9dc9e3 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -216,7 +216,7 @@ static int command_from_LL(isdn_ctrl *cntrl)
return -EINVAL;
case ISDN_CMD_DIAL:
- gig_dbg(DEBUG_ANY,
+ gig_dbg(DEBUG_CMD,
"ISDN_CMD_DIAL (phone: %s, msn: %s, si1: %d, si2: %d)",
cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn,
cntrl->parm.setup.si1, cntrl->parm.setup.si2);
@@ -304,11 +304,10 @@ static int command_from_LL(isdn_ctrl *cntrl)
gigaset_free_channel(bcs);
return -ENOMEM;
}
-
- gig_dbg(DEBUG_CMD, "scheduling DIAL");
gigaset_schedule_event(cs);
break;
case ISDN_CMD_ACCEPTD:
+ gig_dbg(DEBUG_CMD, "ISDN_CMD_ACCEPTD");
if (ch >= cs->channels) {
dev_err(cs->dev,
"ISDN_CMD_ACCEPTD: invalid channel (%d)\n", ch);
@@ -318,14 +317,11 @@ static int command_from_LL(isdn_ctrl *cntrl)
if (!gigaset_add_event(cs, &bcs->at_state,
EV_ACCEPT, NULL, 0, NULL))
return -ENOMEM;
-
- gig_dbg(DEBUG_CMD, "scheduling ACCEPT");
gigaset_schedule_event(cs);
break;
- case ISDN_CMD_ACCEPTB:
- break;
case ISDN_CMD_HANGUP:
+ gig_dbg(DEBUG_CMD, "ISDN_CMD_HANGUP");
if (ch >= cs->channels) {
dev_err(cs->dev,
"ISDN_CMD_HANGUP: invalid channel (%d)\n", ch);
@@ -335,8 +331,6 @@ static int command_from_LL(isdn_ctrl *cntrl)
if (!gigaset_add_event(cs, &bcs->at_state,
EV_HUP, NULL, 0, NULL))
return -ENOMEM;
-
- gig_dbg(DEBUG_CMD, "scheduling HUP");
gigaset_schedule_event(cs);
break;
@@ -376,6 +370,7 @@ static int command_from_LL(isdn_ctrl *cntrl)
}
break;
case ISDN_CMD_SETL3: /* Set L3 to given protocol */
+ gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL3");
if (ch >= cs->channels) {
dev_err(cs->dev,
"ISDN_CMD_SETL3: invalid channel (%d)\n", ch);
@@ -390,44 +385,9 @@ static int command_from_LL(isdn_ctrl *cntrl)
}
break;
- case ISDN_CMD_PROCEED:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED");
- break;
- case ISDN_CMD_ALERT:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT");
- if (cntrl->arg >= cs->channels) {
- dev_err(cs->dev,
- "ISDN_CMD_ALERT: invalid channel (%d)\n",
- (int) cntrl->arg);
- return -EINVAL;
- }
- break;
- case ISDN_CMD_REDIR:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR");
- break;
- case ISDN_CMD_PROT_IO:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO");
- break;
- case ISDN_CMD_FAXCMD:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_FAXCMD");
- break;
- case ISDN_CMD_GETL2:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_GETL2");
- break;
- case ISDN_CMD_GETL3:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_GETL3");
- break;
- case ISDN_CMD_GETEAZ:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_GETEAZ");
- break;
- case ISDN_CMD_SETSIL:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_SETSIL");
- break;
- case ISDN_CMD_GETSIL:
- gig_dbg(DEBUG_ANY, "ISDN_CMD_GETSIL");
- break;
+
default:
- dev_err(cs->dev, "unknown command %d from LL\n",
+ gig_dbg(DEBUG_CMD, "unknown command %d from LL",
cntrl->command);
return -EINVAL;
}
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index d2260b0055f..a1bcbc21ff7 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -45,8 +45,6 @@ static int if_lock(struct cardstate *cs, int *arg)
cs->waiting = 0;
return -ENOMEM;
}
-
- gig_dbg(DEBUG_CMD, "scheduling IF_LOCK");
gigaset_schedule_event(cs);
wait_event(cs->waitqueue, !cs->waiting);
@@ -81,8 +79,6 @@ static int if_version(struct cardstate *cs, unsigned arg[4])
cs->waiting = 0;
return -ENOMEM;
}
-
- gig_dbg(DEBUG_CMD, "scheduling IF_VER");
gigaset_schedule_event(cs);
wait_event(cs->waitqueue, !cs->waiting);
@@ -274,7 +270,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
? -EFAULT : 0;
break;
default:
- gig_dbg(DEBUG_ANY, "%s: arg not supported - 0x%04x",
+ gig_dbg(DEBUG_IF, "%s: arg not supported - 0x%04x",
__func__, cmd);
retval = -ENOIOCTLCMD;
}
@@ -455,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
else if (!cs->open_count)
dev_warn(cs->dev, "%s: device not opened\n", __func__);
else
- gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
+ gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
mutex_unlock(&cs->mutex);
}
@@ -479,7 +475,7 @@ static void if_unthrottle(struct tty_struct *tty)
else if (!cs->open_count)
dev_warn(cs->dev, "%s: device not opened\n", __func__);
else
- gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
+ gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
mutex_unlock(&cs->mutex);
}
@@ -630,7 +626,7 @@ void gigaset_if_receive(struct cardstate *cs,
spin_lock_irqsave(&cs->lock, flags);
tty = cs->tty;
if (tty == NULL)
- gig_dbg(DEBUG_ANY, "receive on closed device");
+ gig_dbg(DEBUG_IF, "receive on closed device");
else {
tty_buffer_request_room(tty, len);
tty_insert_flip_string(tty, buffer, len);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 85394a6ebae..16fd3bd4888 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -905,29 +905,49 @@ void gigaset_isoc_receive(unsigned char *src, unsigned count,
/* == data input =========================================================== */
+/* process a block of received bytes in command mode (mstate != MS_LOCKED)
+ * Append received bytes to the command response buffer and forward them
+ * line by line to the response handler.
+ * Note: Received lines may be terminated by CR, LF, or CR LF, which will be
+ * removed before passing the line to the response handler.
+ */
static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf)
{
struct cardstate *cs = inbuf->cs;
unsigned cbytes = cs->cbytes;
+ unsigned char c;
while (numbytes--) {
- /* copy next character, check for end of line */
- switch (cs->respdata[cbytes] = *src++) {
- case '\r':
+ c = *src++;
+ switch (c) {
case '\n':
- /* end of line */
- gig_dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)",
- __func__, cbytes);
- if (cbytes >= MAX_RESP_SIZE - 1)
- dev_warn(cs->dev, "response too large\n");
+ if (cbytes == 0 && cs->respdata[0] == '\r') {
+ /* collapse LF with preceding CR */
+ cs->respdata[0] = 0;
+ break;
+ }
+ /* --v-- fall through --v-- */
+ case '\r':
+ /* end of message line, pass to response handler */
+ if (cbytes >= MAX_RESP_SIZE) {
+ dev_warn(cs->dev, "response too large (%d)\n",
+ cbytes);
+ cbytes = MAX_RESP_SIZE;
+ }
cs->cbytes = cbytes;
+ gigaset_dbg_buffer(DEBUG_TRANSCMD, "received response",
+ cbytes, cs->respdata);
gigaset_handle_modem_response(cs);
cbytes = 0;
+
+ /* store EOL byte for CRLF collapsing */
+ cs->respdata[0] = c;
break;
default:
- /* advance in line buffer, checking for overflow */
- if (cbytes < MAX_RESP_SIZE - 1)
- cbytes++;
+ /* append to line buffer if possible */
+ if (cbytes < MAX_RESP_SIZE)
+ cs->respdata[cbytes] = c;
+ cbytes++;
}
}
@@ -958,8 +978,6 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
numbytes, src);
gigaset_if_receive(inbuf->cs, src, numbytes);
} else {
- gigaset_dbg_buffer(DEBUG_CMD, "received response",
- numbytes, src);
cmd_loop(src, numbytes, inbuf);
}
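The rewritten cmd_loop() above treats CR, LF and CR LF each as a single line terminator: an LF arriving at the start of a line whose previous terminator was CR is swallowed, and lines longer than MAX_RESP_SIZE are clamped rather than silently losing the end-of-line handling. A small user-space sketch of the same idea (hypothetical names; MAX_LINE stands in for MAX_RESP_SIZE):

#include <stdio.h>
#include <string.h>

#define MAX_LINE 16

static char line[MAX_LINE + 1];
static unsigned int nbytes;
static char last_eol;			/* terminator of the previous line */

static void feed(const char *src, size_t count)
{
	while (count--) {
		char c = *src++;

		switch (c) {
		case '\n':
			if (nbytes == 0 && last_eol == '\r') {
				/* LF directly after CR: same line ending */
				last_eol = 0;
				break;
			}
			/* fall through */
		case '\r':
			if (nbytes > MAX_LINE)
				nbytes = MAX_LINE;	/* clamp overlong line */
			line[nbytes] = '\0';
			printf("line: \"%s\"\n", line);
			nbytes = 0;
			last_eol = c;
			break;
		default:
			if (nbytes < MAX_LINE)
				line[nbytes] = c;
			nbytes++;
		}
	}
}

int main(void)
{
	const char *chunk = "OK\r\nRING\rCONNECT 64000\n";

	feed(chunk, strlen(chunk));	/* prints OK, RING, CONNECT 64000 */
	return 0;
}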
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index 758a00c1d2e..b69f73a0668 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -48,8 +48,6 @@ static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
mutex_unlock(&cs->mutex);
return -ENOMEM;
}
-
- gig_dbg(DEBUG_CMD, "scheduling PROC_CIDMODE");
gigaset_schedule_event(cs);
wait_event(cs->waitqueue, !cs->waiting);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 3ab1daeb276..9430a2bbb52 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -628,7 +628,7 @@ static int write_modem(struct cardstate *cs)
struct usb_cardstate *ucs = cs->hw.usb;
unsigned long flags;
- gig_dbg(DEBUG_WRITE, "len: %d...", bcs->tx_skb->len);
+ gig_dbg(DEBUG_OUTPUT, "len: %d...", bcs->tx_skb->len);
if (!bcs->tx_skb->len) {
dev_kfree_skb_any(bcs->tx_skb);
diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/isdn/hardware/avm/avmcard.h
index d964f07e4a5..a70e8854461 100644
--- a/drivers/isdn/hardware/avm/avmcard.h
+++ b/drivers/isdn/hardware/avm/avmcard.h
@@ -556,8 +556,7 @@ u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
void b1_parse_version(avmctrl_info *card);
irqreturn_t b1_interrupt(int interrupt, void *devptr);
-int b1ctl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl);
+extern const struct file_operations b1ctl_proc_fops;
avmcard_dmainfo *avmcard_dma_alloc(char *name, struct pci_dev *,
long rsize, long ssize);
@@ -577,7 +576,6 @@ void b1dma_register_appl(struct capi_ctr *ctrl,
capi_register_params *rp);
void b1dma_release_appl(struct capi_ctr *ctrl, u16 appl);
u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
-int b1dmactl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl);
+extern const struct file_operations b1dmactl_proc_fops;
#endif /* _AVMCARD_H_ */
diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
index a7c0083e78a..c38fa0f4c72 100644
--- a/drivers/isdn/hardware/avm/b1.c
+++ b/drivers/isdn/hardware/avm/b1.c
@@ -12,6 +12,8 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -634,18 +636,17 @@ irqreturn_t b1_interrupt(int interrupt, void *devptr)
}
/* ------------------------------------------------------------- */
-int b1ctl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int b1ctl_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
avmcard *card = cinfo->card;
u8 flag;
- int len = 0;
char *s;
- len += sprintf(page+len, "%-16s %s\n", "name", card->name);
- len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port);
- len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
+ seq_printf(m, "%-16s %s\n", "name", card->name);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->port);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
switch (card->cardtype) {
case avm_b1isa: s = "B1 ISA"; break;
case avm_b1pci: s = "B1 PCI"; break;
@@ -658,20 +659,20 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
case avm_c2: s = "C2"; break;
default: s = "???"; break;
}
- len += sprintf(page+len, "%-16s %s\n", "type", s);
+ seq_printf(m, "%-16s %s\n", "type", s);
if (card->cardtype == avm_t1isa)
- len += sprintf(page+len, "%-16s %d\n", "cardnr", card->cardnr);
+ seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
if ((s = cinfo->version[VER_DRIVER]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
if ((s = cinfo->version[VER_SERIAL]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[3];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
"protocol",
(flag & 0x01) ? " DSS1" : "",
(flag & 0x02) ? " CT1" : "",
@@ -685,7 +686,7 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[5];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s\n",
"linetype",
(flag & 0x01) ? " point to point" : "",
(flag & 0x02) ? " point to multipoint" : "",
@@ -693,16 +694,25 @@ int b1ctl_read_proc(char *page, char **start, off_t off,
(flag & 0x04) ? " leased line with D-channel" : ""
);
}
- len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname);
-
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
+
+ return 0;
+}
+
+static int b1ctl_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, b1ctl_proc_show, PDE(inode)->data);
}
+const struct file_operations b1ctl_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = b1ctl_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+EXPORT_SYMBOL(b1ctl_proc_fops);
+
/* ------------------------------------------------------------- */
#ifdef CONFIG_PCI
@@ -781,8 +791,6 @@ EXPORT_SYMBOL(b1_send_message);
EXPORT_SYMBOL(b1_parse_version);
EXPORT_SYMBOL(b1_interrupt);
-EXPORT_SYMBOL(b1ctl_read_proc);
-
static int __init b1_init(void)
{
char *p;
diff --git a/drivers/isdn/hardware/avm/b1dma.c b/drivers/isdn/hardware/avm/b1dma.c
index 0e84aaae43f..124550d0dbf 100644
--- a/drivers/isdn/hardware/avm/b1dma.c
+++ b/drivers/isdn/hardware/avm/b1dma.c
@@ -11,6 +11,8 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -855,21 +857,20 @@ u16 b1dma_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
/* ------------------------------------------------------------- */
-int b1dmactl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int b1dmactl_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
avmcard *card = cinfo->card;
u8 flag;
- int len = 0;
char *s;
u32 txoff, txlen, rxoff, rxlen, csr;
unsigned long flags;
- len += sprintf(page+len, "%-16s %s\n", "name", card->name);
- len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port);
- len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
- len += sprintf(page+len, "%-16s 0x%lx\n", "membase", card->membase);
+ seq_printf(m, "%-16s %s\n", "name", card->name);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->port);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
+ seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
switch (card->cardtype) {
case avm_b1isa: s = "B1 ISA"; break;
case avm_b1pci: s = "B1 PCI"; break;
@@ -882,18 +883,18 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
case avm_c2: s = "C2"; break;
default: s = "???"; break;
}
- len += sprintf(page+len, "%-16s %s\n", "type", s);
+ seq_printf(m, "%-16s %s\n", "type", s);
if ((s = cinfo->version[VER_DRIVER]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
if ((s = cinfo->version[VER_SERIAL]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[3];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
"protocol",
(flag & 0x01) ? " DSS1" : "",
(flag & 0x02) ? " CT1" : "",
@@ -907,7 +908,7 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[5];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s\n",
"linetype",
(flag & 0x01) ? " point to point" : "",
(flag & 0x02) ? " point to multipoint" : "",
@@ -915,7 +916,7 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
(flag & 0x04) ? " leased line with D-channel" : ""
);
}
- len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname);
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
spin_lock_irqsave(&card->lock, flags);
@@ -930,27 +931,30 @@ int b1dmactl_read_proc(char *page, char **start, off_t off,
spin_unlock_irqrestore(&card->lock, flags);
- len += sprintf(page+len, "%-16s 0x%lx\n",
- "csr (cached)", (unsigned long)card->csr);
- len += sprintf(page+len, "%-16s 0x%lx\n",
- "csr", (unsigned long)csr);
- len += sprintf(page+len, "%-16s %lu\n",
- "txoff", (unsigned long)txoff);
- len += sprintf(page+len, "%-16s %lu\n",
- "txlen", (unsigned long)txlen);
- len += sprintf(page+len, "%-16s %lu\n",
- "rxoff", (unsigned long)rxoff);
- len += sprintf(page+len, "%-16s %lu\n",
- "rxlen", (unsigned long)rxlen);
-
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ seq_printf(m, "%-16s 0x%lx\n", "csr (cached)", (unsigned long)card->csr);
+ seq_printf(m, "%-16s 0x%lx\n", "csr", (unsigned long)csr);
+ seq_printf(m, "%-16s %lu\n", "txoff", (unsigned long)txoff);
+ seq_printf(m, "%-16s %lu\n", "txlen", (unsigned long)txlen);
+ seq_printf(m, "%-16s %lu\n", "rxoff", (unsigned long)rxoff);
+ seq_printf(m, "%-16s %lu\n", "rxlen", (unsigned long)rxlen);
+
+ return 0;
+}
+
+static int b1dmactl_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, b1dmactl_proc_show, PDE(inode)->data);
}
+const struct file_operations b1dmactl_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = b1dmactl_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+EXPORT_SYMBOL(b1dmactl_proc_fops);
+
/* ------------------------------------------------------------- */
EXPORT_SYMBOL(b1dma_reset);
@@ -963,7 +967,6 @@ EXPORT_SYMBOL(b1dma_reset_ctr);
EXPORT_SYMBOL(b1dma_register_appl);
EXPORT_SYMBOL(b1dma_release_appl);
EXPORT_SYMBOL(b1dma_send_message);
-EXPORT_SYMBOL(b1dmactl_read_proc);
static int __init b1dma_init(void)
{
diff --git a/drivers/isdn/hardware/avm/b1isa.c b/drivers/isdn/hardware/avm/b1isa.c
index 6461a32bc83..ff5390546f9 100644
--- a/drivers/isdn/hardware/avm/b1isa.c
+++ b/drivers/isdn/hardware/avm/b1isa.c
@@ -121,7 +121,7 @@ static int b1isa_probe(struct pci_dev *pdev)
cinfo->capi_ctrl.load_firmware = b1_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
cinfo->capi_ctrl.procinfo = b1isa_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/b1pci.c b/drivers/isdn/hardware/avm/b1pci.c
index 5b314a2c404..c97e4315079 100644
--- a/drivers/isdn/hardware/avm/b1pci.c
+++ b/drivers/isdn/hardware/avm/b1pci.c
@@ -112,7 +112,7 @@ static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
cinfo->capi_ctrl.load_firmware = b1_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pci_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
cinfo->capi_ctrl.owner = THIS_MODULE;
@@ -251,7 +251,7 @@ static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pciv4_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1dmactl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/b1pcmcia.c b/drivers/isdn/hardware/avm/b1pcmcia.c
index 7740403b40e..d6391e0afee 100644
--- a/drivers/isdn/hardware/avm/b1pcmcia.c
+++ b/drivers/isdn/hardware/avm/b1pcmcia.c
@@ -108,7 +108,7 @@ static int b1pcmcia_add_card(unsigned int port, unsigned irq,
cinfo->capi_ctrl.load_firmware = b1_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1_reset_ctr;
cinfo->capi_ctrl.procinfo = b1pcmcia_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c
index 6833301a45f..de6e6b31181 100644
--- a/drivers/isdn/hardware/avm/c4.c
+++ b/drivers/isdn/hardware/avm/c4.c
@@ -11,6 +11,8 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
@@ -1062,19 +1064,18 @@ static char *c4_procinfo(struct capi_ctr *ctrl)
return cinfo->infobuf;
}
-static int c4_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int c4_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
avmcard *card = cinfo->card;
u8 flag;
- int len = 0;
char *s;
- len += sprintf(page+len, "%-16s %s\n", "name", card->name);
- len += sprintf(page+len, "%-16s 0x%x\n", "io", card->port);
- len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
- len += sprintf(page+len, "%-16s 0x%lx\n", "membase", card->membase);
+ seq_printf(m, "%-16s %s\n", "name", card->name);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->port);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
+ seq_printf(m, "%-16s 0x%lx\n", "membase", card->membase);
switch (card->cardtype) {
case avm_b1isa: s = "B1 ISA"; break;
case avm_b1pci: s = "B1 PCI"; break;
@@ -1087,18 +1088,18 @@ static int c4_read_proc(char *page, char **start, off_t off,
case avm_c2: s = "C2"; break;
default: s = "???"; break;
}
- len += sprintf(page+len, "%-16s %s\n", "type", s);
+ seq_printf(m, "%-16s %s\n", "type", s);
if ((s = cinfo->version[VER_DRIVER]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
if ((s = cinfo->version[VER_SERIAL]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[3];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
"protocol",
(flag & 0x01) ? " DSS1" : "",
(flag & 0x02) ? " CT1" : "",
@@ -1112,7 +1113,7 @@ static int c4_read_proc(char *page, char **start, off_t off,
if (card->cardtype != avm_m1) {
flag = ((u8 *)(ctrl->profile.manu))[5];
if (flag)
- len += sprintf(page+len, "%-16s%s%s%s%s\n",
+ seq_printf(m, "%-16s%s%s%s%s\n",
"linetype",
(flag & 0x01) ? " point to point" : "",
(flag & 0x02) ? " point to multipoint" : "",
@@ -1120,16 +1121,24 @@ static int c4_read_proc(char *page, char **start, off_t off,
(flag & 0x04) ? " leased line with D-channel" : ""
);
}
- len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname);
-
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
+
+ return 0;
}
+static int c4_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, c4_proc_show, PDE(inode)->data);
+}
+
+static const struct file_operations c4_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = c4_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/* ------------------------------------------------------------- */
static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
@@ -1201,7 +1210,7 @@ static int c4_add_card(struct capicardparams *p, struct pci_dev *dev,
cinfo->capi_ctrl.load_firmware = c4_load_firmware;
cinfo->capi_ctrl.reset_ctr = c4_reset_ctr;
cinfo->capi_ctrl.procinfo = c4_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = c4_read_proc;
+ cinfo->capi_ctrl.proc_fops = &c4_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/t1isa.c b/drivers/isdn/hardware/avm/t1isa.c
index 1c53fd49adb..baeeb3c2a3e 100644
--- a/drivers/isdn/hardware/avm/t1isa.c
+++ b/drivers/isdn/hardware/avm/t1isa.c
@@ -429,7 +429,7 @@ static int t1isa_probe(struct pci_dev *pdev, int cardnr)
cinfo->capi_ctrl.load_firmware = t1isa_load_firmware;
cinfo->capi_ctrl.reset_ctr = t1isa_reset_ctr;
cinfo->capi_ctrl.procinfo = t1isa_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1ctl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/avm/t1pci.c b/drivers/isdn/hardware/avm/t1pci.c
index e6d298d7514..5a3f8309801 100644
--- a/drivers/isdn/hardware/avm/t1pci.c
+++ b/drivers/isdn/hardware/avm/t1pci.c
@@ -119,7 +119,7 @@ static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
cinfo->capi_ctrl.reset_ctr = b1dma_reset_ctr;
cinfo->capi_ctrl.procinfo = t1pci_procinfo;
- cinfo->capi_ctrl.ctr_read_proc = b1dmactl_read_proc;
+ cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
strcpy(cinfo->capi_ctrl.name, card->name);
retval = attach_capi_ctr(&cinfo->capi_ctrl);
diff --git a/drivers/isdn/hardware/eicon/capimain.c b/drivers/isdn/hardware/eicon/capimain.c
index 98fcdfc7ca5..0f073cd7376 100644
--- a/drivers/isdn/hardware/eicon/capimain.c
+++ b/drivers/isdn/hardware/eicon/capimain.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <asm/uaccess.h>
+#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include "os_capi.h"
@@ -75,25 +76,32 @@ void diva_os_free_message_buffer(diva_os_message_buffer_s * dmb)
/*
* proc function for controller info
*/
-static int diva_ctl_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int diva_ctl_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
diva_card *card = (diva_card *) ctrl->driverdata;
- int len = 0;
-
- len += sprintf(page + len, "%s\n", ctrl->name);
- len += sprintf(page + len, "Serial No. : %s\n", ctrl->serial);
- len += sprintf(page + len, "Id : %d\n", card->Id);
- len += sprintf(page + len, "Channels : %d\n", card->d.channels);
-
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+
+ seq_printf(m, "%s\n", ctrl->name);
+ seq_printf(m, "Serial No. : %s\n", ctrl->serial);
+ seq_printf(m, "Id : %d\n", card->Id);
+ seq_printf(m, "Channels : %d\n", card->d.channels);
+
+ return 0;
+}
+
+static int diva_ctl_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, diva_ctl_proc_show, NULL);
}
+static const struct file_operations diva_ctl_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = diva_ctl_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/*
* set additional os settings in capi_ctr struct
*/
@@ -102,7 +110,7 @@ void diva_os_set_controller_struct(struct capi_ctr *ctrl)
ctrl->driver_name = DRIVERLNAME;
ctrl->load_firmware = NULL;
ctrl->reset_ctr = NULL;
- ctrl->ctr_read_proc = diva_ctl_read_proc;
+ ctrl->proc_fops = &diva_ctl_proc_fops;
ctrl->owner = THIS_MODULE;
}
diff --git a/drivers/isdn/hardware/eicon/diva_didd.c b/drivers/isdn/hardware/eicon/diva_didd.c
index 993b14cf177..5d06a743782 100644
--- a/drivers/isdn/hardware/eicon/diva_didd.c
+++ b/drivers/isdn/hardware/eicon/diva_didd.c
@@ -15,6 +15,7 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <net/net_namespace.h>
#include "platform.h"
@@ -62,39 +63,41 @@ static char *getrev(const char *revision)
return rev;
}
-static int
-proc_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int divadidd_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
char tmprev[32];
strcpy(tmprev, main_revision);
- len += sprintf(page + len, "%s\n", DRIVERNAME);
- len += sprintf(page + len, "name : %s\n", DRIVERLNAME);
- len += sprintf(page + len, "release : %s\n", DRIVERRELEASE_DIDD);
- len += sprintf(page + len, "build : %s(%s)\n",
+ seq_printf(m, "%s\n", DRIVERNAME);
+ seq_printf(m, "name : %s\n", DRIVERLNAME);
+ seq_printf(m, "release : %s\n", DRIVERRELEASE_DIDD);
+ seq_printf(m, "build : %s(%s)\n",
diva_didd_common_code_build, DIVA_BUILD);
- len += sprintf(page + len, "revision : %s\n", getrev(tmprev));
-
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+ seq_printf(m, "revision : %s\n", getrev(tmprev));
+
+ return 0;
}
+static int divadidd_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, divadidd_proc_show, NULL);
+}
+
+static const struct file_operations divadidd_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = divadidd_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int DIVA_INIT_FUNCTION create_proc(void)
{
proc_net_eicon = proc_mkdir("eicon", init_net.proc_net);
if (proc_net_eicon) {
- if ((proc_didd =
- create_proc_entry(DRIVERLNAME, S_IFREG | S_IRUGO,
- proc_net_eicon))) {
- proc_didd->read_proc = proc_read;
- }
+ proc_didd = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
+ &divadidd_proc_fops);
return (1);
}
return (0);
diff --git a/drivers/isdn/hardware/eicon/divasi.c b/drivers/isdn/hardware/eicon/divasi.c
index 69e71ebe784..f577719ab3f 100644
--- a/drivers/isdn/hardware/eicon/divasi.c
+++ b/drivers/isdn/hardware/eicon/divasi.c
@@ -17,6 +17,7 @@
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
+#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <asm/uaccess.h>
@@ -86,39 +87,40 @@ static void diva_um_timer_function(unsigned long data);
extern struct proc_dir_entry *proc_net_eicon;
static struct proc_dir_entry *um_idi_proc_entry = NULL;
-static int
-um_idi_proc_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int um_idi_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
char tmprev[32];
- len += sprintf(page + len, "%s\n", DRIVERNAME);
- len += sprintf(page + len, "name : %s\n", DRIVERLNAME);
- len += sprintf(page + len, "release : %s\n", DRIVERRELEASE_IDI);
+ seq_printf(m, "%s\n", DRIVERNAME);
+ seq_printf(m, "name : %s\n", DRIVERLNAME);
+ seq_printf(m, "release : %s\n", DRIVERRELEASE_IDI);
strcpy(tmprev, main_revision);
- len += sprintf(page + len, "revision : %s\n", getrev(tmprev));
- len += sprintf(page + len, "build : %s\n", DIVA_BUILD);
- len += sprintf(page + len, "major : %d\n", major);
-
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+ seq_printf(m, "revision : %s\n", getrev(tmprev));
+ seq_printf(m, "build : %s\n", DIVA_BUILD);
+ seq_printf(m, "major : %d\n", major);
+
+ return 0;
+}
+
+static int um_idi_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, um_idi_proc_show, NULL);
}
+static const struct file_operations um_idi_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = um_idi_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int DIVA_INIT_FUNCTION create_um_idi_proc(void)
{
- um_idi_proc_entry = create_proc_entry(DRIVERLNAME,
- S_IFREG | S_IRUGO | S_IWUSR,
- proc_net_eicon);
+ um_idi_proc_entry = proc_create(DRIVERLNAME, S_IRUGO, proc_net_eicon,
+ &um_idi_proc_fops);
if (!um_idi_proc_entry)
return (0);
-
- um_idi_proc_entry->read_proc = um_idi_proc_read;
-
return (1);
}
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c
index 040827288ec..46d44a94262 100644
--- a/drivers/isdn/hardware/eicon/divasproc.c
+++ b/drivers/isdn/hardware/eicon/divasproc.c
@@ -14,6 +14,7 @@
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/list.h>
#include <asm/uaccess.h>
@@ -141,14 +142,10 @@ void remove_divas_proc(void)
}
}
-/*
-** write group_optimization
-*/
-static int
-write_grp_opt(struct file *file, const char __user *buffer, unsigned long count,
- void *data)
+static ssize_t grp_opt_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -172,14 +169,10 @@ write_grp_opt(struct file *file, const char __user *buffer, unsigned long count,
return (-EINVAL);
}
-/*
-** write dynamic_l1_down
-*/
-static int
-write_d_l1_down(struct file *file, const char __user *buffer, unsigned long count,
- void *data)
+static ssize_t d_l1_down_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
if ((count == 1) || (count == 2)) {
@@ -203,63 +196,62 @@ write_d_l1_down(struct file *file, const char __user *buffer, unsigned long coun
return (-EINVAL);
}
-
-/*
-** read dynamic_l1_down
-*/
-static int
-read_d_l1_down(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int d_l1_down_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = m->private;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
- len += sprintf(page + len, "%s\n",
+ seq_printf(m, "%s\n",
(IoAdapter->capi_cfg.
cfg_1 & DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? "1" :
"0");
+ return 0;
+}
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+static int d_l1_down_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, d_l1_down_proc_show, PDE(inode)->data);
}
-/*
-** read group_optimization
-*/
-static int
-read_grp_opt(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static const struct file_operations d_l1_down_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = d_l1_down_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = d_l1_down_proc_write,
+};
+
+static int grp_opt_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = m->private;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
- len += sprintf(page + len, "%s\n",
+ seq_printf(m, "%s\n",
(IoAdapter->capi_cfg.
cfg_1 & DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON)
? "1" : "0");
+ return 0;
+}
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+static int grp_opt_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, grp_opt_proc_show, PDE(inode)->data);
}
-/*
-** info write
-*/
-static int
-info_write(struct file *file, const char __user *buffer, unsigned long count,
- void *data)
+static const struct file_operations grp_opt_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = grp_opt_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = grp_opt_proc_write,
+};
+
+static ssize_t info_proc_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *pos)
{
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
char c[4];
@@ -277,63 +269,46 @@ info_write(struct file *file, const char __user *buffer, unsigned long count,
return (-EINVAL);
}
-/*
-** info read
-*/
-static int
-info_read(char *page, char **start, off_t off, int count, int *eof,
- void *data)
+static int info_proc_show(struct seq_file *m, void *v)
{
int i = 0;
- int len = 0;
char *p;
char tmpser[16];
- diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) data;
+ diva_os_xdi_adapter_t *a = m->private;
PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
- len +=
- sprintf(page + len, "Name : %s\n",
- IoAdapter->Properties.Name);
- len += sprintf(page + len, "DSP state : %08x\n", a->dsp_mask);
- len += sprintf(page + len, "Channels : %02d\n",
- IoAdapter->Properties.Channels);
- len += sprintf(page + len, "E. max/used : %03d/%03d\n",
+ seq_printf(m, "Name : %s\n", IoAdapter->Properties.Name);
+ seq_printf(m, "DSP state : %08x\n", a->dsp_mask);
+ seq_printf(m, "Channels : %02d\n", IoAdapter->Properties.Channels);
+ seq_printf(m, "E. max/used : %03d/%03d\n",
IoAdapter->e_max, IoAdapter->e_count);
diva_get_vserial_number(IoAdapter, tmpser);
- len += sprintf(page + len, "Serial : %s\n", tmpser);
- len +=
- sprintf(page + len, "IRQ : %d\n",
- IoAdapter->irq_info.irq_nr);
- len += sprintf(page + len, "CardIndex : %d\n", a->CardIndex);
- len += sprintf(page + len, "CardOrdinal : %d\n", a->CardOrdinal);
- len += sprintf(page + len, "Controller : %d\n", a->controller);
- len += sprintf(page + len, "Bus-Type : %s\n",
+ seq_printf(m, "Serial : %s\n", tmpser);
+ seq_printf(m, "IRQ : %d\n", IoAdapter->irq_info.irq_nr);
+ seq_printf(m, "CardIndex : %d\n", a->CardIndex);
+ seq_printf(m, "CardOrdinal : %d\n", a->CardOrdinal);
+ seq_printf(m, "Controller : %d\n", a->controller);
+ seq_printf(m, "Bus-Type : %s\n",
(a->Bus ==
DIVAS_XDI_ADAPTER_BUS_ISA) ? "ISA" : "PCI");
- len += sprintf(page + len, "Port-Name : %s\n", a->port_name);
+ seq_printf(m, "Port-Name : %s\n", a->port_name);
if (a->Bus == DIVAS_XDI_ADAPTER_BUS_PCI) {
- len +=
- sprintf(page + len, "PCI-bus : %d\n",
- a->resources.pci.bus);
- len +=
- sprintf(page + len, "PCI-func : %d\n",
- a->resources.pci.func);
+ seq_printf(m, "PCI-bus : %d\n", a->resources.pci.bus);
+ seq_printf(m, "PCI-func : %d\n", a->resources.pci.func);
for (i = 0; i < 8; i++) {
if (a->resources.pci.bar[i]) {
- len +=
- sprintf(page + len,
+ seq_printf(m,
"Mem / I/O %d : 0x%x / mapped : 0x%lx",
i, a->resources.pci.bar[i],
(unsigned long) a->resources.
pci.addr[i]);
if (a->resources.pci.length[i]) {
- len +=
- sprintf(page + len,
+ seq_printf(m,
" / length : %d",
a->resources.pci.
length[i]);
}
- len += sprintf(page + len, "\n");
+ seq_putc(m, '\n');
}
}
}
@@ -353,16 +328,25 @@ info_read(char *page, char **start, off_t off, int count, int *eof,
} else {
p = "ready";
}
- len += sprintf(page + len, "State : %s\n", p);
+ seq_printf(m, "State : %s\n", p);
- if (off + count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len - off) ? count : len - off);
+ return 0;
+}
+
+static int info_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, info_proc_show, PDE(inode)->data);
}
+static const struct file_operations info_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = info_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = info_proc_write,
+};
+
/*
** adapter proc init/de-init
*/
@@ -380,28 +364,20 @@ int create_adapter_proc(diva_os_xdi_adapter_t * a)
return (0);
a->proc_adapter_dir = (void *) de;
- if (!(pe =
- create_proc_entry(info_proc_name, S_IFREG | S_IRUGO | S_IWUSR, de)))
+ pe = proc_create_data(info_proc_name, S_IRUGO | S_IWUSR, de,
+ &info_proc_fops, a);
+ if (!pe)
return (0);
a->proc_info = (void *) pe;
- pe->write_proc = info_write;
- pe->read_proc = info_read;
- pe->data = a;
- if ((pe = create_proc_entry(grp_opt_proc_name,
- S_IFREG | S_IRUGO | S_IWUSR, de))) {
+ pe = proc_create_data(grp_opt_proc_name, S_IRUGO | S_IWUSR, de,
+ &grp_opt_proc_fops, a);
+ if (pe)
a->proc_grp_opt = (void *) pe;
- pe->write_proc = write_grp_opt;
- pe->read_proc = read_grp_opt;
- pe->data = a;
- }
- if ((pe = create_proc_entry(d_l1_down_proc_name,
- S_IFREG | S_IRUGO | S_IWUSR, de))) {
+ pe = proc_create_data(d_l1_down_proc_name, S_IRUGO | S_IWUSR, de,
+ &d_l1_down_proc_fops, a);
+ if (pe)
a->proc_d_l1_down = (void *) pe;
- pe->write_proc = write_d_l1_down;
- pe->read_proc = read_d_l1_down;
- pe->data = a;
- }
DBG_TRC(("proc entry %s created", tmp));
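
The divasproc.c conversion additionally needs per-adapter private data and write support. A hedged sketch of that variant; the adapter structure and entry names are illustrative only:

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct example_adapter {			/* stand-in for the driver's adapter */
	int state;
};

static int adapter_proc_show(struct seq_file *m, void *v)
{
	struct example_adapter *a = m->private;	/* set by single_open() below */

	seq_printf(m, "state : %d\n", a->state);
	return 0;
}

static int adapter_proc_open(struct inode *inode, struct file *file)
{
	/* PDE(inode)->data is the pointer handed to proc_create_data(). */
	return single_open(file, adapter_proc_show, PDE(inode)->data);
}

static ssize_t adapter_proc_write(struct file *file, const char __user *buf,
				  size_t count, loff_t *pos)
{
	struct example_adapter *a = PDE(file->f_path.dentry->d_inode)->data;

	/* ... parse the user buffer and update *a here ... */
	a->state = 1;
	return count;
}

static const struct file_operations adapter_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= adapter_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= adapter_proc_write,
};

/* proc_create_data("info", S_IRUGO | S_IWUSR, dir, &adapter_proc_fops, a); */
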
diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c
index 1a1420d7a82..ad36df9b759 100644
--- a/drivers/isdn/hardware/mISDN/hfcmulti.c
+++ b/drivers/isdn/hardware/mISDN/hfcmulti.c
@@ -2846,7 +2846,7 @@ mode_hfcmulti(struct hfc_multi *hc, int ch, int protocol, int slot_tx,
int conf;
if (ch < 0 || ch > 31)
- return EINVAL;
+ return -EINVAL;
oslot_tx = hc->chan[ch].slot_tx;
oslot_rx = hc->chan[ch].slot_rx;
conf = hc->chan[ch].conf;
diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
index 62441ba53b9..36c6c616a65 100644
--- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c
+++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c
@@ -1133,6 +1133,7 @@ inf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
kfree(sc);
release_card(card);
+ break;
} else
card->sc[i - 1] = sc;
}
diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c
index d3f1077b709..2952a58c7a6 100644
--- a/drivers/isdn/hardware/mISDN/w6692.c
+++ b/drivers/isdn/hardware/mISDN/w6692.c
@@ -529,6 +529,7 @@ W6692_fill_Bfifo(struct w6692_ch *wch)
}
}
+#if 0
static int
setvolume(struct w6692_ch *wch, int mic, struct sk_buff *skb)
{
@@ -571,6 +572,7 @@ enable_pots(struct w6692_ch *wch)
WriteW6692(card, W_PCTL, card->pctl);
return 0;
}
+#endif
static int
disable_pots(struct w6692_ch *wch)
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index bfeb9b6aa04..6bde16c00fb 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -138,7 +138,7 @@ waitrecmsg(struct IsdnCardState *cs, u_char *len,
while((!(cs->BC_Read_Reg(cs, 0, ISAR_IRQBIT) & ISAR_IRQSTA)) &&
(timeout++ < maxdelay))
udelay(1);
- if (timeout >= maxdelay) {
+ if (timeout > maxdelay) {
printk(KERN_WARNING"isar recmsg IRQSTA timeout\n");
return(0);
}
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 4ffaa14b9fc..fe874afa4f8 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -11,6 +11,8 @@
*/
#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
@@ -432,26 +434,16 @@ static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
return retval;
}
-/*********************************************************************
-hycapi_read_proc
-
-Informations provided in the /proc/capi-entries.
-
-*********************************************************************/
-
-static int hycapi_read_proc(char *page, char **start, off_t off,
- int count, int *eof, struct capi_ctr *ctrl)
+static int hycapi_proc_show(struct seq_file *m, void *v)
{
+ struct capi_ctr *ctrl = m->private;
hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
hysdn_card *card = cinfo->card;
- int len = 0;
char *s;
-#ifdef HYCAPI_PRINTFNAMES
- printk(KERN_NOTICE "hycapi_read_proc\n");
-#endif
- len += sprintf(page+len, "%-16s %s\n", "name", cinfo->cardname);
- len += sprintf(page+len, "%-16s 0x%x\n", "io", card->iobase);
- len += sprintf(page+len, "%-16s %d\n", "irq", card->irq);
+
+ seq_printf(m, "%-16s %s\n", "name", cinfo->cardname);
+ seq_printf(m, "%-16s 0x%x\n", "io", card->iobase);
+ seq_printf(m, "%-16s %d\n", "irq", card->irq);
switch (card->brdtype) {
case BD_PCCARD: s = "HYSDN Hycard"; break;
@@ -461,24 +453,32 @@ static int hycapi_read_proc(char *page, char **start, off_t off,
case BD_PLEXUS: s = "HYSDN Plexus30"; break;
default: s = "???"; break;
}
- len += sprintf(page+len, "%-16s %s\n", "type", s);
+ seq_printf(m, "%-16s %s\n", "type", s);
if ((s = cinfo->version[VER_DRIVER]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_driver", s);
+ seq_printf(m, "%-16s %s\n", "ver_driver", s);
if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_cardtype", s);
+ seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
if ((s = cinfo->version[VER_SERIAL]) != NULL)
- len += sprintf(page+len, "%-16s %s\n", "ver_serial", s);
+ seq_printf(m, "%-16s %s\n", "ver_serial", s);
- len += sprintf(page+len, "%-16s %s\n", "cardname", cinfo->cardname);
+ seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);
- if (off+count >= len)
- *eof = 1;
- if (len < off)
- return 0;
- *start = page + off;
- return ((count < len-off) ? count : len-off);
+ return 0;
+}
+
+static int hycapi_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, hycapi_proc_show, PDE(inode)->data);
}
+static const struct file_operations hycapi_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = hycapi_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
/**************************************************************
hycapi_load_firmware
@@ -774,7 +774,7 @@ hycapi_capi_create(hysdn_card *card)
ctrl->load_firmware = hycapi_load_firmware;
ctrl->reset_ctr = hycapi_reset_ctr;
ctrl->procinfo = hycapi_procinfo;
- ctrl->ctr_read_proc = hycapi_read_proc;
+ ctrl->proc_fops = &hycapi_proc_fops;
strcpy(ctrl->name, cinfo->cardname);
ctrl->owner = THIS_MODULE;
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig
index 07c4e49f9e7..9c6650ea848 100644
--- a/drivers/isdn/i4l/Kconfig
+++ b/drivers/isdn/i4l/Kconfig
@@ -134,14 +134,7 @@ source "drivers/isdn/sc/Kconfig"
source "drivers/isdn/act2000/Kconfig"
-source "drivers/isdn/hysdn/Kconfig"
-
endmenu
# end ISDN_I4L
endif
-config ISDN_HDLC
- tristate
- select CRC_CCITT
- select BITREVERSE
-
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index a93637223c8..3bdbb611570 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1160,8 +1160,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
cc->start = tmpll;
- if (dm_get_device(ti, argv[3], cc->start, ti->len,
- dm_table_get_mode(ti->table), &cc->dev)) {
+ if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &cc->dev)) {
ti->error = "Device lookup failed";
goto bad_device;
}
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index ebe7381f47c..852052880d7 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -156,8 +156,8 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- if (dm_get_device(ti, argv[0], dc->start_read, ti->len,
- dm_table_get_mode(ti->table), &dc->dev_read)) {
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+ &dc->dev_read)) {
ti->error = "Device lookup failed";
goto bad;
}
@@ -177,8 +177,8 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad_dev_read;
}
- if (dm_get_device(ti, argv[3], dc->start_write, ti->len,
- dm_table_get_mode(ti->table), &dc->dev_write)) {
+ if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+ &dc->dev_write)) {
ti->error = "Write device lookup failed";
goto bad_dev_read;
}
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index 1d669322b27..d7500e1c26f 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -285,7 +285,8 @@ retry:
up_write(&_hash_lock);
}
-static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
+static int dm_hash_rename(uint32_t cookie, uint32_t *flags, const char *old,
+ const char *new)
{
char *new_name, *old_name;
struct hash_cell *hc;
@@ -344,7 +345,8 @@ static int dm_hash_rename(uint32_t cookie, const char *old, const char *new)
dm_table_put(table);
}
- dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie);
+ if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, cookie))
+ *flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(hc->md);
up_write(&_hash_lock);
@@ -736,10 +738,10 @@ static int dev_remove(struct dm_ioctl *param, size_t param_size)
__hash_remove(hc);
up_write(&_hash_lock);
- dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr);
+ if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
+ param->flags |= DM_UEVENT_GENERATED_FLAG;
dm_put(md);
- param->data_size = 0;
return 0;
}
@@ -773,7 +775,9 @@ static int dev_rename(struct dm_ioctl *param, size_t param_size)
return r;
param->data_size = 0;
- return dm_hash_rename(param->event_nr, param->name, new_name);
+
+ return dm_hash_rename(param->event_nr, &param->flags, param->name,
+ new_name);
}
static int dev_set_geometry(struct dm_ioctl *param, size_t param_size)
@@ -897,16 +901,17 @@ static int do_resume(struct dm_ioctl *param)
set_disk_ro(dm_disk(md), 1);
}
- if (dm_suspended_md(md))
+ if (dm_suspended_md(md)) {
r = dm_resume(md);
+ if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
+ param->flags |= DM_UEVENT_GENERATED_FLAG;
+ }
if (old_map)
dm_table_destroy(old_map);
- if (!r) {
- dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr);
+ if (!r)
r = __dev_status(md, param);
- }
dm_put(md);
return r;
@@ -1476,6 +1481,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
{
/* Always clear this flag */
param->flags &= ~DM_BUFFER_FULL_FLAG;
+ param->flags &= ~DM_UEVENT_GENERATED_FLAG;
/* Ignores parameters */
if (cmd == DM_REMOVE_ALL_CMD ||
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 82f7d6e6b1e..9200dbf2391 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -47,8 +47,7 @@ static int linear_ctr(struct dm_target *ti, unsigned int argc, char **argv)
}
lc->start = tmp;
- if (dm_get_device(ti, argv[0], lc->start, ti->len,
- dm_table_get_mode(ti->table), &lc->dev)) {
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &lc->dev)) {
ti->error = "dm-linear: Device lookup failed";
goto bad;
}
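
The device-mapper hunks in this and the following files all track one API change: dm_get_device() loses its start/len arguments and takes only the path and mode. A sketch of a target constructor using the new form; the target and field names are illustrative:

#include <linux/device-mapper.h>
#include <linux/slab.h>

struct example_ctx {
	struct dm_dev *dev;
};

static int example_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct example_ctx *ec;

	if (argc < 1) {
		ti->error = "Missing device path";
		return -EINVAL;
	}

	ec = kzalloc(sizeof(*ec), GFP_KERNEL);
	if (!ec)
		return -ENOMEM;

	/* Old form was dm_get_device(ti, path, start, len, mode, &dev). */
	if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ec->dev)) {
		ti->error = "Device lookup failed";
		kfree(ec);
		return -ENXIO;
	}

	ti->private = ec;
	return 0;
}
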
diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c
index 7035582786f..5a08be0222d 100644
--- a/drivers/md/dm-log.c
+++ b/drivers/md/dm-log.c
@@ -543,8 +543,7 @@ static int disk_ctr(struct dm_dirty_log *log, struct dm_target *ti,
return -EINVAL;
}
- r = dm_get_device(ti, argv[0], 0, 0 /* FIXME */,
- FMODE_READ | FMODE_WRITE, &dev);
+ r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &dev);
if (r)
return r;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index e81345a1d08..826bce7343b 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -69,6 +69,7 @@ struct multipath {
struct list_head priority_groups;
unsigned pg_init_required; /* pg_init needs calling? */
unsigned pg_init_in_progress; /* Only one pg_init allowed at once */
+ wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
unsigned nr_valid_paths; /* Total number of usable paths */
struct pgpath *current_pgpath;
@@ -95,8 +96,6 @@ struct multipath {
mempool_t *mpio_pool;
struct mutex work_mutex;
-
- unsigned suspended; /* Don't create new I/O internally when set. */
};
/*
@@ -202,6 +201,7 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
m->queue_io = 1;
INIT_WORK(&m->process_queued_ios, process_queued_ios);
INIT_WORK(&m->trigger_event, trigger_event);
+ init_waitqueue_head(&m->pg_init_wait);
mutex_init(&m->work_mutex);
m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache);
if (!m->mpio_pool) {
@@ -235,6 +235,21 @@ static void free_multipath(struct multipath *m)
* Path selection
*-----------------------------------------------*/
+static void __pg_init_all_paths(struct multipath *m)
+{
+ struct pgpath *pgpath;
+
+ m->pg_init_count++;
+ m->pg_init_required = 0;
+ list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
+ /* Skip failed paths */
+ if (!pgpath->is_active)
+ continue;
+ if (queue_work(kmpath_handlerd, &pgpath->activate_path))
+ m->pg_init_in_progress++;
+ }
+}
+
static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
m->current_pg = pgpath->pg;
@@ -439,7 +454,7 @@ static void process_queued_ios(struct work_struct *work)
{
struct multipath *m =
container_of(work, struct multipath, process_queued_ios);
- struct pgpath *pgpath = NULL, *tmp;
+ struct pgpath *pgpath = NULL;
unsigned must_queue = 1;
unsigned long flags;
@@ -457,14 +472,9 @@ static void process_queued_ios(struct work_struct *work)
(!pgpath && !m->queue_if_no_path))
must_queue = 0;
- if (m->pg_init_required && !m->pg_init_in_progress && pgpath) {
- m->pg_init_count++;
- m->pg_init_required = 0;
- list_for_each_entry(tmp, &pgpath->pg->pgpaths, list) {
- if (queue_work(kmpath_handlerd, &tmp->activate_path))
- m->pg_init_in_progress++;
- }
- }
+ if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
+ __pg_init_all_paths(m);
+
out:
spin_unlock_irqrestore(&m->lock, flags);
if (!must_queue)
@@ -597,8 +607,8 @@ static struct pgpath *parse_path(struct arg_set *as, struct path_selector *ps,
if (!p)
return ERR_PTR(-ENOMEM);
- r = dm_get_device(ti, shift(as), ti->begin, ti->len,
- dm_table_get_mode(ti->table), &p->path.dev);
+ r = dm_get_device(ti, shift(as), dm_table_get_mode(ti->table),
+ &p->path.dev);
if (r) {
ti->error = "error getting device";
goto bad;
@@ -890,9 +900,34 @@ static int multipath_ctr(struct dm_target *ti, unsigned int argc,
return r;
}
-static void flush_multipath_work(void)
+static void multipath_wait_for_pg_init_completion(struct multipath *m)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ unsigned long flags;
+
+ add_wait_queue(&m->pg_init_wait, &wait);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ spin_lock_irqsave(&m->lock, flags);
+ if (!m->pg_init_in_progress) {
+ spin_unlock_irqrestore(&m->lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(&m->lock, flags);
+
+ io_schedule();
+ }
+ set_current_state(TASK_RUNNING);
+
+ remove_wait_queue(&m->pg_init_wait, &wait);
+}
+
+static void flush_multipath_work(struct multipath *m)
{
flush_workqueue(kmpath_handlerd);
+ multipath_wait_for_pg_init_completion(m);
flush_workqueue(kmultipathd);
flush_scheduled_work();
}
@@ -901,7 +936,7 @@ static void multipath_dtr(struct dm_target *ti)
{
struct multipath *m = ti->private;
- flush_multipath_work();
+ flush_multipath_work(m);
free_multipath(m);
}
@@ -1128,8 +1163,7 @@ static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
static void pg_init_done(void *data, int errors)
{
- struct dm_path *path = data;
- struct pgpath *pgpath = path_to_pgpath(path);
+ struct pgpath *pgpath = data;
struct priority_group *pg = pgpath->pg;
struct multipath *m = pg->m;
unsigned long flags;
@@ -1143,8 +1177,8 @@ static void pg_init_done(void *data, int errors)
errors = 0;
break;
}
- DMERR("Cannot failover device because scsi_dh_%s was not "
- "loaded.", m->hw_handler_name);
+ DMERR("Could not failover the device: Handler scsi_dh_%s "
+ "Error %d.", m->hw_handler_name, errors);
/*
* Fail path for now, so we do not ping pong
*/
@@ -1181,14 +1215,24 @@ static void pg_init_done(void *data, int errors)
m->current_pgpath = NULL;
m->current_pg = NULL;
}
- } else if (!m->pg_init_required) {
- m->queue_io = 0;
+ } else if (!m->pg_init_required)
pg->bypassed = 0;
- }
- m->pg_init_in_progress--;
- if (!m->pg_init_in_progress)
- queue_work(kmultipathd, &m->process_queued_ios);
+ if (--m->pg_init_in_progress)
+ /* Activations of other paths are still ongoing */
+ goto out;
+
+ if (!m->pg_init_required)
+ m->queue_io = 0;
+
+ queue_work(kmultipathd, &m->process_queued_ios);
+
+ /*
+ * Wake up any thread waiting to suspend.
+ */
+ wake_up(&m->pg_init_wait);
+
+out:
spin_unlock_irqrestore(&m->lock, flags);
}
@@ -1198,7 +1242,7 @@ static void activate_path(struct work_struct *work)
container_of(work, struct pgpath, activate_path);
scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
- pg_init_done, &pgpath->path);
+ pg_init_done, pgpath);
}
/*
@@ -1276,8 +1320,7 @@ static void multipath_postsuspend(struct dm_target *ti)
struct multipath *m = ti->private;
mutex_lock(&m->work_mutex);
- m->suspended = 1;
- flush_multipath_work();
+ flush_multipath_work(m);
mutex_unlock(&m->work_mutex);
}
@@ -1289,10 +1332,6 @@ static void multipath_resume(struct dm_target *ti)
struct multipath *m = (struct multipath *) ti->private;
unsigned long flags;
- mutex_lock(&m->work_mutex);
- m->suspended = 0;
- mutex_unlock(&m->work_mutex);
-
spin_lock_irqsave(&m->lock, flags);
m->queue_if_no_path = m->saved_queue_if_no_path;
spin_unlock_irqrestore(&m->lock, flags);
@@ -1428,11 +1467,6 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
mutex_lock(&m->work_mutex);
- if (m->suspended) {
- r = -EBUSY;
- goto out;
- }
-
if (dm_suspended(ti)) {
r = -EBUSY;
goto out;
@@ -1471,8 +1505,7 @@ static int multipath_message(struct dm_target *ti, unsigned argc, char **argv)
goto out;
}
- r = dm_get_device(ti, argv[1], ti->begin, ti->len,
- dm_table_get_mode(ti->table), &dev);
+ r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
if (r) {
DMWARN("message: error getting device %s",
argv[1]);
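
The new multipath_wait_for_pg_init_completion() above is the classic open-coded wait-queue loop: sleep uninterruptibly until a lock-protected counter reaches zero, relying on the completing side to call wake_up(). A condensed sketch of the pattern with illustrative names:

#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

static void wait_for_zero(wait_queue_head_t *wq, spinlock_t *lock,
			  unsigned *counter)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;

	add_wait_queue(wq, &wait);
	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		spin_lock_irqsave(lock, flags);
		if (!*counter) {
			spin_unlock_irqrestore(lock, flags);
			break;
		}
		spin_unlock_irqrestore(lock, flags);

		io_schedule();		/* the completing side calls wake_up(wq) */
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(wq, &wait);
}
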
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 6c1046df81f..ddda531723d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -465,9 +465,17 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
static void hold_bio(struct mirror_set *ms, struct bio *bio)
{
/*
- * If device is suspended, complete the bio.
+ * The lock is required to avoid a race with the suspend
+ * process.
*/
+ spin_lock_irq(&ms->lock);
+
if (atomic_read(&ms->suspend)) {
+ spin_unlock_irq(&ms->lock);
+
+ /*
+ * If device is suspended, complete the bio.
+ */
if (dm_noflush_suspending(ms->ti))
bio_endio(bio, DM_ENDIO_REQUEUE);
else
@@ -478,7 +486,6 @@ static void hold_bio(struct mirror_set *ms, struct bio *bio)
/*
* Hold bio until the suspend is complete.
*/
- spin_lock_irq(&ms->lock);
bio_list_add(&ms->holds, bio);
spin_unlock_irq(&ms->lock);
}
@@ -737,9 +744,12 @@ static void do_writes(struct mirror_set *ms, struct bio_list *writes)
dm_rh_delay(ms->rh, bio);
while ((bio = bio_list_pop(&nosync))) {
- if (unlikely(ms->leg_failure) && errors_handled(ms))
- hold_bio(ms, bio);
- else {
+ if (unlikely(ms->leg_failure) && errors_handled(ms)) {
+ spin_lock_irq(&ms->lock);
+ bio_list_add(&ms->failures, bio);
+ spin_unlock_irq(&ms->lock);
+ wakeup_mirrord(ms);
+ } else {
map_bio(get_default_mirror(ms), bio);
generic_make_request(bio);
}
@@ -917,8 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
return -EINVAL;
}
- if (dm_get_device(ti, argv[0], offset, ti->len,
- dm_table_get_mode(ti->table),
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&ms->mirror[mirror].dev)) {
ti->error = "Device lookup failure";
return -ENXIO;
@@ -1259,6 +1268,20 @@ static void mirror_presuspend(struct dm_target *ti)
atomic_set(&ms->suspend, 1);
/*
+ * Process the bios in the hold list so that any recovery waiting
+ * on those bios can make progress. After this, no bio can be
+ * added to the hold list because ms->suspend is set.
+ */
+ spin_lock_irq(&ms->lock);
+ holds = ms->holds;
+ bio_list_init(&ms->holds);
+ spin_unlock_irq(&ms->lock);
+
+ while ((bio = bio_list_pop(&holds)))
+ hold_bio(ms, bio);
+
+ /*
* We must finish up all the work that we've
* generated (i.e. recovery work).
*/
@@ -1278,22 +1301,6 @@ static void mirror_presuspend(struct dm_target *ti)
* we know that all of our I/O has been pushed.
*/
flush_workqueue(ms->kmirrord_wq);
-
- /*
- * Now set ms->suspend is set and the workqueue flushed, no more
- * entries can be added to ms->hold list, so process it.
- *
- * Bios can still arrive concurrently with or after this
- * presuspend function, but they cannot join the hold list
- * because ms->suspend is set.
- */
- spin_lock_irq(&ms->lock);
- holds = ms->holds;
- bio_list_init(&ms->holds);
- spin_unlock_irq(&ms->lock);
-
- while ((bio = bio_list_pop(&holds)))
- hold_bio(ms, bio);
}
static void mirror_postsuspend(struct dm_target *ti)
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index ee8eb283650..54853773510 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -83,10 +83,10 @@ struct dm_snapshot {
/* Whether or not owning mapped_device is suspended */
int suspended;
- mempool_t *pending_pool;
-
atomic_t pending_exceptions_count;
+ mempool_t *pending_pool;
+
struct dm_exception_table pending;
struct dm_exception_table complete;
@@ -96,6 +96,11 @@ struct dm_snapshot {
*/
spinlock_t pe_lock;
+ /* Chunks with outstanding reads */
+ spinlock_t tracked_chunk_lock;
+ mempool_t *tracked_chunk_pool;
+ struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+
/* The on disk metadata handler */
struct dm_exception_store *store;
@@ -105,10 +110,12 @@ struct dm_snapshot {
struct bio_list queued_bios;
struct work_struct queued_bios_work;
- /* Chunks with outstanding reads */
- mempool_t *tracked_chunk_pool;
- spinlock_t tracked_chunk_lock;
- struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
+ /* Wait for events based on state_bits */
+ unsigned long state_bits;
+
+ /* Range of chunks currently being merged. */
+ chunk_t first_merging_chunk;
+ int num_merging_chunks;
/*
* The merge operation failed if this flag is set.
@@ -125,13 +132,6 @@ struct dm_snapshot {
*/
int merge_failed;
- /* Wait for events based on state_bits */
- unsigned long state_bits;
-
- /* Range of chunks currently being merged. */
- chunk_t first_merging_chunk;
- int num_merging_chunks;
-
/*
* Incoming bios that overlap with chunks being merged must wait
* for them to be committed.
@@ -1081,8 +1081,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv++;
argc--;
- r = dm_get_device(ti, cow_path, 0, 0,
- FMODE_READ | FMODE_WRITE, &s->cow);
+ r = dm_get_device(ti, cow_path, FMODE_READ | FMODE_WRITE, &s->cow);
if (r) {
ti->error = "Cannot get COW device";
goto bad_cow;
@@ -1098,7 +1097,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
argv += args_used;
argc -= args_used;
- r = dm_get_device(ti, origin_path, 0, ti->len, origin_mode, &s->origin);
+ r = dm_get_device(ti, origin_path, origin_mode, &s->origin);
if (r) {
ti->error = "Cannot get origin device";
goto bad_origin;
@@ -2100,8 +2099,7 @@ static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
return -EINVAL;
}
- r = dm_get_device(ti, argv[0], 0, ti->len,
- dm_table_get_mode(ti->table), &dev);
+ r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev);
if (r) {
ti->error = "Cannot get target device";
return r;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index bd58703ee8f..e610725db76 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -80,8 +80,7 @@ static int get_stripe(struct dm_target *ti, struct stripe_c *sc,
if (sscanf(argv[1], "%llu", &start) != 1)
return -EINVAL;
- if (dm_get_device(ti, argv[0], start, sc->stripe_width,
- dm_table_get_mode(ti->table),
+ if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
&sc->stripe[stripe].dev))
return -ENXIO;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 4b22feb01a0..9924ea23032 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -429,8 +429,7 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
* it's already present.
*/
static int __table_get_device(struct dm_table *t, struct dm_target *ti,
- const char *path, sector_t start, sector_t len,
- fmode_t mode, struct dm_dev **result)
+ const char *path, fmode_t mode, struct dm_dev **result)
{
int r;
dev_t uninitialized_var(dev);
@@ -527,11 +526,10 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
}
EXPORT_SYMBOL_GPL(dm_set_device_limits);
-int dm_get_device(struct dm_target *ti, const char *path, sector_t start,
- sector_t len, fmode_t mode, struct dm_dev **result)
+int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode,
+ struct dm_dev **result)
{
- return __table_get_device(ti->table, ti, path,
- start, len, mode, result);
+ return __table_get_device(ti->table, ti, path, mode, result);
}
@@ -1231,8 +1229,6 @@ void dm_table_unplug_all(struct dm_table *t)
struct mapped_device *dm_table_get_md(struct dm_table *t)
{
- dm_get(t->md);
-
return t->md;
}
diff --git a/drivers/md/dm-uevent.c b/drivers/md/dm-uevent.c
index c7c555a8c7b..6b1e3b61b25 100644
--- a/drivers/md/dm-uevent.c
+++ b/drivers/md/dm-uevent.c
@@ -187,7 +187,7 @@ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
if (event_type >= ARRAY_SIZE(_dm_uevent_type_names)) {
DMERR("%s: Invalid event_type %d", __func__, event_type);
- goto out;
+ return;
}
event = dm_build_path_uevent(md, ti,
@@ -195,12 +195,9 @@ void dm_path_uevent(enum dm_uevent_type event_type, struct dm_target *ti,
_dm_uevent_type_names[event_type].name,
path, nr_valid_paths);
if (IS_ERR(event))
- goto out;
+ return;
dm_uevent_add(md, &event->elist);
-
-out:
- dm_put(md);
}
EXPORT_SYMBOL_GPL(dm_path_uevent);
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index aa4e2aa86d4..d21e1284604 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -635,8 +635,10 @@ static void dec_pending(struct dm_io *io, int error)
if (!md->barrier_error && io_error != -EOPNOTSUPP)
md->barrier_error = io_error;
end_io_acct(io);
+ free_io(md, io);
} else {
end_io_acct(io);
+ free_io(md, io);
if (io_error != DM_ENDIO_REQUEUE) {
trace_block_bio_complete(md->queue, bio);
@@ -644,8 +646,6 @@ static void dec_pending(struct dm_io *io, int error)
bio_endio(bio, io_error);
}
}
-
- free_io(md, io);
}
}
@@ -2618,18 +2618,19 @@ out:
/*-----------------------------------------------------------------
* Event notification.
*---------------------------------------------------------------*/
-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
unsigned cookie)
{
char udev_cookie[DM_COOKIE_LENGTH];
char *envp[] = { udev_cookie, NULL };
if (!cookie)
- kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
+ return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
else {
snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
DM_COOKIE_ENV_VAR_NAME, cookie);
- kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
+ return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
+ action, envp);
}
}
@@ -2699,23 +2700,13 @@ int dm_suspended_md(struct mapped_device *md)
int dm_suspended(struct dm_target *ti)
{
- struct mapped_device *md = dm_table_get_md(ti->table);
- int r = dm_suspended_md(md);
-
- dm_put(md);
-
- return r;
+ return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);
int dm_noflush_suspending(struct dm_target *ti)
{
- struct mapped_device *md = dm_table_get_md(ti->table);
- int r = __noflush_suspending(md);
-
- dm_put(md);
-
- return r;
+ return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
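
dm_kobject_uevent() now propagates the return value of kobject_uevent(), which is 0 when the event was actually emitted. The dm-ioctl.c callers use that to set DM_UEVENT_GENERATED_FLAG so userspace knows whether a udev event is coming. A short sketch of the caller-side pattern, following the shape of the hunks above:

	/* Flag the ioctl result only when an event really went out. */
	if (!dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
		param->flags |= DM_UEVENT_GENERATED_FLAG;
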
diff --git a/drivers/md/dm.h b/drivers/md/dm.h
index 8dadaa5bc39..bad1724d486 100644
--- a/drivers/md/dm.h
+++ b/drivers/md/dm.h
@@ -125,8 +125,8 @@ void dm_stripe_exit(void);
int dm_open_count(struct mapped_device *md);
int dm_lock_for_deletion(struct mapped_device *md);
-void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
- unsigned cookie);
+int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
+ unsigned cookie);
int dm_io_init(void);
void dm_io_exit(void);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 509c8f3dd9a..70ffbd071b2 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -4680,7 +4680,7 @@ static int raid5_alloc_percpu(raid5_conf_t *conf)
{
unsigned long cpu;
struct page *spare_page;
- struct raid5_percpu *allcpus;
+ struct raid5_percpu __percpu *allcpus;
void *scribble;
int err;
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index dd708359b45..0f86f5e3672 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -405,7 +405,7 @@ struct raid5_private_data {
* lists and performing address
* conversions
*/
- } *percpu;
+ } __percpu *percpu;
size_t scribble_len; /* size of scribble region must be
* associated with conf to handle
* cpu hotplug while reshaping
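
The raid5 hunks only add the __percpu address-space annotation so sparse can verify that per-CPU pointers are dereferenced through the per-CPU accessors. A small illustrative sketch of an annotated allocation; the structure is a placeholder:

#include <linux/percpu.h>

struct example_percpu {
	struct page *spare_page;
	void *scribble;
};

static struct example_percpu __percpu *example_alloc_percpu(void)
{
	struct example_percpu __percpu *p;
	int cpu;

	p = alloc_percpu(struct example_percpu);
	if (!p)
		return NULL;

	/* Each CPU's instance is reached via per_cpu_ptr(), never directly. */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(p, cpu)->scribble = NULL;

	return p;
}
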
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index b11533f7619..441c0642b30 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -950,11 +950,8 @@ static int dvb_net_filter_sec_set(struct net_device *dev,
(*secfilter)->filter_mask[10] = mac_mask[1];
(*secfilter)->filter_mask[11]=mac_mask[0];
- dprintk("%s: filter mac=%02x %02x %02x %02x %02x %02x\n",
- dev->name, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
- dprintk("%s: filter mask=%02x %02x %02x %02x %02x %02x\n",
- dev->name, mac_mask[0], mac_mask[1], mac_mask[2],
- mac_mask[3], mac_mask[4], mac_mask[5]);
+ dprintk("%s: filter mac=%pM\n", dev->name, mac);
+ dprintk("%s: filter mask=%pM\n", dev->name, mac_mask);
return 0;
}
@@ -1142,18 +1139,18 @@ static void wq_set_multicast_list (struct work_struct *work)
} else if ((dev->flags & IFF_ALLMULTI)) {
dprintk("%s: allmulti mode\n", dev->name);
priv->rx_mode = RX_MODE_ALL_MULTI;
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
int mci;
struct dev_mc_list *mc;
dprintk("%s: set_mc_list, %d entries\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
priv->rx_mode = RX_MODE_MULTI;
priv->multi_num = 0;
for (mci = 0, mc=dev->mc_list;
- mci < dev->mc_count;
+ mci < netdev_mc_count(dev);
mc = mc->next, mci++) {
dvb_set_mc_filter(dev, mc);
}
@@ -1240,7 +1237,6 @@ static void dvb_net_setup(struct net_device *dev)
dev->header_ops = &dvb_header_ops;
dev->netdev_ops = &dvb_netdev_ops;
dev->mtu = 4096;
- dev->mc_count = 0;
dev->flags |= IFF_NOARP;
}
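
The dvb_net changes swap hand-rolled MAC formatting and direct use of dev->mc_count for the %pM printk extension and the netdev_mc_* helpers. An illustrative sketch of both:

#include <linux/netdevice.h>

static void example_dump_filter(struct net_device *dev, const u8 *mac)
{
	/* %pM expands a 6-byte buffer as xx:xx:xx:xx:xx:xx */
	pr_debug("%s: filter mac=%pM\n", dev->name, mac);

	if (!netdev_mc_empty(dev))
		pr_debug("%s: %d multicast entries\n",
			 dev->name, netdev_mc_count(dev));
}
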
diff --git a/drivers/media/dvb/firewire/firedtv-fw.c b/drivers/media/dvb/firewire/firedtv-fw.c
index 7a3de16fba0..75afe4f81e3 100644
--- a/drivers/media/dvb/firewire/firedtv-fw.c
+++ b/drivers/media/dvb/firewire/firedtv-fw.c
@@ -239,47 +239,18 @@ static const struct fw_address_region fcp_region = {
};
/* Adjust the template string if models with longer names appear. */
-#define MAX_MODEL_NAME_LEN ((int)DIV_ROUND_UP(sizeof("FireDTV ????"), 4))
-
-static size_t model_name(u32 *directory, __be32 *buffer)
-{
- struct fw_csr_iterator ci;
- int i, length, key, value, last_key = 0;
- u32 *block = NULL;
-
- fw_csr_iterator_init(&ci, directory);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (last_key == CSR_MODEL &&
- key == (CSR_DESCRIPTOR | CSR_LEAF))
- block = ci.p - 1 + value;
- last_key = key;
- }
-
- if (block == NULL)
- return 0;
-
- length = min((int)(block[0] >> 16) - 2, MAX_MODEL_NAME_LEN);
- if (length <= 0)
- return 0;
-
- /* fast-forward to text string */
- block += 3;
-
- for (i = 0; i < length; i++)
- buffer[i] = cpu_to_be32(block[i]);
-
- return length * 4;
-}
+#define MAX_MODEL_NAME_LEN sizeof("FireDTV ????")
static int node_probe(struct device *dev)
{
struct firedtv *fdtv;
- __be32 name[MAX_MODEL_NAME_LEN];
+ char name[MAX_MODEL_NAME_LEN];
int name_len, err;
- name_len = model_name(fw_unit(dev)->directory, name);
+ name_len = fw_csr_string(fw_unit(dev)->directory, CSR_MODEL,
+ name, sizeof(name));
- fdtv = fdtv_alloc(dev, &backend, (char *)name, name_len);
+ fdtv = fdtv_alloc(dev, &backend, name, name_len >= 0 ? name_len : 0);
if (!fdtv)
return -ENOMEM;
diff --git a/drivers/media/video/dabusb.c b/drivers/media/video/dabusb.c
index 9b413a35e04..0f505086774 100644
--- a/drivers/media/video/dabusb.c
+++ b/drivers/media/video/dabusb.c
@@ -616,10 +616,12 @@ static int dabusb_open (struct inode *inode, struct file *file)
{
int devnum = iminor(inode);
pdabusb_t s;
+ int r;
if (devnum < DABUSB_MINOR || devnum >= (DABUSB_MINOR + NRDABUSB))
return -EIO;
+ lock_kernel();
s = &dabusb[devnum - DABUSB_MINOR];
dbg("dabusb_open");
@@ -634,6 +636,7 @@ static int dabusb_open (struct inode *inode, struct file *file)
msleep_interruptible(500);
if (signal_pending (current)) {
+ unlock_kernel();
return -EAGAIN;
}
mutex_lock(&s->mutex);
@@ -641,6 +644,7 @@ static int dabusb_open (struct inode *inode, struct file *file)
if (usb_set_interface (s->usbdev, _DABUSB_IF, 1) < 0) {
mutex_unlock(&s->mutex);
dev_err(&s->usbdev->dev, "set_interface failed\n");
+ unlock_kernel();
return -EINVAL;
}
s->opened = 1;
@@ -649,7 +653,9 @@ static int dabusb_open (struct inode *inode, struct file *file)
file->f_pos = 0;
file->private_data = s;
- return nonseekable_open(inode, file);
+ r = nonseekable_open(inode, file);
+ unlock_kernel();
+ return r;
}
static int dabusb_release (struct inode *inode, struct file *file)
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
index 7045c45da9b..949a648f8e2 100644
--- a/drivers/message/i2o/i2o_proc.c
+++ b/drivers/message/i2o/i2o_proc.c
@@ -111,10 +111,7 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
break;
case I2O_SNFORMAT_LAN48_MAC: /* LAN-48 MAC Address */
- seq_printf(seq,
- "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X",
- serialno[2], serialno[3],
- serialno[4], serialno[5], serialno[6], serialno[7]);
+ seq_printf(seq, "LAN-48 MAC address @ %pM", &serialno[2]);
break;
case I2O_SNFORMAT_WAN: /* WAN MAC Address */
@@ -126,10 +123,8 @@ static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
case I2O_SNFORMAT_LAN64_MAC: /* LAN-64 MAC Address */
/* FIXME: Figure out what a LAN-64 address really looks like?? */
seq_printf(seq,
- "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X",
- serialno[8], serialno[9],
- serialno[2], serialno[3],
- serialno[4], serialno[5], serialno[6], serialno[7]);
+ "LAN-64 MAC address @ [?:%02X:%02X:?] %pM",
+ serialno[8], serialno[9], &serialno[2]);
break;
case I2O_SNFORMAT_DDM: /* I2O DDM */
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 413576a2f31..b670d10d5c9 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -94,7 +94,7 @@ config TPS65010
config MENELAUS
bool "Texas Instruments TWL92330/Menelaus PM chip"
- depends on I2C=y && ARCH_OMAP24XX
+ depends on I2C=y && ARCH_OMAP2
help
If you say yes here you get support for the Texas Instruments
TWL92330/Menelaus Power Management chip. This include voltage
diff --git a/drivers/mfd/htc-egpio.c b/drivers/mfd/htc-egpio.c
index aa266e1f69b..addb846c1e3 100644
--- a/drivers/mfd/htc-egpio.c
+++ b/drivers/mfd/htc-egpio.c
@@ -108,7 +108,7 @@ static void egpio_handler(unsigned int irq, struct irq_desc *desc)
ack_irqs(ei);
/* Process all set pins. */
readval &= ei->irqs_enabled;
- for_each_bit(irqpin, &readval, ei->nirqs) {
+ for_each_set_bit(irqpin, &readval, ei->nirqs) {
/* Run irq handler */
pr_debug("got IRQ %d\n", irqpin);
irq = ei->irq_start + irqpin;
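
for_each_set_bit() is the renamed for_each_bit(); it visits only the set bits of a bitmap, which is what the egpio handler needs for its pending-IRQ word. A minimal sketch:

#include <linux/bitops.h>
#include <linux/kernel.h>

static void example_handle_pending(unsigned long pending, unsigned int nbits)
{
	unsigned int bit;

	/* Only bits that are set in 'pending' are visited. */
	for_each_set_bit(bit, &pending, nbits)
		pr_debug("handling bit %u\n", bit);
}
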
diff --git a/drivers/mfd/mc13783-core.c b/drivers/mfd/mc13783-core.c
index 735c8a4d164..62a847e4c2d 100644
--- a/drivers/mfd/mc13783-core.c
+++ b/drivers/mfd/mc13783-core.c
@@ -225,7 +225,7 @@ int mc13783_reg_rmw(struct mc13783 *mc13783, unsigned int offset,
}
EXPORT_SYMBOL(mc13783_reg_rmw);
-int mc13783_mask(struct mc13783 *mc13783, int irq)
+int mc13783_irq_mask(struct mc13783 *mc13783, int irq)
{
int ret;
unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
@@ -245,9 +245,9 @@ int mc13783_mask(struct mc13783 *mc13783, int irq)
return mc13783_reg_write(mc13783, offmask, mask | irqbit);
}
-EXPORT_SYMBOL(mc13783_mask);
+EXPORT_SYMBOL(mc13783_irq_mask);
-int mc13783_unmask(struct mc13783 *mc13783, int irq)
+int mc13783_irq_unmask(struct mc13783 *mc13783, int irq)
{
int ret;
unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
@@ -267,7 +267,53 @@ int mc13783_unmask(struct mc13783 *mc13783, int irq)
return mc13783_reg_write(mc13783, offmask, mask & ~irqbit);
}
-EXPORT_SYMBOL(mc13783_unmask);
+EXPORT_SYMBOL(mc13783_irq_unmask);
+
+int mc13783_irq_status(struct mc13783 *mc13783, int irq,
+ int *enabled, int *pending)
+{
+ int ret;
+ unsigned int offmask = irq < 24 ? MC13783_IRQMASK0 : MC13783_IRQMASK1;
+ unsigned int offstat = irq < 24 ? MC13783_IRQSTAT0 : MC13783_IRQSTAT1;
+ u32 irqbit = 1 << (irq < 24 ? irq : irq - 24);
+
+ if (irq < 0 || irq >= MC13783_NUM_IRQ)
+ return -EINVAL;
+
+ if (enabled) {
+ u32 mask;
+
+ ret = mc13783_reg_read(mc13783, offmask, &mask);
+ if (ret)
+ return ret;
+
+ *enabled = mask & irqbit;
+ }
+
+ if (pending) {
+ u32 stat;
+
+ ret = mc13783_reg_read(mc13783, offstat, &stat);
+ if (ret)
+ return ret;
+
+ *pending = stat & irqbit;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mc13783_irq_status);
+
+int mc13783_irq_ack(struct mc13783 *mc13783, int irq)
+{
+ unsigned int offstat = irq < 24 ? MC13783_IRQSTAT0 : MC13783_IRQSTAT1;
+ unsigned int val = 1 << (irq < 24 ? irq : irq - 24);
+
+ BUG_ON(irq < 0 || irq >= MC13783_NUM_IRQ);
+
+ return mc13783_reg_write(mc13783, offstat, val);
+}
+EXPORT_SYMBOL(mc13783_irq_ack);
int mc13783_irq_request_nounmask(struct mc13783 *mc13783, int irq,
irq_handler_t handler, const char *name, void *dev)
@@ -297,7 +343,7 @@ int mc13783_irq_request(struct mc13783 *mc13783, int irq,
if (ret)
return ret;
- ret = mc13783_unmask(mc13783, irq);
+ ret = mc13783_irq_unmask(mc13783, irq);
if (ret) {
mc13783->irqhandler[irq] = NULL;
mc13783->irqdata[irq] = NULL;
@@ -317,7 +363,7 @@ int mc13783_irq_free(struct mc13783 *mc13783, int irq, void *dev)
mc13783->irqdata[irq] != dev)
return -EINVAL;
- ret = mc13783_mask(mc13783, irq);
+ ret = mc13783_irq_mask(mc13783, irq);
if (ret)
return ret;
@@ -333,17 +379,6 @@ static inline irqreturn_t mc13783_irqhandler(struct mc13783 *mc13783, int irq)
return mc13783->irqhandler[irq](irq, mc13783->irqdata[irq]);
}
-int mc13783_ackirq(struct mc13783 *mc13783, int irq)
-{
- unsigned int offstat = irq < 24 ? MC13783_IRQSTAT0 : MC13783_IRQSTAT1;
- unsigned int val = 1 << (irq < 24 ? irq : irq - 24);
-
- BUG_ON(irq < 0 || irq >= MC13783_NUM_IRQ);
-
- return mc13783_reg_write(mc13783, offstat, val);
-}
-EXPORT_SYMBOL(mc13783_ackirq);
-
/*
* returns: number of handled irqs or negative error
* locking: holds mc13783->lock
@@ -422,7 +457,7 @@ static irqreturn_t mc13783_handler_adcdone(int irq, void *data)
{
struct mc13783_adcdone_data *adcdone_data = data;
- mc13783_ackirq(adcdone_data->mc13783, irq);
+ mc13783_irq_ack(adcdone_data->mc13783, irq);
complete_all(&adcdone_data->done);
@@ -486,7 +521,7 @@ int mc13783_adc_do_conversion(struct mc13783 *mc13783, unsigned int mode,
dev_dbg(&mc13783->spidev->dev, "%s: request irq\n", __func__);
mc13783_irq_request(mc13783, MC13783_IRQ_ADCDONE,
mc13783_handler_adcdone, __func__, &adcdone_data);
- mc13783_ackirq(mc13783, MC13783_IRQ_ADCDONE);
+ mc13783_irq_ack(mc13783, MC13783_IRQ_ADCDONE);
mc13783_reg_write(mc13783, MC13783_REG_ADC_0, adc0);
mc13783_reg_write(mc13783, MC13783_REG_ADC_1, adc1);
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index e3551d20464..d16af6a423f 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -212,6 +212,15 @@ config CS5535_MFGPT_DEFAULT_IRQ
want to use a different IRQ by default. This is here for
architectures to set as necessary.
+config CS5535_CLOCK_EVENT_SRC
+ tristate "CS5535/CS5536 high-res timer (MFGPT) events"
+ depends on GENERIC_TIME && GENERIC_CLOCKEVENTS && CS5535_MFGPT
+ help
+ This driver provides a clock event source based on the MFGPT
+ timer(s) in the CS5535 and CS5536 companion chips.
+ MFGPTs have a better resolution and max interval than the
+ generic PIT, and are suitable for use as high-res timers.
+
config HP_ILO
tristate "Channel interface driver for HP iLO/iLO2 processor"
depends on PCI
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
index 50d431e469f..9dbaeb574e6 100644
--- a/drivers/misc/iwmc3200top/fw-download.c
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -43,15 +43,14 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
struct iwmct_parser *parser = &priv->parser;
struct iwmct_fw_hdr *fw_hdr = &parser->versions;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
parser->file = file;
parser->file_size = file_size;
parser->cur_pos = 0;
- parser->buf = NULL;
-
+ parser->entry_point = 0;
parser->buf = kzalloc(block_size, GFP_KERNEL);
if (!parser->buf) {
LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
@@ -70,7 +69,7 @@ static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
parser->cur_pos += sizeof(struct iwmct_fw_hdr);
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return 0;
}
@@ -113,7 +112,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
struct iwmct_dbg *dbg = &priv->dbg;
struct iwmct_fw_sec_hdr *sec_hdr;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
<= parser->file_size) {
@@ -152,7 +151,7 @@ static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
"finished with section cur_pos=%zd\n", parser->cur_pos);
}
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, INIT, "<--\n");
return 0;
}
@@ -167,7 +166,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
int ret = 0;
u32 cmd = 0;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
addr, sec_size);
@@ -229,7 +228,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
hdr->cmd = cpu_to_le32(cmd);
/* send it down */
/* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, 0, parser->buf, trans_size);
+ ret = iwmct_tx(priv, parser->buf, trans_size);
if (ret != 0) {
LOG_INFO(priv, FW_DOWNLOAD,
"iwmct_tx returned %d\n", ret);
@@ -251,7 +250,7 @@ static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
if (sent < sec_size)
ret = -EINVAL;
exit:
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return ret;
}
@@ -262,7 +261,7 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
int ret;
u32 cmd;
- LOG_INFOEX(priv, INIT, "-->\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "-->\n");
memset(parser->buf, 0, parser->buf_size);
cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
@@ -281,11 +280,11 @@ static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
/* send it down */
/* TODO: add more proper sending and error checking */
- ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE);
+ ret = iwmct_tx(priv, parser->buf, IWMC_SDIO_BLK_SIZE);
if (ret)
LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
- LOG_INFOEX(priv, INIT, "<--\n");
+ LOG_TRACE(priv, FW_DOWNLOAD, "<--\n");
return 0;
}
@@ -298,8 +297,16 @@ int iwmct_fw_load(struct iwmct_priv *priv)
__le32 addr;
int ret;
- /* clear parser struct */
- memset(&priv->parser, 0, sizeof(struct iwmct_parser));
+
+ LOG_INFO(priv, FW_DOWNLOAD, "barker download request 0x%x is:\n",
+ priv->barker);
+ LOG_INFO(priv, FW_DOWNLOAD, "******* Top FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
+ LOG_INFO(priv, FW_DOWNLOAD, "******* GPS FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
+ LOG_INFO(priv, FW_DOWNLOAD, "******* BT FW %s requested ********\n",
+ (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
+
/* get the firmware */
ret = request_firmware(&raw, fw_name, &priv->func->dev);
@@ -317,6 +324,7 @@ int iwmct_fw_load(struct iwmct_priv *priv)
LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", fw_name);
+ /* clear parser struct */
ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
if (ret < 0) {
LOG_ERROR(priv, FW_DOWNLOAD,
@@ -324,7 +332,6 @@ int iwmct_fw_load(struct iwmct_priv *priv)
goto exit;
}
- /* checksum */
if (!iwmct_checksum(priv)) {
LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
ret = -EINVAL;
@@ -333,23 +340,18 @@ int iwmct_fw_load(struct iwmct_priv *priv)
/* download firmware to device */
while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
- if (iwmct_download_section(priv, pdata, len, addr)) {
+ ret = iwmct_download_section(priv, pdata, len, addr);
+ if (ret) {
LOG_ERROR(priv, FW_DOWNLOAD,
"%s download section failed\n", fw_name);
- ret = -EIO;
goto exit;
}
}
- iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
+ ret = iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
exit:
kfree(priv->parser.buf);
-
- if (raw)
- release_firmware(raw);
-
- raw = NULL;
-
+ release_firmware(raw);
return ret;
}
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
index 43bd510e187..740ff0738ea 100644
--- a/drivers/misc/iwmc3200top/iwmc3200top.h
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -196,9 +196,7 @@ struct iwmct_priv {
struct list_head read_req_list;
};
-extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
- void *src, int count);
-
+extern int iwmct_tx(struct iwmct_priv *priv, void *src, int count);
extern int iwmct_fw_load(struct iwmct_priv *priv);
extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
index aba8121f978..4434bb16cea 100644
--- a/drivers/misc/iwmc3200top/log.h
+++ b/drivers/misc/iwmc3200top/log.h
@@ -37,13 +37,26 @@
#define LOG_SEV_INFO 3
#define LOG_SEV_INFOEX 4
-#define LOG_SEV_FILTER_ALL \
- (BIT(LOG_SEV_CRITICAL) | \
- BIT(LOG_SEV_ERROR) | \
- BIT(LOG_SEV_WARNING) | \
- BIT(LOG_SEV_INFO) | \
+/* Log levels not defined for FW */
+#define LOG_SEV_TRACE 5
+#define LOG_SEV_DUMP 6
+
+#define LOG_SEV_FW_FILTER_ALL \
+ (BIT(LOG_SEV_CRITICAL) | \
+ BIT(LOG_SEV_ERROR) | \
+ BIT(LOG_SEV_WARNING) | \
+ BIT(LOG_SEV_INFO) | \
BIT(LOG_SEV_INFOEX))
+#define LOG_SEV_FILTER_ALL \
+ (BIT(LOG_SEV_CRITICAL) | \
+ BIT(LOG_SEV_ERROR) | \
+ BIT(LOG_SEV_WARNING) | \
+ BIT(LOG_SEV_INFO) | \
+ BIT(LOG_SEV_INFOEX) | \
+ BIT(LOG_SEV_TRACE) | \
+ BIT(LOG_SEV_DUMP))
+
/* log source */
#define LOG_SRC_INIT 0
#define LOG_SRC_DEBUGFS 1
@@ -104,16 +117,16 @@ do { \
__func__, __LINE__, ##args); \
} while (0)
-#define LOG_INFOEX(priv, src, fmt, args...) \
+#define LOG_TRACE(priv, src, fmt, args...) \
do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_TRACE)) \
dev_dbg(priv2dev(priv), "%s %d: " fmt, \
__func__, __LINE__, ##args); \
} while (0)
#define LOG_HEXDUMP(src, ptr, len) \
do { \
- if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
+ if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_DUMP)) \
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
16, 1, ptr, len, false); \
} while (0)
@@ -142,7 +155,7 @@ ssize_t store_iwmct_log_level_fw(struct device *d,
#define LOG_ERROR(priv, src, fmt, args...)
#define LOG_WARNING(priv, src, fmt, args...)
#define LOG_INFO(priv, src, fmt, args...)
-#define LOG_INFOEX(priv, src, fmt, args...)
+#define LOG_TRACE(priv, src, fmt, args...)
#define LOG_HEXDUMP(src, ptr, len)
static inline void iwmct_log_top_message(struct iwmct_priv *priv,
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
index fafcaa481d7..3b7292a5cea 100644
--- a/drivers/misc/iwmc3200top/main.c
+++ b/drivers/misc/iwmc3200top/main.c
@@ -49,6 +49,20 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR(DRIVER_COPYRIGHT);
MODULE_FIRMWARE(FW_NAME(FW_API_VER));
+
+static inline int __iwmct_tx(struct iwmct_priv *priv, void *src, int count)
+{
+ return sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, src, count);
+
+}
+int iwmct_tx(struct iwmct_priv *priv, void *src, int count)
+{
+ int ret;
+ sdio_claim_host(priv->func);
+ ret = __iwmct_tx(priv, src, count);
+ sdio_release_host(priv->func);
+ return ret;
+}
/*
* This workers main task is to wait for OP_OPR_ALIVE
* from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
@@ -66,7 +80,7 @@ static void iwmct_rescan_worker(struct work_struct *ws)
ret = bus_rescan_devices(priv->func->dev.bus);
if (ret < 0)
- LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n");
+ LOG_INFO(priv, INIT, "bus_rescan_devices FAILED!!!\n");
}
static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
@@ -137,7 +151,7 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
int ret;
u8 *buf;
- LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n");
+ LOG_TRACE(priv, FW_MSG, "Sending hcmd:\n");
/* add padding to 256 for IWMC */
((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
@@ -158,27 +172,12 @@ int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
}
memcpy(buf, cmd, len);
-
- sdio_claim_host(priv->func);
- ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
- FW_HCMD_BLOCK_SIZE);
- sdio_release_host(priv->func);
+ ret = iwmct_tx(priv, buf, FW_HCMD_BLOCK_SIZE);
kfree(buf);
return ret;
}
-int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
- void *src, int count)
-{
- int ret;
-
- sdio_claim_host(priv->func);
- ret = sdio_memcpy_toio(priv->func, addr, src, count);
- sdio_release_host(priv->func);
-
- return ret;
-}
static void iwmct_irq_read_worker(struct work_struct *ws)
{
@@ -192,7 +191,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
priv = container_of(ws, struct iwmct_priv, isr_worker);
- LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
+ LOG_TRACE(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);
/* --------------------- Handshake with device -------------------- */
sdio_claim_host(priv->func);
@@ -273,8 +272,7 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
if (barker & BARKER_DNLOAD_SYNC_MSK) {
/* Send the same barker back */
- ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR,
- buf, iosize);
+ ret = __iwmct_tx(priv, buf, iosize);
if (ret) {
LOG_ERROR(priv, IRQ,
"error %d echoing barker\n", ret);
@@ -292,15 +290,6 @@ static void iwmct_irq_read_worker(struct work_struct *ws)
sdio_release_host(priv->func);
-
- LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
- LOG_INFO(priv, IRQ, "******* Top FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
- LOG_INFO(priv, IRQ, "******* GPS FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
- LOG_INFO(priv, IRQ, "******* BT FW %s requested ********\n",
- (priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");
-
if (priv->dbg.fw_download)
iwmct_fw_load(priv);
else
@@ -312,7 +301,7 @@ exit_release:
sdio_release_host(priv->func);
exit:
kfree(buf);
- LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n");
+ LOG_TRACE(priv, IRQ, "exit iwmct_irq_read_worker\n");
}
static void iwmct_irq(struct sdio_func *func)
@@ -325,12 +314,12 @@ static void iwmct_irq(struct sdio_func *func)
priv = sdio_get_drvdata(func);
- LOG_INFO(priv, IRQ, "enter iwmct_irq\n");
+ LOG_TRACE(priv, IRQ, "enter iwmct_irq\n");
/* read the function's status register */
val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
- LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
+ LOG_TRACE(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
if (!val) {
LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
@@ -372,7 +361,7 @@ static void iwmct_irq(struct sdio_func *func)
queue_work(priv->wq, &priv->isr_worker);
- LOG_INFO(priv, IRQ, "exit iwmct_irq\n");
+ LOG_TRACE(priv, IRQ, "exit iwmct_irq\n");
return;
@@ -608,8 +597,6 @@ static void iwmct_remove(struct sdio_func *func)
struct iwmct_work_struct *read_req;
struct iwmct_priv *priv = sdio_get_drvdata(func);
- priv = sdio_get_drvdata(func);
-
LOG_INFO(priv, INIT, "enter\n");
sdio_claim_host(func);
@@ -660,7 +647,7 @@ static int __init iwmct_init(void)
/* Default log filter settings */
iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
- iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL);
+ iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FW_FILTER_ALL);
iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
rc = sdio_register_driver(&iwmct_driver);
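
The new helpers split SDIO writes into a lock-free __iwmct_tx() for paths that already hold the host (the IRQ read worker) and a locking iwmct_tx() wrapper for everyone else. A minimal sketch of the same claim/release pattern, assuming a plain struct sdio_func pointer in place of this driver's priv->func and IWMC_SDIO_DATA_ADDR:

#include <linux/mmc/sdio_func.h>

/* Sketch only: 'func' and 'addr' stand in for the driver's private fields. */
static int locked_tx(struct sdio_func *func, unsigned int addr,
		     void *src, int count)
{
	int ret;

	sdio_claim_host(func);		/* serialize access to the SDIO host */
	ret = sdio_memcpy_toio(func, addr, src, count);
	sdio_release_host(func);

	return ret;
}
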
diff --git a/drivers/misc/lkdtm.c b/drivers/misc/lkdtm.c
index 3648b23d5c9..4a0648301fd 100644
--- a/drivers/misc/lkdtm.c
+++ b/drivers/misc/lkdtm.c
@@ -26,21 +26,9 @@
* It is adapted from the Linux Kernel Dump Test Tool by
* Fernando Luis Vazquez Cao <http://lkdtt.sourceforge.net>
*
- * Usage : insmod lkdtm.ko [recur_count={>0}] cpoint_name=<> cpoint_type=<>
- * [cpoint_count={>0}]
+ * Debugfs support added by Simon Kagstrom <simon.kagstrom@netinsight.net>
*
- * recur_count : Recursion level for the stack overflow test. Default is 10.
- *
- * cpoint_name : Crash point where the kernel is to be crashed. It can be
- * one of INT_HARDWARE_ENTRY, INT_HW_IRQ_EN, INT_TASKLET_ENTRY,
- * FS_DEVRW, MEM_SWAPOUT, TIMERADD, SCSI_DISPATCH_CMD,
- * IDE_CORE_CP
- *
- * cpoint_type : Indicates the action to be taken on hitting the crash point.
- * It can be one of PANIC, BUG, EXCEPTION, LOOP, OVERFLOW
- *
- * cpoint_count : Indicates the number of times the crash point is to be hit
- * to trigger an action. The default is 10.
+ * See Documentation/fault-injection/provoke-crashes.txt for instructions
*/
#include <linux/kernel.h>
@@ -53,13 +41,12 @@
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <scsi/scsi_cmnd.h>
+#include <linux/debugfs.h>
#ifdef CONFIG_IDE
#include <linux/ide.h>
#endif
-#define NUM_CPOINTS 8
-#define NUM_CPOINT_TYPES 5
#define DEFAULT_COUNT 10
#define REC_NUM_DEFAULT 10
@@ -72,7 +59,8 @@ enum cname {
MEM_SWAPOUT,
TIMERADD,
SCSI_DISPATCH_CMD,
- IDE_CORE_CP
+ IDE_CORE_CP,
+ DIRECT,
};
enum ctype {
@@ -81,7 +69,11 @@ enum ctype {
BUG,
EXCEPTION,
LOOP,
- OVERFLOW
+ OVERFLOW,
+ CORRUPT_STACK,
+ UNALIGNED_LOAD_STORE_WRITE,
+ OVERWRITE_ALLOCATION,
+ WRITE_AFTER_FREE,
};
static char* cp_name[] = {
@@ -92,7 +84,8 @@ static char* cp_name[] = {
"MEM_SWAPOUT",
"TIMERADD",
"SCSI_DISPATCH_CMD",
- "IDE_CORE_CP"
+ "IDE_CORE_CP",
+ "DIRECT",
};
static char* cp_type[] = {
@@ -100,7 +93,11 @@ static char* cp_type[] = {
"BUG",
"EXCEPTION",
"LOOP",
- "OVERFLOW"
+ "OVERFLOW",
+ "CORRUPT_STACK",
+ "UNALIGNED_LOAD_STORE_WRITE",
+ "OVERWRITE_ALLOCATION",
+ "WRITE_AFTER_FREE",
};
static struct jprobe lkdtm;
@@ -193,34 +190,66 @@ int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
}
#endif
+/* Return the crash type matching the given string, or NONE if it is invalid */
+static enum ctype parse_cp_type(const char *what, size_t count)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
+ if (!strcmp(what, cp_type[i]))
+ return i + 1;
+ }
+
+ return NONE;
+}
+
+static const char *cp_type_to_str(enum ctype type)
+{
+ if (type == NONE || type < 0 || type > ARRAY_SIZE(cp_type))
+ return "None";
+
+ return cp_type[type - 1];
+}
+
+static const char *cp_name_to_str(enum cname name)
+{
+ if (name == INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
+ return "INVALID";
+
+ return cp_name[name - 1];
+}
+
+
static int lkdtm_parse_commandline(void)
{
int i;
- if (cpoint_name == NULL || cpoint_type == NULL ||
- cpoint_count < 1 || recur_count < 1)
+ if (cpoint_count < 1 || recur_count < 1)
return -EINVAL;
- for (i = 0; i < NUM_CPOINTS; ++i) {
+ count = cpoint_count;
+
+ /* No special parameters */
+ if (!cpoint_type && !cpoint_name)
+ return 0;
+
+ /* Neither or both of these need to be set */
+ if (!cpoint_type || !cpoint_name)
+ return -EINVAL;
+
+ cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
+ if (cptype == NONE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
if (!strcmp(cpoint_name, cp_name[i])) {
cpoint = i + 1;
- break;
- }
- }
-
- for (i = 0; i < NUM_CPOINT_TYPES; ++i) {
- if (!strcmp(cpoint_type, cp_type[i])) {
- cptype = i + 1;
- break;
+ return 0;
}
}
- if (cpoint == INVALID || cptype == NONE)
- return -EINVAL;
-
- count = cpoint_count;
-
- return 0;
+ /* Could not find a valid crash point */
+ return -EINVAL;
}
static int recursive_loop(int a)
@@ -235,53 +264,92 @@ static int recursive_loop(int a)
return recursive_loop(a);
}
-void lkdtm_handler(void)
+static void lkdtm_do_action(enum ctype which)
{
- printk(KERN_INFO "lkdtm : Crash point %s of type %s hit\n",
- cpoint_name, cpoint_type);
- --count;
+ switch (which) {
+ case PANIC:
+ panic("dumptest");
+ break;
+ case BUG:
+ BUG();
+ break;
+ case EXCEPTION:
+ *((int *) 0) = 0;
+ break;
+ case LOOP:
+ for (;;)
+ ;
+ break;
+ case OVERFLOW:
+ (void) recursive_loop(0);
+ break;
+ case CORRUPT_STACK: {
+ volatile u32 data[8];
+ volatile u32 *p = data;
+
+ p[12] = 0x12345678;
+ break;
+ }
+ case UNALIGNED_LOAD_STORE_WRITE: {
+ static u8 data[5] __attribute__((aligned(4))) = {1, 2,
+ 3, 4, 5};
+ u32 *p;
+ u32 val = 0x12345678;
+
+ p = (u32 *)(data + 1);
+ if (*p == 0)
+ val = 0x87654321;
+ *p = val;
+ break;
+ }
+ case OVERWRITE_ALLOCATION: {
+ size_t len = 1020;
+ u32 *data = kmalloc(len, GFP_KERNEL);
+
+ data[1024 / sizeof(u32)] = 0x12345678;
+ kfree(data);
+ break;
+ }
+ case WRITE_AFTER_FREE: {
+ size_t len = 1024;
+ u32 *data = kmalloc(len, GFP_KERNEL);
+
+ kfree(data);
+ schedule();
+ memset(data, 0x78, len);
+ break;
+ }
+ case NONE:
+ default:
+ break;
+ }
+
+}
+
+static void lkdtm_handler(void)
+{
+ count--;
+ printk(KERN_INFO "lkdtm: Crash point %s of type %s hit, trigger in %d rounds\n",
+ cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
if (count == 0) {
- switch (cptype) {
- case NONE:
- break;
- case PANIC:
- printk(KERN_INFO "lkdtm : PANIC\n");
- panic("dumptest");
- break;
- case BUG:
- printk(KERN_INFO "lkdtm : BUG\n");
- BUG();
- break;
- case EXCEPTION:
- printk(KERN_INFO "lkdtm : EXCEPTION\n");
- *((int *) 0) = 0;
- break;
- case LOOP:
- printk(KERN_INFO "lkdtm : LOOP\n");
- for (;;);
- break;
- case OVERFLOW:
- printk(KERN_INFO "lkdtm : OVERFLOW\n");
- (void) recursive_loop(0);
- break;
- default:
- break;
- }
+ lkdtm_do_action(cptype);
count = cpoint_count;
}
}
-static int __init lkdtm_module_init(void)
+static int lkdtm_register_cpoint(enum cname which)
{
int ret;
- if (lkdtm_parse_commandline() == -EINVAL) {
- printk(KERN_INFO "lkdtm : Invalid command\n");
- return -EINVAL;
- }
+ cpoint = INVALID;
+ if (lkdtm.entry != NULL)
+ unregister_jprobe(&lkdtm);
- switch (cpoint) {
+ switch (which) {
+ case DIRECT:
+ lkdtm_do_action(cptype);
+ return 0;
case INT_HARDWARE_ENTRY:
lkdtm.kp.symbol_name = "do_IRQ";
lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
@@ -315,28 +383,268 @@ static int __init lkdtm_module_init(void)
lkdtm.kp.symbol_name = "generic_ide_ioctl";
lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
#else
- printk(KERN_INFO "lkdtm : Crash point not available\n");
+ printk(KERN_INFO "lkdtm: Crash point not available\n");
+ return -EINVAL;
#endif
break;
default:
- printk(KERN_INFO "lkdtm : Invalid Crash Point\n");
- break;
+ printk(KERN_INFO "lkdtm: Invalid Crash Point\n");
+ return -EINVAL;
}
+ cpoint = which;
if ((ret = register_jprobe(&lkdtm)) < 0) {
- printk(KERN_INFO "lkdtm : Couldn't register jprobe\n");
- return ret;
+ printk(KERN_INFO "lkdtm: Couldn't register jprobe\n");
+ cpoint = INVALID;
+ }
+
+ return ret;
+}
+
+static ssize_t do_register_entry(enum cname which, struct file *f,
+ const char __user *user_buf, size_t count, loff_t *off)
+{
+ char *buf;
+ int err;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ cptype = parse_cp_type(buf, count);
+ free_page((unsigned long) buf);
+
+ if (cptype == NONE)
+ return -EINVAL;
+
+ err = lkdtm_register_cpoint(which);
+ if (err < 0)
+ return err;
+
+ *off += count;
+
+ return count;
+}
+
+/* Generic read callback that just prints out the available crash types */
+static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ char *buf;
+ int i, n, out;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (buf == NULL)
+  return -ENOMEM;
+
+ n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
+ for (i = 0; i < ARRAY_SIZE(cp_type); i++)
+ n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
+ buf[n] = '\0';
+
+ out = simple_read_from_buffer(user_buf, count, off,
+ buf, n);
+ free_page((unsigned long) buf);
+
+ return out;
+}
+
+static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+
+
+static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(INT_HARDWARE_ENTRY, f, buf, count, off);
+}
+
+static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(INT_HW_IRQ_EN, f, buf, count, off);
+}
+
+static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(INT_TASKLET_ENTRY, f, buf, count, off);
+}
+
+static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(FS_DEVRW, f, buf, count, off);
+}
+
+static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(MEM_SWAPOUT, f, buf, count, off);
+}
+
+static ssize_t timeradd_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(TIMERADD, f, buf, count, off);
+}
+
+static ssize_t scsi_dispatch_cmd_entry(struct file *f,
+ const char __user *buf, size_t count, loff_t *off)
+{
+ return do_register_entry(SCSI_DISPATCH_CMD, f, buf, count, off);
+}
+
+static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
+ size_t count, loff_t *off)
+{
+ return do_register_entry(IDE_CORE_CP, f, buf, count, off);
+}
+
+/* Special entry to just crash directly. Available without KPROBEs */
+static ssize_t direct_entry(struct file *f, const char __user *user_buf,
+ size_t count, loff_t *off)
+{
+ enum ctype type;
+ char *buf;
+
+ if (count >= PAGE_SIZE)
+ return -EINVAL;
+ if (count < 1)
+ return -EINVAL;
+
+ buf = (char *)__get_free_page(GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ if (copy_from_user(buf, user_buf, count)) {
+ free_page((unsigned long) buf);
+ return -EFAULT;
+ }
+ /* NUL-terminate and strip the trailing newline */
+ buf[count] = '\0';
+ strim(buf);
+
+ type = parse_cp_type(buf, count);
+ free_page((unsigned long) buf);
+ if (type == NONE)
+ return -EINVAL;
+
+ printk(KERN_INFO "lkdtm: Performing direct entry %s\n",
+ cp_type_to_str(type));
+ lkdtm_do_action(type);
+ *off += count;
+
+ return count;
+}
+
+struct crash_entry {
+ const char *name;
+ const struct file_operations fops;
+};
+
+static const struct crash_entry crash_entries[] = {
+ {"DIRECT", {.read = lkdtm_debugfs_read,
+ .open = lkdtm_debugfs_open,
+ .write = direct_entry} },
+ {"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
+ .open = lkdtm_debugfs_open,
+ .write = int_hardware_entry} },
+ {"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
+ .open = lkdtm_debugfs_open,
+ .write = int_hw_irq_en} },
+ {"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
+ .open = lkdtm_debugfs_open,
+ .write = int_tasklet_entry} },
+ {"FS_DEVRW", {.read = lkdtm_debugfs_read,
+ .open = lkdtm_debugfs_open,
+ .write = fs_devrw_entry} },
+ {"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
+ .open = lkdtm_debugfs_open,
+ .write = mem_swapout_entry} },
+ {"TIMERADD", {.read = lkdtm_debugfs_read,
+ .open = lkdtm_debugfs_open,
+ .write = timeradd_entry} },
+ {"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
+ .open = lkdtm_debugfs_open,
+ .write = scsi_dispatch_cmd_entry} },
+ {"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
+ .open = lkdtm_debugfs_open,
+ .write = ide_core_cp_entry} },
+};
+
+static struct dentry *lkdtm_debugfs_root;
+
+static int __init lkdtm_module_init(void)
+{
+ int ret = -EINVAL;
+ int n_debugfs_entries = 1; /* Assume only the direct entry */
+ int i;
+
+ /* Register debugfs interface */
+ lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
+ if (!lkdtm_debugfs_root) {
+ printk(KERN_ERR "lkdtm: creating root dir failed\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_KPROBES
+ n_debugfs_entries = ARRAY_SIZE(crash_entries);
+#endif
+
+ for (i = 0; i < n_debugfs_entries; i++) {
+ const struct crash_entry *cur = &crash_entries[i];
+ struct dentry *de;
+
+ de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
+ NULL, &cur->fops);
+ if (de == NULL) {
+ printk(KERN_ERR "lkdtm: could not create %s\n",
+ cur->name);
+ goto out_err;
+ }
+ }
+
+ if (lkdtm_parse_commandline() == -EINVAL) {
+ printk(KERN_INFO "lkdtm: Invalid command\n");
+ goto out_err;
+ }
+
+ if (cpoint != INVALID && cptype != NONE) {
+ ret = lkdtm_register_cpoint(cpoint);
+ if (ret < 0) {
+ printk(KERN_INFO "lkdtm: Invalid crash point %d\n",
+ cpoint);
+ goto out_err;
+ }
+ printk(KERN_INFO "lkdtm: Crash point %s of type %s registered\n",
+ cpoint_name, cpoint_type);
+ } else {
+ printk(KERN_INFO "lkdtm: No crash points registered, enable through debugfs\n");
}
- printk(KERN_INFO "lkdtm : Crash point %s of type %s registered\n",
- cpoint_name, cpoint_type);
return 0;
+
+out_err:
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+ return ret;
}
static void __exit lkdtm_module_exit(void)
{
- unregister_jprobe(&lkdtm);
- printk(KERN_INFO "lkdtm : Crash point unregistered\n");
+ debugfs_remove_recursive(lkdtm_debugfs_root);
+
+ unregister_jprobe(&lkdtm);
+ printk(KERN_INFO "lkdtm: Crash point unregistered\n");
}
module_init(lkdtm_module_init);
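
For illustration, a minimal user-space trigger for the debugfs interface created above; the "provoke-crash/DIRECT" entry comes from the patch, while the /sys/kernel/debug mount point is an assumption about where debugfs is mounted:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *type = "PANIC";	/* any type listed when reading the file */
	int fd = open("/sys/kernel/debug/provoke-crash/DIRECT", O_WRONLY);

	if (fd < 0)
		return 1;
	/* on success this write does not return: the kernel panics */
	write(fd, type, strlen(type));
	close(fd);
	return 0;
}
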
diff --git a/drivers/misc/sgi-xp/xpnet.c b/drivers/misc/sgi-xp/xpnet.c
index 16f0abda142..57b152f8d1b 100644
--- a/drivers/misc/sgi-xp/xpnet.c
+++ b/drivers/misc/sgi-xp/xpnet.c
@@ -475,7 +475,7 @@ xpnet_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (skb->data[0] == 0xff) {
/* we are being asked to broadcast to all partitions */
- for_each_bit(dest_partid, xpnet_broadcast_partitions,
+ for_each_set_bit(dest_partid, xpnet_broadcast_partitions,
xp_max_npartitions) {
xpnet_send(skb, queued_msg, start_addr, end_addr,
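
The hunk above only follows the rename of the bitmap iterator (for_each_bit became for_each_set_bit). A self-contained sketch of how the macro walks the set bits of a bitmap, mirroring the loop over xpnet_broadcast_partitions:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/kernel.h>

static void walk_partitions(void)
{
	DECLARE_BITMAP(parts, 64);
	int bit;

	bitmap_zero(parts, 64);
	set_bit(3, parts);
	set_bit(17, parts);

	/* visits bit 3, then bit 17 */
	for_each_set_bit(bit, parts, 64)
		pr_info("partition %d is set\n", bit);
}
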
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index f53755533e7..3fab78ba895 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -37,6 +37,7 @@
#include <linux/gfp.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
+#include <linux/kfifo.h>
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
@@ -47,19 +48,9 @@
#define UART_NR 8 /* Number of UARTs this driver can handle */
-#define UART_XMIT_SIZE PAGE_SIZE
+#define FIFO_SIZE PAGE_SIZE
#define WAKEUP_CHARS 256
-#define circ_empty(circ) ((circ)->head == (circ)->tail)
-#define circ_clear(circ) ((circ)->head = (circ)->tail = 0)
-
-#define circ_chars_pending(circ) \
- (CIRC_CNT((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-#define circ_chars_free(circ) \
- (CIRC_SPACE((circ)->head, (circ)->tail, UART_XMIT_SIZE))
-
-
struct uart_icount {
__u32 cts;
__u32 dsr;
@@ -82,7 +73,7 @@ struct sdio_uart_port {
struct mutex func_lock;
struct task_struct *in_sdio_uart_irq;
unsigned int regs_offset;
- struct circ_buf xmit;
+ struct kfifo xmit_fifo;
spinlock_t write_lock;
struct uart_icount icount;
unsigned int uartclk;
@@ -105,6 +96,8 @@ static int sdio_uart_add_port(struct sdio_uart_port *port)
kref_init(&port->kref);
mutex_init(&port->func_lock);
spin_lock_init(&port->write_lock);
+ if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
+ return -ENOMEM;
spin_lock(&sdio_uart_table_lock);
for (index = 0; index < UART_NR; index++) {
@@ -140,6 +133,7 @@ static void sdio_uart_port_destroy(struct kref *kref)
{
struct sdio_uart_port *port =
container_of(kref, struct sdio_uart_port, kref);
+ kfifo_free(&port->xmit_fifo);
kfree(port);
}
@@ -456,9 +450,11 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
{
- struct circ_buf *xmit = &port->xmit;
+ struct kfifo *xmit = &port->xmit_fifo;
int count;
struct tty_struct *tty;
+ u8 iobuf[16];
+ int len;
if (port->x_char) {
sdio_out(port, UART_TX, port->x_char);
@@ -469,27 +465,25 @@ static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
tty = tty_port_tty_get(&port->port);
- if (tty == NULL || circ_empty(xmit) ||
+ if (tty == NULL || !kfifo_len(xmit) ||
tty->stopped || tty->hw_stopped) {
sdio_uart_stop_tx(port);
tty_kref_put(tty);
return;
}
- count = 16;
- do {
- sdio_out(port, UART_TX, xmit->buf[xmit->tail]);
- xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+ len = kfifo_out_locked(xmit, iobuf, 16, &port->write_lock);
+ for (count = 0; count < len; count++) {
+ sdio_out(port, UART_TX, iobuf[count]);
port->icount.tx++;
- if (circ_empty(xmit))
- break;
- } while (--count > 0);
+ }
- if (circ_chars_pending(xmit) < WAKEUP_CHARS)
+ len = kfifo_len(xmit);
+ if (len < WAKEUP_CHARS) {
tty_wakeup(tty);
-
- if (circ_empty(xmit))
- sdio_uart_stop_tx(port);
+ if (len == 0)
+ sdio_uart_stop_tx(port);
+ }
tty_kref_put(tty);
}
@@ -632,7 +626,6 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
{
struct sdio_uart_port *port =
container_of(tport, struct sdio_uart_port, port);
- unsigned long page;
int ret;
/*
@@ -641,22 +634,17 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
*/
set_bit(TTY_IO_ERROR, &tty->flags);
- /* Initialise and allocate the transmit buffer. */
- page = __get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
- port->xmit.buf = (unsigned char *)page;
- circ_clear(&port->xmit);
+ kfifo_reset(&port->xmit_fifo);
ret = sdio_uart_claim_func(port);
if (ret)
- goto err1;
+ return ret;
ret = sdio_enable_func(port->func);
if (ret)
- goto err2;
+ goto err1;
ret = sdio_claim_irq(port->func, sdio_uart_irq);
if (ret)
- goto err3;
+ goto err2;
/*
* Clear the FIFO buffers and disable them.
@@ -700,12 +688,10 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
sdio_uart_release_func(port);
return 0;
-err3:
- sdio_disable_func(port->func);
err2:
- sdio_uart_release_func(port);
+ sdio_disable_func(port->func);
err1:
- free_page((unsigned long)port->xmit.buf);
+ sdio_uart_release_func(port);
return ret;
}
@@ -727,7 +713,7 @@ static void sdio_uart_shutdown(struct tty_port *tport)
ret = sdio_uart_claim_func(port);
if (ret)
- goto skip;
+ return;
sdio_uart_stop_rx(port);
@@ -749,10 +735,6 @@ static void sdio_uart_shutdown(struct tty_port *tport)
sdio_disable_func(port->func);
sdio_uart_release_func(port);
-
-skip:
- /* Free the transmit buffer page. */
- free_page((unsigned long)port->xmit.buf);
}
/**
@@ -822,27 +804,12 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
int count)
{
struct sdio_uart_port *port = tty->driver_data;
- struct circ_buf *circ = &port->xmit;
- int c, ret = 0;
+ int ret;
if (!port->func)
return -ENODEV;
- spin_lock(&port->write_lock);
- while (1) {
- c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
- if (count < c)
- c = count;
- if (c <= 0)
- break;
- memcpy(circ->buf + circ->head, buf, c);
- circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
- buf += c;
- count -= c;
- ret += c;
- }
- spin_unlock(&port->write_lock);
-
+ ret = kfifo_in_locked(&port->xmit_fifo, buf, count, &port->write_lock);
if (!(port->ier & UART_IER_THRI)) {
int err = sdio_uart_claim_func(port);
if (!err) {
@@ -859,13 +826,13 @@ static int sdio_uart_write(struct tty_struct *tty, const unsigned char *buf,
static int sdio_uart_write_room(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- return port ? circ_chars_free(&port->xmit) : 0;
+ return FIFO_SIZE - kfifo_len(&port->xmit_fifo);
}
static int sdio_uart_chars_in_buffer(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- return port ? circ_chars_pending(&port->xmit) : 0;
+ return kfifo_len(&port->xmit_fifo);
}
static void sdio_uart_send_xchar(struct tty_struct *tty, char ch)
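
The conversion above replaces the driver's open-coded circular buffer with the generic kfifo. A minimal sketch of the locked in/out calls the driver now relies on (the fifo and lock names here are placeholders, not part of the driver):

#include <linux/kernel.h>
#include <linux/kfifo.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

static struct kfifo tx_fifo;
static DEFINE_SPINLOCK(tx_lock);

static int example_setup(void)
{
	/* same allocation sdio_uart_add_port() performs */
	return kfifo_alloc(&tx_fifo, PAGE_SIZE, GFP_KERNEL);
}

static void example_io(const unsigned char *src, unsigned int n)
{
	unsigned char chunk[16];
	unsigned int queued, sent;

	queued = kfifo_in_locked(&tx_fifo, src, n, &tx_lock);		/* producer side */
	sent = kfifo_out_locked(&tx_fifo, chunk, sizeof(chunk), &tx_lock); /* consumer side */
	pr_debug("queued %u, drained %u, %u bytes left\n",
		 queued, sent, kfifo_len(&tx_fifo));
}
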
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 30acd526582..f4b97d3c3d0 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1151,6 +1151,9 @@ void mmc_stop_host(struct mmc_host *host)
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
+ /* clear pm flags now and let card drivers set them as needed */
+ host->pm_flags = 0;
+
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
if (host->bus_ops->remove)
@@ -1273,12 +1276,13 @@ int mmc_suspend_host(struct mmc_host *host, pm_message_t state)
mmc_claim_host(host);
mmc_detach_bus(host);
mmc_release_host(host);
+ host->pm_flags = 0;
err = 0;
}
}
mmc_bus_put(host);
- if (!err)
+ if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER))
mmc_power_off(host);
return err;
@@ -1296,8 +1300,10 @@ int mmc_resume_host(struct mmc_host *host)
mmc_bus_get(host);
if (host->bus_ops && !host->bus_dead) {
- mmc_power_up(host);
- mmc_select_voltage(host, host->ocr);
+ if (!(host->pm_flags & MMC_PM_KEEP_POWER)) {
+ mmc_power_up(host);
+ mmc_select_voltage(host, host->ocr);
+ }
BUG_ON(!host->bus_ops->resume);
err = host->bus_ops->resume(host);
if (err) {
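
With these changes the core skips mmc_power_off()/mmc_power_up() across suspend only when MMC_PM_KEEP_POWER is set in host->pm_flags. For that flag to ever be settable, the host controller driver has to advertise the capability first; a one-line sketch, assuming 'mmc' is the driver's struct mmc_host:

	/* host driver probe(): claim it can keep the card powered in suspend */
	mmc->pm_caps |= MMC_PM_KEEP_POWER;
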
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 06b64085a35..2dd4cfe7ca1 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -188,6 +188,40 @@ static int sdio_disable_cd(struct mmc_card *card)
}
/*
+ * Devices that remain active during a system suspend are
+ * put back into 1-bit mode.
+ */
+static int sdio_disable_wide(struct mmc_card *card)
+{
+ int ret;
+ u8 ctrl;
+
+ if (!(card->host->caps & MMC_CAP_4_BIT_DATA))
+ return 0;
+
+ if (card->cccr.low_speed && !card->cccr.wide_bus)
+ return 0;
+
+ ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl);
+ if (ret)
+ return ret;
+
+ if (!(ctrl & SDIO_BUS_WIDTH_4BIT))
+ return 0;
+
+ ctrl &= ~SDIO_BUS_WIDTH_4BIT;
+ ctrl |= SDIO_BUS_ASYNC_INT;
+
+ ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
+ if (ret)
+ return ret;
+
+ mmc_set_bus_width(card->host, MMC_BUS_WIDTH_1);
+
+ return 0;
+}
+
+/*
* Test if the card supports high-speed mode and, if so, switch to it.
*/
static int sdio_enable_hs(struct mmc_card *card)
@@ -224,7 +258,7 @@ static int sdio_enable_hs(struct mmc_card *card)
* we're trying to reinitialise.
*/
static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
- struct mmc_card *oldcard)
+ struct mmc_card *oldcard, int powered_resume)
{
struct mmc_card *card;
int err;
@@ -235,9 +269,11 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
/*
* Inform the card of the voltage
*/
- err = mmc_send_io_op_cond(host, host->ocr, &ocr);
- if (err)
- goto err;
+ if (!powered_resume) {
+ err = mmc_send_io_op_cond(host, host->ocr, &ocr);
+ if (err)
+ goto err;
+ }
/*
* For SPI, enable CRC as appropriate.
@@ -262,7 +298,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
/*
* For native busses: set card RCA and quit open drain mode.
*/
- if (!mmc_host_is_spi(host)) {
+ if (!powered_resume && !mmc_host_is_spi(host)) {
err = mmc_send_relative_addr(host, &card->rca);
if (err)
goto remove;
@@ -273,7 +309,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
/*
* Select card, as all following commands rely on that.
*/
- if (!mmc_host_is_spi(host)) {
+ if (!powered_resume && !mmc_host_is_spi(host)) {
err = mmc_select_card(card);
if (err)
goto remove;
@@ -425,6 +461,12 @@ static int mmc_sdio_suspend(struct mmc_host *host)
}
}
+ if (!err && host->pm_flags & MMC_PM_KEEP_POWER) {
+ mmc_claim_host(host);
+ sdio_disable_wide(host->card);
+ mmc_release_host(host);
+ }
+
return err;
}
@@ -437,7 +479,13 @@ static int mmc_sdio_resume(struct mmc_host *host)
/* Basic card reinitialization. */
mmc_claim_host(host);
- err = mmc_sdio_init_card(host, host->ocr, host->card);
+ err = mmc_sdio_init_card(host, host->ocr, host->card,
+ (host->pm_flags & MMC_PM_KEEP_POWER));
+ if (!err)
+ /* We may have switched to 1-bit mode during suspend. */
+ err = sdio_enable_wide(host->card);
+ if (!err && host->sdio_irqs)
+ mmc_signal_sdio_irq(host);
mmc_release_host(host);
/*
@@ -507,7 +555,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)
/*
* Detect and init the card.
*/
- err = mmc_sdio_init_card(host, host->ocr, NULL);
+ err = mmc_sdio_init_card(host, host->ocr, NULL, 0);
if (err)
goto err;
card = host->card;
diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c
index f9aa8a7deff..ff27c8c7135 100644
--- a/drivers/mmc/core/sdio_io.c
+++ b/drivers/mmc/core/sdio_io.c
@@ -189,7 +189,12 @@ static inline unsigned int sdio_max_byte_size(struct sdio_func *func)
{
unsigned mval = min(func->card->host->max_seg_size,
func->card->host->max_blk_size);
- mval = min(mval, func->max_blksize);
+
+ if (mmc_blksz_for_byte_mode(func->card))
+ mval = min(mval, func->cur_blksize);
+ else
+ mval = min(mval, func->max_blksize);
+
return min(mval, 512u); /* maximum size for byte mode */
}
@@ -635,3 +640,52 @@ void sdio_f0_writeb(struct sdio_func *func, unsigned char b, unsigned int addr,
*err_ret = ret;
}
EXPORT_SYMBOL_GPL(sdio_f0_writeb);
+
+/**
+ * sdio_get_host_pm_caps - get host power management capabilities
+ * @func: SDIO function attached to host
+ *
+ * Returns a capability bitmask corresponding to power management
+ * features supported by the host controller that the card function
+ * might rely upon during a system suspend. The host doesn't need
+ * to be claimed, nor the function active, for this information to be
+ * obtained.
+ */
+mmc_pm_flag_t sdio_get_host_pm_caps(struct sdio_func *func)
+{
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ return func->card->host->pm_caps;
+}
+EXPORT_SYMBOL_GPL(sdio_get_host_pm_caps);
+
+/**
+ * sdio_set_host_pm_flags - set wanted host power management capabilities
+ * @func: SDIO function attached to host
+ *
+ * Set a capability bitmask corresponding to wanted host controller
+ * power management features for the upcoming suspend state.
+ * This must be called, if needed, each time the suspend method of
+ * the function driver is called, and must contain only bits that
+ * were returned by sdio_get_host_pm_caps().
+ * The host doesn't need to be claimed, nor the function active,
+ * for this information to be set.
+ */
+int sdio_set_host_pm_flags(struct sdio_func *func, mmc_pm_flag_t flags)
+{
+ struct mmc_host *host;
+
+ BUG_ON(!func);
+ BUG_ON(!func->card);
+
+ host = func->card->host;
+
+ if (flags & ~host->pm_caps)
+ return -EINVAL;
+
+ /* function suspend methods are serialized, hence no lock needed */
+ host->pm_flags |= flags;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(sdio_set_host_pm_flags);
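
The typical consumer of these two helpers is an SDIO function driver's suspend callback; a hedged sketch (the callback name and how it gets registered are not part of this patch):

#include <linux/mmc/pm.h>
#include <linux/mmc/sdio_func.h>

static int example_sdio_suspend(struct device *dev)
{
	struct sdio_func *func = dev_to_sdio_func(dev);
	mmc_pm_flag_t caps = sdio_get_host_pm_caps(func);

	/* bail out if the host cannot keep the card powered */
	if (!(caps & MMC_PM_KEEP_POWER))
		return -ENOSYS;

	/* ask the core not to power the card down during suspend */
	return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
}
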
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index ce1d28884e2..7b431bbab7f 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -69,20 +69,16 @@ config MMC_SDHCI_PCI
If unsure, say N.
config MMC_RICOH_MMC
- tristate "Ricoh MMC Controller Disabler (EXPERIMENTAL)"
+ bool "Ricoh MMC Controller Disabler (EXPERIMENTAL)"
depends on MMC_SDHCI_PCI
help
- This selects the disabler for the Ricoh MMC Controller. This
+ This adds a PCI quirk to disable the Ricoh MMC controller. This
proprietary controller is unnecessary because the SDHCI driver
supports MMC cards on the SD controller, but if it is not
disabled, it will steal the MMC cards away - rendering them
- useless. It is safe to select this driver even if you don't
+ useless. It is safe to select this even if you don't
have a Ricoh based card reader.
-
- To compile this driver as a module, choose M here:
- the module will be called ricoh_mmc.
-
If unsure, say Y.
config MMC_SDHCI_OF
@@ -193,6 +189,7 @@ config MMC_AU1X
choice
prompt "Atmel SD/MMC Driver"
+ depends on AVR32 || ARCH_AT91
default MMC_ATMELMCI if AVR32
help
Choose which driver to use for the Atmel MCI Silicon
@@ -399,7 +396,7 @@ config MMC_VIA_SDMMC
config SDH_BFIN
tristate "Blackfin Secure Digital Host support"
- depends on MMC && ((BF54x && !BF544) || (BF51x && !BF512))
+ depends on (BF54x && !BF544) || (BF51x && !BF512)
help
If you say yes here you will get support for the Blackfin on-chip
Secure Digital Host interface. This includes support for MMC and
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3d253dd4240..f4803977dfc 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -12,7 +12,6 @@ obj-$(CONFIG_MMC_IMX) += imxmmc.o
obj-$(CONFIG_MMC_MXC) += mxcmmc.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o
-obj-$(CONFIG_MMC_RICOH_MMC) += ricoh_mmc.o
obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o
obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 63924e0c7ea..91dc60cd032 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -78,6 +78,17 @@
#define DRIVER_NAME "at91_mci"
+static inline int at91mci_is_mci1rev2xx(void)
+{
+ return (cpu_is_at91sam9260()
+ || cpu_is_at91sam9263()
+ || cpu_is_at91cap9()
+ || cpu_is_at91sam9rl()
+ || cpu_is_at91sam9g10()
+ || cpu_is_at91sam9g20()
+ );
+}
+
#define FL_SENT_COMMAND (1 << 0)
#define FL_SENT_STOP (1 << 1)
@@ -88,6 +99,10 @@
#define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg))
+#define MCI_BLKSIZE 512
+#define MCI_MAXBLKSIZE 4095
+#define MCI_BLKATONCE 256
+#define MCI_BUFSIZE (MCI_BLKSIZE * MCI_BLKATONCE)
/*
* Low level type for this driver
@@ -200,8 +215,8 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
size = data->blksz * data->blocks;
len = data->sg_len;
- /* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
- if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
+ /* MCI1 rev2xx Data Write Operation and number of bytes erratum */
+ if (at91mci_is_mci1rev2xx())
if (host->total_length == 12)
memset(dmabuf, 0, 12);
@@ -227,8 +242,10 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
for (index = 0; index < (amount / 4); index++)
*dmabuf++ = swab32(sgbuffer[index]);
} else {
- memcpy(dmabuf, sgbuffer, amount);
- dmabuf += amount;
+ char *tmpv = (char *)dmabuf;
+ memcpy(tmpv, sgbuffer, amount);
+ tmpv += amount;
+ dmabuf = (unsigned *)tmpv;
}
kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
@@ -245,80 +262,14 @@ static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data
}
/*
- * Prepare a dma read
- */
-static void at91_mci_pre_dma_read(struct at91mci_host *host)
-{
- int i;
- struct scatterlist *sg;
- struct mmc_command *cmd;
- struct mmc_data *data;
-
- pr_debug("pre dma read\n");
-
- cmd = host->cmd;
- if (!cmd) {
- pr_debug("no command\n");
- return;
- }
-
- data = cmd->data;
- if (!data) {
- pr_debug("no data\n");
- return;
- }
-
- for (i = 0; i < 2; i++) {
- /* nothing left to transfer */
- if (host->transfer_index >= data->sg_len) {
- pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
- break;
- }
-
- /* Check to see if this needs filling */
- if (i == 0) {
- if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
- pr_debug("Transfer active in current\n");
- continue;
- }
- }
- else {
- if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
- pr_debug("Transfer active in next\n");
- continue;
- }
- }
-
- /* Setup the next transfer */
- pr_debug("Using transfer index %d\n", host->transfer_index);
-
- sg = &data->sg[host->transfer_index++];
- pr_debug("sg = %p\n", sg);
-
- sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);
-
- pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);
-
- if (i == 0) {
- at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
- at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
- }
- else {
- at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
- at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
- }
- }
-
- pr_debug("pre dma read done\n");
-}
-
-/*
* Handle after a dma read
*/
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
struct mmc_command *cmd;
struct mmc_data *data;
+ unsigned int len, i, size;
+ unsigned *dmabuf = host->buffer;
pr_debug("post dma read\n");
@@ -334,42 +285,39 @@ static void at91_mci_post_dma_read(struct at91mci_host *host)
return;
}
- while (host->in_use_index < host->transfer_index) {
- struct scatterlist *sg;
+ size = data->blksz * data->blocks;
+ len = data->sg_len;
- pr_debug("finishing index %d\n", host->in_use_index);
+ at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
+ at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
- sg = &data->sg[host->in_use_index++];
+ for (i = 0; i < len; i++) {
+ struct scatterlist *sg;
+ int amount;
+ unsigned int *sgbuffer;
- pr_debug("Unmapping page %08X\n", sg->dma_address);
+ sg = &data->sg[i];
- dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);
+ sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
+ amount = min(size, sg->length);
+ size -= amount;
if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */
- unsigned int *buffer;
int index;
-
- /* Swap the contents of the buffer */
- buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
- pr_debug("buffer = %p, length = %d\n", buffer, sg->length);
-
- for (index = 0; index < (sg->length / 4); index++)
- buffer[index] = swab32(buffer[index]);
-
- kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
+ for (index = 0; index < (amount / 4); index++)
+ sgbuffer[index] = swab32(*dmabuf++);
+ } else {
+ char *tmpv = (char *)dmabuf;
+ memcpy(sgbuffer, tmpv, amount);
+ tmpv += amount;
+ dmabuf = (unsigned *)tmpv;
}
- flush_dcache_page(sg_page(sg));
-
- data->bytes_xfered += sg->length;
- }
-
- /* Is there another transfer to trigger? */
- if (host->transfer_index < data->sg_len)
- at91_mci_pre_dma_read(host);
- else {
- at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
- at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
+ kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);
+ dmac_flush_range((void *)sgbuffer, ((void *)sgbuffer) + amount);
+ data->bytes_xfered += amount;
+ if (size == 0)
+ break;
}
pr_debug("post dma read done\n");
@@ -461,7 +409,7 @@ static void at91_mci_enable(struct at91mci_host *host)
at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
mr = AT91_MCI_PDCMODE | 0x34a;
- if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
+ if (at91mci_is_mci1rev2xx())
mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;
at91_mci_write(host, AT91_MCI_MR, mr);
@@ -602,10 +550,14 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
/*
* Handle a read
*/
- host->buffer = NULL;
host->total_length = 0;
- at91_mci_pre_dma_read(host);
+ at91_mci_write(host, ATMEL_PDC_RPR, host->physical_address);
+ at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ?
+ (blocks * block_length) : (blocks * block_length) / 4);
+ at91_mci_write(host, ATMEL_PDC_RNPR, 0);
+ at91_mci_write(host, ATMEL_PDC_RNCR, 0);
+
ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
}
else {
@@ -614,27 +566,15 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
*/
host->total_length = block_length * blocks;
/*
- * AT91SAM926[0/3] Data Write Operation and
+ * MCI1 rev2xx Data Write Operation and
* number of bytes erratum
*/
- if (cpu_is_at91sam9260 () || cpu_is_at91sam9263())
+ if (at91mci_is_mci1rev2xx())
if (host->total_length < 12)
host->total_length = 12;
- host->buffer = kmalloc(host->total_length, GFP_KERNEL);
- if (!host->buffer) {
- pr_debug("Can't alloc tx buffer\n");
- cmd->error = -ENOMEM;
- mmc_request_done(host->mmc, host->request);
- return;
- }
-
at91_mci_sg_to_dma(host, data);
- host->physical_address = dma_map_single(NULL,
- host->buffer, host->total_length,
- DMA_TO_DEVICE);
-
pr_debug("Transmitting %d bytes\n", host->total_length);
at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
@@ -701,14 +641,6 @@ static void at91_mci_completed_command(struct at91mci_host *host, unsigned int s
cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));
- if (host->buffer) {
- dma_unmap_single(NULL,
- host->physical_address, host->total_length,
- DMA_TO_DEVICE);
- kfree(host->buffer);
- host->buffer = NULL;
- }
-
pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
status, at91_mci_read(host, AT91_MCI_SR),
cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
@@ -754,7 +686,8 @@ static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
host->request = mrq;
host->flags = 0;
- mod_timer(&host->timer, jiffies + HZ);
+ /* more than 1s timeout needed with slow SD cards */
+ mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
at91_mci_process_next(host);
}
@@ -942,7 +875,8 @@ static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
pr_debug("****** Resetting SD-card bus width ******\n");
at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
}
- mmc_detect_change(host->mmc, msecs_to_jiffies(100));
+ /* 0.5s needed because of early card detect switch firing */
+ mmc_detect_change(host->mmc, msecs_to_jiffies(500));
}
return IRQ_HANDLED;
}
@@ -1006,24 +940,42 @@ static int __init at91_mci_probe(struct platform_device *pdev)
mmc->f_min = 375000;
mmc->f_max = 25000000;
mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
- mmc->caps = MMC_CAP_SDIO_IRQ;
+ mmc->caps = 0;
- mmc->max_blk_size = 4095;
- mmc->max_blk_count = mmc->max_req_size;
+ mmc->max_blk_size = MCI_MAXBLKSIZE;
+ mmc->max_blk_count = MCI_BLKATONCE;
+ mmc->max_req_size = MCI_BUFSIZE;
+ mmc->max_phys_segs = MCI_BLKATONCE;
+ mmc->max_hw_segs = MCI_BLKATONCE;
+ mmc->max_seg_size = MCI_BUFSIZE;
host = mmc_priv(mmc);
host->mmc = mmc;
- host->buffer = NULL;
host->bus_mode = 0;
host->board = pdev->dev.platform_data;
if (host->board->wire4) {
- if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
+ if (at91mci_is_mci1rev2xx())
mmc->caps |= MMC_CAP_4_BIT_DATA;
else
dev_warn(&pdev->dev, "4 wire bus mode not supported"
" - using 1 wire\n");
}
+ host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE,
+ &host->physical_address, GFP_KERNEL);
+ if (!host->buffer) {
+ ret = -ENOMEM;
+ dev_err(&pdev->dev, "Can't allocate transmit buffer\n");
+ goto fail5;
+ }
+
+ /* Add SDIO capability when available */
+ if (at91mci_is_mci1rev2xx()) {
+ /* at91mci MCI1 rev2xx sdio interrupt erratum */
+ if (host->board->wire4 || !host->board->slot_b)
+ mmc->caps |= MMC_CAP_SDIO_IRQ;
+ }
+
/*
* Reserve GPIOs ... board init code makes sure these pins are set
* up as GPIOs with the right direction (input, except for vcc)
@@ -1032,7 +984,7 @@ static int __init at91_mci_probe(struct platform_device *pdev)
ret = gpio_request(host->board->det_pin, "mmc_detect");
if (ret < 0) {
dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
- goto fail5;
+ goto fail4b;
}
}
if (host->board->wp_pin) {
@@ -1132,6 +1084,10 @@ fail3:
fail4:
if (host->board->det_pin)
gpio_free(host->board->det_pin);
+fail4b:
+ if (host->buffer)
+ dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
+ host->buffer, host->physical_address);
fail5:
mmc_free_host(mmc);
fail6:
@@ -1154,6 +1110,10 @@ static int __exit at91_mci_remove(struct platform_device *pdev)
host = mmc_priv(mmc);
+ if (host->buffer)
+ dma_free_coherent(&pdev->dev, MCI_BUFSIZE,
+ host->buffer, host->physical_address);
+
if (host->board->det_pin) {
if (device_can_wakeup(&pdev->dev))
free_irq(gpio_to_irq(host->board->det_pin), host);
diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c
index 3343a57355c..56f7b448b91 100644
--- a/drivers/mmc/host/bfin_sdh.c
+++ b/drivers/mmc/host/bfin_sdh.c
@@ -115,7 +115,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
unsigned int length;
unsigned int data_ctl;
unsigned int dma_cfg;
- struct scatterlist *sg;
+ unsigned int cycle_ns, timeout;
dev_dbg(mmc_dev(host->mmc), "%s enter flags: 0x%x\n", __func__, data->flags);
host->data = data;
@@ -136,8 +136,11 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
data_ctl |= ((ffs(data->blksz) - 1) << 4);
bfin_write_SDH_DATA_CTL(data_ctl);
-
- bfin_write_SDH_DATA_TIMER(0xFFFF);
+ /* the time of a host clock period in ns */
+ cycle_ns = 1000000000 / (get_sclk() / (2 * (host->clk_div + 1)));
+ timeout = data->timeout_ns / cycle_ns;
+ timeout += data->timeout_clks;
+ bfin_write_SDH_DATA_TIMER(timeout);
SSYNC();
if (data->flags & MMC_DATA_READ) {
@@ -151,6 +154,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)
#if defined(CONFIG_BF54x)
dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN;
{
+ struct scatterlist *sg;
int i;
for_each_sg(data->sg, sg, host->dma_len, i) {
host->sg_cpu[i].start_addr = sg_dma_address(sg);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index dd45e7c3517..3bd0ba294e9 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -73,6 +73,7 @@
/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST (1 << 0)
#define MMCCTL_CMDRST (1 << 1)
+#define MMCCTL_WIDTH_8_BIT (1 << 8)
#define MMCCTL_WIDTH_4_BIT (1 << 2)
#define MMCCTL_DATEG_DISABLED (0 << 6)
#define MMCCTL_DATEG_RISING (1 << 6)
@@ -791,22 +792,42 @@ static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)
static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
- unsigned int mmc_pclk = 0;
struct mmc_davinci_host *host = mmc_priv(mmc);
- mmc_pclk = host->mmc_input_clk;
dev_dbg(mmc_dev(host->mmc),
"clock %dHz busmode %d powermode %d Vdd %04x\n",
ios->clock, ios->bus_mode, ios->power_mode,
ios->vdd);
- if (ios->bus_width == MMC_BUS_WIDTH_4) {
- dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
- writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT,
- host->base + DAVINCI_MMCCTL);
- } else {
- dev_dbg(mmc_dev(host->mmc), "Disabling 4 bit mode\n");
- writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT,
+
+ switch (ios->bus_width) {
+ case MMC_BUS_WIDTH_8:
+ dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n");
+ writel((readl(host->base + DAVINCI_MMCCTL) &
+ ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT,
host->base + DAVINCI_MMCCTL);
+ break;
+ case MMC_BUS_WIDTH_4:
+ dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n");
+ if (host->version == MMC_CTLR_VERSION_2)
+ writel((readl(host->base + DAVINCI_MMCCTL) &
+ ~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ else
+ writel(readl(host->base + DAVINCI_MMCCTL) |
+ MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ break;
+ case MMC_BUS_WIDTH_1:
+ dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n");
+ if (host->version == MMC_CTLR_VERSION_2)
+ writel(readl(host->base + DAVINCI_MMCCTL) &
+ ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT),
+ host->base + DAVINCI_MMCCTL);
+ else
+ writel(readl(host->base + DAVINCI_MMCCTL) &
+ ~MMCCTL_WIDTH_4_BIT,
+ host->base + DAVINCI_MMCCTL);
+ break;
}
calculate_clk_divider(mmc, ios);
@@ -1189,10 +1210,14 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)
/* REVISIT: someday, support IRQ-driven card detection. */
mmc->caps |= MMC_CAP_NEEDS_POLL;
+ mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
- if (!pdata || pdata->wires == 4 || pdata->wires == 0)
+ if (pdata && (pdata->wires == 4 || pdata->wires == 0))
mmc->caps |= MMC_CAP_4_BIT_DATA;
+ if (pdata && (pdata->wires == 8))
+ mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA);
+
host->version = pdata->version;
mmc->ops = &mmc_davinci_ops;
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 4b232251890..83f0affadca 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -30,6 +30,8 @@
#include <linux/mmc/core.h>
#include <linux/io.h>
#include <linux/semaphore.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
#include <plat/dma.h>
#include <mach/hardware.h>
#include <plat/board.h>
@@ -146,6 +148,15 @@ struct omap_hsmmc_host {
struct clk *fclk;
struct clk *iclk;
struct clk *dbclk;
+ /*
+ * vcc == configured supply
+ * vcc_aux == optional
+ * - MMC1, supply for DAT4..DAT7
+ * - MMC2/MMC3, external level shifter voltage supply, for
+ * chip (SDIO, eMMC, etc) or transceiver (MMC2 only)
+ */
+ struct regulator *vcc;
+ struct regulator *vcc_aux;
struct semaphore sem;
struct work_struct mmc_carddetect_work;
void __iomem *base;
@@ -171,10 +182,337 @@ struct omap_hsmmc_host {
int vdd;
int protect_card;
int reqs_blocked;
+ int use_reg;
struct omap_mmc_platform_data *pdata;
};
+static int omap_hsmmc_card_detect(struct device *dev, int slot)
+{
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+
+ /* NOTE: assumes card detect signal is active-low */
+ return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
+}
+
+static int omap_hsmmc_get_wp(struct device *dev, int slot)
+{
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+
+ /* NOTE: assumes write protect signal is active-high */
+ return gpio_get_value_cansleep(mmc->slots[0].gpio_wp);
+}
+
+static int omap_hsmmc_get_cover_state(struct device *dev, int slot)
+{
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+
+ /* NOTE: assumes card detect signal is active-low */
+ return !gpio_get_value_cansleep(mmc->slots[0].switch_pin);
+}
+
+#ifdef CONFIG_PM
+
+static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)
+{
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+
+ disable_irq(mmc->slots[0].card_detect_irq);
+ return 0;
+}
+
+static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)
+{
+ struct omap_mmc_platform_data *mmc = dev->platform_data;
+
+ enable_irq(mmc->slots[0].card_detect_irq);
+ return 0;
+}
+
+#else
+
+#define omap_hsmmc_suspend_cdirq NULL
+#define omap_hsmmc_resume_cdirq NULL
+
+#endif
+
+#ifdef CONFIG_REGULATOR
+
+static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on,
+ int vdd)
+{
+ struct omap_hsmmc_host *host =
+ platform_get_drvdata(to_platform_device(dev));
+ int ret;
+
+ if (mmc_slot(host).before_set_reg)
+ mmc_slot(host).before_set_reg(dev, slot, power_on, vdd);
+
+ if (power_on)
+ ret = mmc_regulator_set_ocr(host->vcc, vdd);
+ else
+ ret = mmc_regulator_set_ocr(host->vcc, 0);
+
+ if (mmc_slot(host).after_set_reg)
+ mmc_slot(host).after_set_reg(dev, slot, power_on, vdd);
+
+ return ret;
+}
+
+static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,
+ int vdd)
+{
+ struct omap_hsmmc_host *host =
+ platform_get_drvdata(to_platform_device(dev));
+ int ret = 0;
+
+ /*
+ * If we don't see a Vcc regulator, assume it's a fixed
+ * voltage always-on regulator.
+ */
+ if (!host->vcc)
+ return 0;
+
+ if (mmc_slot(host).before_set_reg)
+ mmc_slot(host).before_set_reg(dev, slot, power_on, vdd);
+
+ /*
+ * Assume Vcc regulator is used only to power the card ... OMAP
+ * VDDS is used to power the pins, optionally with a transceiver to
+ * support cards using voltages other than VDDS (1.8V nominal). When a
+ * transceiver is used, DAT3..7 are muxed as transceiver control pins.
+ *
+ * In some cases this regulator won't support enable/disable;
+ * e.g. it's a fixed rail for a WLAN chip.
+ *
+ * In other cases vcc_aux switches interface power. Example, for
+ * eMMC cards it represents VccQ. Sometimes transceivers or SDIO
+ * chips/cards need an interface voltage rail too.
+ */
+ if (power_on) {
+ ret = mmc_regulator_set_ocr(host->vcc, vdd);
+ /* Enable interface voltage rail, if needed */
+ if (ret == 0 && host->vcc_aux) {
+ ret = regulator_enable(host->vcc_aux);
+ if (ret < 0)
+ ret = mmc_regulator_set_ocr(host->vcc, 0);
+ }
+ } else {
+ if (host->vcc_aux)
+ ret = regulator_disable(host->vcc_aux);
+ if (ret == 0)
+ ret = mmc_regulator_set_ocr(host->vcc, 0);
+ }
+
+ if (mmc_slot(host).after_set_reg)
+ mmc_slot(host).after_set_reg(dev, slot, power_on, vdd);
+
+ return ret;
+}
+
+static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep,
+ int vdd, int cardsleep)
+{
+ struct omap_hsmmc_host *host =
+ platform_get_drvdata(to_platform_device(dev));
+ int mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL;
+
+ return regulator_set_mode(host->vcc, mode);
+}
+
+static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep,
+ int vdd, int cardsleep)
+{
+ struct omap_hsmmc_host *host =
+ platform_get_drvdata(to_platform_device(dev));
+ int err, mode;
+
+ /*
+ * If we don't see a Vcc regulator, assume it's a fixed
+ * voltage always-on regulator.
+ */
+ if (!host->vcc)
+ return 0;
+
+ mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL;
+
+ if (!host->vcc_aux)
+ return regulator_set_mode(host->vcc, mode);
+
+ if (cardsleep) {
+ /* VCC can be turned off if card is asleep */
+ if (sleep)
+ err = mmc_regulator_set_ocr(host->vcc, 0);
+ else
+ err = mmc_regulator_set_ocr(host->vcc, vdd);
+ } else
+ err = regulator_set_mode(host->vcc, mode);
+ if (err)
+ return err;
+
+ if (!mmc_slot(host).vcc_aux_disable_is_sleep)
+ return regulator_set_mode(host->vcc_aux, mode);
+
+ if (sleep)
+ return regulator_disable(host->vcc_aux);
+ else
+ return regulator_enable(host->vcc_aux);
+}
+
+static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
+{
+ struct regulator *reg;
+ int ret = 0;
+
+ switch (host->id) {
+ case OMAP_MMC1_DEVID:
+ /* On-chip level shifting via PBIAS0/PBIAS1 */
+ mmc_slot(host).set_power = omap_hsmmc_1_set_power;
+ mmc_slot(host).set_sleep = omap_hsmmc_1_set_sleep;
+ break;
+ case OMAP_MMC2_DEVID:
+ case OMAP_MMC3_DEVID:
+ /* Off-chip level shifting, or none */
+ mmc_slot(host).set_power = omap_hsmmc_23_set_power;
+ mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep;
+ break;
+ default:
+ pr_err("MMC%d configuration not supported!\n", host->id);
+ return -EINVAL;
+ }
+
+ reg = regulator_get(host->dev, "vmmc");
+ if (IS_ERR(reg)) {
+ dev_dbg(host->dev, "vmmc regulator missing\n");
+ /*
+ * HACK: until fixed.c regulator is usable,
+ * we don't require a main regulator
+ * for MMC2 or MMC3
+ */
+ if (host->id == OMAP_MMC1_DEVID) {
+ ret = PTR_ERR(reg);
+ goto err;
+ }
+ } else {
+ host->vcc = reg;
+ mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg);
+
+ /* Allow an aux regulator */
+ reg = regulator_get(host->dev, "vmmc_aux");
+ host->vcc_aux = IS_ERR(reg) ? NULL : reg;
+
+ /*
+ * UGLY HACK: workaround regulator framework bugs.
+ * When the bootloader leaves a supply active, it's
+ * initialized with zero usecount ... and we can't
+ * disable it without first enabling it. Until the
+ * framework is fixed, we need a workaround like this
+ * (which is safe for MMC, but not in general).
+ */
+ if (regulator_is_enabled(host->vcc) > 0) {
+ regulator_enable(host->vcc);
+ regulator_disable(host->vcc);
+ }
+ if (host->vcc_aux) {
+ if (regulator_is_enabled(reg) > 0) {
+ regulator_enable(reg);
+ regulator_disable(reg);
+ }
+ }
+ }
+
+ return 0;
+
+err:
+ mmc_slot(host).set_power = NULL;
+ mmc_slot(host).set_sleep = NULL;
+ return ret;
+}
+
+static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
+{
+ regulator_put(host->vcc);
+ regulator_put(host->vcc_aux);
+ mmc_slot(host).set_power = NULL;
+ mmc_slot(host).set_sleep = NULL;
+}
+
+static inline int omap_hsmmc_have_reg(void)
+{
+ return 1;
+}
+
+#else
+
+static inline int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)
+{
+ return -EINVAL;
+}
+
+static inline void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)
+{
+}
+
+static inline int omap_hsmmc_have_reg(void)
+{
+ return 0;
+}
+
+#endif
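
The regulator lookups above expect board code to provide a "vmmc" (and optionally "vmmc_aux") supply for the controller. A hedged sketch of the board-side mapping, assuming the regulator framework's REGULATOR_SUPPLY() helper and the platform device name "mmci-omap-hs.0" (neither is taken from this diff):

#include <linux/regulator/machine.h>

/* Hypothetical board file fragment wiring a supply to the "vmmc"
 * consumer that omap_hsmmc_reg_get() looks up. */
static struct regulator_consumer_supply board_vmmc_supply[] = {
	REGULATOR_SUPPLY("vmmc", "mmci-omap-hs.0"),
};
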
+
+static int omap_hsmmc_gpio_init(struct omap_mmc_platform_data *pdata)
+{
+ int ret;
+
+ if (gpio_is_valid(pdata->slots[0].switch_pin)) {
+ pdata->suspend = omap_hsmmc_suspend_cdirq;
+ pdata->resume = omap_hsmmc_resume_cdirq;
+ if (pdata->slots[0].cover)
+ pdata->slots[0].get_cover_state =
+ omap_hsmmc_get_cover_state;
+ else
+ pdata->slots[0].card_detect = omap_hsmmc_card_detect;
+ pdata->slots[0].card_detect_irq =
+ gpio_to_irq(pdata->slots[0].switch_pin);
+ ret = gpio_request(pdata->slots[0].switch_pin, "mmc_cd");
+ if (ret)
+ return ret;
+ ret = gpio_direction_input(pdata->slots[0].switch_pin);
+ if (ret)
+ goto err_free_sp;
+ } else
+ pdata->slots[0].switch_pin = -EINVAL;
+
+ if (gpio_is_valid(pdata->slots[0].gpio_wp)) {
+ pdata->slots[0].get_ro = omap_hsmmc_get_wp;
+ ret = gpio_request(pdata->slots[0].gpio_wp, "mmc_wp");
+ if (ret)
+ goto err_free_cd;
+ ret = gpio_direction_input(pdata->slots[0].gpio_wp);
+ if (ret)
+ goto err_free_wp;
+ } else
+ pdata->slots[0].gpio_wp = -EINVAL;
+
+ return 0;
+
+err_free_wp:
+ gpio_free(pdata->slots[0].gpio_wp);
+err_free_cd:
+ if (gpio_is_valid(pdata->slots[0].switch_pin))
+err_free_sp:
+ gpio_free(pdata->slots[0].switch_pin);
+ return ret;
+}
+
+static void omap_hsmmc_gpio_free(struct omap_mmc_platform_data *pdata)
+{
+ if (gpio_is_valid(pdata->slots[0].gpio_wp))
+ gpio_free(pdata->slots[0].gpio_wp);
+ if (gpio_is_valid(pdata->slots[0].switch_pin))
+ gpio_free(pdata->slots[0].switch_pin);
+}
+
/*
* Stop clock to the card
*/
@@ -835,7 +1173,7 @@ static void omap_hsmmc_detect(struct work_struct *work)
sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");
if (slot->card_detect)
- carddetect = slot->card_detect(slot->card_detect_irq);
+ carddetect = slot->card_detect(host->dev, host->slot_id);
else {
omap_hsmmc_protect_card(host);
carddetect = -ENOSYS;
@@ -1242,7 +1580,7 @@ static int omap_hsmmc_get_cd(struct mmc_host *mmc)
if (!mmc_slot(host).card_detect)
return -ENOSYS;
- return mmc_slot(host).card_detect(mmc_slot(host).card_detect_irq);
+ return mmc_slot(host).card_detect(host->dev, host->slot_id);
}
static int omap_hsmmc_get_ro(struct mmc_host *mmc)
@@ -1311,7 +1649,7 @@ static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host)
if (host->power_mode == MMC_POWER_OFF)
return 0;
- return msecs_to_jiffies(OMAP_MMC_SLEEP_TIMEOUT);
+ return OMAP_MMC_SLEEP_TIMEOUT;
}
/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */
@@ -1347,11 +1685,14 @@ static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host)
dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n",
host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP");
+ if (mmc_slot(host).no_off)
+ return 0;
+
if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
mmc_slot(host).card_detect ||
(mmc_slot(host).get_cover_state &&
mmc_slot(host).get_cover_state(host->dev, host->slot_id)))
- return msecs_to_jiffies(OMAP_MMC_OFF_TIMEOUT);
+ return OMAP_MMC_OFF_TIMEOUT;
return 0;
}
@@ -1362,6 +1703,9 @@ static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host)
if (!mmc_try_claim_host(host->mmc))
return 0;
+ if (mmc_slot(host).no_off)
+ return 0;
+
if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) ||
mmc_slot(host).card_detect ||
(mmc_slot(host).get_cover_state &&
@@ -1616,7 +1960,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
struct mmc_host *mmc;
struct omap_hsmmc_host *host = NULL;
struct resource *res;
- int ret = 0, irq;
+ int ret, irq;
if (pdata == NULL) {
dev_err(&pdev->dev, "Platform Data is missing\n");
@@ -1638,10 +1982,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
if (res == NULL)
return -EBUSY;
+ ret = omap_hsmmc_gpio_init(pdata);
+ if (ret)
+ goto err;
+
mmc = mmc_alloc_host(sizeof(struct omap_hsmmc_host), &pdev->dev);
if (!mmc) {
ret = -ENOMEM;
- goto err;
+ goto err_alloc;
}
host = mmc_priv(mmc);
@@ -1656,7 +2004,7 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
host->slot_id = 0;
host->mapbase = res->start;
host->base = ioremap(host->mapbase, SZ_4K);
- host->power_mode = -1;
+ host->power_mode = MMC_POWER_OFF;
platform_set_drvdata(pdev, host);
INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect);
@@ -1666,6 +2014,13 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
else
mmc->ops = &omap_hsmmc_ops;
+ /*
+ * If regulator_disable can only put vcc_aux to sleep then there is
+ * no off state.
+ */
+ if (mmc_slot(host).vcc_aux_disable_is_sleep)
+ mmc_slot(host).no_off = 1;
+
mmc->f_min = 400000;
mmc->f_max = 52000000;
@@ -1781,7 +2136,6 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
goto err_irq;
}
- /* initialize power supplies, gpios, etc */
if (pdata->init != NULL) {
if (pdata->init(&pdev->dev) != 0) {
dev_dbg(mmc_dev(host->mmc),
@@ -1789,6 +2143,14 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
goto err_irq_cd_init;
}
}
+
+ if (omap_hsmmc_have_reg() && !mmc_slot(host).set_power) {
+ ret = omap_hsmmc_reg_get(host);
+ if (ret)
+ goto err_reg;
+ host->use_reg = 1;
+ }
+
mmc->ocr_avail = mmc_slot(host).ocr_mask;
/* Request IRQ for card detect */
@@ -1823,19 +2185,22 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)
ret = device_create_file(&mmc->class_dev,
&dev_attr_cover_switch);
if (ret < 0)
- goto err_cover_switch;
+ goto err_slot_name;
}
omap_hsmmc_debugfs(mmc);
return 0;
-err_cover_switch:
- device_remove_file(&mmc->class_dev, &dev_attr_cover_switch);
err_slot_name:
mmc_remove_host(mmc);
-err_irq_cd:
free_irq(mmc_slot(host).card_detect_irq, host);
+err_irq_cd:
+ if (host->use_reg)
+ omap_hsmmc_reg_put(host);
+err_reg:
+ if (host->pdata->cleanup)
+ host->pdata->cleanup(&pdev->dev);
err_irq_cd_init:
free_irq(host->irq, host);
err_irq:
@@ -1847,14 +2212,14 @@ err_irq:
clk_disable(host->dbclk);
clk_put(host->dbclk);
}
-
err1:
iounmap(host->base);
+ platform_set_drvdata(pdev, NULL);
+ mmc_free_host(mmc);
+err_alloc:
+ omap_hsmmc_gpio_free(pdata);
err:
- dev_dbg(mmc_dev(host->mmc), "Probe Failed\n");
release_mem_region(res->start, res->end - res->start + 1);
- if (host)
- mmc_free_host(mmc);
return ret;
}
@@ -1866,6 +2231,8 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
if (host) {
mmc_host_enable(host->mmc);
mmc_remove_host(host->mmc);
+ if (host->use_reg)
+ omap_hsmmc_reg_put(host);
if (host->pdata->cleanup)
host->pdata->cleanup(&pdev->dev);
free_irq(host->irq, host);
@@ -1884,6 +2251,7 @@ static int omap_hsmmc_remove(struct platform_device *pdev)
mmc_free_host(host->mmc);
iounmap(host->base);
+ omap_hsmmc_gpio_free(pdev->dev.platform_data);
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mmc/host/ricoh_mmc.c b/drivers/mmc/host/ricoh_mmc.c
deleted file mode 100644
index f6279051332..00000000000
--- a/drivers/mmc/host/ricoh_mmc.c
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * ricoh_mmc.c - Dummy driver to disable the Ricoh MMC controller.
- *
- * Copyright (C) 2007 Philip Langdale, All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- */
-
-/*
- * This is a conceptually ridiculous driver, but it is required by the way
- * the Ricoh multi-function chips (R5CXXX) work. These chips implement
- * the four main memory card controllers (SD, MMC, MS, xD) and one or both
- * of cardbus or firewire. It happens that they implement SD and MMC
- * support as separate controllers (and PCI functions). The linux SDHCI
- * driver supports MMC cards but the chip detects MMC cards in hardware
- * and directs them to the MMC controller - so the SDHCI driver never sees
- * them. To get around this, we must disable the useless MMC controller.
- * At that point, the SDHCI controller will start seeing them. As a bonus,
- * a detection event occurs immediately, even if the MMC card is already
- * in the reader.
- *
- * It seems to be the case that the relevant PCI registers to deactivate the
- * MMC controller live on PCI function 0, which might be the cardbus controller
- * or the firewire controller, depending on the particular chip in question. As
- * such, it makes what this driver has to do unavoidably ugly. Such is life.
- */
-
-#include <linux/pci.h>
-
-#define DRIVER_NAME "ricoh-mmc"
-
-static const struct pci_device_id pci_ids[] __devinitdata = {
- {
- .vendor = PCI_VENDOR_ID_RICOH,
- .device = PCI_DEVICE_ID_RICOH_R5C843,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { /* end: all zeroes */ },
-};
-
-MODULE_DEVICE_TABLE(pci, pci_ids);
-
-static int ricoh_mmc_disable(struct pci_dev *fw_dev)
-{
- u8 write_enable;
- u8 write_target;
- u8 disable;
-
- if (fw_dev->device == PCI_DEVICE_ID_RICOH_RL5C476) {
- /* via RL5C476 */
-
- pci_read_config_byte(fw_dev, 0xB7, &disable);
- if (disable & 0x02) {
- printk(KERN_INFO DRIVER_NAME
- ": Controller already disabled. " \
- "Nothing to do.\n");
- return -ENODEV;
- }
-
- pci_read_config_byte(fw_dev, 0x8E, &write_enable);
- pci_write_config_byte(fw_dev, 0x8E, 0xAA);
- pci_read_config_byte(fw_dev, 0x8D, &write_target);
- pci_write_config_byte(fw_dev, 0x8D, 0xB7);
- pci_write_config_byte(fw_dev, 0xB7, disable | 0x02);
- pci_write_config_byte(fw_dev, 0x8E, write_enable);
- pci_write_config_byte(fw_dev, 0x8D, write_target);
- } else {
- /* via R5C832 */
-
- pci_read_config_byte(fw_dev, 0xCB, &disable);
- if (disable & 0x02) {
- printk(KERN_INFO DRIVER_NAME
- ": Controller already disabled. " \
- "Nothing to do.\n");
- return -ENODEV;
- }
-
- pci_read_config_byte(fw_dev, 0xCA, &write_enable);
- pci_write_config_byte(fw_dev, 0xCA, 0x57);
- pci_write_config_byte(fw_dev, 0xCB, disable | 0x02);
- pci_write_config_byte(fw_dev, 0xCA, write_enable);
- }
-
- printk(KERN_INFO DRIVER_NAME
- ": Controller is now disabled.\n");
-
- return 0;
-}
-
-static int ricoh_mmc_enable(struct pci_dev *fw_dev)
-{
- u8 write_enable;
- u8 write_target;
- u8 disable;
-
- if (fw_dev->device == PCI_DEVICE_ID_RICOH_RL5C476) {
- /* via RL5C476 */
-
- pci_read_config_byte(fw_dev, 0x8E, &write_enable);
- pci_write_config_byte(fw_dev, 0x8E, 0xAA);
- pci_read_config_byte(fw_dev, 0x8D, &write_target);
- pci_write_config_byte(fw_dev, 0x8D, 0xB7);
- pci_read_config_byte(fw_dev, 0xB7, &disable);
- pci_write_config_byte(fw_dev, 0xB7, disable & ~0x02);
- pci_write_config_byte(fw_dev, 0x8E, write_enable);
- pci_write_config_byte(fw_dev, 0x8D, write_target);
- } else {
- /* via R5C832 */
-
- pci_read_config_byte(fw_dev, 0xCA, &write_enable);
- pci_read_config_byte(fw_dev, 0xCB, &disable);
- pci_write_config_byte(fw_dev, 0xCA, 0x57);
- pci_write_config_byte(fw_dev, 0xCB, disable & ~0x02);
- pci_write_config_byte(fw_dev, 0xCA, write_enable);
- }
-
- printk(KERN_INFO DRIVER_NAME
- ": Controller is now re-enabled.\n");
-
- return 0;
-}
-
-static int __devinit ricoh_mmc_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- u8 rev;
- u8 ctrlfound = 0;
-
- struct pci_dev *fw_dev = NULL;
-
- BUG_ON(pdev == NULL);
- BUG_ON(ent == NULL);
-
- pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev);
-
- printk(KERN_INFO DRIVER_NAME
- ": Ricoh MMC controller found at %s [%04x:%04x] (rev %x)\n",
- pci_name(pdev), (int)pdev->vendor, (int)pdev->device,
- (int)rev);
-
- while ((fw_dev =
- pci_get_device(PCI_VENDOR_ID_RICOH,
- PCI_DEVICE_ID_RICOH_RL5C476, fw_dev))) {
- if (PCI_SLOT(pdev->devfn) == PCI_SLOT(fw_dev->devfn) &&
- PCI_FUNC(fw_dev->devfn) == 0 &&
- pdev->bus == fw_dev->bus) {
- if (ricoh_mmc_disable(fw_dev) != 0)
- return -ENODEV;
-
- pci_set_drvdata(pdev, fw_dev);
-
- ++ctrlfound;
- break;
- }
- }
-
- fw_dev = NULL;
-
- while (!ctrlfound &&
- (fw_dev = pci_get_device(PCI_VENDOR_ID_RICOH,
- PCI_DEVICE_ID_RICOH_R5C832, fw_dev))) {
- if (PCI_SLOT(pdev->devfn) == PCI_SLOT(fw_dev->devfn) &&
- PCI_FUNC(fw_dev->devfn) == 0 &&
- pdev->bus == fw_dev->bus) {
- if (ricoh_mmc_disable(fw_dev) != 0)
- return -ENODEV;
-
- pci_set_drvdata(pdev, fw_dev);
-
- ++ctrlfound;
- }
- }
-
- if (!ctrlfound) {
- printk(KERN_WARNING DRIVER_NAME
- ": Main Ricoh function not found. Cannot disable controller.\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static void __devexit ricoh_mmc_remove(struct pci_dev *pdev)
-{
- struct pci_dev *fw_dev = NULL;
-
- fw_dev = pci_get_drvdata(pdev);
- BUG_ON(fw_dev == NULL);
-
- ricoh_mmc_enable(fw_dev);
-
- pci_set_drvdata(pdev, NULL);
-}
-
-static int ricoh_mmc_suspend_late(struct pci_dev *pdev, pm_message_t state)
-{
- struct pci_dev *fw_dev = NULL;
-
- fw_dev = pci_get_drvdata(pdev);
- BUG_ON(fw_dev == NULL);
-
- printk(KERN_INFO DRIVER_NAME ": Suspending.\n");
-
- ricoh_mmc_enable(fw_dev);
-
- return 0;
-}
-
-static int ricoh_mmc_resume_early(struct pci_dev *pdev)
-{
- struct pci_dev *fw_dev = NULL;
-
- fw_dev = pci_get_drvdata(pdev);
- BUG_ON(fw_dev == NULL);
-
- printk(KERN_INFO DRIVER_NAME ": Resuming.\n");
-
- ricoh_mmc_disable(fw_dev);
-
- return 0;
-}
-
-static struct pci_driver ricoh_mmc_driver = {
- .name = DRIVER_NAME,
- .id_table = pci_ids,
- .probe = ricoh_mmc_probe,
- .remove = __devexit_p(ricoh_mmc_remove),
- .suspend_late = ricoh_mmc_suspend_late,
- .resume_early = ricoh_mmc_resume_early,
-};
-
-/*****************************************************************************\
- * *
- * Driver init/exit *
- * *
-\*****************************************************************************/
-
-static int __init ricoh_mmc_drv_init(void)
-{
- printk(KERN_INFO DRIVER_NAME
- ": Ricoh MMC Controller disabling driver\n");
- printk(KERN_INFO DRIVER_NAME ": Copyright(c) Philip Langdale\n");
-
- return pci_register_driver(&ricoh_mmc_driver);
-}
-
-static void __exit ricoh_mmc_drv_exit(void)
-{
- pci_unregister_driver(&ricoh_mmc_driver);
-}
-
-module_init(ricoh_mmc_drv_init);
-module_exit(ricoh_mmc_drv_exit);
-
-MODULE_AUTHOR("Philip Langdale <philipl@alumni.utexas.net>");
-MODULE_DESCRIPTION("Ricoh MMC Controller disabling driver");
-MODULE_LICENSE("GPL");
-
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index d96e1abf2d6..2fdf7689ae6 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -1179,7 +1179,7 @@ static int s3cmci_card_present(struct mmc_host *mmc)
struct s3c24xx_mci_pdata *pdata = host->pdata;
int ret;
- if (pdata->gpio_detect == 0)
+ if (pdata->no_detect)
return -ENOSYS;
ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
@@ -1360,6 +1360,8 @@ static struct mmc_host_ops s3cmci_ops = {
static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
/* This is currently here to avoid a number of if (host->pdata)
* checks. Any zero fields to ensure reasonable defaults are picked. */
+ .no_wprotect = 1,
+ .no_detect = 1,
};
#ifdef CONFIG_CPU_FREQ
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 5c3a1767770..8e1020cf73f 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -80,9 +80,6 @@ struct sdhci_pci_chip {
static int ricoh_probe(struct sdhci_pci_chip *chip)
{
- if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
- chip->quirks |= SDHCI_QUIRK_CLOCK_BEFORE_RESET;
-
if (chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG ||
chip->pdev->subsystem_vendor == PCI_VENDOR_ID_SONY)
chip->quirks |= SDHCI_QUIRK_NO_CARD_NO_RESET;
@@ -92,7 +89,9 @@ static int ricoh_probe(struct sdhci_pci_chip *chip)
static const struct sdhci_pci_fixes sdhci_ricoh = {
.probe = ricoh_probe,
- .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR,
+ .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |
+ SDHCI_QUIRK_FORCE_DMA |
+ SDHCI_QUIRK_CLOCK_BEFORE_RESET,
};
static const struct sdhci_pci_fixes sdhci_ene_712 = {
@@ -501,6 +500,7 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
{
struct sdhci_pci_chip *chip;
struct sdhci_pci_slot *slot;
+ mmc_pm_flag_t pm_flags = 0;
int i, ret;
chip = pci_get_drvdata(pdev);
@@ -519,6 +519,8 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
sdhci_resume_host(chip->slots[i]->host);
return ret;
}
+
+ pm_flags |= slot->host->mmc->pm_flags;
}
if (chip->fixes && chip->fixes->suspend) {
@@ -531,9 +533,15 @@ static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)
}
pci_save_state(pdev);
- pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
- pci_disable_device(pdev);
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ if (pm_flags & MMC_PM_KEEP_POWER) {
+ if (pm_flags & MMC_PM_WAKE_SDIO_IRQ)
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ pci_set_power_state(pdev, PCI_D3hot);
+ } else {
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ }
return 0;
}
@@ -653,6 +661,8 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
goto unmap;
}
+ host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ;
+
ret = sdhci_add_host(host);
if (ret)
goto remove;
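The suspend path above now honours per-card power-management flags: the pm_caps advertised at probe time are filtered into pm_flags by the MMC core, and if any slot asks for MMC_PM_KEEP_POWER the controller is left powered (only dropped to D3hot) instead of being disabled. A sketch of how an SDIO function driver might request that from its suspend handler, assuming the sdio_get_host_pm_caps()/sdio_set_host_pm_flags() helpers from the SDIO core (driver and function names are illustrative):

	#include <linux/errno.h>
	#include <linux/mmc/sdio_func.h>

	/* Suspend handler of a hypothetical SDIO function driver. */
	static int example_sdio_suspend(struct device *dev)
	{
		struct sdio_func *func = dev_to_sdio_func(dev);

		/* Bail out if the host cannot keep the card powered over suspend. */
		if (!(sdio_get_host_pm_caps(func) & MMC_PM_KEEP_POWER))
			return -ENOSYS;

		/* Ask the host to keep power and to wake us on SDIO interrupts. */
		return sdio_set_host_pm_flags(func,
				MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ);
	}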
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c279fbc4c2e..d6ab62d539f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -174,20 +174,31 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)
sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier);
}
-static void sdhci_init(struct sdhci_host *host)
+static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);
+
+static void sdhci_init(struct sdhci_host *host, int soft)
{
- sdhci_reset(host, SDHCI_RESET_ALL);
+ if (soft)
+ sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);
+ else
+ sdhci_reset(host, SDHCI_RESET_ALL);
sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK,
SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX |
SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT |
SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE);
+
+ if (soft) {
+ /* force clock reconfiguration */
+ host->clock = 0;
+ sdhci_set_ios(host->mmc, &host->mmc->ios);
+ }
}
static void sdhci_reinit(struct sdhci_host *host)
{
- sdhci_init(host);
+ sdhci_init(host, 0);
sdhci_enable_card_detection(host);
}
@@ -376,6 +387,20 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
local_irq_restore(*flags);
}
+static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd)
+{
+ __le32 *dataddr = (__le32 __force *)(desc + 4);
+ __le16 *cmdlen = (__le16 __force *)desc;
+
+ /* SDHCI specification says ADMA descriptors should be 4 byte
+ * aligned, so using 16 or 32bit operations should be safe. */
+
+ cmdlen[0] = cpu_to_le16(cmd);
+ cmdlen[1] = cpu_to_le16(len);
+
+ dataddr[0] = cpu_to_le32(addr);
+}
+
static int sdhci_adma_table_pre(struct sdhci_host *host,
struct mmc_data *data)
{
@@ -443,19 +468,11 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
sdhci_kunmap_atomic(buffer, &flags);
}
- desc[7] = (align_addr >> 24) & 0xff;
- desc[6] = (align_addr >> 16) & 0xff;
- desc[5] = (align_addr >> 8) & 0xff;
- desc[4] = (align_addr >> 0) & 0xff;
+ /* tran, valid */
+ sdhci_set_adma_desc(desc, align_addr, offset, 0x21);
BUG_ON(offset > 65536);
- desc[3] = (offset >> 8) & 0xff;
- desc[2] = (offset >> 0) & 0xff;
-
- desc[1] = 0x00;
- desc[0] = 0x21; /* tran, valid */
-
align += 4;
align_addr += 4;
@@ -465,19 +482,10 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
len -= offset;
}
- desc[7] = (addr >> 24) & 0xff;
- desc[6] = (addr >> 16) & 0xff;
- desc[5] = (addr >> 8) & 0xff;
- desc[4] = (addr >> 0) & 0xff;
-
BUG_ON(len > 65536);
- desc[3] = (len >> 8) & 0xff;
- desc[2] = (len >> 0) & 0xff;
-
- desc[1] = 0x00;
- desc[0] = 0x21; /* tran, valid */
-
+ /* tran, valid */
+ sdhci_set_adma_desc(desc, addr, len, 0x21);
desc += 8;
/*
@@ -490,16 +498,9 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
/*
* Add a terminating entry.
*/
- desc[7] = 0;
- desc[6] = 0;
- desc[5] = 0;
- desc[4] = 0;
- desc[3] = 0;
- desc[2] = 0;
-
- desc[1] = 0x00;
- desc[0] = 0x03; /* nop, end, valid */
+ /* nop, end, valid */
+ sdhci_set_adma_desc(desc, 0, 0, 0x3);
/*
* Resync align buffer as we might have changed it.
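For reference, the 8-byte entry that sdhci_set_adma_desc() fills in has a fixed little-endian layout; the helper merely replaces the open-coded byte stores with cpu_to_le16()/cpu_to_le32(). A sketch of the same layout expressed as a struct (illustration only; the driver keeps operating on a raw byte buffer):

	#include <linux/types.h>

	/*
	 * 32-bit ADMA2 descriptor: a 16-bit attribute word, a 16-bit length and
	 * a 32-bit buffer address. In the attribute word bit 0 is "valid",
	 * bit 1 "end" and bits 5:4 the action, hence 0x21 = "tran, valid" and
	 * 0x03 = "nop, end, valid" as used in the hunks above.
	 */
	struct sdhci_adma2_32_desc {
		__le16	cmd;
		__le16	len;
		__le32	addr;
	} __packed;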
@@ -1610,16 +1611,13 @@ int sdhci_resume_host(struct sdhci_host *host)
if (ret)
return ret;
- sdhci_init(host);
+ sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
mmiowb();
ret = mmc_resume_host(host->mmc);
- if (ret)
- return ret;
-
sdhci_enable_card_detection(host);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(sdhci_resume_host);
@@ -1874,7 +1872,7 @@ int sdhci_add_host(struct sdhci_host *host)
if (ret)
goto untasklet;
- sdhci_init(host);
+ sdhci_init(host, 0);
#ifdef CONFIG_MMC_DEBUG
sdhci_dumpregs(host);
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 2bb03a8b9ef..aa2807d0ce7 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -422,15 +422,6 @@ config MTD_H720X
This enables access to the flash chips on the Hynix evaluation boards.
If you have such a board, say 'Y'.
-config MTD_OMAP_NOR
- tristate "TI OMAP board mappings"
- depends on MTD_CFI && ARCH_OMAP
- help
- This enables access to the NOR flash chips on TI OMAP-based
- boards defining flash platform devices and flash platform data.
- These boards include the Innovator, H2, H3, OSK, Perseus2, and
- more. If you have such a board, say 'Y'.
-
# This needs CFI or JEDEC, depending on the cards found.
config MTD_PCI
tristate "PCI MTD driver"
diff --git a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile
index a44919f3f3d..bb035cd54c7 100644
--- a/drivers/mtd/maps/Makefile
+++ b/drivers/mtd/maps/Makefile
@@ -54,7 +54,6 @@ obj-$(CONFIG_MTD_IXP2000) += ixp2000.o
obj-$(CONFIG_MTD_WRSBC8260) += wr_sbc82xx_flash.o
obj-$(CONFIG_MTD_DMV182) += dmv182.o
obj-$(CONFIG_MTD_PLATRAM) += plat-ram.o
-obj-$(CONFIG_MTD_OMAP_NOR) += omap_nor.o
obj-$(CONFIG_MTD_INTEL_VR_NOR) += intel_vr_nor.o
obj-$(CONFIG_MTD_BFIN_ASYNC) += bfin-async-flash.o
obj-$(CONFIG_MTD_RBTX4939) += rbtx4939-flash.o
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
index ead0b2fab67..e69de29bb2d 100644
--- a/drivers/mtd/maps/omap_nor.c
+++ b/drivers/mtd/maps/omap_nor.c
@@ -1,188 +0,0 @@
-/*
- * Flash memory support for various TI OMAP boards
- *
- * Copyright (C) 2001-2002 MontaVista Software Inc.
- * Copyright (C) 2003-2004 Texas Instruments
- * Copyright (C) 2004 Nokia Corporation
- *
- * Assembled using driver code copyright the companies above
- * and written by David Brownell, Jian Zhang <jzhang@ti.com>,
- * Tony Lindgren <tony@atomide.com> and others.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/platform_device.h>
-#include <linux/module.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/ioport.h>
-#include <linux/slab.h>
-
-#include <linux/mtd/mtd.h>
-#include <linux/mtd/map.h>
-#include <linux/mtd/partitions.h>
-
-#include <asm/io.h>
-#include <mach/hardware.h>
-#include <asm/mach/flash.h>
-#include <plat/tc.h>
-
-#ifdef CONFIG_MTD_PARTITIONS
-static const char *part_probes[] = { /* "RedBoot", */ "cmdlinepart", NULL };
-#endif
-
-struct omapflash_info {
- struct mtd_partition *parts;
- struct mtd_info *mtd;
- struct map_info map;
-};
-
-static void omap_set_vpp(struct map_info *map, int enable)
-{
- static int count;
- u32 l;
-
- if (cpu_class_is_omap1()) {
- if (enable) {
- if (count++ == 0) {
- l = omap_readl(EMIFS_CONFIG);
- l |= OMAP_EMIFS_CONFIG_WP;
- omap_writel(l, EMIFS_CONFIG);
- }
- } else {
- if (count && (--count == 0)) {
- l = omap_readl(EMIFS_CONFIG);
- l &= ~OMAP_EMIFS_CONFIG_WP;
- omap_writel(l, EMIFS_CONFIG);
- }
- }
- }
-}
-
-static int __init omapflash_probe(struct platform_device *pdev)
-{
- int err;
- struct omapflash_info *info;
- struct flash_platform_data *pdata = pdev->dev.platform_data;
- struct resource *res = pdev->resource;
- unsigned long size = res->end - res->start + 1;
-
- info = kzalloc(sizeof(struct omapflash_info), GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- if (!request_mem_region(res->start, size, "flash")) {
- err = -EBUSY;
- goto out_free_info;
- }
-
- info->map.virt = ioremap(res->start, size);
- if (!info->map.virt) {
- err = -ENOMEM;
- goto out_release_mem_region;
- }
- info->map.name = dev_name(&pdev->dev);
- info->map.phys = res->start;
- info->map.size = size;
- info->map.bankwidth = pdata->width;
- info->map.set_vpp = omap_set_vpp;
-
- simple_map_init(&info->map);
- info->mtd = do_map_probe(pdata->map_name, &info->map);
- if (!info->mtd) {
- err = -EIO;
- goto out_iounmap;
- }
- info->mtd->owner = THIS_MODULE;
-
- info->mtd->dev.parent = &pdev->dev;
-
-#ifdef CONFIG_MTD_PARTITIONS
- err = parse_mtd_partitions(info->mtd, part_probes, &info->parts, 0);
- if (err > 0)
- add_mtd_partitions(info->mtd, info->parts, err);
- else if (err <= 0 && pdata->parts)
- add_mtd_partitions(info->mtd, pdata->parts, pdata->nr_parts);
- else
-#endif
- add_mtd_device(info->mtd);
-
- platform_set_drvdata(pdev, info);
-
- return 0;
-
-out_iounmap:
- iounmap(info->map.virt);
-out_release_mem_region:
- release_mem_region(res->start, size);
-out_free_info:
- kfree(info);
-
- return err;
-}
-
-static int __exit omapflash_remove(struct platform_device *pdev)
-{
- struct omapflash_info *info = platform_get_drvdata(pdev);
-
- platform_set_drvdata(pdev, NULL);
-
- if (info) {
- if (info->parts) {
- del_mtd_partitions(info->mtd);
- kfree(info->parts);
- } else
- del_mtd_device(info->mtd);
- map_destroy(info->mtd);
- release_mem_region(info->map.phys, info->map.size);
- iounmap((void __iomem *) info->map.virt);
- kfree(info);
- }
-
- return 0;
-}
-
-static struct platform_driver omapflash_driver = {
- .remove = __exit_p(omapflash_remove),
- .driver = {
- .name = "omapflash",
- .owner = THIS_MODULE,
- },
-};
-
-static int __init omapflash_init(void)
-{
- return platform_driver_probe(&omapflash_driver, omapflash_probe);
-}
-
-static void __exit omapflash_exit(void)
-{
- platform_driver_unregister(&omapflash_driver);
-}
-
-module_init(omapflash_init);
-module_exit(omapflash_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("MTD NOR map driver for TI OMAP boards");
-MODULE_ALIAS("platform:omapflash");
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index 1bb799f0125..26aec008018 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -30,12 +30,8 @@
#define DRIVER_NAME "omap2-nand"
-/* size (4 KiB) for IO mapping */
-#define NAND_IO_SIZE SZ_4K
-
#define NAND_WP_OFF 0
#define NAND_WP_BIT 0x00000010
-#define WR_RD_PIN_MONITORING 0x00600000
#define GPMC_BUF_FULL 0x00000001
#define GPMC_BUF_EMPTY 0x00000000
@@ -882,8 +878,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
struct omap_nand_info *info;
struct omap_nand_platform_data *pdata;
int err;
- unsigned long val;
-
pdata = pdev->dev.platform_data;
if (pdata == NULL) {
@@ -905,28 +899,14 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->gpmc_cs = pdata->cs;
info->gpmc_baseaddr = pdata->gpmc_baseaddr;
info->gpmc_cs_baseaddr = pdata->gpmc_cs_baseaddr;
+ info->phys_base = pdata->phys_base;
info->mtd.priv = &info->nand;
info->mtd.name = dev_name(&pdev->dev);
info->mtd.owner = THIS_MODULE;
- err = gpmc_cs_request(info->gpmc_cs, NAND_IO_SIZE, &info->phys_base);
- if (err < 0) {
- dev_err(&pdev->dev, "Cannot request GPMC CS\n");
- goto out_free_info;
- }
-
- /* Enable RD PIN Monitoring Reg */
- if (pdata->dev_ready) {
- val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1);
- val |= WR_RD_PIN_MONITORING;
- gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG1, val);
- }
-
- val = gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG7);
- val &= ~(0xf << 8);
- val |= (0xc & 0xf) << 8;
- gpmc_cs_write_reg(info->gpmc_cs, GPMC_CS_CONFIG7, val);
+ info->nand.options |= pdata->devsize ? NAND_BUSWIDTH_16 : 0;
+ info->nand.options |= NAND_SKIP_BBTSCAN;
/* NAND write protect off */
omap_nand_wp(&info->mtd, NAND_WP_OFF);
@@ -934,7 +914,7 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
if (!request_mem_region(info->phys_base, NAND_IO_SIZE,
pdev->dev.driver->name)) {
err = -EBUSY;
- goto out_free_cs;
+ goto out_free_info;
}
info->nand.IO_ADDR_R = ioremap(info->phys_base, NAND_IO_SIZE);
@@ -963,11 +943,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
info->nand.chip_delay = 50;
}
- info->nand.options |= NAND_SKIP_BBTSCAN;
- if ((gpmc_cs_read_reg(info->gpmc_cs, GPMC_CS_CONFIG1) & 0x3000)
- == 0x1000)
- info->nand.options |= NAND_BUSWIDTH_16;
-
if (use_prefetch) {
/* copy the virtual address of nand base for fifo access */
info->nand_pref_fifo_add = info->nand.IO_ADDR_R;
@@ -1043,8 +1018,6 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
out_release_mem_region:
release_mem_region(info->phys_base, NAND_IO_SIZE);
-out_free_cs:
- gpmc_cs_free(info->gpmc_cs);
out_free_info:
kfree(info);
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 14cec04c34f..bc45ef9af17 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -37,6 +37,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
+#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/log2.h>
@@ -50,7 +51,8 @@
/**
* struct mtd_dev_param - MTD device parameter description data structure.
- * @name: MTD device name or number string
+ * @name: MTD character device node path, MTD device name, or MTD device number
+ * string
* @vid_hdr_offs: VID header offset
*/
struct mtd_dev_param {
@@ -59,10 +61,10 @@ struct mtd_dev_param {
};
/* Numbers of elements set in the @mtd_dev_param array */
-static int mtd_devs;
+static int __initdata mtd_devs;
/* MTD devices specification parameters */
-static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES];
+static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
@@ -363,11 +365,13 @@ static void dev_release(struct device *dev)
/**
* ubi_sysfs_init - initialize sysfs for an UBI device.
* @ubi: UBI device description object
+ * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
+ * taken
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
-static int ubi_sysfs_init(struct ubi_device *ubi)
+static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
int err;
@@ -379,6 +383,7 @@ static int ubi_sysfs_init(struct ubi_device *ubi)
if (err)
return err;
+ *ref = 1;
err = device_create_file(&ubi->dev, &dev_eraseblock_size);
if (err)
return err;
@@ -434,7 +439,7 @@ static void ubi_sysfs_close(struct ubi_device *ubi)
}
/**
- * kill_volumes - destroy all volumes.
+ * kill_volumes - destroy all user volumes.
* @ubi: UBI device description object
*/
static void kill_volumes(struct ubi_device *ubi)
@@ -447,36 +452,29 @@ static void kill_volumes(struct ubi_device *ubi)
}
/**
- * free_user_volumes - free all user volumes.
- * @ubi: UBI device description object
- *
- * Normally the volumes are freed at the release function of the volume device
- * objects. However, on error paths the volumes have to be freed before the
- * device objects have been initialized.
- */
-static void free_user_volumes(struct ubi_device *ubi)
-{
- int i;
-
- for (i = 0; i < ubi->vtbl_slots; i++)
- if (ubi->volumes[i]) {
- kfree(ubi->volumes[i]->eba_tbl);
- kfree(ubi->volumes[i]);
- }
-}
-
-/**
* uif_init - initialize user interfaces for an UBI device.
* @ubi: UBI device description object
+ * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
+ * taken, otherwise set to %0
+ *
+ * This function initializes various user interfaces for an UBI device. If the
+ * initialization fails at an early stage, this function frees all the
+ * resources it allocated, returns an error, and @ref is set to %0. However,
+ * if the initialization fails after the UBI device was registered in the
+ * driver core subsystem, this function takes a reference to @ubi->dev, because
+ * otherwise the release function ('dev_release()') would free the whole @ubi
+ * object. The @ref argument is set to %1 in this case. The caller has to put
+ * this reference.
*
* This function returns zero in case of success and a negative error code in
- * case of failure. Note, this function destroys all volumes if it fails.
+ * case of failure.
*/
-static int uif_init(struct ubi_device *ubi)
+static int uif_init(struct ubi_device *ubi, int *ref)
{
int i, err;
dev_t dev;
+ *ref = 0;
sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);
/*
@@ -504,7 +502,7 @@ static int uif_init(struct ubi_device *ubi)
goto out_unreg;
}
- err = ubi_sysfs_init(ubi);
+ err = ubi_sysfs_init(ubi, ref);
if (err)
goto out_sysfs;
@@ -522,6 +520,8 @@ static int uif_init(struct ubi_device *ubi)
out_volumes:
kill_volumes(ubi);
out_sysfs:
+ if (*ref)
+ get_device(&ubi->dev);
ubi_sysfs_close(ubi);
cdev_del(&ubi->cdev);
out_unreg:
@@ -875,7 +875,7 @@ static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
{
struct ubi_device *ubi;
- int i, err, do_free = 1;
+ int i, err, ref = 0;
/*
* Check if we already have the same MTD device attached.
@@ -975,9 +975,9 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
goto out_detach;
}
- err = uif_init(ubi);
+ err = uif_init(ubi, &ref);
if (err)
- goto out_nofree;
+ goto out_detach;
ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
if (IS_ERR(ubi->bgt_thread)) {
@@ -1025,12 +1025,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
out_uif:
uif_close(ubi);
-out_nofree:
- do_free = 0;
out_detach:
ubi_wl_close(ubi);
- if (do_free)
- free_user_volumes(ubi);
free_internal_volumes(ubi);
vfree(ubi->vtbl);
out_free:
@@ -1039,7 +1035,10 @@ out_free:
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
vfree(ubi->dbg_peb_buf);
#endif
- kfree(ubi);
+ if (ref)
+ put_device(&ubi->dev);
+ else
+ kfree(ubi);
return err;
}
@@ -1096,7 +1095,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
/*
* Get a reference to the device in order to prevent 'dev_release()'
- * from freeing @ubi object.
+ * from freeing the @ubi object.
*/
get_device(&ubi->dev);
@@ -1116,13 +1115,50 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
}
/**
- * find_mtd_device - open an MTD device by its name or number.
- * @mtd_dev: name or number of the device
+ * open_mtd_by_chdev - open an MTD device by its character device node path.
+ * @mtd_dev: MTD character device node path
+ *
+ * This helper function opens an MTD device by its character node device path.
+ * Returns MTD device description object in case of success and a negative
+ * error code in case of failure.
+ */
+static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
+{
+ int err, major, minor, mode;
+ struct path path;
+
+ /* Probably this is an MTD character device node path */
+ err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
+ if (err)
+ return ERR_PTR(err);
+
+ /* MTD device number is defined by the major / minor numbers */
+ major = imajor(path.dentry->d_inode);
+ minor = iminor(path.dentry->d_inode);
+ mode = path.dentry->d_inode->i_mode;
+ path_put(&path);
+ if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
+ return ERR_PTR(-EINVAL);
+
+ if (minor & 1)
+ /*
+		 * We do not think the read-only "/dev/mtdrX" devices need to be
+		 * supported, so skip them to avoid doing extra work.
+ */
+ return ERR_PTR(-EINVAL);
+
+ return get_mtd_device(NULL, minor / 2);
+}
+
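open_mtd_by_chdev() relies on the MTD character device numbering: /dev/mtdN (read-write) uses minor 2*N on major 90 (MTD_CHAR_MAJOR), while the odd minors belong to the read-only /dev/mtdrN nodes, which is why odd minors are rejected and the device number is minor / 2. A user-space sketch of the same mapping (illustrative only, not part of the patch):

	#include <stdio.h>
	#include <sys/stat.h>
	#include <sys/sysmacros.h>

	#define MTD_CHAR_MAJOR	90	/* same value the kernel uses */

	/* Return the MTD device number behind a /dev/mtdN node, or -1. */
	static int mtd_num_from_path(const char *path)
	{
		struct stat st;

		if (stat(path, &st) != 0 || !S_ISCHR(st.st_mode))
			return -1;
		if (major(st.st_rdev) != MTD_CHAR_MAJOR || (minor(st.st_rdev) & 1))
			return -1;
		return minor(st.st_rdev) / 2;
	}

	int main(void)
	{
		printf("/dev/mtd0 -> mtd%d\n", mtd_num_from_path("/dev/mtd0"));
		return 0;
	}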
+/**
+ * open_mtd_device - open MTD device by name, character device path, or number.
+ * @mtd_dev: name, character device node path, or MTD device number
*
 * This function tries to open an MTD device described by @mtd_dev string,
- * which is first treated as an ASCII number, and if it is not true, it is
- * treated as MTD device name. Returns MTD device description object in case of
- * success and a negative error code in case of failure.
+ * which is first treated as an ASCII MTD device number; if that fails, it is
+ * treated as an MTD device name, and if that also fails, as an MTD character
+ * device node path. Returns MTD device description object in
+ * case of success and a negative error code in case of failure.
*/
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
@@ -1137,6 +1173,9 @@ static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
* MTD device name.
*/
mtd = get_mtd_device_nm(mtd_dev);
+ if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
+ /* Probably this is an MTD character device node path */
+ mtd = open_mtd_by_chdev(mtd_dev);
} else
mtd = get_mtd_device(NULL, mtd_num);
@@ -1352,13 +1391,15 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
- "mtd=<name|num>[,<vid_hdr_offs>].\n"
+ "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
"Multiple \"mtd\" parameters may be specified.\n"
- "MTD devices may be specified by their number or name.\n"
+ "MTD devices may be specified by their number, name, or "
+ "path to the MTD character device node.\n"
"Optional \"vid_hdr_offs\" parameter specifies UBI VID "
- "header position and data starting position to be used "
- "by UBI.\n"
- "Example: mtd=content,1984 mtd=4 - attach MTD device"
+ "header position to be used by UBI.\n"
+ "Example 1: mtd=/dev/mtd0 - attach MTD device "
+ "/dev/mtd0.\n"
+ "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
"with name \"content\" using VID header offset 1984, and "
"MTD device number 4 with default VID header offset.");
diff --git a/drivers/mtd/ubi/debug.h b/drivers/mtd/ubi/debug.h
index f30bcb372c0..17a10712972 100644
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -96,8 +96,11 @@ void ubi_dbg_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len);
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len);
+int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len);
#else
#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
+#define ubi_dbg_check_write(ubi, buf, pnum, offset, len) 0
#endif
#ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT
@@ -176,6 +179,7 @@ static inline int ubi_dbg_is_erase_failure(void)
#define ubi_dbg_is_write_failure() 0
#define ubi_dbg_is_erase_failure() 0
#define ubi_dbg_check_all_ff(ubi, pnum, offset, len) 0
+#define ubi_dbg_check_write(ubi, buf, pnum, offset, len) 0
#endif /* !CONFIG_MTD_UBI_DEBUG */
#endif /* !__UBI_DEBUG_H__ */
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index 8aa51e7a6a7..b4ecc84c754 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -143,7 +143,7 @@ int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
err = paranoid_check_not_bad(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
addr = (loff_t)pnum * ubi->peb_size + offset;
retry:
@@ -236,12 +236,12 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
err = paranoid_check_not_bad(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
/* The area we are writing to has to contain all 0xFF bytes */
err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
if (offset >= ubi->leb_start) {
/*
@@ -250,10 +250,10 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
*/
err = paranoid_check_peb_ec_hdr(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
err = paranoid_check_peb_vid_hdr(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
}
if (ubi_dbg_is_write_failure()) {
@@ -273,6 +273,21 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
} else
ubi_assert(written == len);
+ if (!err) {
+ err = ubi_dbg_check_write(ubi, buf, pnum, offset, len);
+ if (err)
+ return err;
+
+ /*
+ * Since we always write sequentially, the rest of the PEB has
+ * to contain only 0xFF bytes.
+ */
+ offset += len;
+ len = ubi->peb_size - offset;
+ if (len)
+ err = ubi_dbg_check_all_ff(ubi, pnum, offset, len);
+ }
+
return err;
}
@@ -348,7 +363,7 @@ retry:
err = ubi_dbg_check_all_ff(ubi, pnum, 0, ubi->peb_size);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
if (ubi_dbg_is_erase_failure() && !err) {
dbg_err("cannot erase PEB %d (emulated)", pnum);
@@ -542,7 +557,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
err = paranoid_check_not_bad(ubi, pnum);
if (err != 0)
- return err > 0 ? -EINVAL : err;
+ return err;
if (ubi->ro_mode) {
ubi_err("read-only mode");
@@ -819,7 +834,7 @@ int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
err = paranoid_check_ec_hdr(ubi, pnum, ec_hdr);
if (err)
- return -EINVAL;
+ return err;
err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
return err;
@@ -1083,7 +1098,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
err = paranoid_check_peb_ec_hdr(ubi, pnum);
if (err)
- return err > 0 ? -EINVAL : err;
+ return err;
vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
vid_hdr->version = UBI_VERSION;
@@ -1092,7 +1107,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
err = paranoid_check_vid_hdr(ubi, pnum, vid_hdr);
if (err)
- return -EINVAL;
+ return err;
p = (char *)vid_hdr - ubi->vid_hdr_shift;
err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
@@ -1107,8 +1122,8 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
* @ubi: UBI device description object
* @pnum: physical eraseblock number to check
*
- * This function returns zero if the physical eraseblock is good, a positive
- * number if it is bad and a negative error code if an error occurred.
+ * This function returns zero if the physical eraseblock is good, %-EINVAL if
+ * it is bad and a negative error code if an error occurred.
*/
static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
{
@@ -1120,7 +1135,7 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
ubi_err("paranoid check failed for PEB %d", pnum);
ubi_dbg_dump_stack();
- return err;
+ return err > 0 ? -EINVAL : err;
}
/**
@@ -1130,7 +1145,7 @@ static int paranoid_check_not_bad(const struct ubi_device *ubi, int pnum)
* @ec_hdr: the erase counter header to check
*
* This function returns zero if the erase counter header contains valid
- * values, and %1 if not.
+ * values, and %-EINVAL if not.
*/
static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_ec_hdr *ec_hdr)
@@ -1156,7 +1171,7 @@ static int paranoid_check_ec_hdr(const struct ubi_device *ubi, int pnum,
fail:
ubi_dbg_dump_ec_hdr(ec_hdr);
ubi_dbg_dump_stack();
- return 1;
+ return -EINVAL;
}
/**
@@ -1164,8 +1179,8 @@ fail:
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
*
- * This function returns zero if the erase counter header is all right, %1 if
- * not, and a negative error code if an error occurred.
+ * This function returns zero if the erase counter header is all right, and
+ * a negative error code if not or if an error occurred.
*/
static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
{
@@ -1188,7 +1203,7 @@ static int paranoid_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
ubi_err("paranoid check failed for PEB %d", pnum);
ubi_dbg_dump_ec_hdr(ec_hdr);
ubi_dbg_dump_stack();
- err = 1;
+ err = -EINVAL;
goto exit;
}
@@ -1206,7 +1221,7 @@ exit:
* @vid_hdr: the volume identifier header to check
*
* This function returns zero if the volume identifier header is all right, and
- * %1 if not.
+ * %-EINVAL if not.
*/
static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum,
const struct ubi_vid_hdr *vid_hdr)
@@ -1233,7 +1248,7 @@ fail:
ubi_err("paranoid check failed for PEB %d", pnum);
ubi_dbg_dump_vid_hdr(vid_hdr);
ubi_dbg_dump_stack();
- return 1;
+ return -EINVAL;
}
@@ -1243,7 +1258,7 @@ fail:
* @pnum: the physical eraseblock number to check
*
* This function returns zero if the volume identifier header is all right,
- * %1 if not, and a negative error code if an error occurred.
+ * and a negative error code if not or if an error occurred.
*/
static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
{
@@ -1270,7 +1285,7 @@ static int paranoid_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
ubi_err("paranoid check failed for PEB %d", pnum);
ubi_dbg_dump_vid_hdr(vid_hdr);
ubi_dbg_dump_stack();
- err = 1;
+ err = -EINVAL;
goto exit;
}
@@ -1282,6 +1297,61 @@ exit:
}
/**
+ * ubi_dbg_check_write - make sure write succeeded.
+ * @ubi: UBI device description object
+ * @buf: buffer with data which were written
+ * @pnum: physical eraseblock number the data were written to
+ * @offset: offset within the physical eraseblock the data were written to
+ * @len: how many bytes were written
+ *
+ * This function reads data which were recently written and compares them with
+ * the original data buffer - the data have to match. Returns zero if the data
+ * match and a negative error code if not or in case of failure.
+ */
+int ubi_dbg_check_write(struct ubi_device *ubi, const void *buf, int pnum,
+ int offset, int len)
+{
+ int err, i;
+
+ mutex_lock(&ubi->dbg_buf_mutex);
+ err = ubi_io_read(ubi, ubi->dbg_peb_buf, pnum, offset, len);
+ if (err)
+ goto out_unlock;
+
+ for (i = 0; i < len; i++) {
+ uint8_t c = ((uint8_t *)buf)[i];
+ uint8_t c1 = ((uint8_t *)ubi->dbg_peb_buf)[i];
+ int dump_len;
+
+ if (c == c1)
+ continue;
+
+ ubi_err("paranoid check failed for PEB %d:%d, len %d",
+ pnum, offset, len);
+ ubi_msg("data differ at position %d", i);
+ dump_len = max_t(int, 128, len - i);
+ ubi_msg("hex dump of the original buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ buf + i, dump_len, 1);
+ ubi_msg("hex dump of the read buffer from %d to %d",
+ i, i + dump_len);
+ print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
+ ubi->dbg_peb_buf + i, dump_len, 1);
+ ubi_dbg_dump_stack();
+ err = -EINVAL;
+ goto out_unlock;
+ }
+ mutex_unlock(&ubi->dbg_buf_mutex);
+
+ return 0;
+
+out_unlock:
+ mutex_unlock(&ubi->dbg_buf_mutex);
+ return err;
+}
+
+/**
* ubi_dbg_check_all_ff - check that a region of flash is empty.
* @ubi: UBI device description object
* @pnum: the physical eraseblock number to check
@@ -1289,8 +1359,8 @@ exit:
* @len: the length of the region to check
*
* This function returns zero if only 0xFF bytes are present at offset
- * @offset of the physical eraseblock @pnum, %1 if not, and a negative error
- * code if an error occurred.
+ * @offset of the physical eraseblock @pnum, and a negative error code if not
+ * or if an error occurred.
*/
int ubi_dbg_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
{
@@ -1321,7 +1391,7 @@ fail:
ubi_msg("hex dump of the %d-%d region", offset, offset + len);
print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
ubi->dbg_peb_buf, len, 1);
- err = 1;
+ err = -EINVAL;
error:
ubi_dbg_dump_stack();
mutex_unlock(&ubi->dbg_buf_mutex);
diff --git a/drivers/mtd/ubi/scan.c b/drivers/mtd/ubi/scan.c
index 90af61a2c3e..594184bbd56 100644
--- a/drivers/mtd/ubi/scan.c
+++ b/drivers/mtd/ubi/scan.c
@@ -974,11 +974,8 @@ struct ubi_scan_info *ubi_scan(struct ubi_device *ubi)
seb->ec = si->mean_ec;
err = paranoid_check_si(ubi, si);
- if (err) {
- if (err > 0)
- err = -EINVAL;
+ if (err)
goto out_vidh;
- }
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
@@ -1086,8 +1083,8 @@ void ubi_scan_destroy_si(struct ubi_scan_info *si)
* @ubi: UBI device description object
* @si: scanning information
*
- * This function returns zero if the scanning information is all right, %1 if
- * not and a negative error code if an error occurred.
+ * This function returns zero if the scanning information is all right, and a
+ * negative error code if not or if an error occurred.
*/
static int paranoid_check_si(struct ubi_device *ubi, struct ubi_scan_info *si)
{
@@ -1346,7 +1343,7 @@ bad_vid_hdr:
out:
ubi_dbg_dump_stack();
- return 1;
+ return -EINVAL;
}
#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 600c7229d5c..f64ddabd4ac 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -464,7 +464,7 @@ retry:
ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
ubi_err("new PEB %d does not contain all 0xFF bytes", e->pnum);
- return err > 0 ? -EINVAL : err;
+ return err;
}
return e->pnum;
@@ -513,7 +513,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);
err = paranoid_check_ec(ubi, e->pnum, e->ec);
- if (err > 0)
+ if (err)
return -EINVAL;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
@@ -1572,8 +1572,7 @@ void ubi_wl_close(struct ubi_device *ubi)
* @ec: the erase counter to check
*
* This function returns zero if the erase counter of physical eraseblock @pnum
- * is equivalent to @ec, %1 if not, and a negative error code if an error
- * occurred.
+ * is equivalent to @ec, and a negative error code if not or if an error occurred.
*/
static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
@@ -1611,8 +1610,8 @@ out_free:
* @e: the wear-leveling entry to check
* @root: the root of the tree
*
- * This function returns zero if @e is in the @root RB-tree and %1 if it is
- * not.
+ * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
+ * is not.
*/
static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
struct rb_root *root)
@@ -1623,7 +1622,7 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
ubi_err("paranoid check failed for PEB %d, EC %d, RB-tree %p ",
e->pnum, e->ec, root);
ubi_dbg_dump_stack();
- return 1;
+ return -EINVAL;
}
/**
@@ -1632,7 +1631,7 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
* @ubi: UBI device description object
* @e: the wear-leveling entry to check
*
- * This function returns zero if @e is in @ubi->pq and %1 if it is not.
+ * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
*/
static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
@@ -1647,6 +1646,6 @@ static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
e->pnum, e->ec);
ubi_dbg_dump_stack();
- return 1;
+ return -EINVAL;
}
#endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 4d4cad393dc..b6de7b1e2a5 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -812,7 +812,7 @@ static void set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
outb(RX_PROM, RX_CMD);
inb(RX_STATUS);
- } else if (dev->mc_list || dev->flags & IFF_ALLMULTI) {
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
/* Multicast or all multicast is the same */
outb(RX_MULT, RX_CMD);
inb(RX_STATUS); /* Clear status. */
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c
index 9257d7ce037..04b5bba1902 100644
--- a/drivers/net/3c505.c
+++ b/drivers/net/3c505.c
@@ -1216,7 +1216,7 @@ static int elp_close(struct net_device *dev)
static void elp_set_mc_list(struct net_device *dev)
{
elp_device *adapter = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
unsigned long flags;
@@ -1229,11 +1229,10 @@ static void elp_set_mc_list(struct net_device *dev)
/* send a "load multicast list" command to the board, max 10 addrs/cmd */
/* if num_addrs==0 the list will be cleared */
adapter->tx_pcb.command = CMD_LOAD_MULTICAST_LIST;
- adapter->tx_pcb.length = 6 * dev->mc_count;
- for (i = 0; i < dev->mc_count; i++) {
- memcpy(adapter->tx_pcb.data.multicast[i], dmi->dmi_addr, 6);
- dmi = dmi->next;
- }
+ adapter->tx_pcb.length = 6 * netdev_mc_count(dev);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy(adapter->tx_pcb.data.multicast[i++], dmi->dmi_addr, 6);
adapter->got[CMD_LOAD_MULTICAST_LIST] = 0;
if (!send_pcb(dev, &adapter->tx_pcb))
pr_err("%s: couldn't send set_multicast command\n", dev->name);
@@ -1244,7 +1243,7 @@ static void elp_set_mc_list(struct net_device *dev)
TIMEOUT_MSG(__LINE__);
}
}
- if (dev->mc_count)
+ if (!netdev_mc_empty(dev))
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD | RECV_MULTI;
else /* num_addrs == 0 */
adapter->tx_pcb.data.configure = NO_LOOPBACK | RECV_BROAD;
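The conversions in these network drivers all follow the same shape: the open-coded walk over dev->mc_list with dev->mc_count is replaced by the netdev_mc_count()/netdev_mc_empty() accessors and the netdev_for_each_mc_addr() iterator. A condensed, driver-neutral sketch of the pattern (names illustrative):

	#include <linux/netdevice.h>
	#include <linux/string.h>

	/* Copy all multicast addresses into a hardware filter table. */
	static void example_load_mc_filter(struct net_device *dev, u8 *table)
	{
		struct dev_mc_list *dmi;
		int i = 0;

		if (netdev_mc_empty(dev))
			return;		/* nothing to program */

		netdev_for_each_mc_addr(dmi, dev)
			memcpy(&table[6 * i++], dmi->dmi_addr, 6);
		/* i == netdev_mc_count(dev) at this point */
	}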
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c
index 9d85efce591..902435a7646 100644
--- a/drivers/net/3c509.c
+++ b/drivers/net/3c509.c
@@ -1111,12 +1111,14 @@ set_multicast_list(struct net_device *dev)
unsigned long flags;
struct el3_private *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
+ int mc_count = netdev_mc_count(dev);
if (el3_debug > 1) {
static int old;
- if (old != dev->mc_count) {
- old = dev->mc_count;
- pr_debug("%s: Setting Rx mode to %d addresses.\n", dev->name, dev->mc_count);
+ if (old != mc_count) {
+ old = mc_count;
+ pr_debug("%s: Setting Rx mode to %d addresses.\n",
+ dev->name, mc_count);
}
}
spin_lock_irqsave(&lp->lock, flags);
@@ -1124,7 +1126,7 @@ set_multicast_list(struct net_device *dev)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
}
- else if (dev->mc_count || (dev->flags&IFF_ALLMULTI)) {
+ else if (mc_count || (dev->flags&IFF_ALLMULTI)) {
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast, ioaddr + EL3_CMD);
}
else
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index 063b049ffe5..1e898b1c806 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -1536,7 +1536,7 @@ static void set_rx_mode(struct net_device *dev)
pr_debug("%s: Setting promiscuous mode.\n",
dev->name);
new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm;
- } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
new_mode = SetRxFilter | RxStation | RxMulticast | RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
diff --git a/drivers/net/3c523.c b/drivers/net/3c523.c
index 27d80ca5e4c..beed4fa10c6 100644
--- a/drivers/net/3c523.c
+++ b/drivers/net/3c523.c
@@ -625,8 +625,8 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ struct dev_mc_list *dmi;
+ int num_addrs = netdev_mc_count(dev);
ptr = (void *) ((char *) p->scb + sizeof(struct scb_struct));
@@ -771,7 +771,7 @@ static int init586(struct net_device *dev)
* Multicast setup
*/
- if (dev->mc_count) {
+ if (num_addrs) {
/* I don't understand this: do we really need memory after the init? */
int len = ((char *) p->iscp - (char *) ptr - 8) / 6;
if (len <= 0) {
@@ -787,10 +787,9 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_cmd = CMD_MCSETUP | CMD_LAST;
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = num_addrs * 6;
- for (i = 0; i < num_addrs; i++) {
- memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr, 6);
- dmi = dmi->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy((char *) mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd = CUC_START;
elmc_id_attn586();
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 36c4191e7bc..5c07b147ec9 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -1526,32 +1526,29 @@ static void do_mc32_set_multicast_list(struct net_device *dev, int retry)
if ((dev->flags&IFF_PROMISC) ||
(dev->flags&IFF_ALLMULTI) ||
- dev->mc_count > 10)
+ netdev_mc_count(dev) > 10)
/* Enable promiscuous mode */
filt |= 1;
- else if(dev->mc_count)
+ else if (!netdev_mc_empty(dev))
{
unsigned char block[62];
unsigned char *bp;
- struct dev_mc_list *dmc=dev->mc_list;
-
- int i;
+ struct dev_mc_list *dmc;
if(retry==0)
lp->mc_list_valid = 0;
if(!lp->mc_list_valid)
{
block[1]=0;
- block[0]=dev->mc_count;
+ block[0]=netdev_mc_count(dev);
bp=block+2;
- for(i=0;i<dev->mc_count;i++)
- {
+ netdev_for_each_mc_addr(dmc, dev) {
memcpy(bp, dmc->dmi_addr, 6);
bp+=6;
- dmc=dmc->next;
}
- if(mc32_command_nowait(dev, 2, block, 2+6*dev->mc_count)==-1)
+ if(mc32_command_nowait(dev, 2, block,
+ 2+6*netdev_mc_count(dev))==-1)
{
lp->mc_reload_wait = 1;
return;
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 39db0e96815..f965431f492 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -375,7 +375,7 @@ static struct vortex_chip_info {
};
-static struct pci_device_id vortex_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(vortex_pci_tbl) = {
{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
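DEFINE_PCI_DEVICE_TABLE() is a convenience macro from <linux/pci.h>; in kernels of this vintage it roughly expands to a const array annotated for the device-init const section, so the table can be dropped along with other device-init data on non-hotplug builds (sketch of the expansion, from memory rather than from this patch):

	/* Roughly what the macro expands to: */
	#define DEFINE_PCI_DEVICE_TABLE(_table) \
		const struct pci_device_id _table[] __devinitconst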
@@ -2970,7 +2970,7 @@ static void set_rx_mode(struct net_device *dev)
if (vortex_debug > 3)
pr_notice("%s: Setting promiscuous mode.\n", dev->name);
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
- } else if ((dev->mc_list) || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
} else
new_mode = SetRxFilter | RxStation | RxBroadcast;
diff --git a/drivers/net/7990.c b/drivers/net/7990.c
index b1e5764628c..4e9a5a20b6a 100644
--- a/drivers/net/7990.c
+++ b/drivers/net/7990.c
@@ -595,9 +595,8 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
/* set all multicast bits */
@@ -611,9 +610,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 3f452bcbfb9..3d4406b1665 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -46,6 +46,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "8139cp"
#define DRV_VERSION "1.3"
#define DRV_RELDATE "Mar 22, 2004"
@@ -104,8 +106,6 @@ static int multicast_filter_limit = 32;
module_param(multicast_filter_limit, int, 0);
MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
-#define PFX DRV_NAME ": "
-
#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
NETIF_MSG_LINK)
@@ -394,7 +394,7 @@ static int cp_get_eeprom(struct net_device *dev,
static int cp_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data);
-static struct pci_device_id cp_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
{ PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
{ },
@@ -470,9 +470,8 @@ static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
u32 status, u32 len)
{
- if (netif_msg_rx_err (cp))
- pr_debug("%s: rx err, slot %d status 0x%x len %d\n",
- cp->dev->name, rx_tail, status, len);
+ netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
cp->dev->stats.rx_errors++;
if (status & RxErrFrame)
cp->dev->stats.rx_frame_errors++;
@@ -545,9 +544,8 @@ rx_status_loop:
goto rx_next;
}
- if (netif_msg_rx_status(cp))
- pr_debug("%s: rx slot %d status 0x%x len %d\n",
- dev->name, rx_tail, status, len);
+ netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
+ rx_tail, status, len);
new_skb = netdev_alloc_skb_ip_align(dev, buflen);
if (!new_skb) {
@@ -621,9 +619,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
if (!status || (status == 0xFFFF))
return IRQ_NONE;
- if (netif_msg_intr(cp))
- pr_debug("%s: intr, status %04x cmd %02x cpcmd %04x\n",
- dev->name, status, cpr8(Cmd), cpr16(CpCmd));
+ netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
+ status, cpr8(Cmd), cpr16(CpCmd));
cpw16(IntrStatus, status & ~cp_rx_intr_mask);
@@ -654,8 +651,8 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
- pr_err("%s: PCI bus error, status=%04x, PCI status=%04x\n",
- dev->name, status, pci_status);
+ netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
+ status, pci_status);
/* TODO: reset hardware */
}
@@ -700,9 +697,8 @@ static void cp_tx (struct cp_private *cp)
if (status & LastFrag) {
if (status & (TxError | TxFIFOUnder)) {
- if (netif_msg_tx_err(cp))
- pr_debug("%s: tx err, status 0x%x\n",
- cp->dev->name, status);
+ netif_dbg(cp, tx_err, cp->dev,
+ "tx err, status 0x%x\n", status);
cp->dev->stats.tx_errors++;
if (status & TxOWC)
cp->dev->stats.tx_window_errors++;
@@ -717,8 +713,8 @@ static void cp_tx (struct cp_private *cp)
((status >> TxColCntShift) & TxColCntMask);
cp->dev->stats.tx_packets++;
cp->dev->stats.tx_bytes += skb->len;
- if (netif_msg_tx_done(cp))
- pr_debug("%s: tx done, slot %d\n", cp->dev->name, tx_tail);
+ netif_dbg(cp, tx_done, cp->dev,
+ "tx done, slot %d\n", tx_tail);
}
dev_kfree_skb_irq(skb);
}
@@ -752,8 +748,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->lock, intr_flags);
- pr_err(PFX "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -878,9 +873,8 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
wmb();
}
cp->tx_head = entry;
- if (netif_msg_tx_queued(cp))
- pr_debug("%s: tx queued, slot %d, skblen %d\n",
- dev->name, entry, skb->len);
+ netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
+ entry, skb->len);
if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
@@ -899,7 +893,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
{
struct cp_private *cp = netdev_priv(dev);
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
/* Note: do not reorder, GCC is clever about common statements. */
@@ -909,7 +903,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -918,8 +912,7 @@ static void __cp_set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -993,7 +986,7 @@ static void cp_reset_hw (struct cp_private *cp)
schedule_timeout_uninterruptible(10);
}
- pr_err("%s: hardware reset timeout\n", cp->dev->name);
+ netdev_err(cp->dev, "hardware reset timeout\n");
}
static inline void cp_start_hw (struct cp_private *cp)
@@ -1160,8 +1153,7 @@ static int cp_open (struct net_device *dev)
struct cp_private *cp = netdev_priv(dev);
int rc;
- if (netif_msg_ifup(cp))
- pr_debug("%s: enabling interface\n", dev->name);
+ netif_dbg(cp, ifup, dev, "enabling interface\n");
rc = cp_alloc_rings(cp);
if (rc)
@@ -1195,8 +1187,7 @@ static int cp_close (struct net_device *dev)
napi_disable(&cp->napi);
- if (netif_msg_ifdown(cp))
- pr_debug("%s: disabling interface\n", dev->name);
+ netif_dbg(cp, ifdown, dev, "disabling interface\n");
spin_lock_irqsave(&cp->lock, flags);
@@ -1219,9 +1210,9 @@ static void cp_tx_timeout(struct net_device *dev)
unsigned long flags;
int rc;
- pr_warning("%s: Transmit timeout, status %2x %4x %4x %4x\n",
- dev->name, cpr8(Cmd), cpr16(CpCmd),
- cpr16(IntrStatus), cpr16(IntrMask));
+ netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
+ cpr8(Cmd), cpr16(CpCmd),
+ cpr16(IntrStatus), cpr16(IntrMask));
spin_lock_irqsave(&cp->lock, flags);
@@ -1874,8 +1865,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
dev_info(&pdev->dev,
- "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
- pdev->vendor, pdev->device, pdev->revision);
+ "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
+ pdev->vendor, pdev->device, pdev->revision);
return -ENODEV;
}
@@ -1933,14 +1924,13 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
- "No usable DMA configuration, aborting.\n");
+ "No usable DMA configuration, aborting\n");
goto err_out_res;
}
rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc) {
dev_err(&pdev->dev,
- "No usable consistent DMA configuration, "
- "aborting.\n");
+ "No usable consistent DMA configuration, aborting\n");
goto err_out_res;
}
}
@@ -1952,7 +1942,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (!regs) {
rc = -EIO;
dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
- (unsigned long long)pci_resource_len(pdev, 1),
+ (unsigned long long)pci_resource_len(pdev, 1),
(unsigned long long)pciaddr);
goto err_out_res;
}
@@ -1990,11 +1980,8 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto err_out_iomap;
- pr_info("%s: RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
- dev->name,
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
+ dev->base_addr, dev->dev_addr, dev->irq);
pci_set_drvdata(pdev, dev);
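
The 8139cp hunks above combine three logging conversions: a file-wide pr_fmt() prefix, netdev_<level>() messages that print the interface name themselves, and netif_dbg(), which folds the netif_msg_<type>() test into the call. A small sketch of the resulting shape is below, assuming a private struct with a msg_enable bitmask; the function and struct names are made up.

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* prefixes every pr_*() in the file */

#include <linux/netdevice.h>

struct example_priv {
	u32 msg_enable;				/* NETIF_MSG_* bitmask (assumed field) */
};

static void example_rx_error(struct net_device *dev, u32 status)
{
	struct example_priv *priv = netdev_priv(dev);

	/* was: if (netif_msg_rx_err(priv)) pr_debug("%s: ...", dev->name, ...); */
	netif_dbg(priv, rx_err, dev, "rx err, status 0x%x\n", status);

	/* was: pr_err("%s: ...", dev->name, ...); the helper names the device */
	netdev_err(dev, "rx error, status 0x%x\n", status);
}
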
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 25f7339daab..b4efc913978 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -89,6 +89,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "8139too"
#define DRV_VERSION "0.9.28"
@@ -111,7 +113,6 @@
#include <asm/irq.h>
#define RTL8139_DRIVER_NAME DRV_NAME " Fast Ethernet driver " DRV_VERSION
-#define PFX DRV_NAME ": "
/* Default Message level */
#define RTL8139_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
@@ -130,9 +131,9 @@
# define assert(expr) do {} while (0)
#else
# define assert(expr) \
- if(unlikely(!(expr))) { \
- pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
- #expr, __FILE__, __func__, __LINE__); \
+ if (unlikely(!(expr))) { \
+ pr_err("Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
}
#endif
@@ -231,7 +232,7 @@ static const struct {
};
-static struct pci_device_id rtl8139_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8139_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
@@ -957,7 +958,7 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
pdev->device == PCI_DEVICE_ID_REALTEK_8139 &&
pdev->subsystem_vendor == PCI_VENDOR_ID_ATHEROS &&
pdev->subsystem_device == PCI_DEVICE_ID_REALTEK_8139) {
- pr_info("8139too: OQO Model 2 detected. Forcing PIO\n");
+ pr_info("OQO Model 2 detected. Forcing PIO\n");
use_io = 1;
}
@@ -1010,21 +1011,19 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
tp->mii.reg_num_mask = 0x1f;
/* dev is fully set up and ready to use now */
- pr_debug("about to register device named %s (%p)...\n", dev->name, dev);
+ pr_debug("about to register device named %s (%p)...\n",
+ dev->name, dev);
i = register_netdev (dev);
if (i) goto err_out;
pci_set_drvdata (pdev, dev);
- pr_info("%s: %s at 0x%lx, %pM, IRQ %d\n",
- dev->name,
- board_info[ent->driver_data].name,
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ netdev_info(dev, "%s at 0x%lx, %pM, IRQ %d\n",
+ board_info[ent->driver_data].name,
+ dev->base_addr, dev->dev_addr, dev->irq);
- pr_debug("%s: Identified 8139 chip type '%s'\n",
- dev->name, rtl_chip_info[tp->chipset].name);
+ netdev_dbg(dev, "Identified 8139 chip type '%s'\n",
+ rtl_chip_info[tp->chipset].name);
/* Find the connected MII xcvrs.
Doing this in open() would allow detecting external xcvrs later, but
@@ -1037,13 +1036,12 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
if (mii_status != 0xffff && mii_status != 0x0000) {
u16 advertising = mdio_read(dev, phy, 4);
tp->phys[phy_idx++] = phy;
- pr_info("%s: MII transceiver %d status 0x%4.4x advertising %4.4x.\n",
- dev->name, phy, mii_status, advertising);
+ netdev_info(dev, "MII transceiver %d status 0x%04x advertising %04x\n",
+ phy, mii_status, advertising);
}
}
if (phy_idx == 0) {
- pr_info("%s: No MII transceivers found! Assuming SYM transceiver.\n",
- dev->name);
+ netdev_info(dev, "No MII transceivers found! Assuming SYM transceiver\n");
tp->phys[0] = 32;
}
} else
@@ -1062,15 +1060,15 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
if (board_idx < MAX_UNITS && full_duplex[board_idx] > 0)
tp->mii.full_duplex = full_duplex[board_idx];
if (tp->mii.full_duplex) {
- pr_info("%s: Media type forced to Full Duplex.\n", dev->name);
+ netdev_info(dev, "Media type forced to Full Duplex\n");
/* Changing the MII-advertised media because might prevent
re-connection. */
tp->mii.force_media = 1;
}
if (tp->default_port) {
- pr_info(" Forcing %dMbps %s-duplex operation.\n",
- (option & 0x20 ? 100 : 10),
- (option & 0x10 ? "full" : "half"));
+ netdev_info(dev, " Forcing %dMbps %s-duplex operation\n",
+ (option & 0x20 ? 100 : 10),
+ (option & 0x10 ? "full" : "half"));
mdio_write(dev, tp->phys[0], 0,
((option & 0x20) ? 0x2000 : 0) | /* 100Mbps? */
((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
@@ -1330,12 +1328,12 @@ static int rtl8139_open (struct net_device *dev)
rtl8139_hw_start (dev);
netif_start_queue (dev);
- if (netif_msg_ifup(tp))
- pr_debug("%s: rtl8139_open() ioaddr %#llx IRQ %d"
- " GP Pins %2.2x %s-duplex.\n", dev->name,
- (unsigned long long)pci_resource_start (tp->pci_dev, 1),
- dev->irq, RTL_R8 (MediaStatus),
- tp->mii.full_duplex ? "full" : "half");
+ netif_dbg(tp, ifup, dev,
+ "%s() ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
+ __func__,
+ (unsigned long long)pci_resource_start (tp->pci_dev, 1),
+ dev->irq, RTL_R8 (MediaStatus),
+ tp->mii.full_duplex ? "full" : "half");
rtl8139_start_thread(tp);
@@ -1393,7 +1391,7 @@ static void rtl8139_hw_start (struct net_device *dev)
RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
}
- pr_debug("init buffer addresses\n");
+ netdev_dbg(dev, "init buffer addresses\n");
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
@@ -1555,14 +1553,11 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
tp->mii.full_duplex = duplex;
if (mii_lpa) {
- pr_info("%s: Setting %s-duplex based on MII #%d link"
- " partner ability of %4.4x.\n",
- dev->name,
- tp->mii.full_duplex ? "full" : "half",
- tp->phys[0], mii_lpa);
+ netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
+ tp->mii.full_duplex ? "full" : "half",
+ tp->phys[0], mii_lpa);
} else {
- pr_info("%s: media is unconnected, link down, or incompatible connection\n",
- dev->name);
+ netdev_info(dev, "media is unconnected, link down, or incompatible connection\n");
}
#if 0
RTL_W8 (Cfg9346, Cfg9346_Unlock);
@@ -1576,13 +1571,12 @@ static inline void rtl8139_thread_iter (struct net_device *dev,
rtl8139_tune_twister (dev, tp);
- pr_debug("%s: Media selection tick, Link partner %4.4x.\n",
- dev->name, RTL_R16 (NWayLPAR));
- pr_debug("%s: Other registers are IntMask %4.4x IntStatus %4.4x\n",
- dev->name, RTL_R16 (IntrMask), RTL_R16 (IntrStatus));
- pr_debug("%s: Chip config %2.2x %2.2x.\n",
- dev->name, RTL_R8 (Config0),
- RTL_R8 (Config1));
+ netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
+ RTL_R16(NWayLPAR));
+ netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x\n",
+ RTL_R16(IntrMask), RTL_R16(IntrStatus));
+ netdev_dbg(dev, "Chip config %02x %02x\n",
+ RTL_R8(Config0), RTL_R8(Config1));
}
static void rtl8139_thread (struct work_struct *work)
@@ -1640,17 +1634,17 @@ static void rtl8139_tx_timeout_task (struct work_struct *work)
int i;
u8 tmp8;
- pr_debug("%s: Transmit timeout, status %2.2x %4.4x %4.4x media %2.2x.\n",
- dev->name, RTL_R8 (ChipCmd),
- RTL_R16(IntrStatus), RTL_R16(IntrMask), RTL_R8(MediaStatus));
+ netdev_dbg(dev, "Transmit timeout, status %02x %04x %04x media %02x\n",
+ RTL_R8(ChipCmd), RTL_R16(IntrStatus),
+ RTL_R16(IntrMask), RTL_R8(MediaStatus));
/* Emit info to figure out what went wrong. */
- pr_debug("%s: Tx queue start entry %ld dirty entry %ld.\n",
- dev->name, tp->cur_tx, tp->dirty_tx);
+ netdev_dbg(dev, "Tx queue start entry %ld dirty entry %ld\n",
+ tp->cur_tx, tp->dirty_tx);
for (i = 0; i < NUM_TX_DESC; i++)
- pr_debug("%s: Tx descriptor %d is %8.8lx.%s\n",
- dev->name, i, RTL_R32 (TxStatus0 + (i * 4)),
- i == tp->dirty_tx % NUM_TX_DESC ?
- " (queue head)" : "");
+ netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
+ i, RTL_R32(TxStatus0 + (i * 4)),
+ i == tp->dirty_tx % NUM_TX_DESC ?
+ " (queue head)" : "");
tp->xstats.tx_timeouts++;
@@ -1729,9 +1723,8 @@ static netdev_tx_t rtl8139_start_xmit (struct sk_buff *skb,
netif_stop_queue (dev);
spin_unlock_irqrestore(&tp->lock, flags);
- if (netif_msg_tx_queued(tp))
- pr_debug("%s: Queued Tx packet size %u to slot %d.\n",
- dev->name, len, entry);
+ netif_dbg(tp, tx_queued, dev, "Queued Tx packet size %u to slot %d\n",
+ len, entry);
return NETDEV_TX_OK;
}
@@ -1760,9 +1753,8 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
/* Note: TxCarrierLost is always asserted at 100mbps. */
if (txstatus & (TxOutOfWindow | TxAborted)) {
/* There was an major error, log it. */
- if (netif_msg_tx_err(tp))
- pr_debug("%s: Transmit error, Tx status %8.8x.\n",
- dev->name, txstatus);
+ netif_dbg(tp, tx_err, dev, "Transmit error, Tx status %08x\n",
+ txstatus);
dev->stats.tx_errors++;
if (txstatus & TxAborted) {
dev->stats.tx_aborted_errors++;
@@ -1792,8 +1784,8 @@ static void rtl8139_tx_interrupt (struct net_device *dev,
#ifndef RTL8139_NDEBUG
if (tp->cur_tx - dirty_tx > NUM_TX_DESC) {
- pr_err("%s: Out-of-sync dirty pointer, %ld vs. %ld.\n",
- dev->name, dirty_tx, tp->cur_tx);
+ netdev_err(dev, "Out-of-sync dirty pointer, %ld vs. %ld\n",
+ dirty_tx, tp->cur_tx);
dirty_tx += NUM_TX_DESC;
}
#endif /* RTL8139_NDEBUG */
@@ -1816,14 +1808,13 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
int tmp_work;
#endif
- if (netif_msg_rx_err (tp))
- pr_debug("%s: Ethernet frame had errors, status %8.8x.\n",
- dev->name, rx_status);
+ netif_dbg(tp, rx_err, dev, "Ethernet frame had errors, status %08x\n",
+ rx_status);
dev->stats.rx_errors++;
if (!(rx_status & RxStatusOK)) {
if (rx_status & RxTooLong) {
- pr_debug("%s: Oversized Ethernet frame, status %4.4x!\n",
- dev->name, rx_status);
+ netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
+ rx_status);
/* A.C.: The chip hangs here. */
}
if (rx_status & (RxBadSymbol | RxBadAlign))
@@ -1855,7 +1846,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
break;
}
if (tmp_work <= 0)
- pr_warning(PFX "rx stop wait too long\n");
+ netdev_warn(dev, "rx stop wait too long\n");
/* restart receive */
tmp_work = 200;
while (--tmp_work > 0) {
@@ -1866,7 +1857,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
break;
}
if (tmp_work <= 0)
- pr_warning(PFX "tx/rx enable wait too long\n");
+ netdev_warn(dev, "tx/rx enable wait too long\n");
/* and reinitialize all rx related registers */
RTL_W8_F (Cfg9346, Cfg9346_Unlock);
@@ -1877,7 +1868,7 @@ static void rtl8139_rx_err (u32 rx_status, struct net_device *dev,
RTL_W32 (RxConfig, tp->rx_config);
tp->cur_rx = 0;
- pr_debug("init buffer addresses\n");
+ netdev_dbg(dev, "init buffer addresses\n");
/* Lock Config[01234] and BMCR register writes */
RTL_W8 (Cfg9346, Cfg9346_Lock);
@@ -1931,10 +1922,9 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
unsigned int cur_rx = tp->cur_rx;
unsigned int rx_size = 0;
- pr_debug("%s: In rtl8139_rx(), current %4.4x BufAddr %4.4x,"
- " free to %4.4x, Cmd %2.2x.\n", dev->name, (u16)cur_rx,
- RTL_R16 (RxBufAddr),
- RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
+ netdev_dbg(dev, "In %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ __func__, (u16)cur_rx,
+ RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));
while (netif_running(dev) && received < budget &&
(RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
@@ -1950,19 +1940,12 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
rx_size = rx_status >> 16;
pkt_size = rx_size - 4;
- if (netif_msg_rx_status(tp))
- pr_debug("%s: rtl8139_rx() status %4.4x, size %4.4x,"
- " cur %4.4x.\n", dev->name, rx_status,
- rx_size, cur_rx);
+ netif_dbg(tp, rx_status, dev, "%s() status %04x, size %04x, cur %04x\n",
+ __func__, rx_status, rx_size, cur_rx);
#if RTL8139_DEBUG > 2
- {
- int i;
- pr_debug("%s: Frame contents ", dev->name);
- for (i = 0; i < 70; i++)
- pr_cont(" %2.2x",
- rx_ring[ring_offset + i]);
- pr_cont(".\n");
- }
+ print_hex_dump(KERN_DEBUG, "Frame contents: ",
+ DUMP_PREFIX_OFFSET, 16, 1,
+ &rx_ring[ring_offset], 70, true);
#endif
/* Packet copy from FIFO still in progress.
@@ -1973,14 +1956,11 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
if (!tp->fifo_copy_timeout)
tp->fifo_copy_timeout = jiffies + 2;
else if (time_after(jiffies, tp->fifo_copy_timeout)) {
- pr_debug("%s: hung FIFO. Reset.", dev->name);
+ netdev_dbg(dev, "hung FIFO. Reset\n");
rx_size = 0;
goto no_early_rx;
}
- if (netif_msg_intr(tp)) {
- pr_debug("%s: fifo copy in progress.",
- dev->name);
- }
+ netif_dbg(tp, intr, dev, "fifo copy in progress\n");
tp->xstats.early_rx++;
break;
}
@@ -2021,8 +2001,7 @@ no_early_rx:
netif_receive_skb (skb);
} else {
if (net_ratelimit())
- pr_warning("%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netdev_warn(dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
}
received++;
@@ -2036,10 +2015,9 @@ no_early_rx:
if (unlikely(!received || rx_size == 0xfff0))
rtl8139_isr_ack(tp);
- pr_debug("%s: Done rtl8139_rx(), current %4.4x BufAddr %4.4x,"
- " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
- RTL_R16 (RxBufAddr),
- RTL_R16 (RxBufPtr), RTL_R8 (ChipCmd));
+ netdev_dbg(dev, "Done %s(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ __func__, cur_rx,
+ RTL_R16(RxBufAddr), RTL_R16(RxBufPtr), RTL_R8(ChipCmd));
tp->cur_rx = cur_rx;
@@ -2060,8 +2038,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
void __iomem *ioaddr,
int status, int link_changed)
{
- pr_debug("%s: Abnormal interrupt, status %8.8x.\n",
- dev->name, status);
+ netdev_dbg(dev, "Abnormal interrupt, status %08x\n", status);
assert (dev != NULL);
assert (tp != NULL);
@@ -2089,8 +2066,7 @@ static void rtl8139_weird_interrupt (struct net_device *dev,
pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);
- pr_err("%s: PCI Bus error %4.4x.\n",
- dev->name, pci_cmd_status);
+ netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
}
}
@@ -2183,8 +2159,8 @@ static irqreturn_t rtl8139_interrupt (int irq, void *dev_instance)
out:
spin_unlock (&tp->lock);
- pr_debug("%s: exiting interrupt, intr_status=%#4.4x.\n",
- dev->name, RTL_R16 (IntrStatus));
+ netdev_dbg(dev, "exiting interrupt, intr_status=%#4.4x\n",
+ RTL_R16(IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -2233,9 +2209,8 @@ static int rtl8139_close (struct net_device *dev)
netif_stop_queue(dev);
napi_disable(&tp->napi);
- if (netif_msg_ifdown(tp))
- pr_debug("%s: Shutting down ethercard, status was 0x%4.4x.\n",
- dev->name, RTL_R16 (IntrStatus));
+ netif_dbg(tp, ifdown, dev, "Shutting down ethercard, status was 0x%04x\n",
+ RTL_R16(IntrStatus));
spin_lock_irqsave (&tp->lock, flags);
@@ -2509,11 +2484,11 @@ static void __set_rx_mode (struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
void __iomem *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
- pr_debug("%s: rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
- dev->name, dev->flags, RTL_R32 (RxConfig));
+ netdev_dbg(dev, "rtl8139_set_rx_mode(%04x) done -- Rx config %08lx\n",
+ dev->flags, RTL_R32(RxConfig));
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
@@ -2521,7 +2496,7 @@ static void __set_rx_mode (struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
@@ -2530,8 +2505,7 @@ static void __set_rx_mode (struct net_device *dev)
struct dev_mc_list *mclist;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
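
The RTL8139_DEBUG > 2 hunk in rtl8139_rx() above replaces a hand-rolled byte loop with print_hex_dump(). A minimal sketch of the same call outside the driver, keeping the 70-byte length from that hunk; the wrapper function is illustrative only.

#include <linux/kernel.h>

static void example_dump_frame(const unsigned char *rx_ring,
			       unsigned int ring_offset)
{
	/* offset-prefixed rows, 16 bytes per row, 1-byte groups, ASCII column */
	print_hex_dump(KERN_DEBUG, "Frame contents: ", DUMP_PREFIX_OFFSET,
		       16, 1, &rx_ring[ring_offset], 70, true);
}
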
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 1663bc9e45d..f94d17d78bb 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -1505,7 +1505,7 @@ static void set_multicast_list(struct net_device *dev)
int config = 0, cnt;
DEB(DEB_MULTI,printk(KERN_DEBUG "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
- dev->name, dev->mc_count,
+ dev->name, netdev_mc_count(dev),
dev->flags & IFF_PROMISC ? "ON" : "OFF",
dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
@@ -1533,7 +1533,7 @@ static void set_multicast_list(struct net_device *dev)
i596_add_cmd(dev, &lp->cf_cmd.cmd);
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT)
{
cnt = MAX_MC_CNT;
@@ -1541,7 +1541,7 @@ static void set_multicast_list(struct net_device *dev)
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
@@ -1550,13 +1550,16 @@ static void set_multicast_list(struct net_device *dev)
return;
cmd = &lp->mc_cmd;
cmd->cmd.command = CmdMulticastList;
- cmd->mc_cnt = dev->mc_count * 6;
+ cmd->mc_cnt = cnt * ETH_ALEN;
cp = cmd->mc_addrs;
- for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
- memcpy(cp, dmi->dmi_addr, 6);
+ netdev_for_each_mc_addr(dmi, dev) {
+ if (!cnt--)
+ break;
+ memcpy(cp, dmi->dmi_addr, ETH_ALEN);
if (i596_debug > 1)
DEB(DEB_MULTI,printk(KERN_INFO "%s: Adding address %pM\n",
dev->name, cp));
+ cp += ETH_ALEN;
}
i596_add_cmd(dev, &cmd->cmd);
}
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 18300625b05..7029cd50c45 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -90,6 +90,18 @@ config MACVLAN
To compile this driver as a module, choose M here: the module
will be called macvlan.
+config MACVTAP
+ tristate "MAC-VLAN based tap driver (EXPERIMENTAL)"
+ depends on MACVLAN
+ help
+ This adds a specialized tap character device driver that is based
+ on the MAC-VLAN network interface, called macvtap. A macvtap device
+ can be added in the same way as a macvlan device, using 'type
+ macvlan', and then be accessed through the tap user space interface.
+
+ To compile this driver as a module, choose M here: the module
+ will be called macvtap.
+
config EQUALIZER
tristate "EQL (serial line load balancing) support"
---help---
@@ -868,8 +880,8 @@ config BFIN_RX_DESC_NUM
Set the number of buffer packets used in driver.
config BFIN_MAC_RMII
- bool "RMII PHY Interface (EXPERIMENTAL)"
- depends on BFIN_MAC && EXPERIMENTAL
+ bool "RMII PHY Interface"
+ depends on BFIN_MAC
default y if BFIN527_EZKIT
default n if BFIN537_STAMP
help
@@ -983,6 +995,14 @@ config ETHOC
help
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
+config GRETH
+ tristate "Aeroflex Gaisler GRETH Ethernet MAC support"
+ depends on SPARC
+ select PHYLIB
+ select CRC32
+ help
+ Say Y here if you want to use the Aeroflex Gaisler GRETH Ethernet MAC.
+
config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32
@@ -1368,6 +1388,17 @@ config AC3200
To compile this driver as a module, choose M here. The module
will be called ac3200.
+config KSZ884X_PCI
+ tristate "Micrel KSZ8841/2 PCI"
+ depends on NET_PCI && PCI
+ select MII
+ select CRC32
+ help
+ This PCI driver is for Micrel KSZ8841/KSZ8842 PCI Ethernet chip.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ksz884x.
+
config APRICOT
tristate "Apricot Xen-II on board Ethernet"
depends on NET_PCI && ISA
@@ -1883,7 +1914,8 @@ config 68360_ENET
config FEC
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
- depends on M523x || M527x || M5272 || M528x || M520x || M532x || MACH_MX27 || ARCH_MX35 || ARCH_MX25
+ depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
+ MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
help
Say Y here if you want to use the built-in 10/100 Fast ethernet
controller on some Motorola ColdFire and Freescale i.MX processors.
@@ -1939,6 +1971,7 @@ config ATL2
config XILINX_EMACLITE
tristate "Xilinx 10/100 Ethernet Lite support"
depends on PPC32 || MICROBLAZE
+ select PHYLIB
help
This driver supports the 10/100 Ethernet Lite from Xilinx.
@@ -2356,20 +2389,6 @@ config GELIC_WIRELESS
the driver automatically distinguishes the models, you can
safely enable this option even if you have a wireless-less model.
-config GELIC_WIRELESS_OLD_PSK_INTERFACE
- bool "PS3 Wireless private PSK interface (OBSOLETE)"
- depends on GELIC_WIRELESS
- select WEXT_PRIV
- help
- This option retains the obsolete private interface to pass
- the PSK from user space programs to the driver. The PSK
- stands for 'Pre Shared Key' and is used for WPA[2]-PSK
- (WPA-Personal) environment.
- If WPA[2]-PSK is used and you need to use old programs that
- support only this old interface, say Y. Otherwise N.
-
- If unsure, say N.
-
config FSL_PQ_MDIO
tristate "Freescale PQ MDIO"
depends on FSL_SOC
@@ -2618,6 +2637,28 @@ config IXGBE_DCB
If unsure, say N.
+config IXGBEVF
+ tristate "Intel(R) 82599 Virtual Function Ethernet support"
+ depends on PCI_MSI
+ ---help---
+ This driver supports Intel(R) 82599 virtual functions. For more
+ information on how to identify your adapter, go to the Adapter &
+ Driver ID Guide at:
+
+ <http://support.intel.com/support/network/sb/CS-008441.htm>
+
+ For general information and support, go to the Intel support
+ website at:
+
+ <http://support.intel.com>
+
+ More specific information on configuring the driver is in
+ <file:Documentation/networking/ixgbevf.txt>.
+
+ To compile this driver as a module, choose M here. The module
+ will be called ixgbevf. MSI-X interrupt support is required
+ for this driver to work correctly.
+
config IXGB
tristate "Intel(R) PRO/10GbE support"
depends on PCI
@@ -2756,6 +2797,13 @@ config BNX2X
To compile this driver as a module, choose M here: the module
will be called bnx2x. This is recommended.
+config QLCNIC
+ tristate "QLOGIC QLCNIC 1/10Gb Converged Ethernet NIC Support"
+ depends on PCI
+ help
+ This driver supports QLogic QLE8240 and QLE8242 Converged Ethernet
+ devices.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index ad1346dd9da..478886234c2 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -14,6 +14,7 @@ obj-$(CONFIG_IBM_NEW_EMAC) += ibm_newemac/
obj-$(CONFIG_IGB) += igb/
obj-$(CONFIG_IGBVF) += igbvf/
obj-$(CONFIG_IXGBE) += ixgbe/
+obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_IP1000) += ipg.o
obj-$(CONFIG_CHELSIO_T1) += chelsio/
@@ -95,6 +96,7 @@ obj-$(CONFIG_SKFP) += skfp/
obj-$(CONFIG_KS8842) += ks8842.o
obj-$(CONFIG_KS8851) += ks8851.o
obj-$(CONFIG_KS8851_MLL) += ks8851_mll.o
+obj-$(CONFIG_KSZ884X_PCI) += ksz884x.o
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
obj-$(CONFIG_ADAPTEC_STARFIRE) += starfire.o
@@ -148,6 +150,7 @@ ll_temac-objs := ll_temac_main.o ll_temac_mdio.o
obj-$(CONFIG_XILINX_LL_TEMAC) += ll_temac.o
obj-$(CONFIG_XILINX_EMACLITE) += xilinx_emaclite.o
obj-$(CONFIG_QLA3XXX) += qla3xxx.o
+obj-$(CONFIG_QLCNIC) += qlcnic/
obj-$(CONFIG_QLGE) += qlge/
obj-$(CONFIG_PPP) += ppp_generic.o
@@ -167,6 +170,7 @@ obj-$(CONFIG_XEN_NETDEV_FRONTEND) += xen-netfront.o
obj-$(CONFIG_DUMMY) += dummy.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_MACVLAN) += macvlan.o
+obj-$(CONFIG_MACVTAP) += macvtap.o
obj-$(CONFIG_DE600) += de600.o
obj-$(CONFIG_DE620) += de620.o
obj-$(CONFIG_LANCE) += lance.o
@@ -246,6 +250,7 @@ pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_ENC28J60) += enc28j60.o
obj-$(CONFIG_ETHOC) += ethoc.o
+obj-$(CONFIG_GRETH) += greth.o
obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o
diff --git a/drivers/net/a2065.c b/drivers/net/a2065.c
index b7ec0368d7e..bd4d829eca1 100644
--- a/drivers/net/a2065.c
+++ b/drivers/net/a2065.c
@@ -603,9 +603,8 @@ static void lance_load_multicast (struct net_device *dev)
struct lance_private *lp = netdev_priv(dev);
volatile struct lance_init_block *ib = lp->init_block;
volatile u16 *mcast_table = (u16 *)&ib->filter;
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
/* set all multicast bits */
@@ -619,9 +618,8 @@ static void lance_load_multicast (struct net_device *dev)
ib->filter [1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++){
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index d82a9a99475..4ae750ef1e1 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -134,7 +134,7 @@
#define PCI_DEVICE_ID_SGI_ACENIC 0x0009
#endif
-static struct pci_device_id acenic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(acenic_pci_tbl) = {
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_FIBRE,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, },
{ PCI_VENDOR_ID_ALTEON, PCI_DEVICE_ID_ALTEON_ACENIC_COPPER,
@@ -2845,7 +2845,7 @@ static void ace_set_multicast_list(struct net_device *dev)
* set the entire multicast list at a time and keeping track of
* it here is going to be messy.
*/
- if ((dev->mc_count) && !(ap->mcast_all)) {
+ if (!netdev_mc_empty(dev) && !ap->mcast_all) {
cmd.evt = C_SET_MULTICAST_MODE;
cmd.code = C_C_MCAST_ENABLE;
cmd.idx = 0;
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index 766aabfdfc7..b8a59d255b4 100644
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
module_param_array(dynamic_ipg, bool, NULL, 0);
MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
-static struct pci_device_id amd8111e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -1176,8 +1176,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
/* Schedule a polling routine */
__napi_schedule(&lp->napi);
} else if (intren0 & RINTEN0) {
- printk("************Driver bug! \
- interrupt while in poll\n");
+ printk("************Driver bug! interrupt while in poll\n");
/* Fix by disable receive interrupts */
writel(RINTEN0, mmio + INTEN0);
}
@@ -1378,28 +1377,28 @@ list to the device.
*/
static void amd8111e_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list* mc_ptr;
+ struct dev_mc_list *mc_ptr;
struct amd8111e_priv *lp = netdev_priv(dev);
u32 mc_filter[2] ;
- int i,bit_num;
+ int bit_num;
+
if(dev->flags & IFF_PROMISC){
writel( VAL2 | PROM, lp->mmio + CMD2);
return;
}
else
writel( PROM, lp->mmio + CMD2);
- if(dev->flags & IFF_ALLMULTI || dev->mc_count > MAX_FILTER_SIZE){
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > MAX_FILTER_SIZE) {
/* get all multicast packet */
mc_filter[1] = mc_filter[0] = 0xffffffff;
- lp->mc_list = dev->mc_list;
lp->options |= OPTION_MULTICAST_ENABLE;
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
return;
}
- if( dev->mc_count == 0 ){
+ if (netdev_mc_empty(dev)) {
/* get only own packets */
mc_filter[1] = mc_filter[0] = 0;
- lp->mc_list = NULL;
lp->options &= ~OPTION_MULTICAST_ENABLE;
amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
/* disable promiscous mode */
@@ -1408,10 +1407,8 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
}
/* load all the multicast addresses in the logic filter */
lp->options |= OPTION_MULTICAST_ENABLE;
- lp->mc_list = dev->mc_list;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mc_ptr = dev->mc_list; mc_ptr && i < dev->mc_count;
- i++, mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, dev) {
bit_num = (ether_crc_le(ETH_ALEN, mc_ptr->dmi_addr) >> 26) & 0x3f;
mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
diff --git a/drivers/net/amd8111e.h b/drivers/net/amd8111e.h
index 28c60a71ed5..ac36eb6981e 100644
--- a/drivers/net/amd8111e.h
+++ b/drivers/net/amd8111e.h
@@ -789,7 +789,6 @@ struct amd8111e_priv{
char opened;
struct net_device_stats stats;
unsigned int drv_rx_errors;
- struct dev_mc_list* mc_list;
struct amd8111e_coalesce_conf coal_conf;
struct ipg_info ipg_data;
diff --git a/drivers/net/appletalk/ltpc.c b/drivers/net/appletalk/ltpc.c
index dbfbd3b7ff8..8ea4ec705be 100644
--- a/drivers/net/appletalk/ltpc.c
+++ b/drivers/net/appletalk/ltpc.c
@@ -1125,7 +1125,6 @@ struct net_device * __init ltpc_probe(void)
printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
dev->netdev_ops = &ltpc_netdev;
- dev->mc_list = NULL;
dev->base_addr = io;
dev->irq = irq;
dev->dma = dma;
diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c
index dbf4de39754..b68e1eb405f 100644
--- a/drivers/net/arcnet/com20020-pci.c
+++ b/drivers/net/arcnet/com20020-pci.c
@@ -144,7 +144,7 @@ static void __devexit com20020pci_remove(struct pci_dev *pdev)
free_netdev(dev);
}
-static struct pci_device_id com20020pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(com20020pci_id_table) = {
{ 0x1571, 0xa001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1571, 0xa003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c
index c35af3e106b..08d8be47dae 100644
--- a/drivers/net/ariadne.c
+++ b/drivers/net/ariadne.c
@@ -123,9 +123,7 @@ static void ariadne_reset(struct net_device *dev);
static irqreturn_t ariadne_interrupt(int irq, void *data);
static int ariadne_close(struct net_device *dev);
static struct net_device_stats *ariadne_get_stats(struct net_device *dev);
-#ifdef HAVE_MULTICAST
static void set_multicast_list(struct net_device *dev);
-#endif
static void memcpyw(volatile u_short *dest, u_short *src, int len)
@@ -821,7 +819,7 @@ static void set_multicast_list(struct net_device *dev)
lance->RDP = PROM; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer filtering. */
memset(multicast_table, (num_addrs == 0) ? 0 : -1,
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 164b37e85ee..f1f58c5e27b 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -351,13 +351,13 @@ static struct net_device_stats *am79c961_getstats (struct net_device *dev)
return &priv->stats;
}
-static void am79c961_mc_hash(struct dev_mc_list *dmi, unsigned short *hash)
+static void am79c961_mc_hash(char *addr, unsigned short *hash)
{
- if (dmi->dmi_addrlen == ETH_ALEN && dmi->dmi_addr[0] & 0x01) {
+ if (addr[0] & 0x01) {
int idx, bit;
u32 crc;
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+ crc = ether_crc_le(ETH_ALEN, addr);
idx = crc >> 30;
bit = (crc >> 26) & 15;
@@ -387,8 +387,8 @@ static void am79c961_setmulticastlist (struct net_device *dev)
memset(multi_hash, 0x00, sizeof(multi_hash));
- for (dmi = dev->mc_list; dmi; dmi = dmi->next)
- am79c961_mc_hash(dmi, multi_hash);
+ netdev_for_each_mc_addr(dmi, dev)
+ am79c961_mc_hash(dmi->dmi_addr, multi_hash);
}
spin_lock_irqsave(&priv->chip_lock, flags);
@@ -680,7 +680,7 @@ static const struct net_device_ops am79c961_netdev_ops = {
#endif
};
-static int __init am79c961_probe(struct platform_device *pdev)
+static int __devinit am79c961_probe(struct platform_device *pdev)
{
struct resource *res;
struct net_device *dev;
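
The am79c961 hunk above makes the hash helper take a raw address and derive the logical-address filter bit from ether_crc_le(): the top two CRC bits select one of four 16-bit words, the next four select the bit. A sketch of the same computation, assuming a caller-supplied 4x16-bit hash array; the helper name is invented.

#include <linux/crc32.h>
#include <linux/if_ether.h>

static void example_mc_hash(const u8 *addr, u16 hash[4])
{
	u32 crc;

	if (!(addr[0] & 0x01))			/* only multicast addresses hash */
		return;

	crc = ether_crc_le(ETH_ALEN, addr);
	hash[crc >> 30] |= 1 << ((crc >> 26) & 15);
}
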
diff --git a/drivers/net/arm/at91_ether.c b/drivers/net/arm/at91_ether.c
index c8bc60a7040..8b23d5a175b 100644
--- a/drivers/net/arm/at91_ether.c
+++ b/drivers/net/arm/at91_ether.c
@@ -558,14 +558,11 @@ static void at91ether_sethashtable(struct net_device *dev)
{
struct dev_mc_list *curr;
unsigned long mc_filter[2];
- unsigned int i, bitnr;
+ unsigned int bitnr;
mc_filter[0] = mc_filter[1] = 0;
- curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
- if (!curr) break; /* unexpected end of list */
-
+ netdev_for_each_mc_addr(curr, dev) {
bitnr = hash_get_index(curr->dmi_addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -592,7 +589,7 @@ static void at91ether_set_multicast_list(struct net_device *dev)
at91_emac_write(AT91_EMAC_HSH, -1);
at91_emac_write(AT91_EMAC_HSL, -1);
cfg |= AT91_EMAC_MTI;
- } else if (dev->mc_count > 0) { /* Enable specific multicasts */
+ } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */
at91ether_sethashtable(dev);
cfg |= AT91_EMAC_MTI;
} else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index b25467ac895..bf72d57a0af 100644
--- a/drivers/net/arm/ep93xx_eth.c
+++ b/drivers/net/arm/ep93xx_eth.c
@@ -9,6 +9,8 @@
* (at your option) any later version.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__
+
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/kernel.h>
@@ -20,9 +22,9 @@
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
-#include <mach/ep93xx-regs.h>
-#include <mach/platform.h>
-#include <asm/io.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
#define DRV_MODULE_NAME "ep93xx-eth"
#define DRV_MODULE_VERSION "0.1"
@@ -185,7 +187,47 @@ struct ep93xx_priv
#define wrw(ep, off, val) __raw_writew((val), (ep)->base_addr + (off))
#define wrl(ep, off, val) __raw_writel((val), (ep)->base_addr + (off))
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg);
+static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int data;
+ int i;
+
+ wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10) {
+ pr_info("mdio read timed out\n");
+ data = 0xffff;
+ } else {
+ data = rdl(ep, REG_MIIDATA);
+ }
+
+ return data;
+}
+
+static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
+{
+ struct ep93xx_priv *ep = netdev_priv(dev);
+ int i;
+
+ wrl(ep, REG_MIIDATA, data);
+ wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
+
+ for (i = 0; i < 10; i++) {
+ if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
+ break;
+ msleep(1);
+ }
+
+ if (i == 10)
+ pr_info("mdio write timed out\n");
+}
static struct net_device_stats *ep93xx_get_stats(struct net_device *dev)
{
@@ -217,14 +259,11 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
rstat->rstat1 = 0;
if (!(rstat0 & RSTAT0_EOF))
- printk(KERN_CRIT "ep93xx_rx: not end-of-frame "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-frame %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_EOB))
- printk(KERN_CRIT "ep93xx_rx: not end-of-buffer "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("not end-of-buffer %.8x %.8x\n", rstat0, rstat1);
if ((rstat1 & RSTAT1_BUFFER_INDEX) >> 16 != entry)
- printk(KERN_CRIT "ep93xx_rx: entry mismatch "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_crit("entry mismatch %.8x %.8x\n", rstat0, rstat1);
if (!(rstat0 & RSTAT0_RWE)) {
ep->stats.rx_errors++;
@@ -241,8 +280,7 @@ static int ep93xx_rx(struct net_device *dev, int processed, int budget)
length = rstat1 & RSTAT1_FRAME_LENGTH;
if (length > MAX_PKT_SIZE) {
- printk(KERN_NOTICE "ep93xx_rx: invalid length "
- " %.8x %.8x\n", rstat0, rstat1);
+ pr_notice("invalid length %.8x %.8x\n", rstat0, rstat1);
goto err;
}
@@ -371,11 +409,9 @@ static void ep93xx_tx_complete(struct net_device *dev)
tstat->tstat0 = 0;
if (tstat0 & TSTAT0_FA)
- printk(KERN_CRIT "ep93xx_tx_complete: frame aborted "
- " %.8x\n", tstat0);
+ pr_crit("frame aborted %.8x\n", tstat0);
if ((tstat0 & TSTAT0_BUFFER_INDEX) != entry)
- printk(KERN_CRIT "ep93xx_tx_complete: entry mismatch "
- " %.8x\n", tstat0);
+ pr_crit("entry mismatch %.8x\n", tstat0);
if (tstat0 & TSTAT0_TXWE) {
int length = ep->descs->tdesc[entry].tdesc1 & 0xfff;
@@ -536,7 +572,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
return 1;
}
@@ -581,7 +617,7 @@ static int ep93xx_start_hw(struct net_device *dev)
}
if (i == 10) {
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to start\n");
+ pr_crit("hw failed to start\n");
return 1;
}
@@ -617,7 +653,7 @@ static void ep93xx_stop_hw(struct net_device *dev)
}
if (i == 10)
- printk(KERN_CRIT DRV_MODULE_NAME ": hw failed to reset\n");
+ pr_crit("hw failed to reset\n");
}
static int ep93xx_open(struct net_device *dev)
@@ -681,48 +717,6 @@ static int ep93xx_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return generic_mii_ioctl(&ep->mii, data, cmd, NULL);
}
-static int ep93xx_mdio_read(struct net_device *dev, int phy_id, int reg)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int data;
- int i;
-
- wrl(ep, REG_MIICMD, REG_MIICMD_READ | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10) {
- printk(KERN_INFO DRV_MODULE_NAME ": mdio read timed out\n");
- data = 0xffff;
- } else {
- data = rdl(ep, REG_MIIDATA);
- }
-
- return data;
-}
-
-static void ep93xx_mdio_write(struct net_device *dev, int phy_id, int reg, int data)
-{
- struct ep93xx_priv *ep = netdev_priv(dev);
- int i;
-
- wrl(ep, REG_MIIDATA, data);
- wrl(ep, REG_MIICMD, REG_MIICMD_WRITE | (phy_id << 5) | reg);
-
- for (i = 0; i < 10; i++) {
- if ((rdl(ep, REG_MIISTS) & REG_MIISTS_BUSY) == 0)
- break;
- msleep(1);
- }
-
- if (i == 10)
- printk(KERN_INFO DRV_MODULE_NAME ": mdio write timed out\n");
-}
-
static void ep93xx_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
strcpy(info->driver, DRV_MODULE_NAME);
@@ -825,12 +819,19 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
struct ep93xx_eth_data *data;
struct net_device *dev;
struct ep93xx_priv *ep;
+ struct resource *mem;
+ int irq;
int err;
if (pdev == NULL)
return -ENODEV;
data = pdev->dev.platform_data;
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!mem || irq < 0)
+ return -ENXIO;
+
dev = ep93xx_dev_alloc(data);
if (dev == NULL) {
err = -ENOMEM;
@@ -842,23 +843,21 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, dev);
- ep->res = request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start + 1,
- dev_name(&pdev->dev));
+ ep->res = request_mem_region(mem->start, resource_size(mem),
+ dev_name(&pdev->dev));
if (ep->res == NULL) {
dev_err(&pdev->dev, "Could not reserve memory region\n");
err = -ENOMEM;
goto err_out;
}
- ep->base_addr = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start);
+ ep->base_addr = ioremap(mem->start, resource_size(mem));
if (ep->base_addr == NULL) {
dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
err = -EIO;
goto err_out;
}
- ep->irq = pdev->resource[1].start;
+ ep->irq = irq;
ep->mii.phy_id = data->phy_id;
ep->mii.phy_id_mask = 0x1f;
@@ -877,11 +876,8 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
goto err_out;
}
- printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, "
- "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
- ep->irq, data->dev_addr[0], data->dev_addr[1],
- data->dev_addr[2], data->dev_addr[3],
- data->dev_addr[4], data->dev_addr[5]);
+ printk(KERN_INFO "%s: ep93xx on-chip ethernet, IRQ %d, %pM\n",
+ dev->name, ep->irq, dev->dev_addr);
return 0;
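
The ep93xx probe hunk above stops indexing pdev->resource[] directly and instead looks resources up with platform_get_resource()/platform_get_irq(), sizing the mapping with resource_size(). A sketch of that probe pattern, with invented names and simplified error handling, follows.

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *mem;
	void __iomem *base;
	int irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!mem || irq < 0)
		return -ENXIO;

	if (!request_mem_region(mem->start, resource_size(mem), pdev->name))
		return -EBUSY;

	base = ioremap(mem->start, resource_size(mem));
	if (!base) {
		release_mem_region(mem->start, resource_size(mem));
		return -EIO;
	}

	/* ... register the net_device here; iounmap()/release on failure ... */
	return 0;
}
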
diff --git a/drivers/net/arm/ether3.c b/drivers/net/arm/ether3.c
index 1f7a69c929a..d9de9bce239 100644
--- a/drivers/net/arm/ether3.c
+++ b/drivers/net/arm/ether3.c
@@ -463,7 +463,7 @@ static void ether3_setmulticastlist(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* promiscuous mode */
priv(dev)->regs.config1 |= CFG1_RECVPROMISC;
- } else if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ } else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
priv(dev)->regs.config1 |= CFG1_RECVSPECBRMULTI;
} else
priv(dev)->regs.config1 |= CFG1_RECVSPECBROAD;
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index c3dfbdd2cdc..6e2ae1d06df 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -735,22 +735,25 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev)
static void eth_set_mcast_list(struct net_device *dev)
{
struct port *port = netdev_priv(dev);
- struct dev_mc_list *mclist = dev->mc_list;
+ struct dev_mc_list *mclist;
u8 diffs[ETH_ALEN], *addr;
- int cnt = dev->mc_count, i;
+ int i;
- if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) {
+ if ((dev->flags & IFF_PROMISC) || netdev_mc_empty(dev)) {
__raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN,
&port->regs->rx_control[0]);
return;
}
memset(diffs, 0, ETH_ALEN);
- addr = mclist->dmi_addr; /* first MAC address */
- while (--cnt && (mclist = mclist->next))
+ addr = NULL;
+ netdev_for_each_mc_addr(mclist, dev) {
+ if (!addr)
+ addr = mclist->dmi_addr; /* first MAC address */
for (i = 0; i < ETH_ALEN; i++)
diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
+ }
for (i = 0; i < ETH_ALEN; i++) {
__raw_writel(addr[i], &port->regs->mcast_addr[i]);
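
The ixp4xx hunk above keeps its "first address plus don't-care mask" filtering but builds it with the list iterator: the first multicast address becomes the base and diffs[] accumulates every bit that varies across the list. A sketch of that accumulation with an invented helper name:

#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static void example_build_mcast_mask(struct net_device *dev,
				     u8 base[ETH_ALEN], u8 diffs[ETH_ALEN])
{
	struct dev_mc_list *mclist;
	const u8 *addr = NULL;
	int i;

	if (netdev_mc_empty(dev))
		return;				/* caller disables address filtering instead */

	memset(diffs, 0, ETH_ALEN);

	netdev_for_each_mc_addr(mclist, dev) {
		if (!addr) {
			addr = mclist->dmi_addr;	/* first address is the base */
			memcpy(base, addr, ETH_ALEN);
		}
		for (i = 0; i < ETH_ALEN; i++)
			diffs[i] |= addr[i] ^ mclist->dmi_addr[i];
	}
}
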
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index be256b34cea..8ca639127db 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -327,25 +327,24 @@ ks8695_refill_rxbuffers(struct ks8695_priv *ksp)
*/
static void
ks8695_init_partial_multicast(struct ks8695_priv *ksp,
- struct dev_mc_list *addr,
- int nr_addr)
+ struct net_device *ndev)
{
u32 low, high;
int i;
+ struct dev_mc_list *dmi;
- for (i = 0; i < nr_addr; i++, addr = addr->next) {
- /* Ran out of addresses? */
- if (!addr)
- break;
+ i = 0;
+ netdev_for_each_mc_addr(dmi, ndev) {
/* Ran out of space in chip? */
BUG_ON(i == KS8695_NR_ADDRESSES);
- low = (addr->dmi_addr[2] << 24) | (addr->dmi_addr[3] << 16) |
- (addr->dmi_addr[4] << 8) | (addr->dmi_addr[5]);
- high = (addr->dmi_addr[0] << 8) | (addr->dmi_addr[1]);
+ low = (dmi->dmi_addr[2] << 24) | (dmi->dmi_addr[3] << 16) |
+ (dmi->dmi_addr[4] << 8) | (dmi->dmi_addr[5]);
+ high = (dmi->dmi_addr[0] << 8) | (dmi->dmi_addr[1]);
ks8695_writereg(ksp, KS8695_AAL_(i), low);
ks8695_writereg(ksp, KS8695_AAH_(i), AAH_E | high);
+ i++;
}
/* Clear the remaining Additional Station Addresses */
@@ -1207,7 +1206,7 @@ ks8695_set_multicast(struct net_device *ndev)
if (ndev->flags & IFF_ALLMULTI) {
/* enable all multicast mode */
ctrl |= DRXC_RM;
- } else if (ndev->mc_count > KS8695_NR_ADDRESSES) {
+ } else if (netdev_mc_count(ndev) > KS8695_NR_ADDRESSES) {
/* more specific multicast addresses than can be
* handled in hardware
*/
@@ -1215,8 +1214,7 @@ ks8695_set_multicast(struct net_device *ndev)
} else {
/* enable specific multicasts */
ctrl &= ~DRXC_RM;
- ks8695_init_partial_multicast(ksp, ndev->mc_list,
- ndev->mc_count);
+ ks8695_init_partial_multicast(ksp, ndev);
}
ks8695_writereg(ksp, KS8695_DRXC, ctrl);
@@ -1335,7 +1333,6 @@ ks8695_stop(struct net_device *ndev)
netif_stop_queue(ndev);
napi_disable(&ksp->napi);
- netif_carrier_off(ndev);
ks8695_shutdown(ksp);
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index b7f3866d546..febd813c916 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -858,10 +858,10 @@ static void w90p910_ether_set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
rx_mode = CAMCMR_AUP | CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_list)
- rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
- else
- rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
+ rx_mode = CAMCMR_AMP | CAMCMR_ABP | CAMCMR_ECMP;
+ else
+ rx_mode = CAMCMR_ECMP | CAMCMR_ABP;
__raw_writel(rx_mode, ether->reg + REG_CAMCMR);
}
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index b14f4799d5d..309843ab886 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -839,21 +839,19 @@ set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
struct dev_mc_list *mclist;
- int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
unsigned int bit =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit >> 3] |= (1 << bit);
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index cc9ed864391..280cfff48b4 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -1097,7 +1097,7 @@ static void set_multicast_list( struct net_device *dev )
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index efe5435bc3d..84ae905bf73 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -313,6 +313,9 @@ enum atl1c_rss_type {
enum atl1c_nic_type {
athr_l1c = 0,
athr_l2c = 1,
+ athr_l2c_b,
+ athr_l2c_b2,
+ athr_l1d,
};
enum atl1c_trans_queue {
@@ -426,8 +429,12 @@ struct atl1c_hw {
#define ATL1C_ASPM_L1_SUPPORT 0x0100
#define ATL1C_ASPM_CTRL_MON 0x0200
#define ATL1C_HIB_DISABLE 0x0400
-#define ATL1C_LINK_CAP_1000M 0x0800
-#define ATL1C_FPGA_VERSION 0x8000
+#define ATL1C_APS_MODE_ENABLE 0x0800
+#define ATL1C_LINK_EXT_SYNC 0x1000
+#define ATL1C_CLK_GATING_EN 0x2000
+#define ATL1C_FPGA_VERSION 0x8000
+ u16 link_cap_flags;
+#define ATL1C_LINK_CAP_1000M 0x0001
u16 cmb_tpd;
u16 cmb_rrd;
u16 cmb_rx_timer; /* 2us resolution */
diff --git a/drivers/net/atl1c/atl1c_ethtool.c b/drivers/net/atl1c/atl1c_ethtool.c
index 9b1e0eaebb5..61a0f2ff11e 100644
--- a/drivers/net/atl1c/atl1c_ethtool.c
+++ b/drivers/net/atl1c/atl1c_ethtool.c
@@ -37,7 +37,7 @@ static int atl1c_get_settings(struct net_device *netdev,
SUPPORTED_100baseT_Full |
SUPPORTED_Autoneg |
SUPPORTED_TP);
- if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M)
+ if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M)
ecmd->supported |= SUPPORTED_1000baseT_Full;
ecmd->advertising = ADVERTISED_TP;
diff --git a/drivers/net/atl1c/atl1c_hw.c b/drivers/net/atl1c/atl1c_hw.c
index 3e69b940b8f..f1389d664a2 100644
--- a/drivers/net/atl1c/atl1c_hw.c
+++ b/drivers/net/atl1c/atl1c_hw.c
@@ -70,17 +70,39 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
u32 otp_ctrl_data;
u32 twsi_ctrl_data;
u8 eth_addr[ETH_ALEN];
+ u16 phy_data;
+ bool raise_vol = false;
/* init */
addr[0] = addr[1] = 0;
AT_READ_REG(hw, REG_OTP_CTRL, &otp_ctrl_data);
if (atl1c_check_eeprom_exist(hw)) {
- /* Enable OTP CLK */
- if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
- otp_ctrl_data |= OTP_CTRL_CLK_EN;
- AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
- AT_WRITE_FLUSH(hw);
- msleep(1);
+ if (hw->nic_type == athr_l1c || hw->nic_type == athr_l2c_b) {
+ /* Enable OTP CLK */
+ if (!(otp_ctrl_data & OTP_CTRL_CLK_EN)) {
+ otp_ctrl_data |= OTP_CTRL_CLK_EN;
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+ AT_WRITE_FLUSH(hw);
+ msleep(1);
+ }
+ }
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data &= 0xFF7F;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data |= 0x8;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+ udelay(20);
+ raise_vol = true;
}
AT_READ_REG(hw, REG_TWSI_CTRL, &twsi_ctrl_data);
@@ -96,11 +118,31 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
return -1;
}
/* Disable OTP_CLK */
- if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
- otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
- AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
- AT_WRITE_FLUSH(hw);
- msleep(1);
+ if ((hw->nic_type == athr_l1c || hw->nic_type == athr_l2c)) {
+ if (otp_ctrl_data & OTP_CTRL_CLK_EN) {
+ otp_ctrl_data &= ~OTP_CTRL_CLK_EN;
+ AT_WRITE_REG(hw, REG_OTP_CTRL, otp_ctrl_data);
+ AT_WRITE_FLUSH(hw);
+ msleep(1);
+ }
+ }
+ if (raise_vol) {
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x00);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data |= 0x80;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data))
+ goto out;
+ phy_data &= 0xFFF7;
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data);
+ udelay(20);
+ }
}
/* maybe MAC-address is from BIOS */
@@ -114,6 +156,7 @@ static int atl1c_get_permanent_address(struct atl1c_hw *hw)
return 0;
}
+out:
return -1;
}
@@ -307,7 +350,7 @@ static int atl1c_phy_setup_adv(struct atl1c_hw *hw)
mii_adv_data |= ADVERTISE_10HALF | ADVERTISE_10FULL |
ADVERTISE_100HALF | ADVERTISE_100FULL;
- if (hw->ctrl_flags & ATL1C_LINK_CAP_1000M) {
+ if (hw->link_cap_flags & ATL1C_LINK_CAP_1000M) {
if (hw->autoneg_advertised & ADVERTISED_1000baseT_Half)
mii_giga_ctrl_data |= ADVERTISE_1000HALF;
if (hw->autoneg_advertised & ADVERTISED_1000baseT_Full)
@@ -389,6 +432,7 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
{
struct atl1c_adapter *adapter = hw->adapter;
struct pci_dev *pdev = adapter->pdev;
+ u16 phy_data;
u32 phy_ctrl_data = GPHY_CTRL_DEFAULT;
u32 mii_ier_data = IER_LINK_UP | IER_LINK_DOWN;
int err;
@@ -404,6 +448,21 @@ int atl1c_phy_reset(struct atl1c_hw *hw)
AT_WRITE_FLUSH(hw);
msleep(10);
+ if (hw->nic_type == athr_l2c_b) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x0A);
+ atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xDFFF);
+ }
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l2c_b2 ||
+ hw->nic_type == athr_l1d) {
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x3B);
+ atl1c_read_phy_reg(hw, MII_DBG_DATA, &phy_data);
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, phy_data & 0xFFF7);
+ msleep(20);
+ }
+
/*Enable PHY LinkChange Interrupt */
err = atl1c_write_phy_reg(hw, MII_IER, mii_ier_data);
if (err) {
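Note: the new l2c_b/l2c_b2/l1d handling above repeatedly does an indirect read-modify-write of the PHY vendor debug registers: the register index is written to MII_DBG_ADDR and its value is then read or written through MII_DBG_DATA. The helper below is only an illustrative sketch of that pattern (it is not part of the patch), written against the driver's own atl1c_read_phy_reg()/atl1c_write_phy_reg() accessors:

static int atl1c_dbg_reg_update(struct atl1c_hw *hw, u16 reg, u16 clear, u16 set)
{
	u16 val;

	/* select the debug register, then read-modify-write its value */
	atl1c_write_phy_reg(hw, MII_DBG_ADDR, reg);
	if (atl1c_read_phy_reg(hw, MII_DBG_DATA, &val))
		return -1;
	val = (val & ~clear) | set;
	return atl1c_write_phy_reg(hw, MII_DBG_DATA, val);
}

With such a helper, the OTP voltage raise above would read as atl1c_dbg_reg_update(hw, 0x00, 0x0080, 0) followed by atl1c_dbg_reg_update(hw, 0x3B, 0, 0x0008), and the restore path as the reverse.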
diff --git a/drivers/net/atl1c/atl1c_hw.h b/drivers/net/atl1c/atl1c_hw.h
index c2c738df5c6..1eeb3ed9f0c 100644
--- a/drivers/net/atl1c/atl1c_hw.h
+++ b/drivers/net/atl1c/atl1c_hw.h
@@ -57,6 +57,7 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define REG_LINK_CTRL 0x68
#define LINK_CTRL_L0S_EN 0x01
#define LINK_CTRL_L1_EN 0x02
+#define LINK_CTRL_EXT_SYNC 0x80
#define REG_VPD_CAP 0x6C
#define VPD_CAP_ID_MASK 0xff
@@ -156,6 +157,8 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define PM_CTRL_PM_REQ_TIMER_SHIFT 20
#define PM_CTRL_LCKDET_TIMER_MASK 0x3F
#define PM_CTRL_LCKDET_TIMER_SHIFT 24
+#define PM_CTRL_EN_BUFS_RX_L0S 0x10000000
+#define PM_CTRL_SA_DLY_EN 0x20000000
#define PM_CTRL_MAC_ASPM_CHK 0x40000000
#define PM_CTRL_HOTRST 0x80000000
@@ -314,6 +317,8 @@ int atl1c_restart_autoneg(struct atl1c_hw *hw);
#define MAC_CTRL_BC_EN 0x4000000
#define MAC_CTRL_DBG 0x8000000
#define MAC_CTRL_SINGLE_PAUSE_EN 0x10000000
+#define MAC_CTRL_HASH_ALG_CRC32 0x20000000
+#define MAC_CTRL_SPEED_MODE_SW 0x40000000
/* MAC IPG/IFG Control Register */
#define REG_MAC_IPG_IFG 0x1484
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 2f4be59b9c0..50dc531a02d 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -21,11 +21,18 @@
#include "atl1c.h"
-#define ATL1C_DRV_VERSION "1.0.0.1-NAPI"
+#define ATL1C_DRV_VERSION "1.0.0.2-NAPI"
char atl1c_driver_name[] = "atl1c";
char atl1c_driver_version[] = ATL1C_DRV_VERSION;
#define PCI_DEVICE_ID_ATTANSIC_L2C 0x1062
#define PCI_DEVICE_ID_ATTANSIC_L1C 0x1063
+#define PCI_DEVICE_ID_ATHEROS_L2C_B 0x2060 /* AR8152 v1.1 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L2C_B2 0x2062 /* AR8152 v2.0 Fast 10/100 */
+#define PCI_DEVICE_ID_ATHEROS_L1D 0x1073 /* AR8151 v1.0 Gigabit 1000 */
+
+#define L2CB_V10 0xc0
+#define L2CB_V11 0xc1
+
/*
* atl1c_pci_tbl - PCI Device ID Table
*
@@ -35,9 +42,12 @@ char atl1c_driver_version[] = ATL1C_DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1c_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1c_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1C)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2C)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L2C_B2)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATHEROS_L1D)},
/* required last entry */
{ 0 }
};
@@ -367,7 +377,7 @@ static void atl1c_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
hash_value = atl1c_hash_mc_addr(hw, mc_ptr->dmi_addr);
atl1c_hash_set(hw, hash_value);
}
@@ -593,11 +603,18 @@ static void atl1c_set_mac_type(struct atl1c_hw *hw)
case PCI_DEVICE_ID_ATTANSIC_L2C:
hw->nic_type = athr_l2c;
break;
-
case PCI_DEVICE_ID_ATTANSIC_L1C:
hw->nic_type = athr_l1c;
break;
-
+ case PCI_DEVICE_ID_ATHEROS_L2C_B:
+ hw->nic_type = athr_l2c_b;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L2C_B2:
+ hw->nic_type = athr_l2c_b2;
+ break;
+ case PCI_DEVICE_ID_ATHEROS_L1D:
+ hw->nic_type = athr_l1d;
+ break;
default:
break;
}
@@ -620,10 +637,13 @@ static int atl1c_setup_mac_funcs(struct atl1c_hw *hw)
hw->ctrl_flags |= ATL1C_ASPM_L0S_SUPPORT;
if (link_ctrl_data & LINK_CTRL_L1_EN)
hw->ctrl_flags |= ATL1C_ASPM_L1_SUPPORT;
+ if (link_ctrl_data & LINK_CTRL_EXT_SYNC)
+ hw->ctrl_flags |= ATL1C_LINK_EXT_SYNC;
- if (hw->nic_type == athr_l1c) {
+ if (hw->nic_type == athr_l1c ||
+ hw->nic_type == athr_l1d) {
hw->ctrl_flags |= ATL1C_ASPM_CTRL_MON;
- hw->ctrl_flags |= ATL1C_LINK_CAP_1000M;
+ hw->link_cap_flags |= ATL1C_LINK_CAP_1000M;
}
return 0;
}
@@ -1234,21 +1254,92 @@ static void atl1c_disable_l0s_l1(struct atl1c_hw *hw)
static void atl1c_set_aspm(struct atl1c_hw *hw, bool linkup)
{
u32 pm_ctrl_data;
+ u32 link_ctrl_data;
AT_READ_REG(hw, REG_PM_CTRL, &pm_ctrl_data);
-
+ AT_READ_REG(hw, REG_LINK_CTRL, &link_ctrl_data);
pm_ctrl_data &= ~PM_CTRL_SERDES_PD_EX_L1;
+
pm_ctrl_data &= ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ pm_ctrl_data &= ~(PM_CTRL_LCKDET_TIMER_MASK <<
+ PM_CTRL_LCKDET_TIMER_SHIFT);
pm_ctrl_data |= PM_CTRL_MAC_ASPM_CHK;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ pm_ctrl_data |= PM_CTRL_RBER_EN;
+ pm_ctrl_data |= PM_CTRL_SDES_EN;
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2) {
+ link_ctrl_data &= ~LINK_CTRL_EXT_SYNC;
+ if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE)) {
+ if (hw->nic_type == athr_l2c_b &&
+ hw->revision_id == L2CB_V10)
+ link_ctrl_data |= LINK_CTRL_EXT_SYNC;
+ }
+
+ AT_WRITE_REG(hw, REG_LINK_CTRL, link_ctrl_data);
+
+ pm_ctrl_data |= PM_CTRL_PCIE_RECV;
+ pm_ctrl_data |= AT_ASPM_L1_TIMER << PM_CTRL_PM_REQ_TIMER_SHIFT;
+ pm_ctrl_data &= ~PM_CTRL_EN_BUFS_RX_L0S;
+ pm_ctrl_data &= ~PM_CTRL_SA_DLY_EN;
+ pm_ctrl_data &= ~PM_CTRL_HOTRST;
+ pm_ctrl_data |= 1 << PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ pm_ctrl_data |= PM_CTRL_SERDES_PD_EX_L1;
+ }
if (linkup) {
- pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
- pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ if (hw->ctrl_flags & ATL1C_ASPM_L1_SUPPORT)
+ pm_ctrl_data |= PM_CTRL_ASPM_L1_EN;
+ if (hw->ctrl_flags & ATL1C_ASPM_L0S_SUPPORT)
+ pm_ctrl_data |= PM_CTRL_ASPM_L0S_EN;
+
+ if (hw->nic_type == athr_l2c_b ||
+ hw->nic_type == athr_l1d ||
+ hw->nic_type == athr_l2c_b2) {
+ if (hw->nic_type == athr_l2c_b)
+ if (!(hw->ctrl_flags & ATL1C_APS_MODE_ENABLE))
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data |= PM_CTRL_CLK_SWH_L1;
+ if (hw->adapter->link_speed == SPEED_100 ||
+ hw->adapter->link_speed == SPEED_1000) {
+ pm_ctrl_data &=
+ ~(PM_CTRL_L1_ENTRY_TIMER_MASK <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT);
+ if (hw->nic_type == athr_l1d)
+ pm_ctrl_data |= 0xF <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ else
+ pm_ctrl_data |= 7 <<
+ PM_CTRL_L1_ENTRY_TIMER_SHIFT;
+ }
+ } else {
+ pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_PLL_L1_EN;
+ pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
+ pm_ctrl_data &= ~PM_CTRL_CLK_SWH_L1;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L0S_EN;
+ pm_ctrl_data &= ~PM_CTRL_ASPM_L1_EN;
+ }
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0x29);
+ if (hw->adapter->link_speed == SPEED_10)
+ if (hw->nic_type == athr_l1d)
+ atl1c_write_phy_reg(hw, MII_DBG_ADDR, 0xB69D);
+ else
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB6DD);
+ else if (hw->adapter->link_speed == SPEED_100)
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0xB2DD);
+ else
+ atl1c_write_phy_reg(hw, MII_DBG_DATA, 0x96DD);
- pm_ctrl_data |= PM_CTRL_SERDES_BUDS_RX_L1_EN;
- pm_ctrl_data |= PM_CTRL_SERDES_L1_EN;
} else {
pm_ctrl_data &= ~PM_CTRL_SERDES_BUDS_RX_L1_EN;
pm_ctrl_data &= ~PM_CTRL_SERDES_L1_EN;
@@ -1302,6 +1393,10 @@ static void atl1c_setup_mac_ctrl(struct atl1c_adapter *adapter)
mac_ctrl_data |= MAC_CTRL_MC_ALL_EN;
mac_ctrl_data |= MAC_CTRL_SINGLE_PAUSE_EN;
+ if (hw->nic_type == athr_l1d || hw->nic_type == athr_l2c_b2) {
+ mac_ctrl_data |= MAC_CTRL_SPEED_MODE_SW;
+ mac_ctrl_data |= MAC_CTRL_HASH_ALG_CRC32;
+ }
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
}
@@ -2596,11 +2691,8 @@ static int __devinit atl1c_probe(struct pci_dev *pdev,
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
if (netif_msg_probe(adapter))
- dev_dbg(&pdev->dev,
- "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ dev_dbg(&pdev->dev, "mac address : %pM\n",
+ adapter->hw.mac_addr);
atl1c_hw_set_mac_addr(&adapter->hw);
INIT_WORK(&adapter->common_task, atl1c_common_task);
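Note: two generic-API conversions recur in this file and in the atl1e/atlx/atp/au1000/b44/bcm63xx hunks below: open-coded walks of dev->mc_list are replaced by netdev_for_each_mc_addr() (with netdev_mc_count()/netdev_mc_empty() for the counts), and hand-formatted MAC addresses are printed through the %pM extension. A minimal, hypothetical sketch of both, using the dev_mc_list/dmi_addr multicast API this tree still uses:

#include <linux/netdevice.h>

static void example_set_multi(struct net_device *netdev)
{
	struct dev_mc_list *mc_ptr;

	/* walk every multicast address registered on the device */
	netdev_for_each_mc_addr(mc_ptr, netdev) {
		/* program mc_ptr->dmi_addr into the hardware hash table */
	}

	/* %pM formats a 6-byte MAC address as xx:xx:xx:xx:xx:xx */
	netdev_dbg(netdev, "mac address : %pM\n", netdev->dev_addr);
}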
diff --git a/drivers/net/atl1e/atl1e_hw.c b/drivers/net/atl1e/atl1e_hw.c
index 4a770062011..76cc043def8 100644
--- a/drivers/net/atl1e/atl1e_hw.c
+++ b/drivers/net/atl1e/atl1e_hw.c
@@ -394,7 +394,6 @@ static int atl1e_phy_setup_autoneg_adv(struct atl1e_hw *hw)
int atl1e_phy_commit(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = hw->adapter;
- struct pci_dev *pdev = adapter->pdev;
int ret_val;
u16 phy_data;
@@ -415,12 +414,12 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
}
if (0 != (val & (MDIO_START | MDIO_BUSY))) {
- dev_err(&pdev->dev,
- "pcie linkdown at least for 25ms\n");
+ netdev_err(adapter->netdev,
+ "pcie linkdown at least for 25ms\n");
return ret_val;
}
- dev_err(&pdev->dev, "pcie linkup after %d ms\n", i);
+ netdev_err(adapter->netdev, "pcie linkup after %d ms\n", i);
}
return 0;
}
@@ -428,7 +427,6 @@ int atl1e_phy_commit(struct atl1e_hw *hw)
int atl1e_phy_init(struct atl1e_hw *hw)
{
struct atl1e_adapter *adapter = hw->adapter;
- struct pci_dev *pdev = adapter->pdev;
s32 ret_val;
u16 phy_val;
@@ -492,20 +490,22 @@ int atl1e_phy_init(struct atl1e_hw *hw)
/*Enable PHY LinkChange Interrupt */
ret_val = atl1e_write_phy_reg(hw, MII_INT_CTRL, 0xC00);
if (ret_val) {
- dev_err(&pdev->dev, "Error enable PHY linkChange Interrupt\n");
+ netdev_err(adapter->netdev,
+ "Error enable PHY linkChange Interrupt\n");
return ret_val;
}
/* setup AutoNeg parameters */
ret_val = atl1e_phy_setup_autoneg_adv(hw);
if (ret_val) {
- dev_err(&pdev->dev, "Error Setting up Auto-Negotiation\n");
+ netdev_err(adapter->netdev,
+ "Error Setting up Auto-Negotiation\n");
return ret_val;
}
/* SW.Reset & En-Auto-Neg to restart Auto-Neg*/
- dev_dbg(&pdev->dev, "Restarting Auto-Neg");
+ netdev_dbg(adapter->netdev, "Restarting Auto-Negotiation\n");
ret_val = atl1e_phy_commit(hw);
if (ret_val) {
- dev_err(&pdev->dev, "Error Resetting the phy");
+ netdev_err(adapter->netdev, "Error resetting the phy\n");
return ret_val;
}
@@ -559,9 +559,8 @@ int atl1e_reset_hw(struct atl1e_hw *hw)
}
if (timeout >= AT_HW_MAX_IDLE_DELAY) {
- dev_err(&pdev->dev,
- "MAC state machine cann't be idle since"
- " disabled for 10ms second\n");
+ netdev_err(adapter->netdev,
+ "MAC state machine can't be idle since disabled for 10ms second\n");
return AT_ERR_TIMEOUT;
}
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 08f8c0969e9..73302ae468a 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -35,7 +35,7 @@ char atl1e_driver_version[] = DRV_VERSION;
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id atl1e_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1e_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1E)},
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, 0x1066)},
/* required last entry */
@@ -164,11 +164,10 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
{
struct atl1e_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
int err = 0;
u16 speed, duplex, phy_data;
- /* MII_BMSR must read twise */
+ /* MII_BMSR must read twice */
atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
atl1e_read_phy_reg(hw, MII_BMSR, &phy_data);
if ((phy_data & BMSR_LSTATUS) == 0) {
@@ -195,12 +194,11 @@ static int atl1e_check_link(struct atl1e_adapter *adapter)
adapter->link_speed = speed;
adapter->link_duplex = duplex;
atl1e_setup_mac_ctrl(adapter);
- dev_info(&pdev->dev,
- "%s: %s NIC Link is Up<%d Mbps %s>\n",
- atl1e_driver_name, netdev->name,
- adapter->link_speed,
- adapter->link_duplex == FULL_DUPLEX ?
- "Full Duplex" : "Half Duplex");
+ netdev_info(netdev,
+ "NIC Link is Up <%d Mbps %s Duplex>\n",
+ adapter->link_speed,
+ adapter->link_duplex == FULL_DUPLEX ?
+ "Full" : "Half");
}
if (!netif_carrier_ok(netdev)) {
@@ -230,7 +228,6 @@ static void atl1e_link_chg_task(struct work_struct *work)
static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
- struct pci_dev *pdev = adapter->pdev;
u16 phy_data = 0;
u16 link_up = 0;
@@ -243,8 +240,7 @@ static void atl1e_link_chg_event(struct atl1e_adapter *adapter)
if (!link_up) {
if (netif_carrier_ok(netdev)) {
/* old link state: Up */
- dev_info(&pdev->dev, "%s: %s NIC Link is Down\n",
- atl1e_driver_name, netdev->name);
+ netdev_info(netdev, "NIC Link is Down\n");
adapter->link_speed = SPEED_0;
netif_stop_queue(netdev);
}
@@ -311,7 +307,7 @@ static void atl1e_set_multi(struct net_device *netdev)
AT_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
hash_value = atl1e_hash_mc_addr(hw, mc_ptr->dmi_addr);
atl1e_hash_set(hw, hash_value);
}
@@ -321,10 +317,9 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
u32 mac_ctrl_data = 0;
- dev_dbg(&pdev->dev, "atl1e_vlan_rx_register\n");
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
atl1e_irq_disable(adapter);
@@ -345,9 +340,7 @@ static void atl1e_vlan_rx_register(struct net_device *netdev,
static void atl1e_restore_vlan(struct atl1e_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
-
- dev_dbg(&pdev->dev, "atl1e_restore_vlan !");
+ netdev_dbg(adapter->netdev, "%s\n", __func__);
atl1e_vlan_rx_register(adapter->netdev, adapter->vlgrp);
}
/*
@@ -391,7 +384,7 @@ static int atl1e_change_mtu(struct net_device *netdev, int new_mtu)
if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
- dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
+ netdev_warn(adapter->netdev, "invalid MTU setting\n");
return -EINVAL;
}
/* set MTU */
@@ -438,7 +431,6 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
struct ifreq *ifr, int cmd)
{
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
struct mii_ioctl_data *data = if_mii(ifr);
unsigned long flags;
int retval = 0;
@@ -466,8 +458,8 @@ static int atl1e_mii_ioctl(struct net_device *netdev,
goto out;
}
- dev_dbg(&pdev->dev, "<atl1e_mii_ioctl> write %x %x",
- data->reg_num, data->val_in);
+ netdev_dbg(adapter->netdev, "<atl1e_mii_ioctl> write %x %x\n",
+ data->reg_num, data->val_in);
if (atl1e_write_phy_reg(&adapter->hw,
data->reg_num, data->val_in)) {
retval = -EIO;
@@ -602,7 +594,7 @@ static int __devinit atl1e_sw_init(struct atl1e_adapter *adapter)
hw->dmaw_dly_cnt = 4;
if (atl1e_alloc_queues(adapter)) {
- dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+ netdev_err(adapter->netdev, "Unable to allocate memory for queues\n");
return -ENOMEM;
}
@@ -800,8 +792,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
adapter->ring_size, &adapter->ring_dma);
if (adapter->ring_vir_addr == NULL) {
- dev_err(&pdev->dev, "pci_alloc_consistent failed, "
- "size = D%d", size);
+ netdev_err(adapter->netdev,
+ "pci_alloc_consistent failed, size = D%d\n", size);
return -ENOMEM;
}
@@ -817,7 +809,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
size = sizeof(struct atl1e_tx_buffer) * (tx_ring->count);
tx_ring->tx_buffer = kzalloc(size, GFP_KERNEL);
if (tx_ring->tx_buffer == NULL) {
- dev_err(&pdev->dev, "kzalloc failed , size = D%d", size);
+ netdev_err(adapter->netdev, "kzalloc failed, size = D%d\n",
+ size);
err = -ENOMEM;
goto failed;
}
@@ -852,8 +845,8 @@ static int atl1e_setup_ring_resources(struct atl1e_adapter *adapter)
}
if (unlikely(offset > adapter->ring_size)) {
- dev_err(&pdev->dev, "offset(%d) > ring size(%d) !!\n",
- offset, adapter->ring_size);
+ netdev_err(adapter->netdev, "offset(%d) > ring size(%d) !!\n",
+ offset, adapter->ring_size);
err = -1;
goto failed;
}
@@ -1077,7 +1070,6 @@ static void atl1e_setup_mac_ctrl(struct atl1e_adapter *adapter)
static int atl1e_configure(struct atl1e_adapter *adapter)
{
struct atl1e_hw *hw = &adapter->hw;
- struct pci_dev *pdev = adapter->pdev;
u32 intr_status_data = 0;
@@ -1130,8 +1122,8 @@ static int atl1e_configure(struct atl1e_adapter *adapter)
intr_status_data = AT_READ_REG(hw, REG_ISR);
if (unlikely((intr_status_data & ISR_PHY_LINKDOWN) != 0)) {
- dev_err(&pdev->dev, "atl1e_configure failed,"
- "PCIE phy link down\n");
+ netdev_err(adapter->netdev,
+ "atl1e_configure failed, PCIE phy link down\n");
return -1;
}
@@ -1262,7 +1254,6 @@ static irqreturn_t atl1e_intr(int irq, void *data)
{
struct net_device *netdev = data;
struct atl1e_adapter *adapter = netdev_priv(netdev);
- struct pci_dev *pdev = adapter->pdev;
struct atl1e_hw *hw = &adapter->hw;
int max_ints = AT_MAX_INT_WORK;
int handled = IRQ_NONE;
@@ -1285,8 +1276,8 @@ static irqreturn_t atl1e_intr(int irq, void *data)
handled = IRQ_HANDLED;
/* check if PCIE PHY Link down */
if (status & ISR_PHY_LINKDOWN) {
- dev_err(&pdev->dev,
- "pcie phy linkdown %x\n", status);
+ netdev_err(adapter->netdev,
+ "pcie phy linkdown %x\n", status);
if (netif_running(adapter->netdev)) {
/* reset MAC */
atl1e_irq_reset(adapter);
@@ -1297,9 +1288,9 @@ static irqreturn_t atl1e_intr(int irq, void *data)
/* check if DMA read/write error */
if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
- dev_err(&pdev->dev,
- "PCIE DMA RW error (status = 0x%x)\n",
- status);
+ netdev_err(adapter->netdev,
+ "PCIE DMA RW error (status = 0x%x)\n",
+ status);
atl1e_irq_reset(adapter);
schedule_work(&adapter->reset_task);
break;
@@ -1382,7 +1373,6 @@ static struct atl1e_rx_page *atl1e_get_rx_page(struct atl1e_adapter *adapter,
static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
int *work_done, int work_to_do)
{
- struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
struct atl1e_rx_ring *rx_ring = (struct atl1e_rx_ring *)
&adapter->rx_ring;
@@ -1404,11 +1394,10 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
rx_page->read_offset);
/* check sequence number */
if (prrs->seq_num != rx_page_desc[que].rx_nxseq) {
- dev_err(&pdev->dev,
- "rx sequence number"
- " error (rx=%d) (expect=%d)\n",
- prrs->seq_num,
- rx_page_desc[que].rx_nxseq);
+ netdev_err(netdev,
+ "rx sequence number error (rx=%d) (expect=%d)\n",
+ prrs->seq_num,
+ rx_page_desc[que].rx_nxseq);
rx_page_desc[que].rx_nxseq++;
/* just for debug use */
AT_WRITE_REG(&adapter->hw, REG_DEBUG_DATA0,
@@ -1424,9 +1413,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
RRS_ERR_DRIBBLE | RRS_ERR_CODE |
RRS_ERR_TRUNC)) {
/* hardware error, discard this packet*/
- dev_err(&pdev->dev,
- "rx packet desc error %x\n",
- *((u32 *)prrs + 1));
+ netdev_err(netdev,
+ "rx packet desc error %x\n",
+ *((u32 *)prrs + 1));
goto skip_pkt;
}
}
@@ -1435,8 +1424,8 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
RRS_PKT_SIZE_MASK) - 4; /* CRC */
skb = netdev_alloc_skb_ip_align(netdev, packet_size);
if (skb == NULL) {
- dev_warn(&pdev->dev, "%s: Memory squeeze,"
- "deferring packet.\n", netdev->name);
+ netdev_warn(netdev,
+ "Memory squeeze, deferring packet\n");
goto skip_pkt;
}
skb->dev = netdev;
@@ -1450,9 +1439,9 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
u16 vlan_tag = (prrs->vtag >> 4) |
((prrs->vtag & 7) << 13) |
((prrs->vtag & 8) << 9);
- dev_dbg(&pdev->dev,
- "RXD VLAN TAG<RRD>=0x%04x\n",
- prrs->vtag);
+ netdev_dbg(netdev,
+ "RXD VLAN TAG<RRD>=0x%04x\n",
+ prrs->vtag);
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
vlan_tag);
} else {
@@ -1500,7 +1489,6 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
{
struct atl1e_adapter *adapter =
container_of(napi, struct atl1e_adapter, napi);
- struct pci_dev *pdev = adapter->pdev;
u32 imr_data;
int work_done = 0;
@@ -1519,8 +1507,8 @@ quit_polling:
/* test debug */
if (test_bit(__AT_DOWN, &adapter->flags)) {
atomic_dec(&adapter->irq_sem);
- dev_err(&pdev->dev,
- "atl1e_clean is called when AT_DOWN\n");
+ netdev_err(adapter->netdev,
+ "atl1e_clean is called when AT_DOWN\n");
}
/* reenable RX intr */
/*atl1e_irq_enable(adapter); */
@@ -1618,7 +1606,6 @@ static u16 atl1e_cal_tdp_req(const struct sk_buff *skb)
static int atl1e_tso_csum(struct atl1e_adapter *adapter,
struct sk_buff *skb, struct atl1e_tpd_desc *tpd)
{
- struct pci_dev *pdev = adapter->pdev;
u8 hdr_len;
u32 real_len;
unsigned short offload_type;
@@ -1642,8 +1629,8 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter,
hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
if (unlikely(skb->len == hdr_len)) {
/* only xsum need */
- dev_warn(&pdev->dev,
- "IPV4 tso with zero data??\n");
+ netdev_warn(adapter->netdev,
+ "IPV4 tso with zero data??\n");
goto check_sum;
} else {
ip_hdr(skb)->check = 0;
@@ -1672,8 +1659,8 @@ check_sum:
cso = skb_transport_offset(skb);
if (unlikely(cso & 0x1)) {
- dev_err(&adapter->pdev->dev,
- "pay load offset should not ant event number\n");
+ netdev_err(adapter->netdev,
+ "payload offset should not ant event number\n");
return -1;
} else {
css = cso + skb->csum_offset;
@@ -1886,8 +1873,8 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
adapter->have_msi = true;
err = pci_enable_msi(adapter->pdev);
if (err) {
- dev_dbg(&pdev->dev,
- "Unable to allocate MSI interrupt Error: %d\n", err);
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
adapter->have_msi = false;
} else
netdev->irq = pdev->irq;
@@ -1898,13 +1885,13 @@ static int atl1e_request_irq(struct atl1e_adapter *adapter)
err = request_irq(adapter->pdev->irq, atl1e_intr, flags,
netdev->name, netdev);
if (err) {
- dev_dbg(&pdev->dev,
- "Unable to allocate interrupt Error: %d\n", err);
+ netdev_dbg(adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
return err;
}
- dev_dbg(&pdev->dev, "atl1e_request_irq OK\n");
+ netdev_dbg(adapter->netdev, "atl1e_request_irq OK\n");
return err;
}
@@ -2078,7 +2065,7 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
(atl1e_write_phy_reg(hw,
MII_ADVERTISE, mii_advertise_data) != 0) ||
(atl1e_phy_commit(hw)) != 0) {
- dev_dbg(&pdev->dev, "set phy register failed\n");
+ netdev_dbg(adapter->netdev, "set phy register failed\n");
goto wol_dis;
}
@@ -2100,17 +2087,14 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
}
if ((mii_bmsr_data & BMSR_LSTATUS) == 0)
- dev_dbg(&pdev->dev,
- "%s: Link may change"
- "when suspend\n",
- atl1e_driver_name);
+ netdev_dbg(adapter->netdev,
+ "Link may change when suspend\n");
}
wol_ctrl_data |= WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN;
/* only link up can wake up */
if (atl1e_write_phy_reg(hw, MII_INT_CTRL, 0x400) != 0) {
- dev_dbg(&pdev->dev, "%s: read write phy "
- "register failed.\n",
- atl1e_driver_name);
+ netdev_dbg(adapter->netdev,
+ "read write phy register failed\n");
goto wol_dis;
}
}
@@ -2131,9 +2115,8 @@ static int atl1e_suspend(struct pci_dev *pdev, pm_message_t state)
if (wufc & AT_WUFC_MAG)
mac_ctrl_data |= MAC_CTRL_BC_EN;
- dev_dbg(&pdev->dev,
- "%s: suspend MAC=0x%x\n",
- atl1e_driver_name, mac_ctrl_data);
+ netdev_dbg(adapter->netdev, "suspend MAC=0x%x\n",
+ mac_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
@@ -2183,8 +2166,8 @@ static int atl1e_resume(struct pci_dev *pdev)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "ATL1e: Cannot enable PCI"
- " device from suspend\n");
+ netdev_err(adapter->netdev,
+ "Cannot enable PCI device from suspend\n");
return err;
}
@@ -2315,7 +2298,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
err = atl1e_init_netdev(netdev, pdev);
if (err) {
- dev_err(&pdev->dev, "init netdevice failed\n");
+ netdev_err(netdev, "init netdevice failed\n");
goto err_init_netdev;
}
adapter = netdev_priv(netdev);
@@ -2326,7 +2309,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
adapter->hw.hw_addr = pci_iomap(pdev, BAR_0, 0);
if (!adapter->hw.hw_addr) {
err = -EIO;
- dev_err(&pdev->dev, "cannot map device registers\n");
+ netdev_err(netdev, "cannot map device registers\n");
goto err_ioremap;
}
netdev->base_addr = (unsigned long)adapter->hw.hw_addr;
@@ -2356,7 +2339,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
/* setup the private structure */
err = atl1e_sw_init(adapter);
if (err) {
- dev_err(&pdev->dev, "net device private data init failed\n");
+ netdev_err(netdev, "net device private data init failed\n");
goto err_sw_init;
}
@@ -2372,22 +2355,19 @@ static int __devinit atl1e_probe(struct pci_dev *pdev,
if (atl1e_read_mac_addr(&adapter->hw) != 0) {
err = -EIO;
- dev_err(&pdev->dev, "get mac address failed\n");
+ netdev_err(netdev, "get mac address failed\n");
goto err_eeprom;
}
memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
- dev_dbg(&pdev->dev, "mac address : %02x-%02x-%02x-%02x-%02x-%02x\n",
- adapter->hw.mac_addr[0], adapter->hw.mac_addr[1],
- adapter->hw.mac_addr[2], adapter->hw.mac_addr[3],
- adapter->hw.mac_addr[4], adapter->hw.mac_addr[5]);
+ netdev_dbg(netdev, "mac address : %pM\n", adapter->hw.mac_addr);
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
err = register_netdev(netdev);
if (err) {
- dev_err(&pdev->dev, "register netdevice failed\n");
+ netdev_err(netdev, "register netdevice failed\n");
goto err_register;
}
@@ -2488,8 +2468,8 @@ static pci_ers_result_t atl1e_io_slot_reset(struct pci_dev *pdev)
struct atl1e_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- dev_err(&pdev->dev,
- "ATL1e: Cannot re-enable PCI device after reset.\n");
+ netdev_err(adapter->netdev,
+ "Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
@@ -2517,8 +2497,8 @@ static void atl1e_io_resume(struct pci_dev *pdev)
if (netif_running(netdev)) {
if (atl1e_up(adapter)) {
- dev_err(&pdev->dev,
- "ATL1e: can't bring device back up after reset\n");
+ netdev_err(adapter->netdev,
+ "can't bring device back up after reset\n");
return;
}
}
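Note: almost every atl1e hunk above replaces dev_err()/dev_dbg()/dev_info() on &pdev->dev with the netdev_err()/netdev_dbg()/netdev_info() wrappers, which prefix the message with the driver and interface identification themselves; that is why the "%s: %s ..." boilerplate and the now-unused pdev locals are dropped. An illustrative example (not taken from the patch):

#include <linux/netdevice.h>

static void example_report_link(struct net_device *netdev, int speed, bool full_duplex)
{
	/* the helper prepends the device and interface name automatically */
	netdev_info(netdev, "NIC Link is Up <%d Mbps %s Duplex>\n",
		    speed, full_duplex ? "Full" : "Half");
}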
diff --git a/drivers/net/atl1e/atl1e_param.c b/drivers/net/atl1e/atl1e_param.c
index b3be59fd3fb..0ce60b6e7ef 100644
--- a/drivers/net/atl1e/atl1e_param.c
+++ b/drivers/net/atl1e/atl1e_param.c
@@ -116,7 +116,7 @@ struct atl1e_option {
} arg;
};
-static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct pci_dev *pdev)
+static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt, struct atl1e_adapter *adapter)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
@@ -127,16 +127,19 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
case enable_option:
switch (*value) {
case OPTION_ENABLED:
- dev_info(&pdev->dev, "%s Enabled\n", opt->name);
+ netdev_info(adapter->netdev,
+ "%s Enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
- dev_info(&pdev->dev, "%s Disabled\n", opt->name);
+ netdev_info(adapter->netdev,
+ "%s Disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
- dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value);
+ netdev_info(adapter->netdev, "%s set to %i\n",
+ opt->name, *value);
return 0;
}
break;
@@ -148,8 +151,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
- dev_info(&pdev->dev, "%s\n",
- ent->str);
+ netdev_info(adapter->netdev,
+ "%s\n", ent->str);
return 0;
}
}
@@ -159,8 +162,8 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
BUG();
}
- dev_info(&pdev->dev, "Invalid %s specified (%i) %s\n",
- opt->name, *value, opt->err);
+ netdev_info(adapter->netdev, "Invalid %s specified (%i) %s\n",
+ opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
@@ -176,11 +179,13 @@ static int __devinit atl1e_validate_option(int *value, struct atl1e_option *opt,
*/
void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
{
- struct pci_dev *pdev = adapter->pdev;
int bd = adapter->bd_number;
+
if (bd >= ATL1E_MAX_NIC) {
- dev_notice(&pdev->dev, "no configuration for board #%i\n", bd);
- dev_notice(&pdev->dev, "Using defaults for all values\n");
+ netdev_notice(adapter->netdev,
+ "no configuration for board #%i\n", bd);
+ netdev_notice(adapter->netdev,
+ "Using defaults for all values\n");
}
{ /* Transmit Ring Size */
@@ -196,7 +201,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_tx_desc_cnt > bd) {
val = tx_desc_cnt[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->tx_ring.count = (u16) val & 0xFFFC;
} else
adapter->tx_ring.count = (u16)opt.def;
@@ -215,7 +220,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_rx_mem_size > bd) {
val = rx_mem_size[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->rx_ring.page_size = (u32)val * 1024;
} else {
adapter->rx_ring.page_size = (u32)opt.def * 1024;
@@ -235,7 +240,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_int_mod_timer > bd) {
val = int_mod_timer[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->hw.imt = (u16) val;
} else
adapter->hw.imt = (u16)(opt.def);
@@ -254,7 +259,7 @@ void __devinit atl1e_check_options(struct atl1e_adapter *adapter)
int val;
if (num_media_type > bd) {
val = media_type[bd];
- atl1e_validate_option(&val, &opt, pdev);
+ atl1e_validate_option(&val, &opt, adapter);
adapter->hw.media_type = (u16) val;
} else
adapter->hw.media_type = (u16)(opt.def);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index b6cf3263127..9ba547069db 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -232,7 +232,7 @@ static void __devinit atl1_check_options(struct atl1_adapter *adapter)
/*
* atl1_pci_tbl - PCI Device ID Table
*/
-static const struct pci_device_id atl1_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)},
/* required last entry */
{0,}
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ec52529394a..7061d7108f0 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -63,7 +63,7 @@ MODULE_VERSION(ATL2_DRV_VERSION);
/*
* atl2_pci_tbl - PCI Device ID Table
*/
-static struct pci_device_id atl2_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(atl2_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L2)},
/* required last entry */
{0,}
@@ -157,7 +157,7 @@ static void atl2_set_multi(struct net_device *netdev)
ATL2_WRITE_REG_ARRAY(hw, REG_RX_HASH_TABLE, 1, 0);
/* comoute mc addresses' hash value ,and put it into hash table */
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
hash_value = atl2_hash_mc_addr(hw, mc_ptr->dmi_addr);
atl2_hash_set(hw, hash_value);
}
diff --git a/drivers/net/atlx/atlx.c b/drivers/net/atlx/atlx.c
index 3dc01421567..72f3306352e 100644
--- a/drivers/net/atlx/atlx.c
+++ b/drivers/net/atlx/atlx.c
@@ -144,7 +144,7 @@ static void atlx_set_multi(struct net_device *netdev)
iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
/* compute mc addresses' hash value ,and put it into hash table */
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
atlx_hash_set(hw, hash_value);
}
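Note: the PCI ID tables in atl1c, atl1e, atl1, atl2 and b44 are all switched to DEFINE_PCI_DEVICE_TABLE(), which declares the table as a const struct pci_device_id array (placed in __devinitconst in this era) instead of a plain writable one. A generic usage sketch with hypothetical IDs:

#include <linux/module.h>
#include <linux/pci.h>

#define EXAMPLE_VENDOR_ID	0x1969	/* hypothetical vendor ID */
#define EXAMPLE_DEVICE_ID	0x1063	/* hypothetical device ID */

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
	{ 0 }	/* required last entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);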
diff --git a/drivers/net/atp.c b/drivers/net/atp.c
index 2f8261c9614..6ad16205dc1 100644
--- a/drivers/net/atp.c
+++ b/drivers/net/atp.c
@@ -861,7 +861,7 @@ static void set_rx_mode_8002(struct net_device *dev)
struct net_local *lp = netdev_priv(dev);
long ioaddr = dev->base_addr;
- if (dev->mc_count > 0 || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
+ if (!netdev_mc_empty(dev) || (dev->flags & (IFF_ALLMULTI|IFF_PROMISC)))
lp->addr_mode = CMR2h_PROMISC;
else
lp->addr_mode = CMR2h_Normal;
@@ -877,7 +877,8 @@ static void set_rx_mode_8012(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
new_mode = CMR2h_PROMISC;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
new_mode = CMR2h_Normal;
@@ -885,9 +886,7 @@ static void set_rx_mode_8012(struct net_device *dev)
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next)
- {
+ netdev_for_each_mc_addr(mclist, dev) {
int filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
}
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 6e5a68ecde0..4da191b87b0 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -952,21 +952,18 @@ static void au1000_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
aup->mac->control |= MAC_PROMISCUOUS;
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > MULTICAST_FILTER_LIMIT) {
+ netdev_mc_count(dev) > MULTICAST_FILTER_LIMIT) {
aup->mac->control |= MAC_PASS_ALL_MULTI;
aup->mac->control &= ~MAC_PROMISCUOUS;
printk(KERN_INFO "%s: Pass all multicast\n", dev->name);
} else {
- int i;
struct dev_mc_list *mclist;
u32 mc_filter[2]; /* Multicast hash filter */
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev)
set_bit(ether_crc(ETH_ALEN, mclist->dmi_addr)>>26,
(long *)mc_filter);
- }
aup->mac->multi_hash_high = mc_filter[1];
aup->mac->multi_hash_low = mc_filter[0];
aup->mac->control &= ~MAC_PROMISCUOUS;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 4869adb6958..332c6035628 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -10,6 +10,8 @@
* Distribute under GPL.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -34,7 +36,6 @@
#include "b44.h"
#define DRV_MODULE_NAME "b44"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "2.0"
#define B44_DEF_MSG_ENABLE \
@@ -102,7 +103,7 @@ MODULE_PARM_DESC(b44_debug, "B44 bitmapped debugging message enable value");
#ifdef CONFIG_B44_PCI
-static const struct pci_device_id b44_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(b44_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B0) },
{ PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_BCM4401B1) },
@@ -189,11 +190,10 @@ static int b44_wait_bit(struct b44 *bp, unsigned long reg,
udelay(10);
}
if (i == timeout) {
- printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x of register "
- "%lx to %s.\n",
- bp->dev->name,
- bit, reg,
- (clear ? "clear" : "set"));
+ if (net_ratelimit())
+ netdev_err(bp->dev, "BUG! Timeout waiting for bit %08x of register %lx to %s\n",
+ bit, reg, clear ? "clear" : "set");
+
return -ENODEV;
}
return 0;
@@ -333,13 +333,12 @@ static int b44_phy_reset(struct b44 *bp)
err = b44_readphy(bp, MII_BMCR, &val);
if (!err) {
if (val & BMCR_RESET) {
- printk(KERN_ERR PFX "%s: PHY Reset would not complete.\n",
- bp->dev->name);
+ netdev_err(bp->dev, "PHY Reset would not complete\n");
err = -ENODEV;
}
}
- return 0;
+ return err;
}
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
@@ -413,7 +412,7 @@ static void b44_wap54g10_workaround(struct b44 *bp)
}
return;
error:
- printk(KERN_WARNING PFX "PHY: cannot reset MII transceiver isolate bit.\n");
+ pr_warning("PHY: cannot reset MII transceiver isolate bit\n");
}
#else
static inline void b44_wap54g10_workaround(struct b44 *bp)
@@ -506,18 +505,15 @@ static void b44_stats_update(struct b44 *bp)
static void b44_link_report(struct b44 *bp)
{
if (!netif_carrier_ok(bp->dev)) {
- printk(KERN_INFO PFX "%s: Link is down.\n", bp->dev->name);
+ netdev_info(bp->dev, "Link is down\n");
} else {
- printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
- bp->dev->name,
- (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
- (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
-
- printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
- "%s for RX.\n",
- bp->dev->name,
- (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
- (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
+ netdev_info(bp->dev, "Link is up at %d Mbps, %s duplex\n",
+ (bp->flags & B44_FLAG_100_BASE_T) ? 100 : 10,
+ (bp->flags & B44_FLAG_FULL_DUPLEX) ? "full" : "half");
+
+ netdev_info(bp->dev, "Flow control is %s for TX and %s for RX\n",
+ (bp->flags & B44_FLAG_TX_PAUSE) ? "on" : "off",
+ (bp->flags & B44_FLAG_RX_PAUSE) ? "on" : "off");
}
}
@@ -576,11 +572,9 @@ static void b44_check_phy(struct b44 *bp)
}
if (bmsr & BMSR_RFAULT)
- printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
- bp->dev->name);
+ netdev_warn(bp->dev, "Remote fault detected in PHY\n");
if (bmsr & BMSR_JCD)
- printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
- bp->dev->name);
+ netdev_warn(bp->dev, "Jabber detected in PHY\n");
}
}
@@ -815,7 +809,7 @@ static int b44_rx(struct b44 *bp, int budget)
struct sk_buff *copy_skb;
b44_recycle_rx(bp, cons, bp->rx_prod);
- copy_skb = dev_alloc_skb(len + 2);
+ copy_skb = netdev_alloc_skb(bp->dev, len + 2);
if (copy_skb == NULL)
goto drop_it_no_recycle;
@@ -901,7 +895,7 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
handled = 1;
if (unlikely(!netif_running(dev))) {
- printk(KERN_INFO "%s: late interrupt.\n", dev->name);
+ netdev_info(dev, "late interrupt\n");
goto irq_ack;
}
@@ -926,8 +920,7 @@ static void b44_tx_timeout(struct net_device *dev)
{
struct b44 *bp = netdev_priv(dev);
- printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
- dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
spin_lock_irq(&bp->lock);
@@ -956,8 +949,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* This is a hard error, log it. */
if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
netif_stop_queue(dev);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
goto err_out;
}
@@ -1333,7 +1325,7 @@ static void b44_halt(struct b44 *bp)
/* reset PHY */
b44_phy_reset(bp);
/* power down PHY */
- printk(KERN_INFO PFX "%s: powering down PHY\n", bp->dev->name);
+ netdev_info(bp->dev, "powering down PHY\n");
bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);
/* now reset the chip, but without enabling the MAC&PHY
* part of it. This has to be done _after_ we shut down the PHY */
@@ -1524,7 +1516,7 @@ static void b44_setup_pseudo_magicp(struct b44 *bp)
pwol_pattern = kzalloc(B44_PATTERN_SIZE, GFP_KERNEL);
if (!pwol_pattern) {
- printk(KERN_ERR PFX "Memory not available for WOL\n");
+ pr_err("Memory not available for WOL\n");
return;
}
@@ -1691,10 +1683,12 @@ static int __b44_load_mcast(struct b44 *bp, struct net_device *dev)
struct dev_mc_list *mclist;
int i, num_ents;
- num_ents = min_t(int, dev->mc_count, B44_MCAST_TABLE_SIZE);
- mclist = dev->mc_list;
- for (i = 0; mclist && i < num_ents; i++, mclist = mclist->next) {
- __b44_cam_write(bp, mclist->dmi_addr, i + 1);
+ num_ents = min_t(int, netdev_mc_count(dev), B44_MCAST_TABLE_SIZE);
+ i = 0;
+ netdev_for_each_mc_addr(mclist, dev) {
+ if (i == num_ents)
+ break;
+ __b44_cam_write(bp, mclist->dmi_addr, i++ + 1);
}
return i+1;
}
@@ -1716,7 +1710,7 @@ static void __b44_set_rx_mode(struct net_device *dev)
__b44_set_mac_addr(bp);
if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > B44_MCAST_TABLE_SIZE))
+ (netdev_mc_count(dev) > B44_MCAST_TABLE_SIZE))
val |= RXCONFIG_ALLMULTI;
else
i = __b44_load_mcast(bp, dev);
@@ -2097,7 +2091,7 @@ static int __devinit b44_get_invariants(struct b44 *bp)
memcpy(bp->dev->dev_addr, addr, 6);
if (!is_valid_ether_addr(&bp->dev->dev_addr[0])){
- printk(KERN_ERR PFX "Invalid MAC address found in EEPROM\n");
+ pr_err("Invalid MAC address found in EEPROM\n");
return -EINVAL;
}
@@ -2142,12 +2136,12 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
instance++;
if (b44_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
dev = alloc_etherdev(sizeof(*bp));
if (!dev) {
- dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
+ dev_err(sdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto out;
}
@@ -2186,13 +2180,13 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
err = ssb_dma_set_mask(sdev, DMA_BIT_MASK(30));
if (err) {
dev_err(sdev->dev,
- "Required 30BIT DMA mask unsupported by the system.\n");
+ "Required 30BIT DMA mask unsupported by the system\n");
goto err_out_powerdown;
}
err = b44_get_invariants(bp);
if (err) {
dev_err(sdev->dev,
- "Problem fetching invariants of chip, aborting.\n");
+ "Problem fetching invariants of chip, aborting\n");
goto err_out_powerdown;
}
@@ -2212,7 +2206,7 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
err = register_netdev(dev);
if (err) {
- dev_err(sdev->dev, "Cannot register net device, aborting.\n");
+ dev_err(sdev->dev, "Cannot register net device, aborting\n");
goto err_out_powerdown;
}
@@ -2223,8 +2217,12 @@ static int __devinit b44_init_one(struct ssb_device *sdev,
*/
b44_chip_reset(bp, B44_CHIP_RESET_FULL);
- printk(KERN_INFO "%s: Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
- dev->name, dev->dev_addr);
+ /* do a phy reset to test if there is an active phy */
+ if (b44_phy_reset(bp) < 0)
+ bp->phy_addr = B44_PHY_ADDR_NO_PHY;
+
+ netdev_info(dev, "Broadcom 44xx/47xx 10/100BaseT Ethernet %pM\n",
+ dev->dev_addr);
return 0;
@@ -2297,7 +2295,7 @@ static int b44_resume(struct ssb_device *sdev)
rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
if (rc) {
- printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
+ netdev_err(dev, "request_irq failed\n");
return rc;
}
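Note: b44 drops its private PFX string in favour of a pr_fmt() definition. Because pr_fmt() is consumed by the pr_*() macros in printk.h, it must be defined before the first #include, exactly as the hunk at the top of the file does. A standalone, hypothetical sketch:

/* must come before any include that pulls in printk.h */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
	pr_info("loaded\n");	/* logged with the module name prepended */
	return 0;
}
module_init(example_init);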
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index 0bd47d32ec4..8cdcab7655c 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -619,7 +619,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
/* only 3 perfect match registers left, first one is used for
* own mac address */
- if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
+ if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
val |= ENET_RXCFG_ALLMCAST_MASK;
else
val &= ~ENET_RXCFG_ALLMCAST_MASK;
@@ -631,16 +631,13 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
return;
}
- for (i = 0, mc_list = dev->mc_list;
- (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
- i++, mc_list = mc_list->next) {
+ i = 0;
+ netdev_for_each_mc_addr(mc_list, dev) {
u8 *dmi_addr;
u32 tmp;
- /* filter non ethernet address */
- if (mc_list->dmi_addrlen != 6)
- continue;
-
+ if (i == 3)
+ break;
/* update perfect match registers */
dmi_addr = mc_list->dmi_addr;
tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
@@ -649,7 +646,7 @@ static void bcm_enet_set_multicast_list(struct net_device *dev)
tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
tmp |= ENET_PMH_DATAVALID_MASK;
- enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
+ enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
}
for (; i < 3; i++) {
diff --git a/drivers/net/benet/Kconfig b/drivers/net/benet/Kconfig
index fdb6e81a437..1a41a49bb61 100644
--- a/drivers/net/benet/Kconfig
+++ b/drivers/net/benet/Kconfig
@@ -1,6 +1,6 @@
config BE2NET
- tristate "ServerEngines' 10Gbps NIC - BladeEngine 2"
+ tristate "ServerEngines' 10Gbps NIC - BladeEngine"
depends on PCI && INET
help
This driver implements the NIC functionality for ServerEngines'
- 10Gbps network adapter - BladeEngine 2.
+ 10Gbps network adapter - BladeEngine.
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 5bc74590c73..be81fb2d10f 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -32,28 +32,26 @@
#include "be_hw.h"
-#define DRV_VER "2.101.346u"
+#define DRV_VER "2.102.147u"
#define DRV_NAME "be2net"
#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
#define OC_NAME "Emulex OneConnect 10Gbps NIC"
#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
-#define DRV_DESC BE_NAME "Driver"
+#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
#define BE_VENDOR_ID 0x19a2
#define BE_DEVICE_ID1 0x211
#define BE_DEVICE_ID2 0x221
#define OC_DEVICE_ID1 0x700
-#define OC_DEVICE_ID2 0x701
-#define OC_DEVICE_ID3 0x710
+#define OC_DEVICE_ID2 0x710
static inline char *nic_name(struct pci_dev *pdev)
{
switch (pdev->device) {
case OC_DEVICE_ID1:
- case OC_DEVICE_ID2:
return OC_NAME;
- case OC_DEVICE_ID3:
+ case OC_DEVICE_ID2:
return OC_NAME1;
case BE_DEVICE_ID2:
return BE3_NAME;
@@ -153,6 +151,7 @@ struct be_eq_obj {
struct be_mcc_obj {
struct be_queue_info q;
struct be_queue_info cq;
+ bool rearm_cq;
};
struct be_drvr_stats {
@@ -165,6 +164,7 @@ struct be_drvr_stats {
ulong be_tx_jiffies;
u64 be_tx_bytes;
u64 be_tx_bytes_prev;
+ u64 be_tx_pkts;
u32 be_tx_rate;
u32 cache_barrier[16];
@@ -176,6 +176,7 @@ struct be_drvr_stats {
ulong be_rx_jiffies;
u64 be_rx_bytes;
u64 be_rx_bytes_prev;
+ u64 be_rx_pkts;
u32 be_rx_rate;
/* number of non ether type II frames dropped where
* frame len > length field of Mac Hdr */
@@ -252,7 +253,8 @@ struct be_adapter {
bool rx_post_starved; /* Zero rx frags have been posted to BE */
struct vlan_group *vlan_grp;
- u16 num_vlans;
+ u16 vlans_added;
+ u16 max_vlans; /* Number of vlans supported */
u8 vlan_tag[VLAN_GROUP_ARRAY_LEN];
struct be_dma_mem mc_cmd_mem;
@@ -266,6 +268,7 @@ struct be_adapter {
u32 if_handle; /* Used to configure filtering */
u32 pmac_id; /* MAC addr handle used by BE card */
+ bool eeh_err;
bool link_up;
u32 port_num;
bool promiscuous;
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 006cb2efcd2..4b1f80519ca 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -104,10 +104,26 @@ static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
return NULL;
}
-int be_process_mcc(struct be_adapter *adapter)
+void be_async_mcc_enable(struct be_adapter *adapter)
+{
+ spin_lock_bh(&adapter->mcc_cq_lock);
+
+ be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
+ adapter->mcc_obj.rearm_cq = true;
+
+ spin_unlock_bh(&adapter->mcc_cq_lock);
+}
+
+void be_async_mcc_disable(struct be_adapter *adapter)
+{
+ adapter->mcc_obj.rearm_cq = false;
+}
+
+int be_process_mcc(struct be_adapter *adapter, int *status)
{
struct be_mcc_compl *compl;
- int num = 0, status = 0;
+ int num = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
spin_lock_bh(&adapter->mcc_cq_lock);
while ((compl = be_mcc_compl_get(adapter))) {
@@ -119,31 +135,31 @@ int be_process_mcc(struct be_adapter *adapter)
be_async_link_state_process(adapter,
(struct be_async_event_link_state *) compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- status = be_mcc_compl_process(adapter, compl);
- atomic_dec(&adapter->mcc_obj.q.used);
+ *status = be_mcc_compl_process(adapter, compl);
+ atomic_dec(&mcc_obj->q.used);
}
be_mcc_compl_use(compl);
num++;
}
- if (num)
- be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, num);
-
spin_unlock_bh(&adapter->mcc_cq_lock);
- return status;
+ return num;
}
/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout 120000 /* 12s timeout */
- int i, status;
+ int i, num, status = 0;
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+
for (i = 0; i < mcc_timeout; i++) {
- status = be_process_mcc(adapter);
- if (status)
- return status;
+ num = be_process_mcc(adapter, &status);
+ if (num)
+ be_cq_notify(adapter, mcc_obj->cq.id,
+ mcc_obj->rearm_cq, num);
- if (atomic_read(&adapter->mcc_obj.q.used) == 0)
+ if (atomic_read(&mcc_obj->q.used) == 0)
break;
udelay(100);
}
@@ -151,7 +167,7 @@ static int be_mcc_wait_compl(struct be_adapter *adapter)
dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
return -1;
}
- return 0;
+ return status;
}
/* Notify MCC requests and wait for completion */
@@ -167,7 +183,14 @@ static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
u32 ready;
do {
- ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
+ ready = ioread32(db);
+ if (ready == 0xffffffff) {
+ dev_err(&adapter->pdev->dev,
+ "pci slot disconnected\n");
+ return -1;
+ }
+
+ ready &= MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
@@ -198,6 +221,11 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
struct be_mcc_mailbox *mbox = mbox_mem->va;
struct be_mcc_compl *compl = &mbox->compl;
+ /* wait for ready to be set */
+ status = be_mbox_db_ready_wait(adapter, db);
+ if (status != 0)
+ return status;
+
val |= MPU_MAILBOX_DB_HI_MASK;
/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
@@ -397,6 +425,9 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
u8 *wrb;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = (u8 *)wrb_from_mbox(adapter);
@@ -769,6 +800,9 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
u8 subsys = 0, opcode = 0;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = wrb_from_mbox(adapter);
@@ -857,6 +891,9 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
struct be_cmd_req_if_destroy *req;
int status;
+ if (adapter->eeh_err)
+ return -EIO;
+
spin_lock(&adapter->mbox_lock);
wrb = wrb_from_mbox(adapter);
@@ -1097,8 +1134,7 @@ err:
* (mc == NULL) => multicast promiscous
*/
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
- struct dev_mc_list *mc_list, u32 mc_count,
- struct be_dma_mem *mem)
+ struct net_device *netdev, struct be_dma_mem *mem)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcast_mac_config *req = mem->va;
@@ -1125,13 +1161,14 @@ int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
req->interface_id = if_id;
- if (mc_list) {
+ if (netdev) {
int i;
struct dev_mc_list *mc;
- req->num_mac = cpu_to_le16(mc_count);
+ req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
- for (mc = mc_list, i = 0; mc; mc = mc->next, i++)
+ i = 0;
+ netdev_for_each_mc_addr(mc, netdev)
memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
} else {
req->promiscuous = 1;
@@ -1375,7 +1412,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 flash_type, u32 flash_opcode, u32 buf_size)
{
struct be_mcc_wrb *wrb;
- struct be_cmd_write_flashrom *req = cmd->va;
+ struct be_cmd_write_flashrom *req;
struct be_sge *sge;
int status;
@@ -1409,7 +1446,8 @@ err:
return status;
}
-int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
+int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -1430,9 +1468,9 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc)
be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
- req->params.op_type = cpu_to_le32(FLASHROM_TYPE_REDBOOT);
+ req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
- req->params.offset = 0x3FFFC;
+ req->params.offset = offset;
req->params.data_buf_size = 0x4;
status = be_mcc_notify_wait(adapter);
@@ -1608,3 +1646,33 @@ err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
+
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd)
+{
+ struct be_mcc_wrb *wrb;
+ struct be_cmd_req_seeprom_read *req;
+ struct be_sge *sge;
+ int status;
+
+ spin_lock_bh(&adapter->mcc_lock);
+
+ wrb = wrb_from_mccq(adapter);
+ req = nonemb_cmd->va;
+ sge = nonembedded_sgl(wrb);
+
+ be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
+ OPCODE_COMMON_SEEPROM_READ);
+
+ be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
+
+ sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
+ sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->len = cpu_to_le32(nonemb_cmd->size);
+
+ status = be_mcc_notify_wait(adapter);
+
+ spin_unlock_bh(&adapter->mcc_lock);
+ return status;
+}
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 13b33c84108..cce61f9a371 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -124,6 +124,7 @@ struct be_mcc_mailbox {
#define OPCODE_COMMON_CQ_CREATE 12
#define OPCODE_COMMON_EQ_CREATE 13
#define OPCODE_COMMON_MCC_CREATE 21
+#define OPCODE_COMMON_SEEPROM_READ 30
#define OPCODE_COMMON_NTWK_RX_FILTER 34
#define OPCODE_COMMON_GET_FW_VERSION 35
#define OPCODE_COMMON_SET_FLOW_CONTROL 36
@@ -855,6 +856,19 @@ struct be_cmd_resp_ddrdma_test {
u8 rcv_buff[4096];
};
+/*********************** SEEPROM Read ***********************/
+
+#define BE_READ_SEEPROM_LEN 1024
+struct be_cmd_req_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 rsvd0[BE_READ_SEEPROM_LEN];
+};
+
+struct be_cmd_resp_seeprom_read {
+ struct be_cmd_req_hdr hdr;
+ u8 seeprom_data[BE_READ_SEEPROM_LEN];
+};
+
extern int be_pci_fnum_get(struct be_adapter *adapter);
extern int be_cmd_POST(struct be_adapter *adapter);
extern int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
@@ -898,8 +912,7 @@ extern int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id,
extern int be_cmd_promiscuous_config(struct be_adapter *adapter,
u8 port_num, bool en);
extern int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
- struct dev_mc_list *mc_list, u32 mc_count,
- struct be_dma_mem *mem);
+ struct net_device *netdev, struct be_dma_mem *mem);
extern int be_cmd_set_flow_control(struct be_adapter *adapter,
u32 tx_fc, u32 rx_fc);
extern int be_cmd_get_flow_control(struct be_adapter *adapter,
@@ -907,7 +920,7 @@ extern int be_cmd_get_flow_control(struct be_adapter *adapter,
extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
u32 *port_num, u32 *cap);
extern int be_cmd_reset_function(struct be_adapter *adapter);
-extern int be_process_mcc(struct be_adapter *adapter);
+extern int be_process_mcc(struct be_adapter *adapter, int *status);
extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
u8 port_num, u8 beacon, u8 status, u8 state);
extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
@@ -917,15 +930,21 @@ extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
extern int be_cmd_write_flashrom(struct be_adapter *adapter,
struct be_dma_mem *cmd, u32 flash_oper,
u32 flash_opcode, u32 buf_size);
-extern int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc);
+extern int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
+ int offset);
extern int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd);
extern int be_cmd_fw_init(struct be_adapter *adapter);
extern int be_cmd_fw_clean(struct be_adapter *adapter);
+extern void be_async_mcc_enable(struct be_adapter *adapter);
+extern void be_async_mcc_disable(struct be_adapter *adapter);
extern int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
u32 loopback_type, u32 pkt_size,
u32 num_pkts, u64 pattern);
extern int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
u32 byte_cnt, struct be_dma_mem *cmd);
+extern int be_cmd_get_seeprom_data(struct be_adapter *adapter,
+ struct be_dma_mem *nonemb_cmd);
extern int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
+
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 5d001c4deac..9560d48944a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -112,6 +112,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
"PHY Loopback test",
"External Loopback test",
"DDR DMA test"
+ "Link test"
};
#define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests)
@@ -529,6 +530,9 @@ static void
be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
+ bool link_up;
+ u8 mac_speed = 0;
+ u16 qos_link_speed = 0;
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
@@ -545,12 +549,20 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
&data[2]) != 0) {
test->flags |= ETH_TEST_FL_FAILED;
}
+ }
- data[3] = be_test_ddr_dma(adapter);
- if (data[3] != 0)
- test->flags |= ETH_TEST_FL_FAILED;
+ if (be_test_ddr_dma(adapter) != 0) {
+ data[3] = 1;
+ test->flags |= ETH_TEST_FL_FAILED;
}
+ if (be_cmd_link_status_query(adapter, &link_up, &mac_speed,
+ &qos_link_speed) != 0) {
+ test->flags |= ETH_TEST_FL_FAILED;
+ data[4] = -1;
+ } else if (mac_speed) {
+ data[4] = 1;
+ }
}
static int
@@ -567,12 +579,57 @@ be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
return be_load_fw(adapter, file_name);
}
+static int
+be_get_eeprom_len(struct net_device *netdev)
+{
+ return BE_READ_SEEPROM_LEN;
+}
+
+static int
+be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
+ uint8_t *data)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ struct be_dma_mem eeprom_cmd;
+ struct be_cmd_resp_seeprom_read *resp;
+ int status;
+
+ if (!eeprom->len)
+ return -EINVAL;
+
+ eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
+
+ memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
+ eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
+ eeprom_cmd.va = pci_alloc_consistent(adapter->pdev, eeprom_cmd.size,
+ &eeprom_cmd.dma);
+
+ if (!eeprom_cmd.va) {
+ dev_err(&adapter->pdev->dev,
+ "Memory allocation failure. Could not read eeprom\n");
+ return -ENOMEM;
+ }
+
+ status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);
+
+ if (!status) {
+ resp = (struct be_cmd_resp_seeprom_read *) eeprom_cmd.va;
+ memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len);
+ }
+ pci_free_consistent(adapter->pdev, eeprom_cmd.size, eeprom_cmd.va,
+ eeprom_cmd.dma);
+
+ return status;
+}
+
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
.get_wol = be_get_wol,
.set_wol = be_set_wol,
.get_link = ethtool_op_get_link,
+ .get_eeprom_len = be_get_eeprom_len,
+ .get_eeprom = be_read_eeprom,
.get_coalesce = be_get_coalesce,
.set_coalesce = be_set_coalesce,
.get_ringparam = be_get_ringparam,
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index e2b3beffd49..5ffb149181a 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -99,6 +99,63 @@
/* Number of entries posted */
#define DB_MCCQ_NUM_POSTED_SHIFT (16) /* bits 16 - 29 */
+/* Flashrom related descriptors */
+#define IMAGE_TYPE_FIRMWARE 160
+#define IMAGE_TYPE_BOOTCODE 224
+#define IMAGE_TYPE_OPTIONROM 32
+
+#define NUM_FLASHDIR_ENTRIES 32
+
+#define IMG_TYPE_ISCSI_ACTIVE 0
+#define IMG_TYPE_REDBOOT 1
+#define IMG_TYPE_BIOS 2
+#define IMG_TYPE_PXE_BIOS 3
+#define IMG_TYPE_FCOE_BIOS 8
+#define IMG_TYPE_ISCSI_BACKUP 9
+#define IMG_TYPE_FCOE_FW_ACTIVE 10
+#define IMG_TYPE_FCOE_FW_BACKUP 11
+#define IMG_TYPE_NCSI_BITFILE 13
+#define IMG_TYPE_NCSI_8051 14
+
+#define FLASHROM_OPER_FLASH 1
+#define FLASHROM_OPER_SAVE 2
+#define FLASHROM_OPER_REPORT 4
+
+#define FLASH_IMAGE_MAX_SIZE_g2 (1310720) /* Max firmware image sz */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g2 (262144) /* Max OPTION ROM img sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g2 (262144) /* Max Redboot image sz */
+#define FLASH_IMAGE_MAX_SIZE_g3 (2097152) /* Max fw image size */
+#define FLASH_BIOS_IMAGE_MAX_SIZE_g3 (524288) /* Max OPTION ROM img sz */
+#define FLASH_REDBOOT_IMAGE_MAX_SIZE_g3 (1048576) /* Max Redboot image sz */
+
+#define FLASH_NCSI_MAGIC (0x16032009)
+#define FLASH_NCSI_DISABLED (0)
+#define FLASH_NCSI_ENABLED (1)
+
+#define FLASH_NCSI_BITFILE_HDR_OFFSET (0x600000)
+
+/* Offsets for components on Flash. */
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g2 (1048576)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g2 (2359296)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g2 (3670016)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g2 (4980736)
+#define FLASH_iSCSI_BIOS_START_g2 (7340032)
+#define FLASH_PXE_BIOS_START_g2 (7864320)
+#define FLASH_FCoE_BIOS_START_g2 (524288)
+#define FLASH_REDBOOT_START_g2 (0)
+
+#define FLASH_iSCSI_PRIMARY_IMAGE_START_g3 (2097152)
+#define FLASH_iSCSI_BACKUP_IMAGE_START_g3 (4194304)
+#define FLASH_FCoE_PRIMARY_IMAGE_START_g3 (6291456)
+#define FLASH_FCoE_BACKUP_IMAGE_START_g3 (8388608)
+#define FLASH_iSCSI_BIOS_START_g3 (12582912)
+#define FLASH_PXE_BIOS_START_g3 (13107200)
+#define FLASH_FCoE_BIOS_START_g3 (13631488)
+#define FLASH_REDBOOT_START_g3 (262144)
+
+
+
+
/*
* BE descriptors: host memory data structures whose formats
* are hardwired in BE silicon.
@@ -107,6 +164,7 @@
#define EQ_ENTRY_VALID_MASK 0x1 /* bit 0 */
#define EQ_ENTRY_RES_ID_MASK 0xFFFF /* bits 16 - 31 */
#define EQ_ENTRY_RES_ID_SHIFT 16
+
struct be_eq_entry {
u32 evt;
};
@@ -221,41 +279,6 @@ struct be_eth_rx_compl {
u32 dw[4];
};
-/* Flashrom related descriptors */
-#define IMAGE_TYPE_FIRMWARE 160
-#define IMAGE_TYPE_BOOTCODE 224
-#define IMAGE_TYPE_OPTIONROM 32
-
-#define NUM_FLASHDIR_ENTRIES 32
-
-#define FLASHROM_TYPE_ISCSI_ACTIVE 0
-#define FLASHROM_TYPE_REDBOOT 1
-#define FLASHROM_TYPE_BIOS 2
-#define FLASHROM_TYPE_PXE_BIOS 3
-#define FLASHROM_TYPE_FCOE_BIOS 8
-#define FLASHROM_TYPE_ISCSI_BACKUP 9
-#define FLASHROM_TYPE_FCOE_FW_ACTIVE 10
-#define FLASHROM_TYPE_FCOE_FW_BACKUP 11
-
-#define FLASHROM_OPER_FLASH 1
-#define FLASHROM_OPER_SAVE 2
-#define FLASHROM_OPER_REPORT 4
-
-#define FLASH_IMAGE_MAX_SIZE (1310720) /* Max firmware image size */
-#define FLASH_BIOS_IMAGE_MAX_SIZE (262144) /* Max OPTION ROM image sz */
-#define FLASH_REDBOOT_IMAGE_MAX_SIZE (262144) /* Max redboot image sz */
-
-/* Offsets for components on Flash. */
-#define FLASH_iSCSI_PRIMARY_IMAGE_START (1048576)
-#define FLASH_iSCSI_BACKUP_IMAGE_START (2359296)
-#define FLASH_FCoE_PRIMARY_IMAGE_START (3670016)
-#define FLASH_FCoE_BACKUP_IMAGE_START (4980736)
-#define FLASH_iSCSI_BIOS_START (7340032)
-#define FLASH_PXE_BIOS_START (7864320)
-#define FLASH_FCoE_BIOS_START (524288)
-#define FLASH_REDBOOT_START (32768)
-#define FLASH_REDBOOT_ISM_START (0)
-
struct controller_id {
u32 vendor;
u32 device;
@@ -263,7 +286,20 @@ struct controller_id {
u32 subdevice;
};
-struct flash_file_hdr {
+struct flash_comp {
+ unsigned long offset;
+ int optype;
+ int size;
+};
+
+struct image_hdr {
+ u32 imageid;
+ u32 imageoffset;
+ u32 imagelength;
+ u32 image_checksum;
+ u8 image_version[32];
+};
+struct flash_file_hdr_g2 {
u8 sign[32];
u32 cksum;
u32 antidote;
@@ -275,6 +311,17 @@ struct flash_file_hdr {
u8 build[24];
};
+struct flash_file_hdr_g3 {
+ u8 sign[52];
+ u8 ufi_version[4];
+ u32 file_len;
+ u32 cksum;
+ u32 antidote;
+ u32 num_imgs;
+ u8 build[24];
+ u8 rsvd[32];
+};
+
struct flash_section_hdr {
u32 format_rev;
u32 cksum;
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 626b76c0ebc..a703ed8e24f 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2009 ServerEngines
+ * Copyright (C) 2005 - 2010 ServerEngines
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -34,7 +34,6 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
- { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -69,6 +68,9 @@ static void be_intr_set(struct be_adapter *adapter, bool enable)
u32 reg = ioread32(addr);
u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
+ if (adapter->eeh_err)
+ return;
+
if (!enabled && enable)
reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
else if (enabled && !enable)
@@ -100,6 +102,10 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_EQ_REARM_SHIFT;
if (clear_int)
@@ -113,6 +119,10 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_CQ_RING_ID_MASK;
+
+ if (adapter->eeh_err)
+ return;
+
if (arm)
val |= 1 << DB_CQ_REARM_SHIFT;
val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
@@ -149,13 +159,10 @@ void netdev_stats_update(struct be_adapter *adapter)
struct net_device_stats *dev_stats = &adapter->netdev->stats;
struct be_erx_stats *erx_stats = &hw_stats->erx;
- dev_stats->rx_packets = port_stats->rx_total_frames;
- dev_stats->tx_packets = port_stats->tx_unicastframes +
- port_stats->tx_multicastframes + port_stats->tx_broadcastframes;
- dev_stats->rx_bytes = (u64) port_stats->rx_bytes_msd << 32 |
- (u64) port_stats->rx_bytes_lsd;
- dev_stats->tx_bytes = (u64) port_stats->tx_bytes_msd << 32 |
- (u64) port_stats->tx_bytes_lsd;
+ dev_stats->rx_packets = drvr_stats(adapter)->be_rx_pkts;
+ dev_stats->tx_packets = drvr_stats(adapter)->be_tx_pkts;
+ dev_stats->rx_bytes = drvr_stats(adapter)->be_rx_bytes;
+ dev_stats->tx_bytes = drvr_stats(adapter)->be_tx_bytes;
/* bad pkts received */
dev_stats->rx_errors = port_stats->rx_crc_errors +
@@ -312,12 +319,13 @@ static void be_tx_rate_update(struct be_adapter *adapter)
}
static void be_tx_stats_update(struct be_adapter *adapter,
- u32 wrb_cnt, u32 copied, bool stopped)
+ u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
stats->be_tx_reqs++;
stats->be_tx_wrbs += wrb_cnt;
stats->be_tx_bytes += copied;
+ stats->be_tx_pkts += (gso_segs ? gso_segs : 1);
if (stopped)
stats->be_tx_stops++;
}
@@ -462,7 +470,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
be_txq_notify(adapter, txq->id, wrb_cnt);
- be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
+ be_tx_stats_update(adapter, wrb_cnt, copied,
+ skb_shinfo(skb)->gso_segs, stopped);
} else {
txq->head = start;
dev_kfree_skb_any(skb);
@@ -474,10 +483,12 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (new_mtu < BE_MIN_MTU ||
- new_mtu > BE_MAX_JUMBO_FRAME_SIZE) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
+ (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
"MTU must be between %d and %d bytes\n",
- BE_MIN_MTU, BE_MAX_JUMBO_FRAME_SIZE);
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
@@ -487,17 +498,16 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
}
/*
- * if there are BE_NUM_VLANS_SUPPORTED or lesser number of VLANS configured,
- * program them in BE. If more than BE_NUM_VLANS_SUPPORTED are configured,
- * set the BE in promiscuous VLAN mode.
+ * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
+ * If the user configures more, place BE in vlan promiscuous mode.
*/
static int be_vid_config(struct be_adapter *adapter)
{
u16 vtag[BE_NUM_VLANS_SUPPORTED];
u16 ntags = 0, i;
- int status;
+ int status = 0;
- if (adapter->num_vlans <= BE_NUM_VLANS_SUPPORTED) {
+ if (adapter->vlans_added <= adapter->max_vlans) {
/* Construct VLAN Table to give to HW */
for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) {
if (adapter->vlan_tag[i]) {
@@ -531,21 +541,21 @@ static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans++;
adapter->vlan_tag[vid] = 1;
-
- be_vid_config(adapter);
+ adapter->vlans_added++;
+ if (adapter->vlans_added <= (adapter->max_vlans + 1))
+ be_vid_config(adapter);
}
static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
struct be_adapter *adapter = netdev_priv(netdev);
- adapter->num_vlans--;
adapter->vlan_tag[vid] = 0;
-
vlan_group_set_device(adapter->vlan_grp, vid, NULL);
- be_vid_config(adapter);
+ adapter->vlans_added--;
+ if (adapter->vlans_added <= adapter->max_vlans)
+ be_vid_config(adapter);
}
static void be_set_multicast_list(struct net_device *netdev)
@@ -565,14 +575,15 @@ static void be_set_multicast_list(struct net_device *netdev)
}
/* Enable multicast promisc if num configured exceeds what we support */
- if (netdev->flags & IFF_ALLMULTI || netdev->mc_count > BE_MAX_MC) {
- be_cmd_multicast_set(adapter, adapter->if_handle, NULL, 0,
+ if (netdev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(netdev) > BE_MAX_MC) {
+ be_cmd_multicast_set(adapter, adapter->if_handle, NULL,
&adapter->mc_cmd_mem);
goto done;
}
- be_cmd_multicast_set(adapter, adapter->if_handle, netdev->mc_list,
- netdev->mc_count, &adapter->mc_cmd_mem);
+ be_cmd_multicast_set(adapter, adapter->if_handle, netdev,
+ &adapter->mc_cmd_mem);
done:
return;
}
@@ -607,6 +618,7 @@ static void be_rx_stats_update(struct be_adapter *adapter,
stats->be_rx_compl++;
stats->be_rx_frags += numfrags;
stats->be_rx_bytes += pktsize;
+ stats->be_rx_pkts++;
}
static inline bool do_pkt_csum(struct be_eth_rx_compl *rxcp, bool cso)
@@ -634,9 +646,11 @@ get_rx_page_info(struct be_adapter *adapter, u16 frag_idx)
rx_page_info = &adapter->rx_obj.page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
- if (rx_page_info->last_page_user)
+ if (rx_page_info->last_page_user) {
pci_unmap_page(adapter->pdev, pci_unmap_addr(rx_page_info, bus),
adapter->big_page_size, PCI_DMA_FROMDEVICE);
+ rx_page_info->last_page_user = false;
+ }
atomic_dec(&rxq->used);
return rx_page_info;
@@ -666,17 +680,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
* indicated by rxcp.
*/
static void skb_fill_rx_data(struct be_adapter *adapter,
- struct sk_buff *skb, struct be_eth_rx_compl *rxcp)
+ struct sk_buff *skb, struct be_eth_rx_compl *rxcp,
+ u16 num_rcvd)
{
struct be_queue_info *rxq = &adapter->rx_obj.q;
struct be_rx_page_info *page_info;
- u16 rxq_idx, i, num_rcvd, j;
+ u16 rxq_idx, i, j;
u32 pktsize, hdr_len, curr_frag_len, size;
u8 *start;
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
pktsize = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
- num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
page_info = get_rx_page_info(adapter, rxq_idx);
@@ -704,7 +718,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
skb->data_len = curr_frag_len - hdr_len;
skb->tail += hdr_len;
}
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
if (pktsize <= rx_frag_size) {
BUG_ON(num_rcvd != 1);
@@ -737,7 +751,7 @@ static void skb_fill_rx_data(struct be_adapter *adapter,
skb->len += curr_frag_len;
skb->data_len += curr_frag_len;
- memset(page_info, 0, sizeof(*page_info));
+ page_info->page = NULL;
}
BUG_ON(j > MAX_SKB_FRAGS);
@@ -752,25 +766,23 @@ static void be_rx_compl_process(struct be_adapter *adapter,
{
struct sk_buff *skb;
u32 vlanf, vid;
+ u16 num_rcvd;
u8 vtm;
- vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
- vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
-
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->cap & 0x400) && !vtm)
- vlanf = 0;
+ num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
+ /* Is it a flush compl that has no data */
+ if (unlikely(num_rcvd == 0))
+ return;
skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
- if (!skb) {
+ if (unlikely(!skb)) {
if (net_ratelimit())
dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
be_rx_compl_discard(adapter, rxcp);
return;
}
- skb_fill_rx_data(adapter, skb, rxcp);
+ skb_fill_rx_data(adapter, skb, rxcp, num_rcvd);
if (do_pkt_csum(rxcp, adapter->rx_csum))
skb->ip_summed = CHECKSUM_NONE;
@@ -781,8 +793,16 @@ static void be_rx_compl_process(struct be_adapter *adapter,
skb->protocol = eth_type_trans(skb, adapter->netdev);
skb->dev = adapter->netdev;
- if (vlanf) {
- if (!adapter->vlan_grp || adapter->num_vlans == 0) {
+ vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
+ vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
+
+ /* vlanf could be wrongly set in some cards.
+ * ignore if vtm is not set */
+ if ((adapter->cap & 0x400) && !vtm)
+ vlanf = 0;
+
+ if (unlikely(vlanf)) {
+ if (!adapter->vlan_grp || adapter->vlans_added == 0) {
kfree_skb(skb);
return;
}
@@ -809,6 +829,10 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
u8 vtm;
num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
+ /* Is it a flush compl that has no data */
+ if (unlikely(num_rcvd == 0))
+ return;
+
pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -862,7 +886,7 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
vid = be16_to_cpu(vid);
- if (!adapter->vlan_grp || adapter->num_vlans == 0)
+ if (!adapter->vlan_grp || adapter->vlans_added == 0)
return;
vlan_gro_frags(&eq_obj->napi, adapter->vlan_grp, vid);
@@ -1104,6 +1128,9 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_eth_tx_compl *txcp;
u16 end_idx, cmpl = 0, timeo = 0;
+ struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
+ struct sk_buff *sent_skb;
+ bool dummy_wrb;
/* Wait for a max of 200ms for all the tx-completions to arrive. */
do {
@@ -1127,6 +1154,15 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
if (atomic_read(&txq->used))
dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
atomic_read(&txq->used));
+
+ /* free posted tx for which compls will never arrive */
+ while (atomic_read(&txq->used)) {
+ sent_skb = sent_skbs[txq->tail];
+ end_idx = txq->tail;
+ index_adv(&end_idx,
+ wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len);
+ be_tx_compl_process(adapter, end_idx);
+ }
}
static void be_mcc_queues_destroy(struct be_adapter *adapter)
@@ -1259,6 +1295,11 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
q = &adapter->rx_obj.q;
if (q->created) {
be_cmd_q_destroy(adapter, q, QTYPE_RXQ);
+
+ /* After the rxq is invalidated, wait for a grace time
+ * of 1ms for all dma to end and the flush compl to arrive
+ */
+ mdelay(1);
be_rx_q_clean(adapter);
}
be_queue_free(adapter, q);
@@ -1428,23 +1469,38 @@ int be_poll_rx(struct napi_struct *napi, int budget)
return work_done;
}
-void be_process_tx(struct be_adapter *adapter)
+/* As TX and MCC share the same EQ check for both TX and MCC completions.
+ * For TX/MCC we don't honour budget; consume everything
+ */
+static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
{
+ struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
+ struct be_adapter *adapter =
+ container_of(tx_eq, struct be_adapter, tx_eq);
struct be_queue_info *txq = &adapter->tx_obj.q;
struct be_queue_info *tx_cq = &adapter->tx_obj.cq;
struct be_eth_tx_compl *txcp;
- u32 num_cmpl = 0;
+ int tx_compl = 0, mcc_compl, status = 0;
u16 end_idx;
while ((txcp = be_tx_compl_get(tx_cq))) {
end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp);
+ wrb_index, txcp);
be_tx_compl_process(adapter, end_idx);
- num_cmpl++;
+ tx_compl++;
}
- if (num_cmpl) {
- be_cq_notify(adapter, tx_cq->id, true, num_cmpl);
+ mcc_compl = be_process_mcc(adapter, &status);
+
+ napi_complete(napi);
+
+ if (mcc_compl) {
+ struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
+ be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
+ }
+
+ if (tx_compl) {
+ be_cq_notify(adapter, adapter->tx_obj.cq.id, true, tx_compl);
/* As Tx wrbs have been freed up, wake up netdev queue if
* it was stopped due to lack of tx wrbs.
@@ -1455,24 +1511,8 @@ void be_process_tx(struct be_adapter *adapter)
}
drvr_stats(adapter)->be_tx_events++;
- drvr_stats(adapter)->be_tx_compl += num_cmpl;
+ drvr_stats(adapter)->be_tx_compl += tx_compl;
}
-}
-
-/* As TX and MCC share the same EQ check for both TX and MCC completions.
- * For TX/MCC we don't honour budget; consume everything
- */
-static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
-{
- struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
- struct be_adapter *adapter =
- container_of(tx_eq, struct be_adapter, tx_eq);
-
- napi_complete(napi);
-
- be_process_tx(adapter);
-
- be_process_mcc(adapter);
return 1;
}
@@ -1641,6 +1681,9 @@ static int be_open(struct net_device *netdev)
/* Rx compl queue may be in unarmed state; rearm it */
be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
+ /* Now that interrupts are on we can process async mcc */
+ be_async_mcc_enable(adapter);
+
status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
&link_speed);
if (status)
@@ -1766,6 +1809,8 @@ static int be_close(struct net_device *netdev)
cancel_delayed_work_sync(&adapter->work);
+ be_async_mcc_disable(adapter);
+
netif_stop_queue(netdev);
netif_carrier_off(netdev);
adapter->link_up = false;
@@ -1798,15 +1843,19 @@ char flash_cookie[2][16] = {"*** SE FLAS",
"H DIRECTORY *** "};
static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p)
+ const u8 *p, u32 img_start, int image_size,
+ int hdr_size)
{
u32 crc_offset;
u8 flashed_crc[4];
int status;
- crc_offset = FLASH_REDBOOT_START + FLASH_REDBOOT_IMAGE_MAX_SIZE - 4
- + sizeof(struct flash_file_hdr) - 32*1024;
+
+ crc_offset = hdr_size + img_start + image_size - 4;
+
p += crc_offset;
- status = be_cmd_get_flash_crc(adapter, flashed_crc);
+
+ status = be_cmd_get_flash_crc(adapter, flashed_crc,
+ (img_start + image_size - 4));
if (status) {
dev_err(&adapter->pdev->dev,
"could not get crc from flash, not flashing redboot\n");
@@ -1818,102 +1867,124 @@ static bool be_flash_redboot(struct be_adapter *adapter,
return false;
else
return true;
-
}
-static int be_flash_image(struct be_adapter *adapter,
+static int be_flash_data(struct be_adapter *adapter,
const struct firmware *fw,
- struct be_dma_mem *flash_cmd, u32 flash_type)
+ struct be_dma_mem *flash_cmd, int num_of_images)
+
{
- int status;
- u32 flash_op, image_offset = 0, total_bytes, image_size = 0;
+ int status = 0, i, filehdr_size = 0;
+ u32 total_bytes = 0, flash_op;
int num_bytes;
const u8 *p = fw->data;
struct be_cmd_write_flashrom *req = flash_cmd->va;
-
- switch (flash_type) {
- case FLASHROM_TYPE_ISCSI_ACTIVE:
- image_offset = FLASH_iSCSI_PRIMARY_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_ISCSI_BACKUP:
- image_offset = FLASH_iSCSI_BACKUP_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_FW_ACTIVE:
- image_offset = FLASH_FCoE_PRIMARY_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_FW_BACKUP:
- image_offset = FLASH_FCoE_BACKUP_IMAGE_START;
- image_size = FLASH_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_BIOS:
- image_offset = FLASH_iSCSI_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_FCOE_BIOS:
- image_offset = FLASH_FCoE_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_PXE_BIOS:
- image_offset = FLASH_PXE_BIOS_START;
- image_size = FLASH_BIOS_IMAGE_MAX_SIZE;
- break;
- case FLASHROM_TYPE_REDBOOT:
- if (!be_flash_redboot(adapter, fw->data))
- return 0;
- image_offset = FLASH_REDBOOT_ISM_START;
- image_size = FLASH_REDBOOT_IMAGE_MAX_SIZE;
- break;
- default:
- return 0;
+ struct flash_comp *pflashcomp;
+
+ struct flash_comp gen3_flash_types[8] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g3},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g3},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g3}
+ };
+ struct flash_comp gen2_flash_types[8] = {
+ { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
+ FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
+ FLASH_BIOS_IMAGE_MAX_SIZE_g2},
+ { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
+ FLASH_IMAGE_MAX_SIZE_g2},
+ { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
+ FLASH_IMAGE_MAX_SIZE_g2}
+ };
+
+ if (adapter->generation == BE_GEN3) {
+ pflashcomp = gen3_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g3);
+ } else {
+ pflashcomp = gen2_flash_types;
+ filehdr_size = sizeof(struct flash_file_hdr_g2);
}
-
- p += sizeof(struct flash_file_hdr) + image_offset;
- if (p + image_size > fw->data + fw->size)
+ for (i = 0; i < 8; i++) {
+ if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
+ (!be_flash_redboot(adapter, fw->data,
+ pflashcomp[i].offset, pflashcomp[i].size,
+ filehdr_size)))
+ continue;
+ p = fw->data;
+ p += filehdr_size + pflashcomp[i].offset
+ + (num_of_images * sizeof(struct image_hdr));
+ if (p + pflashcomp[i].size > fw->data + fw->size)
return -1;
-
- total_bytes = image_size;
-
- while (total_bytes) {
- if (total_bytes > 32*1024)
- num_bytes = 32*1024;
- else
- num_bytes = total_bytes;
- total_bytes -= num_bytes;
-
- if (!total_bytes)
- flash_op = FLASHROM_OPER_FLASH;
- else
- flash_op = FLASHROM_OPER_SAVE;
- memcpy(req->params.data_buf, p, num_bytes);
- p += num_bytes;
- status = be_cmd_write_flashrom(adapter, flash_cmd,
- flash_type, flash_op, num_bytes);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed. type/op %d/%d\n",
- flash_type, flash_op);
- return -1;
+ total_bytes = pflashcomp[i].size;
+ while (total_bytes) {
+ if (total_bytes > 32*1024)
+ num_bytes = 32*1024;
+ else
+ num_bytes = total_bytes;
+ total_bytes -= num_bytes;
+
+ if (!total_bytes)
+ flash_op = FLASHROM_OPER_FLASH;
+ else
+ flash_op = FLASHROM_OPER_SAVE;
+ memcpy(req->params.data_buf, p, num_bytes);
+ p += num_bytes;
+ status = be_cmd_write_flashrom(adapter, flash_cmd,
+ pflashcomp[i].optype, flash_op, num_bytes);
+ if (status) {
+ dev_err(&adapter->pdev->dev,
+ "cmd to write to flash rom failed.\n");
+ return -1;
+ }
+ yield();
}
- yield();
}
-
return 0;
}
+static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
+{
+ if (fhdr == NULL)
+ return 0;
+ if (fhdr->build[0] == '3')
+ return BE_GEN3;
+ else if (fhdr->build[0] == '2')
+ return BE_GEN2;
+ else
+ return 0;
+}
+
int be_load_fw(struct be_adapter *adapter, u8 *func)
{
char fw_file[ETHTOOL_FLASH_MAX_FILENAME];
const struct firmware *fw;
- struct flash_file_hdr *fhdr;
- struct flash_section_info *fsec = NULL;
+ struct flash_file_hdr_g2 *fhdr;
+ struct flash_file_hdr_g3 *fhdr3;
+ struct image_hdr *img_hdr_ptr = NULL;
struct be_dma_mem flash_cmd;
- int status;
+ int status, i = 0;
const u8 *p;
- bool entry_found = false;
- int flash_type;
char fw_ver[FW_VER_LEN];
char fw_cfg;
@@ -1931,34 +2002,9 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
goto fw_exit;
p = fw->data;
- fhdr = (struct flash_file_hdr *) p;
- if (memcmp(fhdr->sign, FW_FILE_HDR_SIGN, strlen(FW_FILE_HDR_SIGN))) {
- dev_err(&adapter->pdev->dev,
- "Firmware(%s) load error (signature did not match)\n",
- fw_file);
- status = -1;
- goto fw_exit;
- }
-
+ fhdr = (struct flash_file_hdr_g2 *) p;
dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
- p += sizeof(struct flash_file_hdr);
- while (p < (fw->data + fw->size)) {
- fsec = (struct flash_section_info *)p;
- if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) {
- entry_found = true;
- break;
- }
- p += 32;
- }
-
- if (!entry_found) {
- status = -1;
- dev_err(&adapter->pdev->dev,
- "Flash cookie not found in firmware image\n");
- goto fw_exit;
- }
-
flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
flash_cmd.va = pci_alloc_consistent(adapter->pdev, flash_cmd.size,
&flash_cmd.dma);
@@ -1969,12 +2015,26 @@ int be_load_fw(struct be_adapter *adapter, u8 *func)
goto fw_exit;
}
- for (flash_type = FLASHROM_TYPE_ISCSI_ACTIVE;
- flash_type <= FLASHROM_TYPE_FCOE_FW_BACKUP; flash_type++) {
- status = be_flash_image(adapter, fw, &flash_cmd,
- flash_type);
- if (status)
- break;
+ if ((adapter->generation == BE_GEN3) &&
+ (get_ufigen_type(fhdr) == BE_GEN3)) {
+ fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
+ for (i = 0; i < fhdr3->num_imgs; i++) {
+ img_hdr_ptr = (struct image_hdr *) (fw->data +
+ (sizeof(struct flash_file_hdr_g3) +
+ i * sizeof(struct image_hdr)));
+ if (img_hdr_ptr->imageid == 1) {
+ status = be_flash_data(adapter, fw,
+ &flash_cmd, fhdr3->num_imgs);
+ }
+
+ }
+ } else if ((adapter->generation == BE_GEN2) &&
+ (get_ufigen_type(fhdr) == BE_GEN2)) {
+ status = be_flash_data(adapter, fw, &flash_cmd, 0);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "UFI and Interface are not compatible for flashing\n");
+ status = -1;
}
pci_free_consistent(adapter->pdev, flash_cmd.size, flash_cmd.va,
@@ -2136,6 +2196,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
+ pci_save_state(adapter->pdev);
return 0;
free_mbox:
@@ -2222,6 +2283,11 @@ static int be_get_config(struct be_adapter *adapter)
memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
+ if (adapter->cap & 0x400)
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
+ else
+ adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
+
return 0;
}
@@ -2394,13 +2460,123 @@ static int be_resume(struct pci_dev *pdev)
return 0;
}
+/*
+ * An FLR will stop BE from DMAing any data.
+ */
+static void be_shutdown(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ netif_device_detach(netdev);
+
+ be_cmd_reset_function(adapter);
+
+ if (adapter->wol)
+ be_setup_wol(adapter, true);
+
+ pci_disable_device(pdev);
+
+ return;
+}
+
+static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_err(&adapter->pdev->dev, "EEH error detected\n");
+
+ adapter->eeh_err = true;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ rtnl_lock();
+ be_close(netdev);
+ rtnl_unlock();
+ }
+ be_clear(adapter);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
+{
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ int status;
+
+ dev_info(&adapter->pdev->dev, "EEH reset\n");
+ adapter->eeh_err = false;
+
+ status = pci_enable_device(pdev);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ pci_set_master(pdev);
+ pci_set_power_state(pdev, 0);
+ pci_restore_state(pdev);
+
+ /* Check if card is ok and fw is ready */
+ status = be_cmd_POST(adapter);
+ if (status)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static void be_eeh_resume(struct pci_dev *pdev)
+{
+ int status = 0;
+ struct be_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+
+ dev_info(&adapter->pdev->dev, "EEH resume\n");
+
+ pci_save_state(pdev);
+
+ /* tell fw we're ready to fire cmds */
+ status = be_cmd_fw_init(adapter);
+ if (status)
+ goto err;
+
+ status = be_setup(adapter);
+ if (status)
+ goto err;
+
+ if (netif_running(netdev)) {
+ status = be_open(netdev);
+ if (status)
+ goto err;
+ }
+ netif_device_attach(netdev);
+ return;
+err:
+ dev_err(&adapter->pdev->dev, "EEH resume failed\n");
+ return;
+}
+
+static struct pci_error_handlers be_eeh_handlers = {
+ .error_detected = be_eeh_err_detected,
+ .slot_reset = be_eeh_reset,
+ .resume = be_eeh_resume,
+};
+
static struct pci_driver be_driver = {
.name = DRV_NAME,
.id_table = be_dev_ids,
.probe = be_probe,
.remove = be_remove,
.suspend = be_suspend,
- .resume = be_resume
+ .resume = be_resume,
+ .shutdown = be_shutdown,
+ .err_handler = &be_eeh_handlers
};
static int __init be_init_module(void)
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index 0b23bc4f56c..587f93cf03f 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -812,16 +812,14 @@ static void bfin_mac_timeout(struct net_device *dev)
static void bfin_mac_multicast_hash(struct net_device *dev)
{
u32 emac_hashhi, emac_hashlo;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
emac_hashhi = emac_hashlo = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* skip non-multicast addresses */
if (!(*addrs & 1))
@@ -862,7 +860,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= PAM;
bfin_write_EMAC_OPMODE(sysctl);
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* set up multicast hash table */
sysctl = bfin_read_EMAC_OPMODE();
sysctl |= HM;
diff --git a/drivers/net/bmac.c b/drivers/net/bmac.c
index 9b587c34419..119468e7632 100644
--- a/drivers/net/bmac.c
+++ b/drivers/net/bmac.c
@@ -973,7 +973,7 @@ static void bmac_set_multicast(struct net_device *dev)
{
struct dev_mc_list *dmi;
struct bmac_data *bp = netdev_priv(dev);
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
unsigned short rx_cfg;
int i;
@@ -982,7 +982,7 @@ static void bmac_set_multicast(struct net_device *dev)
XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs));
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff;
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
@@ -1000,7 +1000,7 @@ static void bmac_set_multicast(struct net_device *dev)
rx_cfg = bmac_rx_on(dev, 0, 0);
XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg));
} else {
- for (dmi=dev->mc_list; dmi!=NULL; dmi=dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
bmac_addhash(bp, dmi->dmi_addr);
bmac_update_hash_table_mask(dev, bp);
rx_cfg = bmac_rx_on(dev, 1, 0);
@@ -1015,13 +1015,13 @@ static void bmac_set_multicast(struct net_device *dev)
static void bmac_set_multicast(struct net_device *dev)
{
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
int i;
unsigned short rx_cfg;
u32 crc;
- if((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
bmwrite(dev, BHASH0, 0xffff);
bmwrite(dev, BHASH1, 0xffff);
bmwrite(dev, BHASH2, 0xffff);
@@ -1039,9 +1039,8 @@ static void bmac_set_multicast(struct net_device *dev)
for(i = 0; i < 4; i++) hash_table[i] = 0;
- for(i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if(!(*addrs & 1))
continue;
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 65df1de447e..381887ba677 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver.
*
- * Copyright (c) 2004-2009 Broadcom Corporation
+ * Copyright (c) 2004-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -9,6 +9,7 @@
* Written by: Michael Chan (mchan@broadcom.com)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -48,7 +49,6 @@
#include <linux/cache.h>
#include <linux/firmware.h>
#include <linux/log2.h>
-#include <linux/list.h>
#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
#define BCM_CNIC 1
@@ -58,14 +58,13 @@
#include "bnx2_fw.h"
#define DRV_MODULE_NAME "bnx2"
-#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "2.0.3"
-#define DRV_MODULE_RELDATE "Dec 03, 2009"
-#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j3.fw"
+#define DRV_MODULE_VERSION "2.0.8"
+#define DRV_MODULE_RELDATE "Feb 15, 2010"
+#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
-#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j3.fw"
-#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
-#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
+#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
+#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
+#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
#define RUN_AT(x) (jiffies + (x))
@@ -980,33 +979,27 @@ bnx2_report_link(struct bnx2 *bp)
{
if (bp->link_up) {
netif_carrier_on(bp->dev);
- printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
- bnx2_xceiver_str(bp));
-
- printk("%d Mbps ", bp->line_speed);
-
- if (bp->duplex == DUPLEX_FULL)
- printk("full duplex");
- else
- printk("half duplex");
+ netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
+ bnx2_xceiver_str(bp),
+ bp->line_speed,
+ bp->duplex == DUPLEX_FULL ? "full" : "half");
if (bp->flow_ctrl) {
if (bp->flow_ctrl & FLOW_CTRL_RX) {
- printk(", receive ");
+ pr_cont(", receive ");
if (bp->flow_ctrl & FLOW_CTRL_TX)
- printk("& transmit ");
+ pr_cont("& transmit ");
}
else {
- printk(", transmit ");
+ pr_cont(", transmit ");
}
- printk("flow control ON");
+ pr_cont("flow control ON");
}
- printk("\n");
- }
- else {
+ pr_cont("\n");
+ } else {
netif_carrier_off(bp->dev);
- printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
- bnx2_xceiver_str(bp));
+ netdev_err(bp->dev, "NIC %s Link is Down\n",
+ bnx2_xceiver_str(bp));
}
bnx2_report_fw_link(bp);
@@ -1278,7 +1271,7 @@ bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
if (lo_water >= bp->rx_ring_size)
lo_water = 0;
- hi_water = bp->rx_ring_size / 4;
+ hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
if (hi_water <= lo_water)
lo_water = 0;
@@ -2483,8 +2476,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
/* If we timed out, inform the firmware that this is the case. */
if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
if (!silent)
- printk(KERN_ERR PFX "fw sync timeout, reset code = "
- "%x\n", msg_data);
+ pr_err("fw sync timeout, reset code = %x\n", msg_data);
msg_data &= ~BNX2_DRV_MSG_CODE;
msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
@@ -2600,8 +2592,7 @@ bnx2_alloc_bad_rbuf(struct bnx2 *bp)
good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
if (good_mbuf == NULL) {
- printk(KERN_ERR PFX "Failed to allocate memory in "
- "bnx2_alloc_bad_rbuf\n");
+ pr_err("Failed to allocate memory in %s\n", __func__);
return -ENOMEM;
}
@@ -3561,9 +3552,7 @@ bnx2_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, dev) {
crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
bit = crc & 0xff;
regidx = (bit & 0xe0) >> 5;
@@ -3579,14 +3568,14 @@ bnx2_set_rx_mode(struct net_device *dev)
sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
}
- if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
+ if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
BNX2_RPM_SORT_USER0_PROM_VLAN;
} else if (!(dev->flags & IFF_PROMISC)) {
/* Add all entries into to the match filter list */
i = 0;
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
bnx2_set_mac_addr(bp, ha->addr,
i + BNX2_START_UNICAST_ADDRESS_INDEX);
sort_mode |= (1 <<
@@ -3657,15 +3646,13 @@ bnx2_request_firmware(struct bnx2 *bp)
rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
if (rc) {
- printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
- mips_fw_file);
+ pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
return rc;
}
rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
if (rc) {
- printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
- rv2p_fw_file);
+ pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
return rc;
}
mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
@@ -3676,15 +3663,13 @@ bnx2_request_firmware(struct bnx2 *bp)
check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
- printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
- mips_fw_file);
+ pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
return -EINVAL;
}
if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
- printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
- rv2p_fw_file);
+ pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
return -EINVAL;
}
@@ -4318,7 +4303,7 @@ bnx2_init_nvram(struct bnx2 *bp)
if (j == entry_count) {
bp->flash_info = NULL;
- printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
+ pr_alert("Unknown flash/EEPROM type\n");
return -ENODEV;
}
@@ -4738,7 +4723,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
- printk(KERN_ERR PFX "Chip reset did not complete\n");
+ pr_err("Chip reset did not complete\n");
return -EBUSY;
}
}
@@ -4746,7 +4731,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
/* Make sure byte swapping is properly configured. */
val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
if (val != 0x01020304) {
- printk(KERN_ERR PFX "Chip not in correct endian mode\n");
+ pr_err("Chip not in correct endian mode\n");
return -ENODEV;
}
@@ -4941,7 +4926,7 @@ bnx2_init_chip(struct bnx2 *bp)
BNX2_HC_CONFIG_COLLECT_STATS;
}
- if (bp->irq_nvecs > 1) {
+ if (bp->flags & BNX2_FLAG_USING_MSIX) {
REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
BNX2_HC_MSIX_BIT_VECTOR_VAL);
@@ -5167,9 +5152,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_pg_prod;
for (i = 0; i < bp->rx_pg_ring_size; i++) {
if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
- printk(KERN_WARNING PFX "%s: init'ed rx page ring %d "
- "with %d/%d pages only\n",
- bp->dev->name, ring_num, i, bp->rx_pg_ring_size);
+ netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
+ ring_num, i, bp->rx_pg_ring_size);
break;
}
prod = NEXT_RX_BD(prod);
@@ -5180,9 +5164,8 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
ring_prod = prod = rxr->rx_prod;
for (i = 0; i < bp->rx_ring_size; i++) {
if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
- printk(KERN_WARNING PFX "%s: init'ed rx ring %d with "
- "%d/%d skbs only\n",
- bp->dev->name, ring_num, i, bp->rx_ring_size);
+ netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
+ ring_num, i, bp->rx_ring_size);
break;
}
prod = NEXT_RX_BD(prod);
@@ -6145,6 +6128,10 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
+ /* Need to flush the previous three writes to ensure MSI-X
+ * is setup properly */
+ REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
+
for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
msix_ent[i].entry = i;
msix_ent[i].vector = 0;
@@ -6227,6 +6214,8 @@ bnx2_open(struct net_device *dev)
atomic_set(&bp->intr_sem, 0);
+ memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
+
bnx2_enable_int(bp);
if (bp->flags & BNX2_FLAG_USING_MSI) {
@@ -6234,11 +6223,7 @@ bnx2_open(struct net_device *dev)
* If MSI test fails, go back to INTx mode
*/
if (bnx2_test_intr(bp) != 0) {
- printk(KERN_WARNING PFX "%s: No interrupt was generated"
- " using MSI, switching to INTx mode. Please"
- " report this failure to the PCI maintainer"
- " and include system chipset information.\n",
- bp->dev->name);
+ netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
bnx2_disable_int(bp);
bnx2_free_irq(bp);
@@ -6258,9 +6243,9 @@ bnx2_open(struct net_device *dev)
}
}
if (bp->flags & BNX2_FLAG_USING_MSI)
- printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
+ netdev_info(dev, "using MSI\n");
else if (bp->flags & BNX2_FLAG_USING_MSIX)
- printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
+ netdev_info(dev, "using MSIX\n");
netif_tx_start_all_queues(dev);
@@ -6299,20 +6284,18 @@ bnx2_dump_state(struct bnx2 *bp)
{
struct net_device *dev = bp->dev;
- printk(KERN_ERR PFX "%s DEBUG: intr_sem[%x]\n", dev->name,
- atomic_read(&bp->intr_sem));
- printk(KERN_ERR PFX "%s DEBUG: EMAC_TX_STATUS[%08x] "
- "RPM_MGMT_PKT_CTRL[%08x]\n", dev->name,
- REG_RD(bp, BNX2_EMAC_TX_STATUS),
- REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
- printk(KERN_ERR PFX "%s DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
- dev->name, bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
- bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
- printk(KERN_ERR PFX "%s DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
- dev->name, REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
+ netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
+ netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
+ REG_RD(bp, BNX2_EMAC_TX_STATUS),
+ REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
+ netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
+ bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
+ bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
+ netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
+ REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
if (bp->flags & BNX2_FLAG_USING_MSIX)
- printk(KERN_ERR PFX "%s DEBUG: PBA[%08x]\n", dev->name,
- REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
+ netdev_err(dev, "DEBUG: PBA[%08x]\n",
+ REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
static void
@@ -6376,8 +6359,7 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(bnx2_tx_avail(bp, txr) <
(skb_shinfo(skb)->nr_frags + 1))) {
netif_tx_stop_queue(txq);
- printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
- dev->name);
+ netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -6538,92 +6520,121 @@ bnx2_close(struct net_device *dev)
return 0;
}
-#define GET_NET_STATS64(ctr) \
+static void
+bnx2_save_stats(struct bnx2 *bp)
+{
+ u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
+ int i;
+
+ /* The 1st 10 counters are 64-bit counters */
+ for (i = 0; i < 20; i += 2) {
+ u32 hi;
+ u64 lo;
+
+ hi = temp_stats[i] + hw_stats[i];
+ lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
+ if (lo > 0xffffffff)
+ hi++;
+ temp_stats[i] = hi;
+ temp_stats[i + 1] = lo & 0xffffffff;
+ }
+
+ for ( ; i < sizeof(struct statistics_block) / 4; i++)
+ temp_stats[i] += hw_stats[i];
+}
+
+#define GET_64BIT_NET_STATS64(ctr) \
(unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
(unsigned long) (ctr##_lo)
-#define GET_NET_STATS32(ctr) \
+#define GET_64BIT_NET_STATS32(ctr) \
(ctr##_lo)
#if (BITS_PER_LONG == 64)
-#define GET_NET_STATS GET_NET_STATS64
+#define GET_64BIT_NET_STATS(ctr) \
+ GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
+ GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
-#define GET_NET_STATS GET_NET_STATS32
+#define GET_64BIT_NET_STATS(ctr) \
+ GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
+ GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif
+#define GET_32BIT_NET_STATS(ctr) \
+ (unsigned long) (bp->stats_blk->ctr + \
+ bp->temp_stats_blk->ctr)
+
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
struct bnx2 *bp = netdev_priv(dev);
- struct statistics_block *stats_blk = bp->stats_blk;
struct net_device_stats *net_stats = &dev->stats;
if (bp->stats_blk == NULL) {
return net_stats;
}
net_stats->rx_packets =
- GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
net_stats->tx_packets =
- GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
- GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
+ GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
net_stats->rx_bytes =
- GET_NET_STATS(stats_blk->stat_IfHCInOctets);
+ GET_64BIT_NET_STATS(stat_IfHCInOctets);
net_stats->tx_bytes =
- GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
+ GET_64BIT_NET_STATS(stat_IfHCOutOctets);
net_stats->multicast =
- GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
+ GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
net_stats->collisions =
- (unsigned long) stats_blk->stat_EtherStatsCollisions;
+ GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
net_stats->rx_length_errors =
- (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
- stats_blk->stat_EtherStatsOverrsizePkts);
+ GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
+ GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
net_stats->rx_over_errors =
- (unsigned long) (stats_blk->stat_IfInFTQDiscards +
- stats_blk->stat_IfInMBUFDiscards);
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
net_stats->rx_frame_errors =
- (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
net_stats->rx_crc_errors =
- (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
net_stats->rx_errors = net_stats->rx_length_errors +
net_stats->rx_over_errors + net_stats->rx_frame_errors +
net_stats->rx_crc_errors;
net_stats->tx_aborted_errors =
- (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
- stats_blk->stat_Dot3StatsLateCollisions);
+ GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
+ GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
(CHIP_ID(bp) == CHIP_ID_5708_A0))
net_stats->tx_carrier_errors = 0;
else {
net_stats->tx_carrier_errors =
- (unsigned long)
- stats_blk->stat_Dot3StatsCarrierSenseErrors;
+ GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
}
net_stats->tx_errors =
- (unsigned long)
- stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
- +
+ GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
net_stats->tx_aborted_errors +
net_stats->tx_carrier_errors;
net_stats->rx_missed_errors =
- (unsigned long) (stats_blk->stat_IfInFTQDiscards +
- stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);
+ GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
+ GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
+ GET_32BIT_NET_STATS(stat_FwRxDrop);
return net_stats;
}
@@ -6717,32 +6728,15 @@ bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_ENABLE) {
autoneg |= AUTONEG_SPEED;
- cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
-
- /* allow advertising 1 speed */
- if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
- (cmd->advertising == ADVERTISED_10baseT_Full) ||
- (cmd->advertising == ADVERTISED_100baseT_Half) ||
- (cmd->advertising == ADVERTISED_100baseT_Full)) {
-
- if (cmd->port == PORT_FIBRE)
- goto err_out_unlock;
-
- advertising = cmd->advertising;
-
- } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
- if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
- (cmd->port == PORT_TP))
- goto err_out_unlock;
- } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
- advertising = cmd->advertising;
- else if (cmd->advertising == ADVERTISED_1000baseT_Half)
- goto err_out_unlock;
- else {
- if (cmd->port == PORT_FIBRE)
- advertising = ETHTOOL_ALL_FIBRE_SPEED;
- else
+ advertising = cmd->advertising;
+ if (cmd->port == PORT_TP) {
+ advertising &= ETHTOOL_ALL_COPPER_SPEED;
+ if (!advertising)
advertising = ETHTOOL_ALL_COPPER_SPEED;
+ } else {
+ advertising &= ETHTOOL_ALL_FIBRE_SPEED;
+ if (!advertising)
+ advertising = ETHTOOL_ALL_FIBRE_SPEED;
}
advertising |= ADVERTISED_Autoneg;
}
@@ -7083,6 +7077,9 @@ static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
if (netif_running(bp->dev)) {
+ /* Reset will erase chipset stats; save them */
+ bnx2_save_stats(bp);
+
bnx2_netif_stop(bp);
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
bnx2_free_skbs(bp);
@@ -7104,6 +7101,13 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
dev_close(bp->dev);
return rc;
}
+#ifdef BCM_CNIC
+ mutex_lock(&bp->cnic_lock);
+ /* Let cnic know about the new status block. */
+ if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
+ bnx2_setup_cnic_irq_info(bp);
+ mutex_unlock(&bp->cnic_lock);
+#endif
bnx2_netif_start(bp);
}
return 0;
@@ -7427,6 +7431,7 @@ bnx2_get_ethtool_stats(struct net_device *dev,
struct bnx2 *bp = netdev_priv(dev);
int i;
u32 *hw_stats = (u32 *) bp->stats_blk;
+ u32 *temp_stats = (u32 *) bp->temp_stats_blk;
u8 *stats_len_arr = NULL;
if (hw_stats == NULL) {
@@ -7443,21 +7448,26 @@ bnx2_get_ethtool_stats(struct net_device *dev,
stats_len_arr = bnx2_5708_stats_len_arr;
for (i = 0; i < BNX2_NUM_STATS; i++) {
+ unsigned long offset;
+
if (stats_len_arr[i] == 0) {
/* skip this counter */
buf[i] = 0;
continue;
}
+
+ offset = bnx2_stats_offset_arr[i];
if (stats_len_arr[i] == 4) {
/* 4-byte counter */
- buf[i] = (u64)
- *(hw_stats + bnx2_stats_offset_arr[i]);
+ buf[i] = (u64) *(hw_stats + offset) +
+ *(temp_stats + offset);
continue;
}
/* 8-byte counter */
- buf[i] = (((u64) *(hw_stats +
- bnx2_stats_offset_arr[i])) << 32) +
- *(hw_stats + bnx2_stats_offset_arr[i] + 1);
+ buf[i] = (((u64) *(hw_stats + offset)) << 32) +
+ *(hw_stats + offset + 1) +
+ (((u64) *(temp_stats + offset)) << 32) +
+ *(temp_stats + offset + 1);
}
}
@@ -7625,7 +7635,7 @@ bnx2_change_mtu(struct net_device *dev, int new_mtu)
return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
}
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void
poll_bnx2(struct net_device *dev)
{
@@ -7733,10 +7743,9 @@ bnx2_get_pci_speed(struct bnx2 *bp)
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
- int rc, i, v0_len = 0;
+ int rc, i, j;
u8 *data;
- u8 *v0_str = NULL;
- bool mn_match = false;
+ unsigned int block_end, rosize, len;
#define BNX2_VPD_NVRAM_OFFSET 0x300
#define BNX2_VPD_LEN 128
@@ -7758,53 +7767,42 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp)
data[i + 3] = data[i + BNX2_VPD_LEN];
}
- for (i = 0; i <= BNX2_VPD_LEN - 3; ) {
- unsigned char val = data[i];
- unsigned int block_end;
-
- if (val == 0x82 || val == 0x91) {
- i = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
- continue;
- }
-
- if (val != 0x90)
- goto vpd_done;
+ i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto vpd_done;
- block_end = (i + 3 + (data[i + 1] + (data[i + 2] << 8)));
- i += 3;
+ rosize = pci_vpd_lrdt_size(&data[i]);
+ i += PCI_VPD_LRDT_TAG_SIZE;
+ block_end = i + rosize;
- if (block_end > BNX2_VPD_LEN)
- goto vpd_done;
+ if (block_end > BNX2_VPD_LEN)
+ goto vpd_done;
- while (i < (block_end - 2)) {
- int len = data[i + 2];
+ j = pci_vpd_find_info_keyword(data, i, rosize,
+ PCI_VPD_RO_KEYWORD_MFR_ID);
+ if (j < 0)
+ goto vpd_done;
- if (i + 3 + len > block_end)
- goto vpd_done;
+ len = pci_vpd_info_field_size(&data[j]);
- if (data[i] == 'M' && data[i + 1] == 'N') {
- if (len != 4 ||
- memcmp(&data[i + 3], "1028", 4))
- goto vpd_done;
- mn_match = true;
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len != 4 ||
+ memcmp(&data[j], "1028", 4))
+ goto vpd_done;
- } else if (data[i] == 'V' && data[i + 1] == '0') {
- if (len > BNX2_MAX_VER_SLEN)
- goto vpd_done;
+ j = pci_vpd_find_info_keyword(data, i, rosize,
+ PCI_VPD_RO_KEYWORD_VENDOR0);
+ if (j < 0)
+ goto vpd_done;
- v0_len = len;
- v0_str = &data[i + 3];
- }
- i += 3 + len;
+ len = pci_vpd_info_field_size(&data[j]);
- if (mn_match && v0_str) {
- memcpy(bp->fw_version, v0_str, v0_len);
- bp->fw_version[v0_len] = ' ';
- goto vpd_done;
- }
- }
+ j += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
goto vpd_done;
- }
+
+ memcpy(bp->fw_version, &data[j], len);
+ bp->fw_version[len] = ' ';
vpd_done:
kfree(data);
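
The rewritten parser above leans on the generic PCI VPD helpers to locate the large-resource read-only tag and then the "MN" and "V0" keywords inside it, instead of open-coding the walk. For reference, the read-only section is just a sequence of [keyword (2 bytes)][length (1 byte)][data] records; a stand-alone sketch of walking that layout, with made-up sample data and a hypothetical helper name:

    #include <stdio.h>

    static int vpd_find_keyword(const unsigned char *ro, int rosize,
                                const char *kw, int *len_out)
    {
            int i = 0;

            while (i + 3 <= rosize) {
                    int len = ro[i + 2];

                    if (i + 3 + len > rosize)
                            break;
                    if (ro[i] == kw[0] && ro[i + 1] == kw[1]) {
                            *len_out = len;
                            return i + 3;   /* offset of the field data */
                    }
                    i += 3 + len;
            }
            return -1;
    }

    int main(void)
    {
            /* "MN" = "1028" (Dell), "V0" = "5.0.1" */
            unsigned char ro[] = { 'M', 'N', 4, '1', '0', '2', '8',
                                   'V', '0', 5, '5', '.', '0', '.', '1' };
            int len, off = vpd_find_keyword(ro, sizeof(ro), "V0", &len);

            if (off >= 0)
                    printf("V0: %.*s\n", len, (const char *)&ro[off]);
            return 0;
    }
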
@@ -7825,23 +7823,31 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->flags = 0;
bp->phy_flags = 0;
+ bp->temp_stats_blk =
+ kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
+
+ if (bp->temp_stats_blk == NULL) {
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
/* enable device (incl. PCI PM wakeup), and bus-mastering */
rc = pci_enable_device(pdev);
if (rc) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_out;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev,
- "Cannot find PCI device base address, aborting.\n");
+ "Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
- dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable;
}
@@ -7851,7 +7857,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
dev_err(&pdev->dev,
- "Cannot find power management capability, aborting.\n");
+ "Cannot find power management capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -7874,7 +7880,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->regview = ioremap_nocache(dev->base_addr, mem_len);
if (!bp->regview) {
- dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
+ dev_err(&pdev->dev, "Cannot map register space, aborting\n");
rc = -ENOMEM;
goto err_out_release;
}
@@ -7894,7 +7900,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if (CHIP_NUM(bp) == CHIP_NUM_5709) {
if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
dev_err(&pdev->dev,
- "Cannot find PCIE capability, aborting.\n");
+ "Cannot find PCIE capability, aborting\n");
rc = -EIO;
goto err_out_unmap;
}
@@ -7905,7 +7911,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
if (bp->pcix_cap == 0) {
dev_err(&pdev->dev,
- "Cannot find PCIX capability, aborting.\n");
+ "Cannot find PCIX capability, aborting\n");
rc = -EIO;
goto err_out_unmap;
}
@@ -7934,11 +7940,11 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
if (rc) {
dev_err(&pdev->dev,
- "pci_set_consistent_dma_mask failed, aborting.\n");
+ "pci_set_consistent_dma_mask failed, aborting\n");
goto err_out_unmap;
}
} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
- dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
+ dev_err(&pdev->dev, "System does not support DMA, aborting\n");
goto err_out_unmap;
}
@@ -7955,7 +7961,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
!(bp->flags & BNX2_FLAG_PCIX)) {
dev_err(&pdev->dev,
- "5706 A1 can only be used in a PCIX bus, aborting.\n");
+ "5706 A1 can only be used in a PCIX bus, aborting\n");
goto err_out_unmap;
}
@@ -7978,7 +7984,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
BNX2_DEV_INFO_SIGNATURE_MAGIC) {
- dev_err(&pdev->dev, "Firmware not running, aborting.\n");
+ dev_err(&pdev->dev, "Firmware not running, aborting\n");
rc = -ENODEV;
goto err_out_unmap;
}
@@ -8229,7 +8235,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
#ifdef BCM_VLAN
.ndo_vlan_rx_register = bnx2_vlan_rx_register,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2,
#endif
};
@@ -8251,7 +8257,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
char str[40];
if (version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
@@ -8301,15 +8307,13 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto error;
}
- printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
- "IRQ %d, node addr %pM\n",
- dev->name,
- board_info[ent->driver_data].name,
- ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
- ((CHIP_ID(bp) & 0x0ff0) >> 4),
- bnx2_bus_string(bp, str),
- dev->base_addr,
- bp->pdev->irq, dev->dev_addr);
+ netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
+ ((CHIP_ID(bp) & 0x0ff0) >> 4),
+ bnx2_bus_string(bp, str),
+ dev->base_addr,
+ bp->pdev->irq, dev->dev_addr);
return 0;
@@ -8346,6 +8350,8 @@ bnx2_remove_one(struct pci_dev *pdev)
if (bp->regview)
iounmap(bp->regview);
+ kfree(bp->temp_stats_blk);
+
free_netdev(dev);
pci_release_regions(pdev);
pci_disable_device(pdev);
@@ -8442,7 +8448,7 @@ static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
rtnl_lock();
if (pci_enable_device(pdev)) {
dev_err(&pdev->dev,
- "Cannot re-enable PCI device after reset.\n");
+ "Cannot re-enable PCI device after reset\n");
rtnl_unlock();
return PCI_ERS_RESULT_DISCONNECT;
}
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 939dc44d50a..cd4b0e4637a 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -349,7 +349,7 @@ struct l2_fhdr {
#define BNX2_L2CTX_BD_PRE_READ 0x00000000
#define BNX2_L2CTX_CTX_SIZE 0x00000000
#define BNX2_L2CTX_CTX_TYPE 0x00000000
-#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 32
+#define BNX2_L2CTX_LO_WATER_MARK_DEFAULT 4
#define BNX2_L2CTX_LO_WATER_MARK_SCALE 4
#define BNX2_L2CTX_LO_WATER_MARK_DIS 0
#define BNX2_L2CTX_HI_WATER_MARK_SHIFT 4
@@ -6851,6 +6851,7 @@ struct bnx2 {
dma_addr_t status_blk_mapping;
struct statistics_block *stats_blk;
+ struct statistics_block *temp_stats_blk;
dma_addr_t stats_blk_mapping;
int ctx_pages;
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 602ab86b639..3c48a7a6830 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -1,6 +1,6 @@
/* bnx2x.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -44,7 +44,6 @@
/* error/debug prints */
#define DRV_MODULE_NAME "bnx2x"
-#define PFX DRV_MODULE_NAME ": "
/* for messages that are currently off */
#define BNX2X_MSG_OFF 0
@@ -58,30 +57,40 @@
#define DP_LEVEL KERN_NOTICE /* was: KERN_DEBUG */
/* regular debug print */
-#define DP(__mask, __fmt, __args...) do { \
- if (bp->msglevel & (__mask)) \
- printk(DP_LEVEL "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", ##__args); \
- } while (0)
+#define DP(__mask, __fmt, __args...) \
+do { \
+ if (bp->msg_enable & (__mask)) \
+ printk(DP_LEVEL "[%s:%d(%s)]" __fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__args); \
+} while (0)
/* errors debug print */
-#define BNX2X_DBG_ERR(__fmt, __args...) do { \
- if (bp->msglevel & NETIF_MSG_PROBE) \
- printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", ##__args); \
- } while (0)
+#define BNX2X_DBG_ERR(__fmt, __args...) \
+do { \
+ if (netif_msg_probe(bp)) \
+ pr_err("[%s:%d(%s)]" __fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__args); \
+} while (0)
/* for errors (never masked) */
-#define BNX2X_ERR(__fmt, __args...) do { \
- printk(KERN_ERR "[%s:%d(%s)]" __fmt, __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", ##__args); \
- } while (0)
+#define BNX2X_ERR(__fmt, __args...) \
+do { \
+ pr_err("[%s:%d(%s)]" __fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__args); \
+} while (0)
/* before we have a dev->name use dev_info() */
-#define BNX2X_DEV_INFO(__fmt, __args...) do { \
- if (bp->msglevel & NETIF_MSG_PROBE) \
- dev_info(&bp->pdev->dev, __fmt, ##__args); \
- } while (0)
+#define BNX2X_DEV_INFO(__fmt, __args...) \
+do { \
+ if (netif_msg_probe(bp)) \
+ dev_info(&bp->pdev->dev, __fmt, ##__args); \
+} while (0)
#ifdef BNX2X_STOP_ON_ERROR
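
The macros above keep the usual do { ... } while (0) wrapping so each expands to a single statement, but the gate moves from the private msglevel field to the standard msg_enable word tested through the netif_msg_*() helpers, and pr_err()/pr_cont() pick up the pr_fmt prefix instead of PFX. A compilable sketch of the gated-print pattern; the context struct and macro name here stand in for struct bnx2x and are not the driver's:

    #include <stdio.h>

    #define NETIF_MSG_PROBE 0x0002

    struct ctx { unsigned int msg_enable; };

    #define DBG_PROBE(bp, fmt, ...)                                     \
    do {                                                                \
            if ((bp)->msg_enable & NETIF_MSG_PROBE)                     \
                    fprintf(stderr, "[%s:%d] " fmt, __func__, __LINE__, \
                            ##__VA_ARGS__);                             \
    } while (0)

    int main(void)
    {
            struct ctx bp = { .msg_enable = NETIF_MSG_PROBE };

            DBG_PROBE(&bp, "probe message %d\n", 42);
            return 0;
    }
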
@@ -130,7 +139,7 @@
offset, len32); \
} while (0)
-#define VIRT_WR_DMAE_LEN(bp, data, addr, len32) \
+#define VIRT_WR_DMAE_LEN(bp, data, addr, len32, le32_swap) \
do { \
memcpy(GUNZIP_BUF(bp), data, (len32) * 4); \
bnx2x_write_big_buf_wb(bp, addr, len32); \
@@ -882,7 +891,7 @@ struct bnx2x {
/* End of fields used in the performance code paths */
int panic;
- int msglevel;
+ int msg_enable;
u32 flags;
#define PCIX_FLAG 1
diff --git a/drivers/net/bnx2x_fw_defs.h b/drivers/net/bnx2x_fw_defs.h
index 931dcace562..08d71bf438d 100644
--- a/drivers/net/bnx2x_fw_defs.h
+++ b/drivers/net/bnx2x_fw_defs.h
@@ -1,6 +1,6 @@
/* bnx2x_fw_defs.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -471,6 +471,11 @@
/* Host coalescing constants */
+#define HC_IGU_BC_MODE 0
+#define HC_IGU_NBC_MODE 1
+
+#define HC_REGULAR_SEGMENT 0
+#define HC_DEFAULT_SEGMENT 1
/* index numbers */
#define HC_USTORM_DEF_SB_NUM_INDICES 8
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 52585338ada..760069345b1 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -1,6 +1,6 @@
/* bnx2x_hsi.h: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -1261,7 +1261,7 @@ struct host_func_stats {
#define BCM_5710_FW_MAJOR_VERSION 5
#define BCM_5710_FW_MINOR_VERSION 2
-#define BCM_5710_FW_REVISION_VERSION 7
+#define BCM_5710_FW_REVISION_VERSION 13
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
@@ -2433,8 +2433,10 @@ struct common_ramrod_eth_rx_cqe {
u8 ramrod_type;
#define COMMON_RAMROD_ETH_RX_CQE_TYPE (0x1<<0)
#define COMMON_RAMROD_ETH_RX_CQE_TYPE_SHIFT 0
-#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x7F<<1)
-#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 1
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR (0x1<<1)
+#define COMMON_RAMROD_ETH_RX_CQE_ERROR_SHIFT 1
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0 (0x3F<<2)
+#define COMMON_RAMROD_ETH_RX_CQE_RESERVED0_SHIFT 2
u8 conn_type;
__le16 reserved1;
__le32 conn_and_cmd_data;
diff --git a/drivers/net/bnx2x_init_ops.h b/drivers/net/bnx2x_init_ops.h
index 38b970a14fd..2b1363a6fe7 100644
--- a/drivers/net/bnx2x_init_ops.h
+++ b/drivers/net/bnx2x_init_ops.h
@@ -2,7 +2,7 @@
* Static functions needed during the initialization.
* This file is "included" in bnx2x_main.c.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -138,11 +138,16 @@ static void bnx2x_write_big_buf_wb(struct bnx2x *bp, u32 addr, u32 len)
static void bnx2x_init_wr_wb(struct bnx2x *bp, u32 addr, const u32 *data,
u32 len)
{
+ const u32 *old_data = data;
+
data = (const u32 *)bnx2x_sel_blob(bp, addr, (const u8 *)data);
- if (bp->dmae_ready)
- VIRT_WR_DMAE_LEN(bp, data, addr, len);
- else
+ if (bp->dmae_ready) {
+ if (old_data != data)
+ VIRT_WR_DMAE_LEN(bp, data, addr, len, 1);
+ else
+ VIRT_WR_DMAE_LEN(bp, data, addr, len, 0);
+ } else
bnx2x_init_ind_wr(bp, addr, data, len);
}
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index cf5778919b4..32e79c359e8 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -14,6 +14,8 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
@@ -2987,11 +2989,8 @@ static u8 bnx2x_verify_sfp_module(struct link_params *params)
else
vendor_pn[SFP_EEPROM_PART_NO_SIZE] = '\0';
- printk(KERN_INFO PFX "Warning: "
- "Unqualified SFP+ module "
- "detected on %s, Port %d from %s part number %s\n"
- , bp->dev->name, params->port,
- vendor_name, vendor_pn);
+ netdev_info(bp->dev, "Warning: Unqualified SFP+ module detected, Port %d from %s part number %s\n",
+ params->port, vendor_name, vendor_pn);
return -EINVAL;
}
@@ -4846,16 +4845,8 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
" has been detected on "
"port %d\n",
params->port);
- printk(KERN_ERR PFX "Error: Power"
- " fault on %s Port %d has"
- " been detected and the"
- " power to that SFP+ module"
- " has been removed to prevent"
- " failure of the card. Please"
- " remove the SFP+ module and"
- " restart the system to clear"
- " this error.\n"
- , bp->dev->name, params->port);
+ netdev_err(bp->dev, "Error: Power fault on Port %d has been detected and the power to that SFP+ module has been removed to prevent failure of the card. Please remove the SFP+ module and restart the system to clear this error.\n",
+ params->port);
/*
* Disable all RX_ALARMs except for
* mod_abs
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 306c2b8165e..ed785a30e98 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -1,6 +1,6 @@
/* bnx2x_main.c: Broadcom Everest network driver.
*
- * Copyright (c) 2007-2009 Broadcom Corporation
+ * Copyright (c) 2007-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -57,8 +57,8 @@
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
-#define DRV_MODULE_VERSION "1.52.1-5"
-#define DRV_MODULE_RELDATE "2009/11/09"
+#define DRV_MODULE_VERSION "1.52.1-7"
+#define DRV_MODULE_RELDATE "2010/02/28"
#define BNX2X_BC_VER 0x040200
#include <linux/firmware.h>
@@ -140,7 +140,7 @@ static struct {
};
-static const struct pci_device_id bnx2x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
@@ -514,24 +514,24 @@ static void bnx2x_fw_dump(struct bnx2x *bp)
mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
mark = ((mark + 0x3) & ~0x3);
- printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);
+ pr_err("begin fw dump (mark 0x%x)\n", mark);
- printk(KERN_ERR PFX);
+ pr_err("");
for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
offset + 4*word));
data[8] = 0x0;
- printk(KERN_CONT "%s", (char *)data);
+ pr_cont("%s", (char *)data);
}
for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
for (word = 0; word < 8; word++)
data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
offset + 4*word));
data[8] = 0x0;
- printk(KERN_CONT "%s", (char *)data);
+ pr_cont("%s", (char *)data);
}
- printk(KERN_ERR PFX "end of fw dump\n");
+ pr_err("end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
@@ -957,21 +957,34 @@ static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
fp->tx_pkt_cons = sw_cons;
fp->tx_bd_cons = bd_cons;
+ /* Need to make the tx_bd_cons update visible to start_xmit()
+ * before checking for netif_tx_queue_stopped(). Without the
+ * memory barrier, there is a small possibility that
+ * start_xmit() will miss it and cause the queue to be stopped
+ * forever.
+ */
+ smp_wmb();
+
/* TBD need a thresh? */
if (unlikely(netif_tx_queue_stopped(txq))) {
-
- /* Need to make the tx_bd_cons update visible to start_xmit()
- * before checking for netif_tx_queue_stopped(). Without the
- * memory barrier, there is a small possibility that
- * start_xmit() will miss it and cause the queue to be stopped
- * forever.
+ /* Taking tx_lock() is needed to prevent reenabling the queue
+ * while it's empty. This could have happened if rx_action() gets
+ * suspended in bnx2x_tx_int() after the condition before
+ * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
+ *
+ * stops the queue->sees fresh tx_bd_cons->releases the queue->
+ * sends some packets consuming the whole queue again->
+ * stops the queue
*/
- smp_mb();
+
+ __netif_tx_lock(txq, smp_processor_id());
if ((netif_tx_queue_stopped(txq)) &&
(bp->state == BNX2X_STATE_OPEN) &&
(bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
netif_tx_wake_queue(txq);
+
+ __netif_tx_unlock(txq);
}
return 0;
}
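
The reworked completion path above publishes the new consumer index with smp_wmb() before it tests whether the queue was stopped, and then wakes the queue only under __netif_tx_lock() so a concurrent start_xmit() cannot re-stop it unseen. A rough user-space sketch of the publish-then-check ordering, using C11 seq_cst atomics in place of the kernel's smp_wmb()/smp_mb() pairing; all names are illustrative:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint cons_idx;
    static atomic_bool queue_stopped;

    static void tx_completion(unsigned int new_cons)
    {
            /* Publish the new consumer index first... */
            atomic_store_explicit(&cons_idx, new_cons, memory_order_seq_cst);

            /* ...then test the stopped flag.  The producer stops the queue
             * and then re-reads cons_idx, so at least one side observes the
             * other's update and the wake-up cannot be lost.  The real code
             * additionally re-checks ring space under the tx queue lock. */
            if (atomic_load_explicit(&queue_stopped, memory_order_seq_cst))
                    atomic_store(&queue_stopped, false);    /* "wake" */
    }

    int main(void)
    {
            atomic_store(&queue_stopped, true);
            tx_completion(42);
            printf("stopped=%d cons=%u\n",
                   (int)atomic_load(&queue_stopped),
                   (unsigned int)atomic_load(&cons_idx));
            return 0;
    }
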
@@ -2136,7 +2149,7 @@ static void bnx2x_link_report(struct bnx2x *bp)
{
if (bp->flags & MF_FUNC_DIS) {
netif_carrier_off(bp->dev);
- printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
+ netdev_err(bp->dev, "NIC Link is Down\n");
return;
}
@@ -2145,7 +2158,7 @@ static void bnx2x_link_report(struct bnx2x *bp)
if (bp->state == BNX2X_STATE_OPEN)
netif_carrier_on(bp->dev);
- printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
+ netdev_info(bp->dev, "NIC Link is Up, ");
line_speed = bp->link_vars.line_speed;
if (IS_E1HMF(bp)) {
@@ -2157,29 +2170,29 @@ static void bnx2x_link_report(struct bnx2x *bp)
if (vn_max_rate < line_speed)
line_speed = vn_max_rate;
}
- printk("%d Mbps ", line_speed);
+ pr_cont("%d Mbps ", line_speed);
if (bp->link_vars.duplex == DUPLEX_FULL)
- printk("full duplex");
+ pr_cont("full duplex");
else
- printk("half duplex");
+ pr_cont("half duplex");
if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
- printk(", receive ");
+ pr_cont(", receive ");
if (bp->link_vars.flow_ctrl &
BNX2X_FLOW_CTRL_TX)
- printk("& transmit ");
+ pr_cont("& transmit ");
} else {
- printk(", transmit ");
+ pr_cont(", transmit ");
}
- printk("flow control ON");
+ pr_cont("flow control ON");
}
- printk("\n");
+ pr_cont("\n");
} else { /* link_down */
netif_carrier_off(bp->dev);
- printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
+ netdev_err(bp->dev, "NIC Link is Down\n");
}
}
@@ -2898,10 +2911,8 @@ static inline void bnx2x_fan_failure(struct bnx2x *bp)
bp->link_params.ext_phy_config);
/* log the failure */
- printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
- " the driver to shutdown the card to prevent permanent"
- " damage. Please contact Dell Support for assistance\n",
- bp->dev->name);
+ netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
+ "Please contact Dell Support for assistance.\n");
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -4296,7 +4307,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
bnx2x_net_stats_update(bp);
bnx2x_drv_stats_update(bp);
- if (bp->msglevel & NETIF_MSG_TIMER) {
+ if (netif_msg_timer(bp)) {
struct bnx2x_fastpath *fp0_rx = bp->fp;
struct bnx2x_fastpath *fp0_tx = bp->fp;
struct tstorm_per_client_stats *old_tclient =
@@ -4306,7 +4317,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
struct net_device_stats *nstats = &bp->dev->stats;
int i;
- printk(KERN_DEBUG "%s:\n", bp->dev->name);
+ netdev_printk(KERN_DEBUG, bp->dev, "\n");
printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
" tx pkt (%lx)\n",
bnx2x_tx_avail(fp0_tx),
@@ -4464,7 +4475,7 @@ static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
/* Make sure the state has been "changed" */
smp_wmb();
- if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
+ if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
state, event, bp->stats_state);
}
@@ -5674,8 +5685,7 @@ gunzip_nomem2:
bp->gunzip_buf = NULL;
gunzip_nomem1:
- printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
- " un-compression\n", bp->dev->name);
+ netdev_err(bp->dev, "Cannot allocate firmware buffer for un-compression\n");
return -ENOMEM;
}
@@ -5721,14 +5731,13 @@ static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
rc = zlib_inflate(bp->strm, Z_FINISH);
if ((rc != Z_OK) && (rc != Z_STREAM_END))
- printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
- bp->dev->name, bp->strm->msg);
+ netdev_err(bp->dev, "Firmware decompression error: %s\n",
+ bp->strm->msg);
bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
if (bp->gunzip_outlen & 0x3)
- printk(KERN_ERR PFX "%s: Firmware decompression error:"
- " gunzip_outlen (%d) not aligned\n",
- bp->dev->name, bp->gunzip_outlen);
+ netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
+ bp->gunzip_outlen);
bp->gunzip_outlen >>= 2;
zlib_inflateEnd(bp->strm);
@@ -6213,8 +6222,8 @@ static int bnx2x_init_common(struct bnx2x *bp)
if (sizeof(union cdu_context) != 1024)
/* we currently assume that a context is 1024 bytes */
- printk(KERN_ALERT PFX "please adjust the size of"
- " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
+ pr_alert("please adjust the size of cdu_context(%ld)\n",
+ (long)sizeof(union cdu_context));
bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
val = (4 << 24) + (0 << 12) + 1024;
@@ -6938,19 +6947,21 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
}
}
-static void bnx2x_free_irq(struct bnx2x *bp)
+static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
if (bp->flags & USING_MSIX_FLAG) {
- bnx2x_free_msix_irqs(bp);
+ if (!disable_only)
+ bnx2x_free_msix_irqs(bp);
pci_disable_msix(bp->pdev);
bp->flags &= ~USING_MSIX_FLAG;
} else if (bp->flags & USING_MSI_FLAG) {
- free_irq(bp->pdev->irq, bp->dev);
+ if (!disable_only)
+ free_irq(bp->pdev->irq, bp->dev);
pci_disable_msi(bp->pdev);
bp->flags &= ~USING_MSI_FLAG;
- } else
+ } else if (!disable_only)
free_irq(bp->pdev->irq, bp->dev);
}
@@ -7018,11 +7029,10 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
}
i = BNX2X_NUM_QUEUES(bp);
- printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
- " ... fp[%d] %d\n",
- bp->dev->name, bp->msix_table[0].vector,
- 0, bp->msix_table[offset].vector,
- i - 1, bp->msix_table[offset + i - 1].vector);
+ netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
+ bp->msix_table[0].vector,
+ 0, bp->msix_table[offset].vector,
+ i - 1, bp->msix_table[offset + i - 1].vector);
return 0;
}
@@ -7443,8 +7453,10 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_set_num_queues(bp);
- if (bnx2x_alloc_mem(bp))
+ if (bnx2x_alloc_mem(bp)) {
+ bnx2x_free_irq(bp, true);
return -ENOMEM;
+ }
for_each_queue(bp, i)
bnx2x_fp(bp, i, disable_tpa) =
@@ -7459,7 +7471,7 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
if (bp->flags & USING_MSIX_FLAG) {
rc = bnx2x_req_msix_irqs(bp);
if (rc) {
- pci_disable_msix(bp->pdev);
+ bnx2x_free_irq(bp, true);
goto load_error1;
}
} else {
@@ -7471,14 +7483,13 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_req_irq(bp);
if (rc) {
BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
- if (bp->flags & USING_MSI_FLAG)
- pci_disable_msi(bp->pdev);
+ bnx2x_free_irq(bp, true);
goto load_error1;
}
if (bp->flags & USING_MSI_FLAG) {
bp->dev->irq = bp->pdev->irq;
- printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
- bp->dev->name, bp->pdev->irq);
+ netdev_info(bp->dev, "using MSI IRQ %d\n",
+ bp->pdev->irq);
}
}
@@ -7527,6 +7538,9 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
rc = bnx2x_init_hw(bp, load_code);
if (rc) {
BNX2X_ERR("HW init failed, aborting\n");
+ bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
+ bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
goto load_error2;
}
@@ -7664,7 +7678,7 @@ load_error3:
bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
/* Release IRQs */
- bnx2x_free_irq(bp);
+ bnx2x_free_irq(bp, false);
load_error1:
bnx2x_napi_disable(bp);
for_each_queue(bp, i)
@@ -7855,7 +7869,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
bnx2x_stats_handle(bp, STATS_EVENT_STOP);
/* Release IRQs */
- bnx2x_free_irq(bp);
+ bnx2x_free_irq(bp, false);
/* Wait until tx fastpath tasks complete */
for_each_queue(bp, i) {
@@ -8297,8 +8311,7 @@ static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
- printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
- val, val2, val3, val4);
+ pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
@@ -8909,17 +8922,15 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
bnx2x_undi_unload(bp);
if (CHIP_REV_IS_FPGA(bp))
- printk(KERN_ERR PFX "FPGA detected\n");
+ pr_err("FPGA detected\n");
if (BP_NOMCP(bp) && (func == 0))
- printk(KERN_ERR PFX
- "MCP disabled, must load devices in order!\n");
+ pr_err("MCP disabled, must load devices in order!\n");
/* Set multi queue mode */
if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
- printk(KERN_ERR PFX
- "Multi disabled since int_mode requested is not MSI-X\n");
+ pr_err("Multi disabled since int_mode requested is not MSI-X\n");
multi_mode = ETH_RSS_MODE_DISABLED;
}
bp->multi_mode = multi_mode;
@@ -9345,7 +9356,7 @@ static u32 bnx2x_get_msglevel(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
- return bp->msglevel;
+ return bp->msg_enable;
}
static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
@@ -9353,7 +9364,7 @@ static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
struct bnx2x *bp = netdev_priv(dev);
if (capable(CAP_NET_ADMIN))
- bp->msglevel = level;
+ bp->msg_enable = level;
}
static int bnx2x_nway_reset(struct net_device *dev)
@@ -9962,12 +9973,14 @@ static int bnx2x_set_flags(struct net_device *dev, u32 data)
/* TPA requires Rx CSUM offloading */
if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
- if (!(dev->features & NETIF_F_LRO)) {
- dev->features |= NETIF_F_LRO;
- bp->flags |= TPA_ENABLE_FLAG;
- changed = 1;
- }
-
+ if (!disable_tpa) {
+ if (!(dev->features & NETIF_F_LRO)) {
+ dev->features |= NETIF_F_LRO;
+ bp->flags |= TPA_ENABLE_FLAG;
+ changed = 1;
+ }
+ } else
+ rc = -EINVAL;
} else if (dev->features & NETIF_F_LRO) {
dev->features &= ~NETIF_F_LRO;
bp->flags &= ~TPA_ENABLE_FLAG;
@@ -10425,7 +10438,8 @@ static int bnx2x_test_intr(struct bnx2x *bp)
config->hdr.length = 0;
if (CHIP_IS_E1(bp))
- config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
+ /* use last unicast entries */
+ config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
else
config->hdr.offset = BP_FUNC(bp);
config->hdr.client_id = bp->fp->cl_id;
@@ -10644,7 +10658,7 @@ static const struct {
((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
- (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
+ (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
@@ -11471,7 +11485,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
rx_mode = BNX2X_RX_MODE_PROMISC;
else if ((dev->flags & IFF_ALLMULTI) ||
- ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
+ ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
+ CHIP_IS_E1(bp)))
rx_mode = BNX2X_RX_MODE_ALLMULTI;
else { /* some multicasts */
@@ -11481,10 +11496,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
struct mac_configuration_cmd *config =
bnx2x_sp(bp, mcast_config);
- for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
- i++, mclist = mclist->next) {
-
+ i = 0;
+ netdev_for_each_mc_addr(mclist, dev) {
config->config_table[i].
cam_entry.msb_mac_addr =
swab16(*(u16 *)&mclist->dmi_addr[0]);
@@ -11512,6 +11525,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
cam_entry.middle_mac_addr,
config->config_table[i].
cam_entry.lsb_mac_addr);
+ i++;
}
old = config->hdr.length;
if (old > i) {
@@ -11553,10 +11567,7 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
memset(mc_filter, 0, 4 * MC_HASH_SIZE);
- for (i = 0, mclist = dev->mc_list;
- mclist && (i < dev->mc_count);
- i++, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, dev) {
DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
mclist->dmi_addr);
@@ -11731,7 +11742,7 @@ static void bnx2x_vlan_rx_register(struct net_device *dev,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -11755,7 +11766,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#ifdef BCM_VLAN
.ndo_vlan_rx_register = bnx2x_vlan_rx_register,
#endif
-#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
+#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = poll_bnx2x,
#endif
};
@@ -11776,20 +11787,18 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
rc = pci_enable_device(pdev);
if (rc) {
- printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
+ pr_err("Cannot enable PCI device, aborting\n");
goto err_out;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find PCI device base address,"
- " aborting\n");
+ pr_err("Cannot find PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- printk(KERN_ERR PFX "Cannot find second PCI device"
- " base address, aborting\n");
+ pr_err("Cannot find second PCI device base address, aborting\n");
rc = -ENODEV;
goto err_out_disable;
}
@@ -11797,8 +11806,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (atomic_read(&pdev->enable_cnt) == 1) {
rc = pci_request_regions(pdev, DRV_MODULE_NAME);
if (rc) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources,"
- " aborting\n");
+ pr_err("Cannot obtain PCI resources, aborting\n");
goto err_out_disable;
}
@@ -11808,16 +11816,14 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (bp->pm_cap == 0) {
- printk(KERN_ERR PFX "Cannot find power management"
- " capability, aborting\n");
+ pr_err("Cannot find power management capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (bp->pcie_cap == 0) {
- printk(KERN_ERR PFX "Cannot find PCI Express capability,"
- " aborting\n");
+ pr_err("Cannot find PCI Express capability, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -11825,15 +11831,13 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
bp->flags |= USING_DAC_FLAG;
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
- printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
- " failed, aborting\n");
+ pr_err("pci_set_consistent_dma_mask failed, aborting\n");
rc = -EIO;
goto err_out_release;
}
} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
- printk(KERN_ERR PFX "System does not support DMA,"
- " aborting\n");
+ pr_err("System does not support DMA, aborting\n");
rc = -EIO;
goto err_out_release;
}
@@ -11846,7 +11850,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
bp->regview = pci_ioremap_bar(pdev, 0);
if (!bp->regview) {
- printk(KERN_ERR PFX "Cannot map register space, aborting\n");
+ pr_err("Cannot map register space, aborting\n");
rc = -ENOMEM;
goto err_out_release;
}
@@ -11855,7 +11859,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
min_t(u64, BNX2X_DB_SIZE,
pci_resource_len(pdev, 2)));
if (!bp->doorbells) {
- printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
+ pr_err("Cannot map doorbell space, aborting\n");
rc = -ENOMEM;
goto err_out_unmap;
}
@@ -11957,8 +11961,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
offset = be32_to_cpu(sections[i].offset);
len = be32_to_cpu(sections[i].len);
if (offset + len > firmware->size) {
- printk(KERN_ERR PFX "Section %d length is out of "
- "bounds\n", i);
+ pr_err("Section %d length is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11970,8 +11973,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
if (be16_to_cpu(ops_offsets[i]) > num_ops) {
- printk(KERN_ERR PFX "Section offset %d is out of "
- "bounds\n", i);
+ pr_err("Section offset %d is out of bounds\n", i);
return -EINVAL;
}
}
@@ -11983,8 +11985,7 @@ static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
(fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
(fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
(fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
- printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
- " Should be %d.%d.%d.%d\n",
+ pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
fw_ver[0], fw_ver[1], fw_ver[2],
fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
BCM_5710_FW_MINOR_VERSION,
@@ -12034,18 +12035,17 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
target[i] = be16_to_cpu(source[i]);
}
-#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
- do { \
- u32 len = be32_to_cpu(fw_hdr->arr.len); \
- bp->arr = kmalloc(len, GFP_KERNEL); \
- if (!bp->arr) { \
- printk(KERN_ERR PFX "Failed to allocate %d bytes " \
- "for "#arr"\n", len); \
- goto lbl; \
- } \
- func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
- (u8 *)bp->arr, len); \
- } while (0)
+#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
+do { \
+ u32 len = be32_to_cpu(fw_hdr->arr.len); \
+ bp->arr = kmalloc(len, GFP_KERNEL); \
+ if (!bp->arr) { \
+ pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
+ goto lbl; \
+ } \
+ func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
+ (u8 *)bp->arr, len); \
+} while (0)
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
@@ -12058,18 +12058,17 @@ static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
else
fw_file_name = FW_FILE_NAME_E1H;
- printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
+ pr_info("Loading %s\n", fw_file_name);
rc = request_firmware(&bp->firmware, fw_file_name, dev);
if (rc) {
- printk(KERN_ERR PFX "Can't load firmware file %s\n",
- fw_file_name);
+ pr_err("Can't load firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
rc = bnx2x_check_firmware(bp);
if (rc) {
- printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
+ pr_err("Corrupt firmware file %s\n", fw_file_name);
goto request_firmware_exit;
}
@@ -12128,12 +12127,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* dev zeroed in init_etherdev */
dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
if (!dev) {
- printk(KERN_ERR PFX "Cannot allocate net device\n");
+ pr_err("Cannot allocate net device\n");
return -ENOMEM;
}
bp = netdev_priv(dev);
- bp->msglevel = debug;
+ bp->msg_enable = debug;
pci_set_drvdata(pdev, dev);
@@ -12150,7 +12149,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
/* Set init arrays */
rc = bnx2x_init_firmware(bp, &pdev->dev);
if (rc) {
- printk(KERN_ERR PFX "Error loading firmware\n");
+ pr_err("Error loading firmware\n");
goto init_one_exit;
}
@@ -12161,12 +12160,11 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
}
bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
- printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
- " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
- (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
- pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
- dev->base_addr, bp->pdev->irq);
- printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
+ netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
+ board_info[ent->driver_data].name,
+ (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
+ pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
+ dev->base_addr, bp->pdev->irq, dev->dev_addr);
return 0;
@@ -12194,7 +12192,7 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
struct bnx2x *bp;
if (!dev) {
- printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+ pr_err("BAD net device from bnx2x_init_one\n");
return;
}
bp = netdev_priv(dev);
@@ -12227,7 +12225,7 @@ static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
struct bnx2x *bp;
if (!dev) {
- printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+ pr_err("BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
@@ -12259,7 +12257,7 @@ static int bnx2x_resume(struct pci_dev *pdev)
int rc;
if (!dev) {
- printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
+ pr_err("BAD net device from bnx2x_init_one\n");
return -ENODEV;
}
bp = netdev_priv(dev);
@@ -12298,7 +12296,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
/* Release IRQs */
- bnx2x_free_irq(bp);
+ bnx2x_free_irq(bp, false);
if (CHIP_IS_E1(bp)) {
struct mac_configuration_cmd *config =
@@ -12462,17 +12460,17 @@ static int __init bnx2x_init(void)
{
int ret;
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
bnx2x_wq = create_singlethread_workqueue("bnx2x");
if (bnx2x_wq == NULL) {
- printk(KERN_ERR PFX "Cannot create workqueue\n");
+ pr_err("Cannot create workqueue\n");
return -ENOMEM;
}
ret = pci_register_driver(&bnx2x_pci_driver);
if (ret) {
- printk(KERN_ERR PFX "Cannot register driver\n");
+ pr_err("Cannot register driver\n");
destroy_workqueue(bnx2x_wq);
}
return ret;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index efa0e41bf3e..430c02267d7 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2615,6 +2615,17 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
unsigned char *arp_ptr;
__be32 sip, tip;
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ /*
+ * When using VLANs and bonding, dev and orig_dev may be
+ * incorrect if the physical interface supports VLAN
+ * acceleration. With this change ARP validation now
+ * works for hosts only reachable on the VLAN interface.
+ */
+ dev = vlan_dev_real_dev(dev);
+ orig_dev = dev_get_by_index_rcu(dev_net(skb->dev),skb->skb_iif);
+ }
+
if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
goto out;
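
The added block above unwraps a VLAN ingress device to its real lower device with vlan_dev_real_dev() and re-resolves the original device from skb->skb_iif, so ARP validation keeps working when the physical interface does VLAN acceleration. A toy illustration of the unwrap step, with stand-in structs rather than the kernel's struct net_device:

    #include <stdio.h>

    struct fake_dev {
            const char *name;
            int is_vlan;
            struct fake_dev *real_dev;   /* lower device for a VLAN interface */
    };

    static struct fake_dev *unwrap_vlan(struct fake_dev *dev)
    {
            return dev->is_vlan ? dev->real_dev : dev;
    }

    int main(void)
    {
            struct fake_dev eth0  = { "eth0", 0, NULL };
            struct fake_dev vlan0 = { "eth0.100", 1, &eth0 };

            printf("%s\n", unwrap_vlan(&vlan0)->name);  /* prints eth0 */
            return 0;
    }
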
@@ -3296,7 +3307,7 @@ static void bond_remove_proc_entry(struct bonding *bond)
/* Create the bonding directory under /proc/net, if doesn't exist yet.
* Caller must hold rtnl_lock.
*/
-static void bond_create_proc_dir(struct bond_net *bn)
+static void __net_init bond_create_proc_dir(struct bond_net *bn)
{
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
@@ -3309,7 +3320,7 @@ static void bond_create_proc_dir(struct bond_net *bn)
/* Destroy the bonding directory under /proc/net, if empty.
* Caller must hold rtnl_lock.
*/
-static void bond_destroy_proc_dir(struct bond_net *bn)
+static void __net_exit bond_destroy_proc_dir(struct bond_net *bn)
{
if (bn->proc_dir) {
remove_proc_entry(DRV_NAME, bn->net->proc_net);
@@ -3327,11 +3338,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
{
}
-static void bond_create_proc_dir(struct bond_net *bn)
+static inline void bond_create_proc_dir(struct bond_net *bn)
{
}
-static void bond_destroy_proc_dir(struct bond_net *bn)
+static inline void bond_destroy_proc_dir(struct bond_net *bn)
{
}
@@ -3731,7 +3742,7 @@ static int bond_close(struct net_device *bond_dev)
static struct net_device_stats *bond_get_stats(struct net_device *bond_dev)
{
struct bonding *bond = netdev_priv(bond_dev);
- struct net_device_stats *stats = &bond->stats;
+ struct net_device_stats *stats = &bond_dev->stats;
struct net_device_stats local_stats;
struct slave *slave;
int i;
@@ -4935,6 +4946,8 @@ int bond_create(struct net *net, const char *name)
}
res = register_netdevice(bond_dev);
+ if (res < 0)
+ goto out_netdev;
out:
rtnl_unlock();
@@ -4944,7 +4957,7 @@ out_netdev:
goto out;
}
-static int bond_net_init(struct net *net)
+static int __net_init bond_net_init(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
@@ -4956,7 +4969,7 @@ static int bond_net_init(struct net *net)
return 0;
}
-static void bond_net_exit(struct net *net)
+static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 558ec135252..257a7a4dfce 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -197,7 +197,6 @@ struct bonding {
s8 send_grat_arp;
s8 send_unsol_na;
s8 setup_by_slave;
- struct net_device_stats stats;
#ifdef CONFIG_PROC_FS
struct proc_dir_entry *proc_entry;
char proc_file_name[IFNAMSIZ];
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 166cc7e579c..a2f29a38798 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -342,6 +342,9 @@ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int mb, prio;
u32 reg_mid, reg_mcr;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
mb = get_tx_next_mb(priv);
prio = get_tx_next_prio(priv);
@@ -1070,6 +1073,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_bittiming = at91_set_bittiming;
priv->can.do_set_mode = at91_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
priv->reg_base = addr;
priv->dev = dev;
priv->clk = clk;
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 0ec1524523c..bf7f9ba2d90 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -318,6 +318,9 @@ static int bfin_can_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 val;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
/* fill id */
@@ -600,6 +603,7 @@ struct net_device *alloc_bfin_candev(void)
priv->can.bittiming_const = &bfin_can_bittiming_const;
priv->can.do_set_bittiming = bfin_can_set_bittiming;
priv->can.do_set_mode = bfin_can_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
return dev;
}
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index c1bb29f0322..904aa369f80 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -574,6 +574,7 @@ static const struct nla_policy can_policy[IFLA_CAN_MAX + 1] = {
[IFLA_CAN_BITTIMING_CONST]
= { .len = sizeof(struct can_bittiming_const) },
[IFLA_CAN_CLOCK] = { .len = sizeof(struct can_clock) },
+ [IFLA_CAN_BERR_COUNTER] = { .len = sizeof(struct can_berr_counter) },
};
static int can_changelink(struct net_device *dev,
@@ -592,6 +593,8 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
+ if (cm->flags & ~priv->ctrlmode_supported)
+ return -EOPNOTSUPP;
priv->ctrlmode &= ~cm->mask;
priv->ctrlmode |= cm->flags;
}
@@ -647,6 +650,8 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(u32)); /* IFLA_CAN_RESTART_MS */
size += sizeof(struct can_bittiming); /* IFLA_CAN_BITTIMING */
size += sizeof(struct can_clock); /* IFLA_CAN_CLOCK */
+ if (priv->do_get_berr_counter) /* IFLA_CAN_BERR_COUNTER */
+ size += sizeof(struct can_berr_counter);
if (priv->bittiming_const) /* IFLA_CAN_BITTIMING_CONST */
size += sizeof(struct can_bittiming_const);
@@ -657,6 +662,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
struct can_priv *priv = netdev_priv(dev);
struct can_ctrlmode cm = {.flags = priv->ctrlmode};
+ struct can_berr_counter bec;
enum can_state state = priv->state;
if (priv->do_get_state)
@@ -667,6 +673,8 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
NLA_PUT(skb, IFLA_CAN_BITTIMING,
sizeof(priv->bittiming), &priv->bittiming);
NLA_PUT(skb, IFLA_CAN_CLOCK, sizeof(cm), &priv->clock);
+ if (priv->do_get_berr_counter && !priv->do_get_berr_counter(dev, &bec))
+ NLA_PUT(skb, IFLA_CAN_BERR_COUNTER, sizeof(bec), &bec);
if (priv->bittiming_const)
NLA_PUT(skb, IFLA_CAN_BITTIMING_CONST,
sizeof(*priv->bittiming_const), priv->bittiming_const);
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
index 1a72ca066a1..f8cc168ec76 100644
--- a/drivers/net/can/mcp251x.c
+++ b/drivers/net/can/mcp251x.c
@@ -180,6 +180,14 @@
#define RXBEID0_OFF 4
#define RXBDLC_OFF 5
#define RXBDAT_OFF 6
+#define RXFSIDH(n) ((n) * 4)
+#define RXFSIDL(n) ((n) * 4 + 1)
+#define RXFEID8(n) ((n) * 4 + 2)
+#define RXFEID0(n) ((n) * 4 + 3)
+#define RXMSIDH(n) ((n) * 4 + 0x20)
+#define RXMSIDL(n) ((n) * 4 + 0x21)
+#define RXMEID8(n) ((n) * 4 + 0x22)
+#define RXMEID0(n) ((n) * 4 + 0x23)
#define GET_BYTE(val, byte) \
(((val) >> ((byte) * 8)) & 0xff)
@@ -219,7 +227,8 @@ struct mcp251x_priv {
struct net_device *net;
struct spi_device *spi;
- struct mutex spi_lock; /* SPI buffer lock */
+ struct mutex mcp_lock; /* SPI device lock */
+
u8 *spi_tx_buf;
u8 *spi_rx_buf;
dma_addr_t spi_tx_dma;
@@ -227,11 +236,11 @@ struct mcp251x_priv {
struct sk_buff *tx_skb;
int tx_len;
+
struct workqueue_struct *wq;
struct work_struct tx_work;
- struct work_struct irq_work;
- struct completion awake;
- int wake;
+ struct work_struct restart_work;
+
int force_quit;
int after_suspend;
#define AFTER_SUSPEND_UP 1
@@ -245,7 +254,8 @@ static void mcp251x_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
- net->stats.tx_errors++;
+ if (priv->tx_skb || priv->tx_len)
+ net->stats.tx_errors++;
if (priv->tx_skb)
dev_kfree_skb(priv->tx_skb);
if (priv->tx_len)
@@ -300,16 +310,12 @@ static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
u8 val = 0;
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_READ;
priv->spi_tx_buf[1] = reg;
mcp251x_spi_trans(spi, 3);
val = priv->spi_rx_buf[2];
- mutex_unlock(&priv->spi_lock);
-
return val;
}
@@ -317,15 +323,11 @@ static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = val;
mcp251x_spi_trans(spi, 3);
-
- mutex_unlock(&priv->spi_lock);
}
static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
@@ -333,16 +335,12 @@ static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
priv->spi_tx_buf[1] = reg;
priv->spi_tx_buf[2] = mask;
priv->spi_tx_buf[3] = val;
mcp251x_spi_trans(spi, 4);
-
- mutex_unlock(&priv->spi_lock);
}
static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
@@ -358,10 +356,8 @@ static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
buf[i]);
} else {
- mutex_lock(&priv->spi_lock);
memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
mcp251x_spi_trans(spi, TXBDAT_OFF + len);
- mutex_unlock(&priv->spi_lock);
}
}
@@ -408,13 +404,9 @@ static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
for (; i < (RXBDAT_OFF + len); i++)
buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
} else {
- mutex_lock(&priv->spi_lock);
-
priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
-
- mutex_unlock(&priv->spi_lock);
}
}
@@ -467,21 +459,6 @@ static void mcp251x_hw_sleep(struct spi_device *spi)
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
}
-static void mcp251x_hw_wakeup(struct spi_device *spi)
-{
- struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
-
- priv->wake = 1;
-
- /* Can only wake up by generating a wake-up interrupt. */
- mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE);
- mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF);
-
- /* Wait until the device is awake */
- if (!wait_for_completion_timeout(&priv->awake, HZ))
- dev_err(&spi->dev, "MCP251x didn't wake-up\n");
-}
-
static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
struct net_device *net)
{
@@ -490,16 +467,11 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
if (priv->tx_skb || priv->tx_len) {
dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
- netif_stop_queue(net);
return NETDEV_TX_BUSY;
}
- if (skb->len != sizeof(struct can_frame)) {
- dev_err(&spi->dev, "dropping packet - bad length\n");
- dev_kfree_skb(skb);
- net->stats.tx_dropped++;
+ if (can_dropped_invalid_skb(net, skb))
return NETDEV_TX_OK;
- }
netif_stop_queue(net);
priv->tx_skb = skb;
@@ -515,12 +487,13 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
+ mcp251x_clean(net);
/* We have to delay work since SPI I/O may sleep */
priv->can.state = CAN_STATE_ERROR_ACTIVE;
priv->restart_tx = 1;
if (priv->can.restart_ms == 0)
priv->after_suspend = AFTER_SUSPEND_RESTART;
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
break;
default:
return -EOPNOTSUPP;
@@ -529,7 +502,7 @@ static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
return 0;
}
-static void mcp251x_set_normal_mode(struct spi_device *spi)
+static int mcp251x_set_normal_mode(struct spi_device *spi)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
unsigned long timeout;
@@ -537,12 +510,14 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
/* Enable interrupts */
mcp251x_write_reg(spi, CANINTE,
CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
- CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE |
- CANINTF_MERRF);
+ CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE);
if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
/* Put device into loopback mode */
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
+ } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
+ /* Put device into listen-only mode */
+ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY);
} else {
/* Put device into normal mode */
mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
@@ -554,11 +529,12 @@ static void mcp251x_set_normal_mode(struct spi_device *spi)
if (time_after(jiffies, timeout)) {
dev_err(&spi->dev, "MCP251x didn't"
" enter in normal mode\n");
- return;
+ return -EBUSY;
}
}
}
priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ return 0;
}
static int mcp251x_do_set_bittiming(struct net_device *net)
@@ -589,33 +565,39 @@ static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
{
mcp251x_do_set_bittiming(net);
- /* Enable RX0->RX1 buffer roll over and disable filters */
- mcp251x_write_bits(spi, RXBCTRL(0),
- RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1,
- RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
- mcp251x_write_bits(spi, RXBCTRL(1),
- RXBCTRL_RXM0 | RXBCTRL_RXM1,
- RXBCTRL_RXM0 | RXBCTRL_RXM1);
+ mcp251x_write_reg(spi, RXBCTRL(0),
+ RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
+ mcp251x_write_reg(spi, RXBCTRL(1),
+ RXBCTRL_RXM0 | RXBCTRL_RXM1);
return 0;
}
-static void mcp251x_hw_reset(struct spi_device *spi)
+static int mcp251x_hw_reset(struct spi_device *spi)
{
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
int ret;
-
- mutex_lock(&priv->spi_lock);
+ unsigned long timeout;
priv->spi_tx_buf[0] = INSTRUCTION_RESET;
-
ret = spi_write(spi, priv->spi_tx_buf, 1);
-
- mutex_unlock(&priv->spi_lock);
-
- if (ret)
+ if (ret) {
dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
+ return -EIO;
+ }
+
/* Wait for reset to finish */
+ timeout = jiffies + HZ;
mdelay(10);
+ while ((mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK)
+ != CANCTRL_REQOP_CONF) {
+ schedule();
+ if (time_after(jiffies, timeout)) {
+ dev_err(&spi->dev, "MCP251x didn't"
+ " enter in conf mode after reset\n");
+ return -EBUSY;
+ }
+ }
+ return 0;
}
static int mcp251x_hw_probe(struct spi_device *spi)
@@ -639,63 +621,17 @@ static int mcp251x_hw_probe(struct spi_device *spi)
return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
}
-static irqreturn_t mcp251x_can_isr(int irq, void *dev_id)
-{
- struct net_device *net = (struct net_device *)dev_id;
- struct mcp251x_priv *priv = netdev_priv(net);
-
- /* Schedule bottom half */
- if (!work_pending(&priv->irq_work))
- queue_work(priv->wq, &priv->irq_work);
-
- return IRQ_HANDLED;
-}
-
-static int mcp251x_open(struct net_device *net)
+static void mcp251x_open_clean(struct net_device *net)
{
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
- int ret;
-
- ret = open_candev(net);
- if (ret) {
- dev_err(&spi->dev, "unable to set initial baudrate!\n");
- return ret;
- }
+ free_irq(spi->irq, priv);
+ mcp251x_hw_sleep(spi);
if (pdata->transceiver_enable)
- pdata->transceiver_enable(1);
-
- priv->force_quit = 0;
- priv->tx_skb = NULL;
- priv->tx_len = 0;
-
- ret = request_irq(spi->irq, mcp251x_can_isr,
- IRQF_TRIGGER_FALLING, DEVICE_NAME, net);
- if (ret) {
- dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
- close_candev(net);
- return ret;
- }
-
- mcp251x_hw_wakeup(spi);
- mcp251x_hw_reset(spi);
- ret = mcp251x_setup(net, priv, spi);
- if (ret) {
- free_irq(spi->irq, net);
- mcp251x_hw_sleep(spi);
- if (pdata->transceiver_enable)
- pdata->transceiver_enable(0);
- close_candev(net);
- return ret;
- }
- mcp251x_set_normal_mode(spi);
- netif_wake_queue(net);
-
- return 0;
+ pdata->transceiver_enable(0);
+ close_candev(net);
}
static int mcp251x_stop(struct net_device *net)
@@ -706,17 +642,19 @@ static int mcp251x_stop(struct net_device *net)
close_candev(net);
+ priv->force_quit = 1;
+ free_irq(spi->irq, priv);
+ destroy_workqueue(priv->wq);
+ priv->wq = NULL;
+
+ mutex_lock(&priv->mcp_lock);
+
/* Disable and clear pending interrupts */
mcp251x_write_reg(spi, CANINTE, 0x00);
mcp251x_write_reg(spi, CANINTF, 0x00);
- priv->force_quit = 1;
- free_irq(spi->irq, net);
- flush_workqueue(priv->wq);
-
mcp251x_write_reg(spi, TXBCTRL(0), 0);
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
+ mcp251x_clean(net);
mcp251x_hw_sleep(spi);
@@ -725,9 +663,27 @@ static int mcp251x_stop(struct net_device *net)
priv->can.state = CAN_STATE_STOPPED;
+ mutex_unlock(&priv->mcp_lock);
+
return 0;
}
+static void mcp251x_error_skb(struct net_device *net, int can_id, int data1)
+{
+ struct sk_buff *skb;
+ struct can_frame *frame;
+
+ skb = alloc_can_err_skb(net, &frame);
+ if (skb) {
+ frame->can_id = can_id;
+ frame->data[1] = data1;
+ netif_rx(skb);
+ } else {
+ dev_err(&net->dev,
+ "cannot allocate error skb\n");
+ }
+}
+
static void mcp251x_tx_work_handler(struct work_struct *ws)
{
struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
@@ -736,33 +692,32 @@ static void mcp251x_tx_work_handler(struct work_struct *ws)
struct net_device *net = priv->net;
struct can_frame *frame;
+ mutex_lock(&priv->mcp_lock);
if (priv->tx_skb) {
- frame = (struct can_frame *)priv->tx_skb->data;
-
if (priv->can.state == CAN_STATE_BUS_OFF) {
mcp251x_clean(net);
- netif_wake_queue(net);
- return;
+ } else {
+ frame = (struct can_frame *)priv->tx_skb->data;
+
+ if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
+ frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
+ mcp251x_hw_tx(spi, frame, 0);
+ priv->tx_len = 1 + frame->can_dlc;
+ can_put_echo_skb(priv->tx_skb, net, 0);
+ priv->tx_skb = NULL;
}
- if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
- frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
- mcp251x_hw_tx(spi, frame, 0);
- priv->tx_len = 1 + frame->can_dlc;
- can_put_echo_skb(priv->tx_skb, net, 0);
- priv->tx_skb = NULL;
}
+ mutex_unlock(&priv->mcp_lock);
}
-static void mcp251x_irq_work_handler(struct work_struct *ws)
+static void mcp251x_restart_work_handler(struct work_struct *ws)
{
struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
- irq_work);
+ restart_work);
struct spi_device *spi = priv->spi;
struct net_device *net = priv->net;
- u8 txbnctrl;
- u8 intf;
- enum can_state new_state;
+ mutex_lock(&priv->mcp_lock);
if (priv->after_suspend) {
mdelay(10);
mcp251x_hw_reset(spi);
@@ -771,45 +726,54 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
mcp251x_set_normal_mode(spi);
} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
netif_device_attach(net);
- /* Clean since we lost tx buffer */
- if (priv->tx_skb || priv->tx_len) {
- mcp251x_clean(net);
- netif_wake_queue(net);
- }
+ mcp251x_clean(net);
mcp251x_set_normal_mode(spi);
+ netif_wake_queue(net);
} else {
mcp251x_hw_sleep(spi);
}
priv->after_suspend = 0;
+ priv->force_quit = 0;
}
- if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF)
- return;
+ if (priv->restart_tx) {
+ priv->restart_tx = 0;
+ mcp251x_write_reg(spi, TXBCTRL(0), 0);
+ mcp251x_clean(net);
+ netif_wake_queue(net);
+ mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0);
+ }
+ mutex_unlock(&priv->mcp_lock);
+}
- while (!priv->force_quit && !freezing(current)) {
- u8 eflag = mcp251x_read_reg(spi, EFLG);
- int can_id = 0, data1 = 0;
+static irqreturn_t mcp251x_can_ist(int irq, void *dev_id)
+{
+ struct mcp251x_priv *priv = dev_id;
+ struct spi_device *spi = priv->spi;
+ struct net_device *net = priv->net;
- mcp251x_write_reg(spi, EFLG, 0x00);
+ mutex_lock(&priv->mcp_lock);
+ while (!priv->force_quit) {
+ enum can_state new_state;
+ u8 intf = mcp251x_read_reg(spi, CANINTF);
+ u8 eflag;
+ int can_id = 0, data1 = 0;
- if (priv->restart_tx) {
- priv->restart_tx = 0;
- mcp251x_write_reg(spi, TXBCTRL(0), 0);
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
- netif_wake_queue(net);
- can_id |= CAN_ERR_RESTARTED;
+ if (intf & CANINTF_RX0IF) {
+ mcp251x_hw_rx(spi, 0);
+ /* Free one buffer ASAP */
+ mcp251x_write_bits(spi, CANINTF, intf & CANINTF_RX0IF,
+ 0x00);
}
- if (priv->wake) {
- /* Wait whilst the device wakes up */
- mdelay(10);
- priv->wake = 0;
- }
+ if (intf & CANINTF_RX1IF)
+ mcp251x_hw_rx(spi, 1);
- intf = mcp251x_read_reg(spi, CANINTF);
mcp251x_write_bits(spi, CANINTF, intf, 0x00);
+ eflag = mcp251x_read_reg(spi, EFLG);
+ mcp251x_write_reg(spi, EFLG, 0x00);
+
/* Update can state */
if (eflag & EFLG_TXBO) {
new_state = CAN_STATE_BUS_OFF;
@@ -850,59 +814,31 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
}
priv->can.state = new_state;
- if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) {
- struct sk_buff *skb;
- struct can_frame *frame;
-
- /* Create error frame */
- skb = alloc_can_err_skb(net, &frame);
- if (skb) {
- /* Set error frame flags based on bus state */
- frame->can_id = can_id;
- frame->data[1] = data1;
-
- /* Update net stats for overflows */
- if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
- if (eflag & EFLG_RX0OVR)
- net->stats.rx_over_errors++;
- if (eflag & EFLG_RX1OVR)
- net->stats.rx_over_errors++;
- frame->can_id |= CAN_ERR_CRTL;
- frame->data[1] |=
- CAN_ERR_CRTL_RX_OVERFLOW;
- }
-
- netif_rx(skb);
- } else {
- dev_info(&spi->dev,
- "cannot allocate error skb\n");
+ if (intf & CANINTF_ERRIF) {
+ /* Handle overflow counters */
+ if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
+ if (eflag & EFLG_RX0OVR)
+ net->stats.rx_over_errors++;
+ if (eflag & EFLG_RX1OVR)
+ net->stats.rx_over_errors++;
+ can_id |= CAN_ERR_CRTL;
+ data1 |= CAN_ERR_CRTL_RX_OVERFLOW;
}
+ mcp251x_error_skb(net, can_id, data1);
}
if (priv->can.state == CAN_STATE_BUS_OFF) {
if (priv->can.restart_ms == 0) {
+ priv->force_quit = 1;
can_bus_off(net);
mcp251x_hw_sleep(spi);
- return;
+ break;
}
}
if (intf == 0)
break;
- if (intf & CANINTF_WAKIF)
- complete(&priv->awake);
-
- if (intf & CANINTF_MERRF) {
- /* If there are pending Tx buffers, restart queue */
- txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0));
- if (!(txbnctrl & TXBCTRL_TXREQ)) {
- if (priv->tx_skb || priv->tx_len)
- mcp251x_clean(net);
- netif_wake_queue(net);
- }
- }
-
if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
net->stats.tx_packets++;
net->stats.tx_bytes += priv->tx_len - 1;
@@ -913,12 +849,66 @@ static void mcp251x_irq_work_handler(struct work_struct *ws)
netif_wake_queue(net);
}
- if (intf & CANINTF_RX0IF)
- mcp251x_hw_rx(spi, 0);
+ }
+ mutex_unlock(&priv->mcp_lock);
+ return IRQ_HANDLED;
+}
- if (intf & CANINTF_RX1IF)
- mcp251x_hw_rx(spi, 1);
+static int mcp251x_open(struct net_device *net)
+{
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
+ struct mcp251x_platform_data *pdata = spi->dev.platform_data;
+ int ret;
+
+ ret = open_candev(net);
+ if (ret) {
+ dev_err(&spi->dev, "unable to set initial baudrate!\n");
+ return ret;
+ }
+
+ mutex_lock(&priv->mcp_lock);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(1);
+
+ priv->force_quit = 0;
+ priv->tx_skb = NULL;
+ priv->tx_len = 0;
+
+ ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
+ IRQF_TRIGGER_FALLING, DEVICE_NAME, priv);
+ if (ret) {
+ dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
+ if (pdata->transceiver_enable)
+ pdata->transceiver_enable(0);
+ close_candev(net);
+ goto open_unlock;
+ }
+
+ priv->wq = create_freezeable_workqueue("mcp251x_wq");
+ INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
+ INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
+
+ ret = mcp251x_hw_reset(spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ ret = mcp251x_setup(net, priv, spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
}
+ ret = mcp251x_set_normal_mode(spi);
+ if (ret) {
+ mcp251x_open_clean(net);
+ goto open_unlock;
+ }
+ netif_wake_queue(net);
+
+open_unlock:
+ mutex_unlock(&priv->mcp_lock);
+ return ret;
}
static const struct net_device_ops mcp251x_netdev_ops = {
@@ -952,11 +942,13 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
priv->can.bittiming_const = &mcp251x_bittiming_const;
priv->can.do_set_mode = mcp251x_do_set_mode;
priv->can.clock.freq = pdata->oscillator_frequency / 2;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY;
priv->net = net;
dev_set_drvdata(&spi->dev, priv);
priv->spi = spi;
- mutex_init(&priv->spi_lock);
+ mutex_init(&priv->mcp_lock);
/* If requested, allocate DMA buffers */
if (mcp251x_enable_dma) {
@@ -1005,18 +997,12 @@ static int __devinit mcp251x_can_probe(struct spi_device *spi)
SET_NETDEV_DEV(net, &spi->dev);
- priv->wq = create_freezeable_workqueue("mcp251x_wq");
-
- INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
- INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler);
-
- init_completion(&priv->awake);
-
/* Configure the SPI bus */
spi->mode = SPI_MODE_0;
spi->bits_per_word = 8;
spi_setup(spi);
+	/* It is OK not to lock the MCP here, nobody else knows about it yet */
if (!mcp251x_hw_probe(spi)) {
dev_info(&spi->dev, "Probe failed\n");
goto error_probe;
@@ -1059,10 +1045,6 @@ static int __devexit mcp251x_can_remove(struct spi_device *spi)
unregister_candev(net);
free_candev(net);
- priv->force_quit = 1;
- flush_workqueue(priv->wq);
- destroy_workqueue(priv->wq);
-
if (mcp251x_enable_dma) {
dma_free_coherent(&spi->dev, PAGE_SIZE,
priv->spi_tx_buf, priv->spi_tx_dma);
@@ -1084,6 +1066,12 @@ static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
struct net_device *net = priv->net;
+ priv->force_quit = 1;
+ disable_irq(spi->irq);
+ /*
+ * Note: at this point neither IST nor workqueues are running.
+ * open/stop cannot be called anyway so locking is not needed
+ */
if (netif_running(net)) {
netif_device_detach(net);
@@ -1110,16 +1098,18 @@ static int mcp251x_can_resume(struct spi_device *spi)
if (priv->after_suspend & AFTER_SUSPEND_POWER) {
pdata->power_enable(1);
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
} else {
if (priv->after_suspend & AFTER_SUSPEND_UP) {
if (pdata->transceiver_enable)
pdata->transceiver_enable(1);
- queue_work(priv->wq, &priv->irq_work);
+ queue_work(priv->wq, &priv->restart_work);
} else {
priv->after_suspend = 0;
}
}
+ priv->force_quit = 0;
+ enable_irq(spi->irq);
return 0;
}
#else
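The mcp251x hunks above replace the hard ISR plus workqueue arrangement with a threaded interrupt handler, so the blocking SPI transfers can run directly in the handler thread under the new mcp_lock. Below is a minimal illustrative sketch of that pattern, not taken from the driver; the foo_* names are hypothetical and only the stock request_threaded_irq() API is assumed.

	#include <linux/interrupt.h>
	#include <linux/mutex.h>

	struct foo_priv {
		struct mutex lock;	/* serializes open/stop against the IRQ thread */
		int irq;
	};

	static irqreturn_t foo_irq_thread(int irq, void *dev_id)
	{
		struct foo_priv *priv = dev_id;

		mutex_lock(&priv->lock);
		/* sleeping bus transfers (SPI, I2C, ...) are allowed here */
		mutex_unlock(&priv->lock);
		return IRQ_HANDLED;
	}

	static int foo_request_irq(struct foo_priv *priv)
	{
		/*
		 * NULL hard handler: the core only wakes the thread.  Newer
		 * kernels additionally expect IRQF_ONESHOT in this case.
		 */
		return request_threaded_irq(priv->irq, NULL, foo_irq_thread,
					    IRQF_TRIGGER_FALLING, "foo", priv);
	}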
diff --git a/drivers/net/can/mscan/Kconfig b/drivers/net/can/mscan/Kconfig
index cd0f2d6f375..27d1d398e25 100644
--- a/drivers/net/can/mscan/Kconfig
+++ b/drivers/net/can/mscan/Kconfig
@@ -11,12 +11,13 @@ if CAN_MSCAN
config CAN_MPC5XXX
tristate "Freescale MPC5xxx onboard CAN controller"
- depends on PPC_MPC52xx
+ depends on (PPC_MPC52xx || PPC_MPC512x)
---help---
If you say yes here you get support for Freescale's MPC5xxx
- onboard CAN controller.
+ onboard CAN controller. Currently, the MPC5200, MPC5200B and
+ MPC5121 (Rev. 2 and later) are supported.
- This driver can also be built as a module. If so, the module
+ This driver can also be built as a module. If so, the module
will be called mscan-mpc5xxx.ko.
endif
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index 1de6f6349b1..03e7c48465a 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -29,6 +29,7 @@
#include <linux/can/dev.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
+#include <linux/clk.h>
#include <linux/io.h>
#include <asm/mpc52xx.h>
@@ -36,22 +37,21 @@
#define DRV_NAME "mpc5xxx_can"
-static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
+struct mpc5xxx_can_data {
+ unsigned int type;
+ u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+ int *mscan_clksrc);
+};
+
+#ifdef CONFIG_PPC_MPC52xx
+static struct of_device_id __devinitdata mpc52xx_cdm_ids[] = {
{ .compatible = "fsl,mpc5200-cdm", },
{}
};
-/*
- * Get frequency of the MSCAN clock source
- *
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
- * can be selected. According to the MPC5200 user's manual, the oscillator
- * clock is the better choice as it has less jitter but due to a hardware
- * bug, it can not be selected for the old MPC5200 Rev. A chips.
- */
-
-static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
- int clock_src)
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
{
unsigned int pvr;
struct mpc52xx_cdm __iomem *cdm;
@@ -61,21 +61,33 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
pvr = mfspr(SPRN_PVR);
- freq = mpc5xxx_get_bus_frequency(of->node);
+ /*
+ * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
+ * (IP_CLK) can be selected as MSCAN clock source. According to
+ * the MPC5200 user's manual, the oscillator clock is the better
+ * choice as it has less jitter. For this reason, it is selected
+ * by default. Unfortunately, it can not be selected for the old
+ * MPC5200 Rev. A chips due to a hardware bug (check errata).
+ */
+ if (clock_name && strcmp(clock_name, "ip") == 0)
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+ else
+ *mscan_clksrc = MSCAN_CLKSRC_XTAL;
+
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
if (!freq)
return 0;
- if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
+ if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
return freq;
/* Determine SYS_XTAL_IN frequency from the clock domain settings */
np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
if (!np_cdm) {
- dev_err(&of->dev, "can't get clock node!\n");
+ dev_err(&ofdev->dev, "can't get clock node!\n");
return 0;
}
cdm = of_iomap(np_cdm, 0);
- of_node_put(np_cdm);
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
@@ -84,26 +96,174 @@ static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
freq *= (val & (1 << 5)) ? 8 : 4;
freq /= (val & (1 << 6)) ? 12 : 16;
+ of_node_put(np_cdm);
iounmap(cdm);
return freq;
}
+#else /* !CONFIG_PPC_MPC52xx */
+static u32 __devinit mpc52xx_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC52xx */
+
+#ifdef CONFIG_PPC_MPC512x
+struct mpc512x_clockctl {
+ u32 spmr; /* System PLL Mode Reg */
+ u32 sccr[2]; /* System Clk Ctrl Reg 1 & 2 */
+ u32 scfr1; /* System Clk Freq Reg 1 */
+ u32 scfr2; /* System Clk Freq Reg 2 */
+ u32 reserved;
+ u32 bcr; /* Bread Crumb Reg */
+ u32 pccr[12]; /* PSC Clk Ctrl Reg 0-11 */
+ u32 spccr; /* SPDIF Clk Ctrl Reg */
+ u32 cccr; /* CFM Clk Ctrl Reg */
+ u32 dccr; /* DIU Clk Cnfg Reg */
+ u32 mccr[4]; /* MSCAN Clk Ctrl Reg 1-3 */
+};
+
+static struct of_device_id __devinitdata mpc512x_clock_ids[] = {
+ { .compatible = "fsl,mpc5121-clock", },
+ {}
+};
+
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ struct mpc512x_clockctl __iomem *clockctl;
+ struct device_node *np_clock;
+ struct clk *sys_clk, *ref_clk;
+ int plen, clockidx, clocksrc = -1;
+ u32 sys_freq, val, clockdiv = 1, freq = 0;
+ const u32 *pval;
+
+ np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
+ if (!np_clock) {
+ dev_err(&ofdev->dev, "couldn't find clock node\n");
+		return 0;
+ }
+ clockctl = of_iomap(np_clock, 0);
+ if (!clockctl) {
+ dev_err(&ofdev->dev, "couldn't map clock registers\n");
+ return 0;
+ }
+
+ /* Determine the MSCAN device index from the physical address */
+ pval = of_get_property(ofdev->node, "reg", &plen);
+ BUG_ON(!pval || plen < sizeof(*pval));
+ clockidx = (*pval & 0x80) ? 1 : 0;
+ if (*pval & 0x2000)
+ clockidx += 2;
+
+ /*
+ * Clock source and divider selection: 3 different clock sources
+ * can be selected: "ip", "ref" or "sys". For the latter two, a
+ * clock divider can be defined as well. If the clock source is
+ * not specified by the device tree, we first try to find an
+ * optimal CAN source clock based on the system clock. If that
+	 * is not possible, the reference clock will be used.
+ */
+ if (clock_name && !strcmp(clock_name, "ip")) {
+ *mscan_clksrc = MSCAN_CLKSRC_IPS;
+ freq = mpc5xxx_get_bus_frequency(ofdev->node);
+ } else {
+ *mscan_clksrc = MSCAN_CLKSRC_BUS;
+
+ pval = of_get_property(ofdev->node,
+ "fsl,mscan-clock-divider", &plen);
+ if (pval && plen == sizeof(*pval))
+ clockdiv = *pval;
+ if (!clockdiv)
+ clockdiv = 1;
+
+ if (!clock_name || !strcmp(clock_name, "sys")) {
+ sys_clk = clk_get(&ofdev->dev, "sys_clk");
+ if (!sys_clk) {
+ dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+ goto exit_unmap;
+ }
+ /* Get and round up/down sys clock rate */
+ sys_freq = 1000000 *
+ ((clk_get_rate(sys_clk) + 499999) / 1000000);
+
+ if (!clock_name) {
+ /* A multiple of 16 MHz would be optimal */
+ if ((sys_freq % 16000000) == 0) {
+ clocksrc = 0;
+ clockdiv = sys_freq / 16000000;
+ freq = sys_freq / clockdiv;
+ }
+ } else {
+ clocksrc = 0;
+ freq = sys_freq / clockdiv;
+ }
+ }
+
+ if (clocksrc < 0) {
+ ref_clk = clk_get(&ofdev->dev, "ref_clk");
+ if (!ref_clk) {
+ dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+ goto exit_unmap;
+ }
+ clocksrc = 1;
+ freq = clk_get_rate(ref_clk) / clockdiv;
+ }
+ }
+
+ /* Disable clock */
+ out_be32(&clockctl->mccr[clockidx], 0x0);
+ if (clocksrc >= 0) {
+ /* Set source and divider */
+ val = (clocksrc << 14) | ((clockdiv - 1) << 17);
+ out_be32(&clockctl->mccr[clockidx], val);
+ /* Enable clock */
+ out_be32(&clockctl->mccr[clockidx], val | 0x10000);
+ }
+
+ /* Enable MSCAN clock domain */
+ val = in_be32(&clockctl->sccr[1]);
+ if (!(val & (1 << 25)))
+ out_be32(&clockctl->sccr[1], val | (1 << 25));
+
+ dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
+ *mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
+ clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+
+exit_unmap:
+ of_node_put(np_clock);
+ iounmap(clockctl);
+
+ return freq;
+}
+#else /* !CONFIG_PPC_MPC512x */
+static u32 __devinit mpc512x_can_get_clock(struct of_device *ofdev,
+ const char *clock_name,
+ int *mscan_clksrc)
+{
+ return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
const struct of_device_id *id)
{
+ struct mpc5xxx_can_data *data = (struct mpc5xxx_can_data *)id->data;
struct device_node *np = ofdev->node;
struct net_device *dev;
struct mscan_priv *priv;
void __iomem *base;
- const char *clk_src;
- int err, irq, clock_src;
+ const char *clock_name = NULL;
+ int irq, mscan_clksrc = 0;
+ int err = -ENOMEM;
- base = of_iomap(ofdev->node, 0);
+ base = of_iomap(np, 0);
if (!base) {
dev_err(&ofdev->dev, "couldn't ioremap\n");
- err = -ENOMEM;
- goto exit_release_mem;
+ return err;
}
irq = irq_of_parse_and_map(np, 0);
@@ -114,37 +274,27 @@ static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
}
dev = alloc_mscandev();
- if (!dev) {
- err = -ENOMEM;
+ if (!dev)
goto exit_dispose_irq;
- }
priv = netdev_priv(dev);
priv->reg_base = base;
dev->irq = irq;
- /*
- * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
- * (IP_CLK) can be selected as MSCAN clock source. According to
- * the MPC5200 user's manual, the oscillator clock is the better
- * choice as it has less jitter. For this reason, it is selected
- * by default.
- */
- clk_src = of_get_property(np, "fsl,mscan-clock-source", NULL);
- if (clk_src && strcmp(clk_src, "ip") == 0)
- clock_src = MSCAN_CLKSRC_BUS;
- else
- clock_src = MSCAN_CLKSRC_XTAL;
- priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
+ clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
+
+ BUG_ON(!data);
+ priv->type = data->type;
+ priv->can.clock.freq = data->get_clock(ofdev, clock_name,
+ &mscan_clksrc);
if (!priv->can.clock.freq) {
- dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
- err = -ENODEV;
+ dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
goto exit_free_mscan;
}
SET_NETDEV_DEV(dev, &ofdev->dev);
- err = register_mscandev(dev, clock_src);
+ err = register_mscandev(dev, mscan_clksrc);
if (err) {
dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
DRV_NAME, err);
@@ -164,7 +314,7 @@ exit_dispose_irq:
irq_dispose_mapping(irq);
exit_unmap_mem:
iounmap(base);
-exit_release_mem:
+
return err;
}
@@ -225,8 +375,20 @@ static int mpc5xxx_can_resume(struct of_device *ofdev)
}
#endif
+static struct mpc5xxx_can_data __devinitdata mpc5200_can_data = {
+ .type = MSCAN_TYPE_MPC5200,
+ .get_clock = mpc52xx_can_get_clock,
+};
+
+static struct mpc5xxx_can_data __devinitdata mpc5121_can_data = {
+ .type = MSCAN_TYPE_MPC5121,
+ .get_clock = mpc512x_can_get_clock,
+};
+
static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
- {.compatible = "fsl,mpc5200-mscan"},
+ { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
+ /* Note that only MPC5121 Rev. 2 (and later) is supported */
+ { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
{},
};
@@ -255,5 +417,5 @@ static void __exit mpc5xxx_can_exit(void)
module_exit(mpc5xxx_can_exit);
MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
+MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 07346f880ca..6b7dd578d41 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -4,7 +4,7 @@
* Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
* Varma Electronics Oy
* Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- * Copytight (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
+ * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the version 2 of the GNU General Public License
@@ -152,6 +152,12 @@ static int mscan_start(struct net_device *dev)
priv->shadow_canrier = 0;
priv->flags = 0;
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ /* Clear pending bus-off condition */
+ if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ }
+
err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
if (err)
return err;
@@ -163,8 +169,29 @@ static int mscan_start(struct net_device *dev)
out_8(&regs->cantier, 0);
/* Enable receive interrupts. */
- out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
- MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+
+ return 0;
+}
+
+static int mscan_restart(struct net_device *dev)
+{
+ struct mscan_priv *priv = netdev_priv(dev);
+
+ if (priv->type == MSCAN_TYPE_MPC5121) {
+ struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
+
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+ WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
+ "bus-off state expected");
+ out_8(&regs->canmisc, MSCAN_BOHOLD);
+ /* Re-enable receive interrupts. */
+ out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
+ } else {
+ if (priv->can.state <= CAN_STATE_BUS_OFF)
+ mscan_set_mode(dev, MSCAN_INIT_MODE);
+ return mscan_start(dev);
+ }
return 0;
}
@@ -177,8 +204,8 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
int i, rtr, buf_id;
u32 can_id;
- if (frame->can_dlc > 8)
- return -EINVAL;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
out_8(&regs->cantier, 0);
@@ -359,9 +386,12 @@ static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
* automatically. To avoid that we stop the chip doing
* a light-weight stop (we are in irq-context).
*/
- out_8(&regs->cantier, 0);
- out_8(&regs->canrier, 0);
- setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
+ if (priv->type != MSCAN_TYPE_MPC5121) {
+ out_8(&regs->cantier, 0);
+ out_8(&regs->canrier, 0);
+ setbits8(&regs->canctl0,
+ MSCAN_SLPRQ | MSCAN_INITRQ);
+ }
can_bus_off(dev);
break;
default:
@@ -491,9 +521,7 @@ static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
switch (mode) {
case CAN_MODE_START:
- if (priv->can.state <= CAN_STATE_BUS_OFF)
- mscan_set_mode(dev, MSCAN_INIT_MODE);
- ret = mscan_start(dev);
+ ret = mscan_restart(dev);
if (ret)
break;
if (netif_queue_stopped(dev))
@@ -592,18 +620,21 @@ static const struct net_device_ops mscan_netdev_ops = {
.ndo_start_xmit = mscan_start_xmit,
};
-int register_mscandev(struct net_device *dev, int clock_src)
+int register_mscandev(struct net_device *dev, int mscan_clksrc)
{
struct mscan_priv *priv = netdev_priv(dev);
struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
u8 ctl1;
ctl1 = in_8(&regs->canctl1);
- if (clock_src)
+ if (mscan_clksrc)
ctl1 |= MSCAN_CLKSRC;
else
ctl1 &= ~MSCAN_CLKSRC;
+ if (priv->type == MSCAN_TYPE_MPC5121)
+ ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */
+
ctl1 |= MSCAN_CANE;
out_8(&regs->canctl1, ctl1);
udelay(100);
@@ -655,6 +686,7 @@ struct net_device *alloc_mscandev(void)
priv->can.bittiming_const = &mscan_bittiming_const;
priv->can.do_set_bittiming = mscan_do_set_bittiming;
priv->can.do_set_mode = mscan_do_set_mode;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
for (i = 0; i < TX_QUEUE_SIZE; i++) {
priv->tx_queue[i].id = i;
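Several drivers in this series (mscan above, sja1000, ti_hecc, ems_usb and vcan below) drop their hand-rolled dlc checks in favour of can_dropped_invalid_skb() at the top of the transmit path. A minimal sketch of the resulting guard is shown here; baz_xmit is a hypothetical ndo_start_xmit implementation, not taken from any of the drivers.

	#include <linux/can/dev.h>
	#include <linux/netdevice.h>

	static netdev_tx_t baz_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		struct can_frame *cf = (struct can_frame *)skb->data;

		if (can_dropped_invalid_skb(dev, skb))
			return NETDEV_TX_OK;	/* skb already freed and counted */

		/* ... queue cf->can_dlc data bytes to the controller ... */
		return NETDEV_TX_OK;
	}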
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
index 00fc4aaf1ed..4ff966473bc 100644
--- a/drivers/net/can/mscan/mscan.h
+++ b/drivers/net/can/mscan/mscan.h
@@ -38,18 +38,20 @@
#define MSCAN_CLKSRC 0x40
#define MSCAN_LOOPB 0x20
#define MSCAN_LISTEN 0x10
+#define MSCAN_BORM 0x08
#define MSCAN_WUPM 0x04
#define MSCAN_SLPAK 0x02
#define MSCAN_INITAK 0x01
-/* Use the MPC5200 MSCAN variant? */
+/* Use the MPC5XXX MSCAN variant? */
#ifdef CONFIG_PPC
-#define MSCAN_FOR_MPC5200
+#define MSCAN_FOR_MPC5XXX
#endif
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define MSCAN_CLKSRC_BUS 0
#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
+#define MSCAN_CLKSRC_IPS MSCAN_CLKSRC
#else
#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
#define MSCAN_CLKSRC_XTAL 0
@@ -136,7 +138,7 @@
#define MSCAN_EFF_RTR_SHIFT 0
#define MSCAN_EFF_FLAGS 0x18 /* IDE + SRR */
-#ifdef MSCAN_FOR_MPC5200
+#ifdef MSCAN_FOR_MPC5XXX
#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
#define _MSCAN_RESERVED_DSR_SIZE 2
#else
@@ -165,67 +167,66 @@ struct mscan_regs {
u8 cantbsel; /* + 0x14 0x0a */
u8 canidac; /* + 0x15 0x0b */
u8 reserved; /* + 0x16 0x0c */
- _MSCAN_RESERVED_(6, 5); /* + 0x17 */
-#ifndef MSCAN_FOR_MPC5200
- u8 canmisc; /* 0x0d */
-#endif
+ _MSCAN_RESERVED_(6, 2); /* + 0x17 */
+ u8 canmisc; /* + 0x19 0x0d */
+ _MSCAN_RESERVED_(7, 2); /* + 0x1a */
u8 canrxerr; /* + 0x1c 0x0e */
u8 cantxerr; /* + 0x1d 0x0f */
- _MSCAN_RESERVED_(7, 2); /* + 0x1e */
+ _MSCAN_RESERVED_(8, 2); /* + 0x1e */
u16 canidar1_0; /* + 0x20 0x10 */
- _MSCAN_RESERVED_(8, 2); /* + 0x22 */
+ _MSCAN_RESERVED_(9, 2); /* + 0x22 */
u16 canidar3_2; /* + 0x24 0x12 */
- _MSCAN_RESERVED_(9, 2); /* + 0x26 */
+ _MSCAN_RESERVED_(10, 2); /* + 0x26 */
u16 canidmr1_0; /* + 0x28 0x14 */
- _MSCAN_RESERVED_(10, 2); /* + 0x2a */
+ _MSCAN_RESERVED_(11, 2); /* + 0x2a */
u16 canidmr3_2; /* + 0x2c 0x16 */
- _MSCAN_RESERVED_(11, 2); /* + 0x2e */
+ _MSCAN_RESERVED_(12, 2); /* + 0x2e */
u16 canidar5_4; /* + 0x30 0x18 */
- _MSCAN_RESERVED_(12, 2); /* + 0x32 */
+ _MSCAN_RESERVED_(13, 2); /* + 0x32 */
u16 canidar7_6; /* + 0x34 0x1a */
- _MSCAN_RESERVED_(13, 2); /* + 0x36 */
+ _MSCAN_RESERVED_(14, 2); /* + 0x36 */
u16 canidmr5_4; /* + 0x38 0x1c */
- _MSCAN_RESERVED_(14, 2); /* + 0x3a */
+ _MSCAN_RESERVED_(15, 2); /* + 0x3a */
u16 canidmr7_6; /* + 0x3c 0x1e */
- _MSCAN_RESERVED_(15, 2); /* + 0x3e */
+ _MSCAN_RESERVED_(16, 2); /* + 0x3e */
struct {
u16 idr1_0; /* + 0x40 0x20 */
- _MSCAN_RESERVED_(16, 2); /* + 0x42 */
+ _MSCAN_RESERVED_(17, 2); /* + 0x42 */
u16 idr3_2; /* + 0x44 0x22 */
- _MSCAN_RESERVED_(17, 2); /* + 0x46 */
+ _MSCAN_RESERVED_(18, 2); /* + 0x46 */
u16 dsr1_0; /* + 0x48 0x24 */
- _MSCAN_RESERVED_(18, 2); /* + 0x4a */
+ _MSCAN_RESERVED_(19, 2); /* + 0x4a */
u16 dsr3_2; /* + 0x4c 0x26 */
- _MSCAN_RESERVED_(19, 2); /* + 0x4e */
+ _MSCAN_RESERVED_(20, 2); /* + 0x4e */
u16 dsr5_4; /* + 0x50 0x28 */
- _MSCAN_RESERVED_(20, 2); /* + 0x52 */
+ _MSCAN_RESERVED_(21, 2); /* + 0x52 */
u16 dsr7_6; /* + 0x54 0x2a */
- _MSCAN_RESERVED_(21, 2); /* + 0x56 */
+ _MSCAN_RESERVED_(22, 2); /* + 0x56 */
u8 dlr; /* + 0x58 0x2c */
- u8:8; /* + 0x59 0x2d */
- _MSCAN_RESERVED_(22, 2); /* + 0x5a */
+ u8 reserved; /* + 0x59 0x2d */
+ _MSCAN_RESERVED_(23, 2); /* + 0x5a */
u16 time; /* + 0x5c 0x2e */
} rx;
- _MSCAN_RESERVED_(23, 2); /* + 0x5e */
+ _MSCAN_RESERVED_(24, 2); /* + 0x5e */
struct {
u16 idr1_0; /* + 0x60 0x30 */
- _MSCAN_RESERVED_(24, 2); /* + 0x62 */
+ _MSCAN_RESERVED_(25, 2); /* + 0x62 */
u16 idr3_2; /* + 0x64 0x32 */
- _MSCAN_RESERVED_(25, 2); /* + 0x66 */
+ _MSCAN_RESERVED_(26, 2); /* + 0x66 */
u16 dsr1_0; /* + 0x68 0x34 */
- _MSCAN_RESERVED_(26, 2); /* + 0x6a */
+ _MSCAN_RESERVED_(27, 2); /* + 0x6a */
u16 dsr3_2; /* + 0x6c 0x36 */
- _MSCAN_RESERVED_(27, 2); /* + 0x6e */
+ _MSCAN_RESERVED_(28, 2); /* + 0x6e */
u16 dsr5_4; /* + 0x70 0x38 */
- _MSCAN_RESERVED_(28, 2); /* + 0x72 */
+ _MSCAN_RESERVED_(29, 2); /* + 0x72 */
u16 dsr7_6; /* + 0x74 0x3a */
- _MSCAN_RESERVED_(29, 2); /* + 0x76 */
+ _MSCAN_RESERVED_(30, 2); /* + 0x76 */
u8 dlr; /* + 0x78 0x3c */
u8 tbpr; /* + 0x79 0x3d */
- _MSCAN_RESERVED_(30, 2); /* + 0x7a */
+ _MSCAN_RESERVED_(31, 2); /* + 0x7a */
u16 time; /* + 0x7c 0x3e */
} tx;
- _MSCAN_RESERVED_(31, 2); /* + 0x7e */
+ _MSCAN_RESERVED_(32, 2); /* + 0x7e */
} __attribute__ ((packed));
#undef _MSCAN_RESERVED_
@@ -237,6 +238,15 @@ struct mscan_regs {
#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
#define MSCAN_SET_MODE_RETRIES 255
#define MSCAN_ECHO_SKB_MAX 3
+#define MSCAN_RX_INTS_ENABLE (MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE | \
+ MSCAN_RSTATE1 | MSCAN_RSTATE0 | \
+ MSCAN_TSTATE1 | MSCAN_TSTATE0)
+
+/* MSCAN type variants */
+enum {
+ MSCAN_TYPE_MPC5200,
+ MSCAN_TYPE_MPC5121
+};
#define BTR0_BRP_MASK 0x3f
#define BTR0_SJW_SHIFT 6
@@ -270,6 +280,7 @@ struct tx_queue_entry {
struct mscan_priv {
struct can_priv can; /* must be the first member */
+ unsigned int type; /* MSCAN type variants */
long open_time;
unsigned long flags;
void __iomem *reg_base; /* ioremap'ed address to registers */
@@ -285,12 +296,7 @@ struct mscan_priv {
};
extern struct net_device *alloc_mscandev(void);
-/*
- * clock_src:
- * 1 = The MSCAN clock source is the onchip Bus Clock.
- * 0 = The MSCAN clock source is the chip Oscillator Clock.
- */
-extern int register_mscandev(struct net_device *dev, int clock_src);
+extern int register_mscandev(struct net_device *dev, int mscan_clksrc);
extern void unregister_mscandev(struct net_device *dev);
#endif /* __MSCAN_H__ */
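The mscan.h register struct above describes two physical layouts with a single definition: a padding macro expands to reserved bytes only for the variant that spaces its registers two bytes apart. A reduced sketch of the idea, with hypothetical qux_* names:

	#ifdef QUX_WIDE_LAYOUT
	#define QUX_PAD(n, num)	u8 _pad##n[num]
	#else
	#define QUX_PAD(n, num)
	#endif

	struct qux_regs {
		u8 ctl0;		/* + 0x00 in both layouts   */
		QUX_PAD(1, 1);		/* + 0x01, wide layout only */
		u8 ctl1;		/* + 0x02 (wide) or + 0x01  */
	} __attribute__((packed));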
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index 4c674927f24..9e277d64a31 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -44,4 +44,16 @@ config CAN_KVASER_PCI
This driver is for the the PCIcanx and PCIcan cards (1, 2 or
4 channel) from Kvaser (http://www.kvaser.com).
+config CAN_PLX_PCI
+ tristate "PLX90xx PCI-bridge based Cards"
+ depends on PCI
+ ---help---
+ This driver is for CAN interface cards based on
+ the PLX90xx PCI bridge.
+	  The driver currently supports:
+ - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
+ - Adlink PCI-7841/cPCI-7841 SE card
+ - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
+ - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
endif
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index 9d245ac0396..ce924553995 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -8,5 +8,6 @@ obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
+obj-$(CONFIG_CAN_PLX_PCI) += plx_pci.o
ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/sja1000/ems_pci.c b/drivers/net/can/sja1000/ems_pci.c
index fd04789d337..87300606abb 100644
--- a/drivers/net/can/sja1000/ems_pci.c
+++ b/drivers/net/can/sja1000/ems_pci.c
@@ -102,7 +102,7 @@ struct ems_pci_card {
#define EMS_PCI_BASE_SIZE 4096 /* size of controller area */
-static struct pci_device_id ems_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ems_pci_tbl) = {
/* CPC-PCI v1 */
{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
/* CPC-PCI v2 */
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 7dd7769b971..441e776a7f5 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -109,7 +109,7 @@ struct kvaser_pci {
#define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */
#define KVASER_PCI_DEVICE_ID2 0x0008
-static struct pci_device_id kvaser_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(kvaser_pci_tbl) = {
{KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,},
{KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,},
{ 0,}
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
new file mode 100644
index 00000000000..6b46a6395f8
--- /dev/null
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -0,0 +1,472 @@
+/*
+ * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>
+ *
+ * Derived from the ems_pci.c driver:
+ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
+ * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/can.h>
+#include <linux/can/dev.h>
+#include <linux/io.h>
+
+#include "sja1000.h"
+
+#define DRV_NAME "sja1000_plx_pci"
+
+MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
+MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with "
+ "the SJA1000 chips");
+MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, "
+ "Adlink PCI-7841/cPCI-7841 SE, "
+ "Marathon CAN-bus-PCI, "
+ "TEWS TECHNOLOGIES TPMC810");
+MODULE_LICENSE("GPL v2");
+
+#define PLX_PCI_MAX_CHAN 2
+
+struct plx_pci_card {
+ int channels; /* detected channels count */
+ struct net_device *net_dev[PLX_PCI_MAX_CHAN];
+ void __iomem *conf_addr;
+};
+
+#define PLX_PCI_CAN_CLOCK (16000000 / 2)
+
+/* PLX90xx registers */
+#define PLX_INTCSR 0x4c /* Interrupt Control/Status */
+#define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response,
+ * Serial EEPROM, and Initialization
+ * Control register
+ */
+
+#define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */
+#define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */
+#define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */
+#define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */
+
+/*
+ * The board configuration is probably the following:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the CLKOUT output of the first one.
+ */
+#define PLX_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK)
+
+/* SJA1000 Control Register in the BasicCAN Mode */
+#define REG_CR 0x00
+
+/* States of some SJA1000 registers after hardware reset in the BasicCAN mode*/
+#define REG_CR_BASICCAN_INITIAL 0x21
+#define REG_CR_BASICCAN_INITIAL_MASK 0xa1
+#define REG_SR_BASICCAN_INITIAL 0x0c
+#define REG_IR_BASICCAN_INITIAL 0xe0
+
+/* States of some SJA1000 registers after hardware reset in the PeliCAN mode*/
+#define REG_MOD_PELICAN_INITIAL 0x01
+#define REG_SR_PELICAN_INITIAL 0x3c
+#define REG_IR_PELICAN_INITIAL 0x00
+
+#define ADLINK_PCI_VENDOR_ID 0x144A
+#define ADLINK_PCI_DEVICE_ID 0x7841
+
+#define MARATHON_PCI_DEVICE_ID 0x2715
+
+#define TEWS_PCI_VENDOR_ID 0x1498
+#define TEWS_PCI_DEVICE_ID_TMPC810 0x032A
+
+static void plx_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon(struct pci_dev *pdev);
+
+struct plx_pci_channel_map {
+ u32 bar;
+ u32 offset;
+	u32 size; /* 0x00 - auto, i.e. the length of the entire BAR */
+};
+
+struct plx_pci_card_info {
+ const char *name;
+ int channel_count;
+ u32 can_clock;
+ u8 ocr; /* output control register */
+ u8 cdr; /* clock divider register */
+
+ /* Parameters for mapping local configuration space */
+ struct plx_pci_channel_map conf_map;
+
+ /* Parameters for mapping the SJA1000 chips */
+ struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN];
+
+ /* Pointer to device-dependent reset function */
+ void (*reset_func)(struct pci_dev *pdev);
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink __devinitdata = {
+ "Adlink PCI-7841/cPCI-7841", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink_se __devinitdata = {
+ "Adlink PCI-7841/cPCI-7841 SE", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_marathon __devinitdata = {
+ "Marathon CAN-bus-PCI", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
+ &plx_pci_reset_marathon
+ /* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_tews __devinitdata = {
+ "TEWS TECHNOLOGIES TPMC810", 2,
+ PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+ {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
+ &plx_pci_reset_common
+ /* based on PLX9030 */
+};
+
+static DEFINE_PCI_DEVICE_TABLE(plx_pci_tbl) = {
+ {
+ /* Adlink PCI-7841/cPCI-7841 */
+ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_NETWORK_OTHER << 8, ~0,
+ (kernel_ulong_t)&plx_pci_card_info_adlink
+ },
+ {
+ /* Adlink PCI-7841/cPCI-7841 SE */
+ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_COMMUNICATION_OTHER << 8, ~0,
+ (kernel_ulong_t)&plx_pci_card_info_adlink_se
+ },
+ {
+ /* Marathon CAN-bus-PCI card */
+ PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_marathon
+ },
+ {
+ /* TEWS TECHNOLOGIES TPMC810 card */
+ TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0,
+ (kernel_ulong_t)&plx_pci_card_info_tews
+ },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
+
+static u8 plx_pci_read_reg(const struct sja1000_priv *priv, int port)
+{
+ return ioread8(priv->reg_base + port);
+}
+
+static void plx_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val)
+{
+ iowrite8(val, priv->reg_base + port);
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to switch it from the Basic mode into the PeliCAN mode.
+ * Also check states of some registers in reset mode.
+ */
+static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
+{
+ int flag = 0;
+
+ /*
+ * Check registers after hardware reset (the Basic mode)
+ * See states on p. 10 of the Datasheet.
+ */
+ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
+ REG_CR_BASICCAN_INITIAL &&
+ (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+ (priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
+ flag = 1;
+
+ /* Bring the SJA1000 into the PeliCAN mode*/
+ priv->write_reg(priv, REG_CDR, CDR_PELICAN);
+
+ /*
+ * Check registers after reset in the PeliCAN mode.
+ * See states on p. 23 of the Datasheet.
+ */
+ if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
+ return flag;
+
+ return 0;
+}
+
+/*
+ * PLX90xx software reset
+ * This also asserts LRESET# and resets devices on the Local Bus (if wired).
+ * For most cards this is enough to reset the SJA1000 chips.
+ */
+static void plx_pci_reset_common(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ u32 cntrl;
+
+ cntrl = ioread32(card->conf_addr + PLX_CNTRL);
+ cntrl |= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+ udelay(100);
+ cntrl ^= PLX_PCI_RESET;
+ iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+};
+
+/* Special reset function for Marathon card */
+static void plx_pci_reset_marathon(struct pci_dev *pdev)
+{
+ void __iomem *reset_addr;
+ int i;
+ int reset_bar[2] = {3, 5};
+
+ plx_pci_reset_common(pdev);
+
+ for (i = 0; i < 2; i++) {
+ reset_addr = pci_iomap(pdev, reset_bar[i], 0);
+ if (!reset_addr) {
+ dev_err(&pdev->dev, "Failed to remap reset "
+ "space %d (BAR%d)\n", i, reset_bar[i]);
+ } else {
+ /* reset the SJA1000 chip */
+ iowrite8(0x1, reset_addr);
+ udelay(100);
+ pci_iounmap(pdev, reset_addr);
+ }
+ }
+}
+
+static void plx_pci_del_card(struct pci_dev *pdev)
+{
+ struct plx_pci_card *card = pci_get_drvdata(pdev);
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ int i = 0;
+
+ for (i = 0; i < card->channels; i++) {
+ dev = card->net_dev[i];
+ if (!dev)
+ continue;
+
+ dev_info(&pdev->dev, "Removing %s\n", dev->name);
+ unregister_sja1000dev(dev);
+ priv = netdev_priv(dev);
+ if (priv->reg_base)
+ pci_iounmap(pdev, priv->reg_base);
+ free_sja1000dev(dev);
+ }
+
+ plx_pci_reset_common(pdev);
+
+ /*
+ * Disable interrupts from PCI-card (PLX90xx) and disable Local_1,
+ * Local_2 interrupts
+ */
+ iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+
+ if (card->conf_addr)
+ pci_iounmap(pdev, card->conf_addr);
+
+ kfree(card);
+
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/*
+ * Probe PLX90xx based device for the SJA1000 chips and register each
+ * available CAN channel with the SJA1000 Socket-CAN subsystem.
+ */
+static int __devinit plx_pci_add_card(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct sja1000_priv *priv;
+ struct net_device *dev;
+ struct plx_pci_card *card;
+ struct plx_pci_card_info *ci;
+ int err, i;
+ u32 val;
+ void __iomem *addr;
+
+ ci = (struct plx_pci_card_info *)ent->driver_data;
+
+ if (pci_enable_device(pdev) < 0) {
+ dev_err(&pdev->dev, "Failed to enable PCI device\n");
+ return -ENODEV;
+ }
+
+ dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
+ ci->name, PCI_SLOT(pdev->devfn));
+
+ /* Allocate card structures to hold addresses, ... */
+ card = kzalloc(sizeof(*card), GFP_KERNEL);
+ if (!card) {
+ dev_err(&pdev->dev, "Unable to allocate memory\n");
+ pci_disable_device(pdev);
+ return -ENOMEM;
+ }
+
+ pci_set_drvdata(pdev, card);
+
+ card->channels = 0;
+
+ /* Remap PLX90xx configuration space */
+ addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
+ if (!addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to remap configuration space "
+ "(BAR%d)\n", ci->conf_map.bar);
+ goto failure_cleanup;
+ }
+ card->conf_addr = addr + ci->conf_map.offset;
+
+ ci->reset_func(pdev);
+
+ /* Detect available channels */
+ for (i = 0; i < ci->channel_count; i++) {
+ struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];
+
+ dev = alloc_sja1000dev(0);
+ if (!dev) {
+ err = -ENOMEM;
+ goto failure_cleanup;
+ }
+
+ card->net_dev[i] = dev;
+ priv = netdev_priv(dev);
+ priv->priv = card;
+ priv->irq_flags = IRQF_SHARED;
+
+ dev->irq = pdev->irq;
+
+ /*
+		 * Remap the I/O space of the SJA1000 chips.
+		 * This mapping is device-dependent.
+ */
+ addr = pci_iomap(pdev, cm->bar, cm->size);
+ if (!addr) {
+ err = -ENOMEM;
+ dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
+ goto failure_cleanup;
+ }
+
+ priv->reg_base = addr + cm->offset;
+ priv->read_reg = plx_pci_read_reg;
+ priv->write_reg = plx_pci_write_reg;
+
+ /* Check if channel is present */
+ if (plx_pci_check_sja1000(priv)) {
+ priv->can.clock.freq = ci->can_clock;
+ priv->ocr = ci->ocr;
+ priv->cdr = ci->cdr;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ /* Register SJA1000 device */
+ err = register_sja1000dev(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Registering device failed "
+ "(err=%d)\n", err);
+ free_sja1000dev(dev);
+ goto failure_cleanup;
+ }
+
+ card->channels++;
+
+ dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
+ "registered as %s\n", i + 1, priv->reg_base,
+ dev->irq, dev->name);
+ } else {
+ dev_err(&pdev->dev, "Channel #%d not detected\n",
+ i + 1);
+ free_sja1000dev(dev);
+ }
+ }
+
+ if (!card->channels) {
+ err = -ENODEV;
+ goto failure_cleanup;
+ }
+
+ /*
+ * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
+ * Local_2 interrupts from the SJA1000 chips
+ */
+ val = ioread32(card->conf_addr + PLX_INTCSR);
+ val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+ iowrite32(val, card->conf_addr + PLX_INTCSR);
+
+ return 0;
+
+failure_cleanup:
+ dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
+
+ plx_pci_del_card(pdev);
+
+ return err;
+}
+
+static struct pci_driver plx_pci_driver = {
+ .name = DRV_NAME,
+ .id_table = plx_pci_tbl,
+ .probe = plx_pci_add_card,
+ .remove = plx_pci_del_card,
+};
+
+static int __init plx_pci_init(void)
+{
+ return pci_register_driver(&plx_pci_driver);
+}
+
+static void __exit plx_pci_exit(void)
+{
+ pci_unregister_driver(&plx_pci_driver);
+}
+
+module_init(plx_pci_init);
+module_exit(plx_pci_exit);
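The new plx_pci.c driver above attaches per-board configuration to each PCI id through the driver_data field and reads it back in probe(). A short sketch of that pattern, with hypothetical quux_* names and made-up vendor/device ids, assuming only the DEFINE_PCI_DEVICE_TABLE macro already used in this series:

	#include <linux/pci.h>

	struct quux_info {
		const char *name;
		int channels;
	};

	static const struct quux_info quux_board_a = { "board A", 2 };

	static DEFINE_PCI_DEVICE_TABLE(quux_tbl) = {
		{ 0x1234, 0x5678, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		  (kernel_ulong_t)&quux_board_a },
		{ 0, }
	};
	MODULE_DEVICE_TABLE(pci, quux_tbl);

	static int quux_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	{
		const struct quux_info *info = (void *)ent->driver_data;

		dev_info(&pdev->dev, "%s, %d channels\n",
			 info->name, info->channels);
		return 0;
	}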
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 542a4f7255b..145b1a731a5 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -130,8 +130,12 @@ static void set_normal_mode(struct net_device *dev)
/* check reset bit */
if ((status & MOD_RM) == 0) {
priv->can.state = CAN_STATE_ERROR_ACTIVE;
- /* enable all interrupts */
- priv->write_reg(priv, REG_IER, IRQ_ALL);
+ /* enable interrupts */
+ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
+ priv->write_reg(priv, REG_IER, IRQ_ALL);
+ else
+ priv->write_reg(priv, REG_IER,
+ IRQ_ALL & ~IRQ_BEI);
return;
}
@@ -203,6 +207,17 @@ static int sja1000_set_bittiming(struct net_device *dev)
return 0;
}
+static int sja1000_get_berr_counter(const struct net_device *dev,
+ struct can_berr_counter *bec)
+{
+ struct sja1000_priv *priv = netdev_priv(dev);
+
+ bec->txerr = priv->read_reg(priv, REG_TXERR);
+ bec->rxerr = priv->read_reg(priv, REG_RXERR);
+
+ return 0;
+}
+
/*
* initialize SJA1000 chip:
* - reset chip
@@ -249,6 +264,9 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
uint8_t dreg;
int i;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
netif_stop_queue(dev);
fi = dlc = cf->can_dlc;
@@ -434,6 +452,8 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
CAN_ERR_CRTL_TX_PASSIVE :
CAN_ERR_CRTL_RX_PASSIVE;
}
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
}
priv->can.state = state;
@@ -564,6 +584,9 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
priv->can.bittiming_const = &sja1000_bittiming_const;
priv->can.do_set_bittiming = sja1000_set_bittiming;
priv->can.do_set_mode = sja1000_set_mode;
+ priv->can.do_get_berr_counter = sja1000_get_berr_counter;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_BERR_REPORTING;
if (sizeof_priv)
priv->priv = (void *)priv + sizeof(struct sja1000_priv);
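The sja1000 change above also wires up the optional do_get_berr_counter hook, which the CAN core calls to report the controller's TX/RX error counters over netlink. A minimal sketch of such a hook; corge_* names, register offsets and the stub accessor are hypothetical.

	#include <linux/can/dev.h>
	#include <linux/netdevice.h>

	#define CORGE_REG_TXERR	0x0e	/* hypothetical register offsets */
	#define CORGE_REG_RXERR	0x0f

	static u8 corge_read(const struct net_device *dev, int reg)
	{
		return 0;	/* stand-in for the real register accessor */
	}

	static int corge_get_berr_counter(const struct net_device *dev,
					  struct can_berr_counter *bec)
	{
		bec->txerr = corge_read(dev, CORGE_REG_TXERR);
		bec->rxerr = corge_read(dev, CORGE_REG_RXERR);
		return 0;
	}

	/* hooked up at allocation time, e.g.:
	 *	priv->can.do_get_berr_counter = corge_get_berr_counter;
	 */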
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 5c993c2da52..0c3d2ba0d17 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -28,9 +28,11 @@
* .mbx_offset = 0x2000,
* .int_line = 0,
* .revision = 1,
+ * .transceiver_switch = hecc_phy_control,
* };
*
- * Please see include/can/platform/ti_hecc.h for description of above fields
+ * Please see include/linux/can/platform/ti_hecc.h for description of
+ * above fields.
*
*/
@@ -220,6 +222,7 @@ struct ti_hecc_priv {
u32 tx_head;
u32 tx_tail;
u32 rx_next;
+ void (*transceiver_switch)(int);
};
static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
@@ -317,6 +320,13 @@ static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
return 0;
}
+static void ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv,
+ int on)
+{
+ if (priv->transceiver_switch)
+ priv->transceiver_switch(on);
+}
+
static void ti_hecc_reset(struct net_device *ndev)
{
u32 cnt;
@@ -477,6 +487,9 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
u32 mbxno, mbx_mask, data;
unsigned long flags;
+ if (can_dropped_invalid_skb(ndev, skb))
+ return NETDEV_TX_OK;
+
mbxno = get_tx_head_mb(priv);
mbx_mask = BIT(mbxno);
spin_lock_irqsave(&priv->mbx_lock, flags);
@@ -491,7 +504,6 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
spin_unlock_irqrestore(&priv->mbx_lock, flags);
/* Prepare mailbox for transmission */
- data = min_t(u8, cf->can_dlc, 8);
if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
data |= HECC_CANMCF_RTR;
data |= get_tx_head_prio(priv) << 8;
@@ -816,15 +828,17 @@ static int ti_hecc_open(struct net_device *ndev)
return err;
}
+ ti_hecc_transceiver_switch(priv, 1);
+
/* Open common can device */
err = open_candev(ndev);
if (err) {
dev_err(ndev->dev.parent, "open_candev() failed %d\n", err);
+ ti_hecc_transceiver_switch(priv, 0);
free_irq(ndev->irq, ndev);
return err;
}
- clk_enable(priv->clk);
ti_hecc_start(ndev);
napi_enable(&priv->napi);
netif_start_queue(ndev);
@@ -840,8 +854,8 @@ static int ti_hecc_close(struct net_device *ndev)
napi_disable(&priv->napi);
ti_hecc_stop(ndev);
free_irq(ndev->irq, ndev);
- clk_disable(priv->clk);
close_candev(ndev);
+ ti_hecc_transceiver_switch(priv, 0);
return 0;
}
@@ -903,10 +917,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
priv->hecc_ram_offset = pdata->hecc_ram_offset;
priv->mbx_offset = pdata->mbx_offset;
priv->int_line = pdata->int_line;
+ priv->transceiver_switch = pdata->transceiver_switch;
priv->can.bittiming_const = &ti_hecc_bittiming_const;
priv->can.do_set_mode = ti_hecc_do_set_mode;
priv->can.do_get_state = ti_hecc_get_state;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
ndev->irq = irq->start;
ndev->flags |= IFF_ECHO;
@@ -925,6 +941,7 @@ static int ti_hecc_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
HECC_DEF_NAPI_WEIGHT);
+ clk_enable(priv->clk);
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");
@@ -953,6 +970,7 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
struct net_device *ndev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(ndev);
+ clk_disable(priv->clk);
clk_put(priv->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(priv->base);
@@ -964,6 +982,48 @@ static int __devexit ti_hecc_remove(struct platform_device *pdev)
return 0;
}
+
+#ifdef CONFIG_PM
+static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ti_hecc_priv *priv = netdev_priv(dev);
+
+ if (netif_running(dev)) {
+ netif_stop_queue(dev);
+ netif_device_detach(dev);
+ }
+
+ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ priv->can.state = CAN_STATE_SLEEPING;
+
+ clk_disable(priv->clk);
+
+ return 0;
+}
+
+static int ti_hecc_resume(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct ti_hecc_priv *priv = netdev_priv(dev);
+
+ clk_enable(priv->clk);
+
+ hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
+ priv->can.state = CAN_STATE_ERROR_ACTIVE;
+
+ if (netif_running(dev)) {
+ netif_device_attach(dev);
+ netif_start_queue(dev);
+ }
+
+ return 0;
+}
+#else
+#define ti_hecc_suspend NULL
+#define ti_hecc_resume NULL
+#endif
+
/* TI HECC netdevice driver: platform driver structure */
static struct platform_driver ti_hecc_driver = {
.driver = {
@@ -972,6 +1032,8 @@ static struct platform_driver ti_hecc_driver = {
},
.probe = ti_hecc_probe,
.remove = __devexit_p(ti_hecc_remove),
+ .suspend = ti_hecc_suspend,
+ .resume = ti_hecc_resume,
};
static int __init ti_hecc_init_driver(void)
@@ -979,14 +1041,15 @@ static int __init ti_hecc_init_driver(void)
printk(KERN_INFO DRV_DESC "\n");
return platform_driver_register(&ti_hecc_driver);
}
-module_init(ti_hecc_init_driver);
static void __exit ti_hecc_exit_driver(void)
{
printk(KERN_INFO DRV_DESC " unloaded\n");
platform_driver_unregister(&ti_hecc_driver);
}
+
module_exit(ti_hecc_exit_driver);
+module_init(ti_hecc_init_driver);
MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
index bbc78e0b8a1..97ff6febad6 100644
--- a/drivers/net/can/usb/Kconfig
+++ b/drivers/net/can/usb/Kconfig
@@ -5,6 +5,6 @@ config CAN_EMS_USB
tristate "EMS CPC-USB/ARM7 CAN/USB interface"
---help---
This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
- from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
+ from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
endmenu
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index efbb05c71bf..11c87840cc0 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -767,6 +767,9 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
+ sizeof(struct cpc_can_msg);
+ if (can_dropped_invalid_skb(netdev, skb))
+ return NETDEV_TX_OK;
+
/* create a URB, and a buffer for it, and copy the data to the URB */
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
@@ -1019,8 +1022,7 @@ static int ems_usb_probe(struct usb_interface *intf,
dev->can.bittiming_const = &ems_usb_bittiming_const;
dev->can.do_set_bittiming = ems_usb_set_bittiming;
dev->can.do_set_mode = ems_usb_set_mode;
-
- netdev->flags |= IFF_ECHO; /* we support local echo */
+ dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
netdev->netdev_ops = &ems_usb_netdev_ops;
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 80ac5631398..d124d837ae5 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -47,6 +47,7 @@
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
+#include <linux/can/dev.h>
#include <net/rtnetlink.h>
static __initdata const char banner[] =
@@ -70,10 +71,11 @@ MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
skb->protocol = htons(ETH_P_CAN);
skb->pkt_type = PACKET_BROADCAST;
@@ -85,11 +87,15 @@ static void vcan_rx(struct sk_buff *skb, struct net_device *dev)
static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
{
+ struct can_frame *cf = (struct can_frame *)skb->data;
struct net_device_stats *stats = &dev->stats;
int loop;
+ if (can_dropped_invalid_skb(dev, skb))
+ return NETDEV_TX_OK;
+
stats->tx_packets++;
- stats->tx_bytes += skb->len;
+ stats->tx_bytes += cf->can_dlc;
/* set flag whether this packet has to be looped back */
loop = skb->pkt_type == PACKET_LOOPBACK;
@@ -103,7 +109,7 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
* CAN core already did the echo for us
*/
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += cf->can_dlc;
}
kfree_skb(skb);
return NETDEV_TX_OK;
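The vcan change above switches the interface statistics from skb->len to the frame's can_dlc, so only CAN payload bytes are counted rather than the whole struct can_frame. A small sketch of a receive helper following that convention; garply_rx is hypothetical.

	#include <linux/can.h>
	#include <linux/netdevice.h>

	static void garply_rx(struct sk_buff *skb, struct net_device *dev)
	{
		struct can_frame *cf = (struct can_frame *)skb->data;

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += cf->can_dlc;	/* payload bytes only */
		netif_rx(skb);
	}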
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f857afe8e48..7cbcfb0ade1 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -66,6 +66,7 @@
* by default, the selective clear mask is set up to process rx packets.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
@@ -106,7 +107,7 @@
#define cas_page_unmap(x) kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
#define CAS_NCPUS num_online_cpus()
-#if defined(CONFIG_CASSINI_NAPI) && defined(HAVE_NETDEV_POLL)
+#ifdef CONFIG_CASSINI_NAPI
#define USE_NAPI
#define cas_skb_release(x) netif_receive_skb(x)
#else
@@ -143,7 +144,6 @@
#undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
#define DRV_MODULE_NAME "cassini"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.6"
#define DRV_MODULE_RELDATE "21 May 2008"
@@ -236,7 +236,7 @@ static u16 link_modes[] __devinitdata = {
CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */
};
-static struct pci_device_id cas_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN,
@@ -649,9 +649,8 @@ static cas_page_t *cas_page_dequeue(struct cas *cp)
cas_spare_recover(cp, GFP_ATOMIC);
spin_lock(&cp->rx_spare_lock);
if (list_empty(&cp->rx_spare_list)) {
- if (netif_msg_rx_err(cp))
- printk(KERN_ERR "%s: no spare buffers "
- "available.\n", cp->dev->name);
+ netif_err(cp, rx_err, cp->dev,
+ "no spare buffers available\n");
spin_unlock(&cp->rx_spare_lock);
return NULL;
}
@@ -728,12 +727,10 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
#endif
start_aneg:
if (cp->lstate == link_up) {
- printk(KERN_INFO "%s: PCS link down.\n",
- cp->dev->name);
+ netdev_info(cp->dev, "PCS link down\n");
} else {
if (changed) {
- printk(KERN_INFO "%s: link configuration changed\n",
- cp->dev->name);
+ netdev_info(cp->dev, "link configuration changed\n");
}
}
cp->lstate = link_down;
@@ -826,12 +823,12 @@ static int cas_saturn_firmware_init(struct cas *cp)
err = request_firmware(&fw, fw_name, &cp->pdev->dev);
if (err) {
- printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n",
+ pr_err("Failed to load firmware \"%s\"\n",
fw_name);
return err;
}
if (fw->size < 2) {
- printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n",
+ pr_err("bogus length %zu in \"%s\"\n",
fw->size, fw_name);
err = -EINVAL;
goto out;
@@ -841,7 +838,7 @@ static int cas_saturn_firmware_init(struct cas *cp)
cp->fw_data = vmalloc(cp->fw_size);
if (!cp->fw_data) {
err = -ENOMEM;
- printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err);
+ pr_err("\"%s\" Failed %d\n", fw_name, err);
goto out;
}
memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
@@ -986,9 +983,8 @@ static void cas_phy_init(struct cas *cp)
break;
}
if (limit <= 0)
- printk(KERN_WARNING "%s: PCS reset bit would not "
- "clear [%08x].\n", cp->dev->name,
- readl(cp->regs + REG_PCS_STATE_MACHINE));
+ netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
+ readl(cp->regs + REG_PCS_STATE_MACHINE));
/* Make sure PCS is disabled while changing advertisement
* configuration.
@@ -1030,11 +1026,8 @@ static int cas_pcs_link_check(struct cas *cp)
*/
if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
PCS_MII_STATUS_REMOTE_FAULT)) ==
- (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: PCS RemoteFault\n",
- cp->dev->name);
- }
+ (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
+ netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
/* work around link detection issue by querying the PCS state
* machine directly.
@@ -1081,10 +1074,8 @@ static int cas_pcs_link_check(struct cas *cp)
cp->link_transition = LINK_TRANSITION_ON_FAILURE;
}
netif_carrier_off(cp->dev);
- if (cp->opened && netif_msg_link(cp)) {
- printk(KERN_INFO "%s: PCS link down.\n",
- cp->dev->name);
- }
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "PCS link down\n");
/* Cassini only: if you force a mode, there can be
* sync problems on link down. to fix that, the following
@@ -1139,9 +1130,8 @@ static int cas_txmac_interrupt(struct net_device *dev,
if (!txmac_stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
- cp->dev->name, txmac_stat);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
/* Defer timer expiration is quite normal,
* don't even log the event.
@@ -1152,14 +1142,12 @@ static int cas_txmac_interrupt(struct net_device *dev,
spin_lock(&cp->stat_lock[0]);
if (txmac_stat & MAC_TX_UNDERRUN) {
- printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
- dev->name);
+ netdev_err(dev, "TX MAC xmit underrun\n");
cp->net_stats[0].tx_fifo_errors++;
}
if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
- printk(KERN_ERR "%s: TX MAC max packet size error.\n",
- dev->name);
+ netdev_err(dev, "TX MAC max packet size error\n");
cp->net_stats[0].tx_errors++;
}
@@ -1487,8 +1475,7 @@ static int cas_rxmac_reset(struct cas *cp)
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
return 1;
}
@@ -1500,8 +1487,7 @@ static int cas_rxmac_reset(struct cas *cp)
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
- "chip.\n", dev->name);
+ netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
return 1;
}
@@ -1515,8 +1501,7 @@ static int cas_rxmac_reset(struct cas *cp)
udelay(10);
}
if (limit == STOP_TRIES) {
- printk(KERN_ERR "%s: RX reset command will not execute, "
- "resetting whole chip.\n", dev->name);
+ netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
return 1;
}
@@ -1545,9 +1530,7 @@ static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
if (!stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
- cp->dev->name, stat);
+ netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
/* these are all rollovers */
spin_lock(&cp->stat_lock[0]);
@@ -1580,9 +1563,8 @@ static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
if (!stat)
return 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
- cp->dev->name, stat);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "mac interrupt, stat: 0x%x\n", stat);
/* This interrupt is just for pause frame and pause
* tracking. It is useful for diagnostics and debug
@@ -1605,9 +1587,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp)
switch (cp->lstate) {
case link_force_ret:
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: Autoneg failed again, keeping"
- " forced mode\n", cp->dev->name);
+ netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
cp->timer_ticks = 5;
cp->lstate = link_force_ok;
@@ -1675,9 +1655,9 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
cas_mif_poll(cp, 0);
cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
cp->timer_ticks = 5;
- if (cp->opened && netif_msg_link(cp))
- printk(KERN_INFO "%s: Got link after fallback, retrying"
- " autoneg once...\n", cp->dev->name);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev,
+ "Got link after fallback, retrying autoneg once...\n");
cas_phy_write(cp, MII_BMCR,
cp->link_fcntl | BMCR_ANENABLE |
BMCR_ANRESTART);
@@ -1704,9 +1684,8 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
cp->link_transition = LINK_TRANSITION_LINK_DOWN;
netif_carrier_off(cp->dev);
- if (cp->opened && netif_msg_link(cp))
- printk(KERN_INFO "%s: Link down\n",
- cp->dev->name);
+ if (cp->opened)
+ netif_info(cp, link, cp->dev, "Link down\n");
restart = 1;
} else if (++cp->timer_ticks > 10)
@@ -1737,23 +1716,23 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
if (!stat)
return 0;
- printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
- readl(cp->regs + REG_BIM_DIAG));
+ netdev_err(dev, "PCI error [%04x:%04x]",
+ stat, readl(cp->regs + REG_BIM_DIAG));
/* cassini+ has this reserved */
if ((stat & PCI_ERR_BADACK) &&
((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
- printk("<No ACK64# during ABS64 cycle> ");
+ pr_cont(" <No ACK64# during ABS64 cycle>");
if (stat & PCI_ERR_DTRTO)
- printk("<Delayed transaction timeout> ");
+ pr_cont(" <Delayed transaction timeout>");
if (stat & PCI_ERR_OTHER)
- printk("<other> ");
+ pr_cont(" <other>");
if (stat & PCI_ERR_BIM_DMA_WRITE)
- printk("<BIM DMA 0 write req> ");
+ pr_cont(" <BIM DMA 0 write req>");
if (stat & PCI_ERR_BIM_DMA_READ)
- printk("<BIM DMA 0 read req> ");
- printk("\n");
+ pr_cont(" <BIM DMA 0 read req>");
+ pr_cont("\n");
if (stat & PCI_ERR_OTHER) {
u16 cfg;
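The PCI-error hunk above opens the line with netdev_err() (no trailing newline) and appends each decoded cause with pr_cont(), which continues the current record instead of starting new prefixed lines. A minimal sketch; the flag bits and message text are placeholders:

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void demo_dump_causes(struct net_device *dev, u32 stat)
{
        netdev_err(dev, "PCI error [%04x]", stat);      /* no '\n': line stays open */
        if (stat & 0x1)
                pr_cont(" <cause A>");
        if (stat & 0x2)
                pr_cont(" <cause B>");
        pr_cont("\n");                                  /* terminate the record */
}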
@@ -1762,25 +1741,19 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
* true cause.
*/
pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
- printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
- dev->name, cfg);
+ netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
if (cfg & PCI_STATUS_PARITY)
- printk(KERN_ERR "%s: PCI parity error detected.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error detected\n");
if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI target abort\n");
if (cfg & PCI_STATUS_REC_TARGET_ABORT)
- printk(KERN_ERR "%s: PCI master acks target abort.\n",
- dev->name);
+ netdev_err(dev, "PCI master acks target abort\n");
if (cfg & PCI_STATUS_REC_MASTER_ABORT)
- printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
+ netdev_err(dev, "PCI master abort\n");
if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
- printk(KERN_ERR "%s: PCI system error SERR#.\n",
- dev->name);
+ netdev_err(dev, "PCI system error SERR#\n");
if (cfg & PCI_STATUS_DETECTED_PARITY)
- printk(KERN_ERR "%s: PCI parity error.\n",
- dev->name);
+ netdev_err(dev, "PCI parity error\n");
/* Write the error bits back to clear them. */
cfg &= (PCI_STATUS_PARITY |
@@ -1806,9 +1779,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
{
if (status & INTR_RX_TAG_ERROR) {
/* corrupt RX tag framing */
- if (netif_msg_rx_err(cp))
- printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
- cp->dev->name);
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "corrupt rx tag framing\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
@@ -1817,9 +1789,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
if (status & INTR_RX_LEN_MISMATCH) {
/* length mismatch. */
- if (netif_msg_rx_err(cp))
- printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
- cp->dev->name);
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "length mismatch for rx frame\n");
spin_lock(&cp->stat_lock[0]);
cp->net_stats[0].rx_errors++;
spin_unlock(&cp->stat_lock[0]);
@@ -1861,12 +1832,11 @@ do_reset:
#if 1
atomic_inc(&cp->reset_task_pending);
atomic_inc(&cp->reset_task_pending_all);
- printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
- dev->name, status);
+ netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
- printk(KERN_ERR "reset called in cas_abnormal_irq\n");
+ netdev_err(dev, "reset called in cas_abnormal_irq\n");
schedule_work(&cp->reset_task);
#endif
return 1;
@@ -1920,9 +1890,8 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
if (count < 0)
break;
- if (netif_msg_tx_done(cp))
- printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
- cp->dev->name, ring, entry);
+ netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
+ "tx[%d] done, slot %d\n", ring, entry);
skbs[entry] = NULL;
cp->tx_tiny_use[ring][entry].nbufs = 0;
@@ -1969,9 +1938,9 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
#ifdef USE_TX_COMPWB
u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
- cp->dev->name, status, (unsigned long long)compwb);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "tx interrupt, status: 0x%x, %llx\n",
+ status, (unsigned long long)compwb);
/* process all the rings */
for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
@@ -2050,10 +2019,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
- if (netif_msg_rx_err(cp)) {
- printk(KERN_DEBUG "%s: rx page overflow: "
- "%d\n", cp->dev->name, hlen);
- }
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
@@ -2130,10 +2097,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
hlen = min(cp->page_size - off, dlen);
if (hlen < 0) {
- if (netif_msg_rx_err(cp)) {
- printk(KERN_DEBUG "%s: rx page overflow: "
- "%d\n", cp->dev->name, hlen);
- }
+ netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+ "rx page overflow: %d\n", hlen);
dev_kfree_skb_irq(skb);
return -1;
}
@@ -2265,9 +2230,8 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
entry = cp->rx_old[ring];
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
- cp->dev->name, ring, entry);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rxd[%d] interrupt, done: %d\n", ring, entry);
cluster = -1;
count = entry & 0x3;
@@ -2337,11 +2301,10 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
int entry, drops;
int npackets = 0;
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
- cp->dev->name, ring,
- readl(cp->regs + REG_RX_COMP_HEAD),
- cp->rx_new[ring]);
+ netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+ "rx[%d] interrupt, done: %d/%d\n",
+ ring,
+ readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
entry = cp->rx_new[ring];
drops = 0;
@@ -2442,8 +2405,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
cp->rx_new[ring] = entry;
if (drops)
- printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
- cp->dev->name);
+ netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
return npackets;
}
@@ -2457,10 +2419,9 @@ static void cas_post_rxcs_ringN(struct net_device *dev,
last = cp->rx_cur[ring];
entry = cp->rx_new[ring];
- if (netif_msg_intr(cp))
- printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
- dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
- entry);
+ netif_printk(cp, intr, KERN_DEBUG, dev,
+ "rxc[%d] interrupt, done: %d/%d\n",
+ ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
/* zero and re-mark descriptors */
while (last != entry) {
@@ -2729,42 +2690,38 @@ static void cas_tx_timeout(struct net_device *dev)
{
struct cas *cp = netdev_priv(dev);
- printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
if (!cp->hw_running) {
- printk("%s: hrm.. hw not running!\n", dev->name);
+ netdev_err(dev, "hrm.. hw not running!\n");
return;
}
- printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
- dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
-
- printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
- dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
-
- printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
- "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
- dev->name,
- readl(cp->regs + REG_TX_CFG),
- readl(cp->regs + REG_MAC_TX_STATUS),
- readl(cp->regs + REG_MAC_TX_CFG),
- readl(cp->regs + REG_TX_FIFO_PKT_CNT),
- readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
- readl(cp->regs + REG_TX_FIFO_READ_PTR),
- readl(cp->regs + REG_TX_SM_1),
- readl(cp->regs + REG_TX_SM_2));
-
- printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
- dev->name,
- readl(cp->regs + REG_RX_CFG),
- readl(cp->regs + REG_MAC_RX_STATUS),
- readl(cp->regs + REG_MAC_RX_CFG));
-
- printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
- dev->name,
- readl(cp->regs + REG_HP_STATE_MACHINE),
- readl(cp->regs + REG_HP_STATUS0),
- readl(cp->regs + REG_HP_STATUS1),
- readl(cp->regs + REG_HP_STATUS2));
+ netdev_err(dev, "MIF_STATE[%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE));
+
+ netdev_err(dev, "MAC_STATE[%08x]\n",
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
+
+ netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+ readl(cp->regs + REG_TX_CFG),
+ readl(cp->regs + REG_MAC_TX_STATUS),
+ readl(cp->regs + REG_MAC_TX_CFG),
+ readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+ readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+ readl(cp->regs + REG_TX_FIFO_READ_PTR),
+ readl(cp->regs + REG_TX_SM_1),
+ readl(cp->regs + REG_TX_SM_2));
+
+ netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_RX_CFG),
+ readl(cp->regs + REG_MAC_RX_STATUS),
+ readl(cp->regs + REG_MAC_RX_CFG));
+
+ netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
+ readl(cp->regs + REG_HP_STATE_MACHINE),
+ readl(cp->regs + REG_HP_STATUS0),
+ readl(cp->regs + REG_HP_STATUS1),
+ readl(cp->regs + REG_HP_STATUS2));
#if 1
atomic_inc(&cp->reset_task_pending);
@@ -2830,8 +2787,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
netif_stop_queue(dev);
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
return 1;
}
@@ -2908,11 +2864,9 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
netif_stop_queue(dev);
- if (netif_msg_tx_queued(cp))
- printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
- "avail %d\n",
- dev->name, ring, entry, skb->len,
- TX_BUFFS_AVAIL(cp, ring));
+ netif_printk(cp, tx_queued, KERN_DEBUG, dev,
+ "tx[%d] queued, slot %d, skblen %d, avail %d\n",
+ ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
writel(entry, cp->regs + REG_TX_KICKN(ring));
spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
return 0;
@@ -2999,6 +2953,40 @@ static inline void cas_init_dma(struct cas *cp)
cas_init_rx_dma(cp);
}
+static void cas_process_mc_list(struct cas *cp)
+{
+ u16 hash_table[16];
+ u32 crc;
+ struct dev_mc_list *dmi;
+ int i = 1;
+
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, cp->dev) {
+ if (i <= CAS_MC_EXACT_MATCH_SIZE) {
+ /* use the alternate mac address registers for the
+ * first 15 multicast addresses
+ */
+ writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
+ cp->regs + REG_MAC_ADDRN(i*3 + 0));
+ writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
+ cp->regs + REG_MAC_ADDRN(i*3 + 1));
+ writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
+ cp->regs + REG_MAC_ADDRN(i*3 + 2));
+ i++;
+ }
+ else {
+ /* use hw hash table for the next series of
+ * multicast addresses
+ */
+ crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
+ crc >>= 24;
+ hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
+ }
+ }
+ for (i = 0; i < 16; i++)
+ writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i));
+}
+
/* Must be invoked under cp->lock. */
static u32 cas_setup_multicast(struct cas *cp)
{
@@ -3014,43 +3002,7 @@ static u32 cas_setup_multicast(struct cas *cp)
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
} else {
- u16 hash_table[16];
- u32 crc;
- struct dev_mc_list *dmi = cp->dev->mc_list;
- int i;
-
- /* use the alternate mac address registers for the
- * first 15 multicast addresses
- */
- for (i = 1; i <= CAS_MC_EXACT_MATCH_SIZE; i++) {
- if (!dmi) {
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel(0x0, cp->regs + REG_MAC_ADDRN(i*3 + 2));
- continue;
- }
- writel((dmi->dmi_addr[4] << 8) | dmi->dmi_addr[5],
- cp->regs + REG_MAC_ADDRN(i*3 + 0));
- writel((dmi->dmi_addr[2] << 8) | dmi->dmi_addr[3],
- cp->regs + REG_MAC_ADDRN(i*3 + 1));
- writel((dmi->dmi_addr[0] << 8) | dmi->dmi_addr[1],
- cp->regs + REG_MAC_ADDRN(i*3 + 2));
- dmi = dmi->next;
- }
-
- /* use hw hash table for the next series of
- * multicast addresses
- */
- memset(hash_table, 0, sizeof(hash_table));
- while (dmi) {
- crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr);
- crc >>= 24;
- hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
- dmi = dmi->next;
- }
- for (i=0; i < 16; i++)
- writel(hash_table[i], cp->regs +
- REG_MAC_HASH_TABLEN(i));
+ cas_process_mc_list(cp);
rxcfg |= MAC_RX_CFG_HASH_FILTER_EN;
}
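The new cas_process_mc_list() above walks the multicast list with the netdev_for_each_mc_addr() iterator instead of chasing dev->mc_list by hand. A stripped-down sketch of that iterator building the same 256-bit hash filter; demo_build_mc_hash is a hypothetical helper, not part of the patch:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/string.h>

/* Hypothetical helper: same hash scheme as cas_process_mc_list(), reduced to
 * the iterator pattern itself. */
static void demo_build_mc_hash(struct net_device *dev, u16 hash_table[16])
{
        struct dev_mc_list *dmi;
        u32 crc;

        memset(hash_table, 0, 16 * sizeof(u16));
        netdev_for_each_mc_addr(dmi, dev) {
                crc = ether_crc_le(ETH_ALEN, dmi->dmi_addr) >> 24;
                hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));
        }
}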
@@ -3100,10 +3052,10 @@ static void cas_mac_reset(struct cas *cp)
if (readl(cp->regs + REG_MAC_TX_RESET) |
readl(cp->regs + REG_MAC_RX_RESET))
- printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
- cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
- readl(cp->regs + REG_MAC_RX_RESET),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
+ readl(cp->regs + REG_MAC_TX_RESET),
+ readl(cp->regs + REG_MAC_RX_RESET),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
}
@@ -3423,7 +3375,7 @@ use_random_mac_addr:
goto done;
/* Sun MAC prefix then 3 random bytes. */
- printk(PFX "MAC address not found in ROM VPD\n");
+ pr_info("MAC address not found in ROM VPD\n");
dev_addr[0] = 0x08;
dev_addr[1] = 0x00;
dev_addr[2] = 0x20;
@@ -3484,7 +3436,7 @@ static int cas_check_invariants(struct cas *cp)
__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
} else {
- printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
+ printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
}
}
#endif
@@ -3529,7 +3481,7 @@ static int cas_check_invariants(struct cas *cp)
}
}
}
- printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
+ pr_err("MII phy did not respond [%08x]\n",
readl(cp->regs + REG_MIF_STATE_MACHINE));
return -1;
@@ -3574,21 +3526,19 @@ static inline void cas_start_dma(struct cas *cp)
val = readl(cp->regs + REG_MAC_RX_CFG);
if ((val & MAC_RX_CFG_EN)) {
if (txfailed) {
- printk(KERN_ERR
- "%s: enabling mac failed [tx:%08x:%08x].\n",
- cp->dev->name,
- readl(cp->regs + REG_MIF_STATE_MACHINE),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev,
+ "enabling mac failed [tx:%08x:%08x]\n",
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
}
goto enable_rx_done;
}
udelay(10);
}
- printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
- cp->dev->name,
- (txfailed? "tx,rx":"rx"),
- readl(cp->regs + REG_MIF_STATE_MACHINE),
- readl(cp->regs + REG_MAC_STATE_MACHINE));
+ netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
+ (txfailed ? "tx,rx" : "rx"),
+ readl(cp->regs + REG_MIF_STATE_MACHINE),
+ readl(cp->regs + REG_MAC_STATE_MACHINE));
enable_rx_done:
cas_unmask_intr(cp); /* enable interrupts */
@@ -3690,9 +3640,8 @@ static void cas_set_link_modes(struct cas *cp)
}
}
- if (netif_msg_link(cp))
- printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
- cp->dev->name, speed, (full_duplex ? "full" : "half"));
+ netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
+ speed, full_duplex ? "full" : "half");
val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
if (CAS_PHY_MII(cp->phy_type)) {
@@ -3762,18 +3711,14 @@ static void cas_set_link_modes(struct cas *cp)
if (netif_msg_link(cp)) {
if (pause & 0x01) {
- printk(KERN_INFO "%s: Pause is enabled "
- "(rxfifo: %d off: %d on: %d)\n",
- cp->dev->name,
- cp->rx_fifo_size,
- cp->rx_pause_off,
- cp->rx_pause_on);
+ netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+ cp->rx_fifo_size,
+ cp->rx_pause_off,
+ cp->rx_pause_on);
} else if (pause & 0x10) {
- printk(KERN_INFO "%s: TX pause enabled\n",
- cp->dev->name);
+ netdev_info(cp->dev, "TX pause enabled\n");
} else {
- printk(KERN_INFO "%s: Pause is disabled\n",
- cp->dev->name);
+ netdev_info(cp->dev, "Pause is disabled\n");
}
}
@@ -3849,7 +3794,7 @@ static void cas_global_reset(struct cas *cp, int blkflag)
goto done;
udelay(10);
}
- printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
+ netdev_err(cp->dev, "sw reset failed\n");
done:
/* enable various BIM interrupts */
@@ -3955,7 +3900,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
#else
atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
CAS_RESET_ALL : CAS_RESET_MTU);
- printk(KERN_ERR "reset called in cas_change_mtu\n");
+ pr_err("reset called in cas_change_mtu\n");
schedule_work(&cp->reset_task);
#endif
@@ -4237,10 +4182,8 @@ static void cas_link_timer(unsigned long data)
if (((tlm == 0x5) || (tlm == 0x3)) &&
(CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
- if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err: "
- "MAC_STATE[%08x]\n",
- cp->dev->name, val);
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: MAC_STATE[%08x]\n", val);
reset = 1;
goto done;
}
@@ -4249,10 +4192,9 @@ static void cas_link_timer(unsigned long data)
wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
if ((val == 0) && (wptr != rptr)) {
- if (netif_msg_tx_err(cp))
- printk(KERN_DEBUG "%s: tx err: "
- "TX_FIFO[%08x:%08x:%08x]\n",
- cp->dev->name, val, wptr, rptr);
+ netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+ "tx err: TX_FIFO[%08x:%08x:%08x]\n",
+ val, wptr, rptr);
reset = 1;
}
@@ -4268,7 +4210,7 @@ done:
schedule_work(&cp->reset_task);
#else
atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
- printk(KERN_ERR "reset called in cas_link_timer\n");
+ pr_err("reset called in cas_link_timer\n");
schedule_work(&cp->reset_task);
#endif
}
@@ -4361,8 +4303,7 @@ static int cas_open(struct net_device *dev)
*/
if (request_irq(cp->pdev->irq, cas_interrupt,
IRQF_SHARED, dev->name, (void *) dev)) {
- printk(KERN_ERR "%s: failed to request irq !\n",
- cp->dev->name);
+ netdev_err(cp->dev, "failed to request irq !\n");
err = -EAGAIN;
goto err_spare;
}
@@ -5002,24 +4943,24 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
if (cas_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
dev_err(&pdev->dev, "Cannot find proper PCI device "
- "base address, aborting.\n");
+ "base address, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
dev = alloc_etherdev(sizeof(*cp));
if (!dev) {
- dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
+ dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_disable_pdev;
}
@@ -5027,7 +4968,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
err = pci_request_regions(pdev, dev->name);
if (err) {
- dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_free_netdev;
}
pci_set_master(pdev);
@@ -5041,8 +4982,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
pci_cmd |= PCI_COMMAND_PARITY;
pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
if (pci_try_set_mwi(pdev))
- printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
- pci_name(pdev));
+ pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
cas_program_bridge(pdev);
@@ -5085,7 +5025,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev, "No usable DMA configuration, "
- "aborting.\n");
+ "aborting\n");
goto err_out_free_res;
}
pci_using_dac = 0;
@@ -5144,7 +5084,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
/* give us access to cassini registers */
cp->regs = pci_iomap(pdev, 0, casreg_len);
if (!cp->regs) {
- dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
goto err_out_free_res;
}
cp->casreg_len = casreg_len;
@@ -5163,7 +5103,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
&cp->block_dvma);
if (!cp->init_block) {
- dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
+ dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
goto err_out_iounmap;
}
@@ -5197,18 +5137,17 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
dev->features |= NETIF_F_HIGHDMA;
if (register_netdev(dev)) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_free_consistent;
}
i = readl(cp->regs + REG_BIM_CFG);
- printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
- "Ethernet[%d] %pM\n", dev->name,
- (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
- (i & BIM_CFG_32BIT) ? "32" : "64",
- (i & BIM_CFG_66MHZ) ? "66" : "33",
- (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
- dev->dev_addr);
+ netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
+ (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+ (i & BIM_CFG_32BIT) ? "32" : "64",
+ (i & BIM_CFG_66MHZ) ? "66" : "33",
+ (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+ dev->dev_addr);
pci_set_drvdata(pdev, dev);
cp->hw_running = 1;
@@ -5322,7 +5261,7 @@ static int cas_resume(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct cas *cp = netdev_priv(dev);
- printk(KERN_INFO "%s: resuming\n", dev->name);
+ netdev_info(dev, "resuming\n");
mutex_lock(&cp->pm_mutex);
cas_hard_reset(cp);
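Most of the cassini churn converts printk(KERN_* "%s: ...", dev->name, ...) to the netdev_<level>() and netif_<level>() helpers, which prefix the driver and device names automatically and, for netif_*, fold the msg_enable test into the call. A minimal sketch of the two forms; struct demo_priv and demo_report_link are illustrative stand-ins for struct cas:

#include <linux/netdevice.h>

struct demo_priv {
        u32 msg_enable;         /* NETIF_MSG_* bits, as in struct cas */
};

static void demo_report_link(struct net_device *dev, bool up)
{
        struct demo_priv *priv = netdev_priv(dev);

        if (up)
                /* Printed only when NETIF_MSG_LINK is set in priv->msg_enable;
                 * replaces "if (netif_msg_link(priv)) printk(KERN_INFO ...)". */
                netif_info(priv, link, dev, "link is up\n");
        else
                /* Unconditional; replaces printk(KERN_ERR "%s: ...", dev->name, ...). */
                netdev_err(dev, "link is down\n");
}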
diff --git a/drivers/net/chelsio/common.h b/drivers/net/chelsio/common.h
index 699d22c5fe0..2d11afe4531 100644
--- a/drivers/net/chelsio/common.h
+++ b/drivers/net/chelsio/common.h
@@ -36,6 +36,8 @@
* *
****************************************************************************/
+#define pr_fmt(fmt) "cxgb: " fmt
+
#ifndef _CXGB_COMMON_H_
#define _CXGB_COMMON_H_
@@ -55,28 +57,6 @@
#define DRV_DESCRIPTION "Chelsio 10Gb Ethernet Driver"
#define DRV_NAME "cxgb"
#define DRV_VERSION "2.2"
-#define PFX DRV_NAME ": "
-
-#define CH_ERR(fmt, ...) printk(KERN_ERR PFX fmt, ## __VA_ARGS__)
-#define CH_WARN(fmt, ...) printk(KERN_WARNING PFX fmt, ## __VA_ARGS__)
-#define CH_ALERT(fmt, ...) printk(KERN_ALERT PFX fmt, ## __VA_ARGS__)
-
-/*
- * More powerful macro that selectively prints messages based on msg_enable.
- * For info and debugging messages.
- */
-#define CH_MSG(adapter, level, category, fmt, ...) do { \
- if ((adapter)->msg_enable & NETIF_MSG_##category) \
- printk(KERN_##level PFX "%s: " fmt, (adapter)->name, \
- ## __VA_ARGS__); \
-} while (0)
-
-#ifdef DEBUG
-# define CH_DBG(adapter, category, fmt, ...) \
- CH_MSG(adapter, DEBUG, category, fmt, ## __VA_ARGS__)
-#else
-# define CH_DBG(fmt, ...)
-#endif
#define CH_DEVICE(devid, ssid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, ssid, 0, 0, idx }
@@ -90,25 +70,13 @@
typedef struct adapter adapter_t;
struct t1_rx_mode {
- struct net_device *dev;
- u32 idx;
- struct dev_mc_list *list;
+ struct net_device *dev;
};
#define t1_rx_mode_promisc(rm) (rm->dev->flags & IFF_PROMISC)
#define t1_rx_mode_allmulti(rm) (rm->dev->flags & IFF_ALLMULTI)
-#define t1_rx_mode_mc_cnt(rm) (rm->dev->mc_count)
-
-static inline u8 *t1_get_next_mcaddr(struct t1_rx_mode *rm)
-{
- u8 *addr = NULL;
-
- if (rm->idx++ < rm->dev->mc_count) {
- addr = rm->list->dmi_addr;
- rm->list = rm->list->next;
- }
- return addr;
-}
+#define t1_rx_mode_mc_cnt(rm) (netdev_mc_count(rm->dev))
+#define t1_get_netdev(rm) (rm->dev)
#define MAX_NPORTS 4
#define PORT_MASK ((1 << MAX_NPORTS) - 1)
@@ -334,7 +302,7 @@ static inline int t1_is_asic(const adapter_t *adapter)
return adapter->params.is_asic;
}
-extern struct pci_device_id t1_pci_tbl[];
+extern const struct pci_device_id t1_pci_tbl[];
static inline int adapter_matches_type(const adapter_t *adapter,
int version, int revision)
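With the CH_ERR/CH_WARN/CH_ALERT wrappers removed, messages go through the stock pr_<level>() macros and pick up the "cxgb: " prefix from the pr_fmt definition added above, provided pr_fmt is defined before the first include that pulls in the printk machinery. Illustrative equivalence only; the messages are copied from hunks below:

#define pr_fmt(fmt) "cxgb: " fmt

#include <linux/kernel.h>

static void demo_report(const char *name)
{
        /* Was: CH_ERR("%s: cannot map device registers\n", name);
         * now prints "cxgb: <name>: cannot map device registers". */
        pr_err("%s: cannot map device registers\n", name);

        /* Was: CH_ALERT("%s: SGE response queue overflow\n", name); */
        pr_alert("%s: SGE response queue overflow\n", name);
}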
diff --git a/drivers/net/chelsio/cxgb2.c b/drivers/net/chelsio/cxgb2.c
index 082cdb28b51..0f71304e054 100644
--- a/drivers/net/chelsio/cxgb2.c
+++ b/drivers/net/chelsio/cxgb2.c
@@ -125,8 +125,6 @@ static void t1_set_rxmode(struct net_device *dev)
struct t1_rx_mode rm;
rm.dev = dev;
- rm.idx = 0;
- rm.list = dev->mc_list;
mac->ops->set_rx_mode(mac, &rm);
}
@@ -976,7 +974,7 @@ void t1_fatal_err(struct adapter *adapter)
t1_sge_stop(adapter->sge);
t1_interrupts_disable(adapter);
}
- CH_ALERT("%s: encountered fatal error, operation suspended\n",
+ pr_alert("%s: encountered fatal error, operation suspended\n",
adapter->name);
}
@@ -1020,7 +1018,7 @@ static int __devinit init_one(struct pci_dev *pdev,
return err;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- CH_ERR("%s: cannot find PCI device memory base address\n",
+ pr_err("%s: cannot find PCI device memory base address\n",
pci_name(pdev));
err = -ENODEV;
goto out_disable_pdev;
@@ -1030,20 +1028,20 @@ static int __devinit init_one(struct pci_dev *pdev,
pci_using_dac = 1;
if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
- CH_ERR("%s: unable to obtain 64-bit DMA for "
+ pr_err("%s: unable to obtain 64-bit DMA for "
"consistent allocations\n", pci_name(pdev));
err = -ENODEV;
goto out_disable_pdev;
}
} else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
- CH_ERR("%s: no usable DMA configuration\n", pci_name(pdev));
+ pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
goto out_disable_pdev;
}
err = pci_request_regions(pdev, DRV_NAME);
if (err) {
- CH_ERR("%s: cannot obtain PCI resources\n", pci_name(pdev));
+ pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
goto out_disable_pdev;
}
@@ -1071,7 +1069,7 @@ static int __devinit init_one(struct pci_dev *pdev,
adapter->regs = ioremap(mmio_start, mmio_len);
if (!adapter->regs) {
- CH_ERR("%s: cannot map device registers\n",
+ pr_err("%s: cannot map device registers\n",
pci_name(pdev));
err = -ENOMEM;
goto out_free_dev;
@@ -1150,8 +1148,8 @@ static int __devinit init_one(struct pci_dev *pdev,
for (i = 0; i < bi->port_number; ++i) {
err = register_netdev(adapter->port[i].dev);
if (err)
- CH_WARN("%s: cannot register net device %s, skipping\n",
- pci_name(pdev), adapter->port[i].dev->name);
+ pr_warning("%s: cannot register net device %s, skipping\n",
+ pci_name(pdev), adapter->port[i].dev->name);
else {
/*
* Change the name we use for messages to the name of
@@ -1164,7 +1162,7 @@ static int __devinit init_one(struct pci_dev *pdev,
}
}
if (!adapter->registered_device_map) {
- CH_ERR("%s: could not register any net devices\n",
+ pr_err("%s: could not register any net devices\n",
pci_name(pdev));
goto out_release_adapter_res;
}
diff --git a/drivers/net/chelsio/espi.c b/drivers/net/chelsio/espi.c
index 1e0749e000b..639ff195573 100644
--- a/drivers/net/chelsio/espi.c
+++ b/drivers/net/chelsio/espi.c
@@ -76,7 +76,7 @@ static int tricn_write(adapter_t *adapter, int bundle_addr, int module_addr,
} while (busy && --attempts);
if (busy)
- CH_ERR("%s: TRICN write timed out\n", adapter->name);
+ pr_err("%s: TRICN write timed out\n", adapter->name);
return busy;
}
@@ -86,7 +86,7 @@ static int tricn_init(adapter_t *adapter)
int i, sme = 1;
if (!(readl(adapter->regs + A_ESPI_RX_RESET) & F_RX_CLK_STATUS)) {
- CH_ERR("%s: ESPI clock not ready\n", adapter->name);
+ pr_err("%s: ESPI clock not ready\n", adapter->name);
return -1;
}
diff --git a/drivers/net/chelsio/pm3393.c b/drivers/net/chelsio/pm3393.c
index 2117c4fbb10..a6eb30a6e2b 100644
--- a/drivers/net/chelsio/pm3393.c
+++ b/drivers/net/chelsio/pm3393.c
@@ -251,8 +251,9 @@ static int pm3393_interrupt_handler(struct cmac *cmac)
/* Read the master interrupt status register. */
pmread(cmac, SUNI1x10GEXP_REG_MASTER_INTERRUPT_STATUS,
&master_intr_status);
- CH_DBG(cmac->adapter, INTR, "PM3393 intr cause 0x%x\n",
- master_intr_status);
+ if (netif_msg_intr(cmac->adapter))
+ dev_dbg(&cmac->adapter->pdev->dev, "PM3393 intr cause 0x%x\n",
+ master_intr_status);
/* TBD XXX Lets just clear everything for now */
pm3393_interrupt_clear(cmac);
@@ -375,12 +376,12 @@ static int pm3393_set_rx_mode(struct cmac *cmac, struct t1_rx_mode *rm)
rx_mode |= SUNI1x10GEXP_BITMSK_RXXG_MHASH_EN;
} else if (t1_rx_mode_mc_cnt(rm)) {
/* Accept one or more multicast(s). */
- u8 *addr;
+ struct dev_mc_list *dmi;
int bit;
u16 mc_filter[4] = { 0, };
- while ((addr = t1_get_next_mcaddr(rm))) {
- bit = (ether_crc(ETH_ALEN, addr) >> 23) & 0x3f; /* bit[23:28] */
+ netdev_for_each_mc_addr(dmi, t1_get_netdev(rm)) {
+ bit = (ether_crc(ETH_ALEN, dmi->dmi_addr) >> 23) & 0x3f; /* bit[23:28] */
mc_filter[bit >> 4] |= 1 << (bit & 0xf);
}
pmwrite(cmac, SUNI1x10GEXP_REG_RXXG_MULTICAST_HASH_LOW, mc_filter[0]);
@@ -776,11 +777,12 @@ static int pm3393_mac_reset(adapter_t * adapter)
successful_reset = (is_pl4_reset_finished && !is_pl4_outof_lock
&& is_xaui_mabc_pll_locked);
- CH_DBG(adapter, HW,
- "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
- "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
- i, is_pl4_reset_finished, val, is_pl4_outof_lock,
- is_xaui_mabc_pll_locked);
+ if (netif_msg_hw(adapter))
+ dev_dbg(&adapter->pdev->dev,
+ "PM3393 HW reset %d: pl4_reset 0x%x, val 0x%x, "
+ "is_pl4_outof_lock 0x%x, xaui_locked 0x%x\n",
+ i, is_pl4_reset_finished, val,
+ is_pl4_outof_lock, is_xaui_mabc_pll_locked);
}
return successful_reset ? 0 : 1;
}
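pm3393_interrupt_handler() and pm3393_mac_reset() now gate their debug output on the adapter's msg_enable bits and emit it with dev_dbg() against the underlying PCI device. A minimal sketch of that guard; struct demo_adapter is a stand-in carrying only the fields the macro needs:

#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct demo_adapter {
        u32 msg_enable;         /* NETIF_MSG_* bits */
        struct pci_dev *pdev;
};

static void demo_intr_dbg(struct demo_adapter *adapter, u32 cause)
{
        /* dev_dbg() is compiled out unless DEBUG/dynamic debug is enabled,
         * and netif_msg_intr() skips it unless NETIF_MSG_INTR is set. */
        if (netif_msg_intr(adapter))
                dev_dbg(&adapter->pdev->dev, "interrupt cause 0x%x\n", cause);
}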
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 109d2783e4d..71384114a4e 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -267,7 +267,7 @@ struct sge {
struct sk_buff *espibug_skb[MAX_NPORTS];
u32 sge_control; /* shadow value of sge control reg */
struct sge_intr_counts stats;
- struct sge_port_stats *port_stats[MAX_NPORTS];
+ struct sge_port_stats __percpu *port_stats[MAX_NPORTS];
struct sched *tx_sched;
struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
};
@@ -953,7 +953,7 @@ int t1_sge_intr_error_handler(struct sge *sge)
sge->stats.respQ_empty++;
if (cause & F_RESPQ_OVERFLOW) {
sge->stats.respQ_overflow++;
- CH_ALERT("%s: SGE response queue overflow\n",
+ pr_alert("%s: SGE response queue overflow\n",
adapter->name);
}
if (cause & F_FL_EXHAUSTED) {
@@ -962,12 +962,12 @@ int t1_sge_intr_error_handler(struct sge *sge)
}
if (cause & F_PACKET_TOO_BIG) {
sge->stats.pkt_too_big++;
- CH_ALERT("%s: SGE max packet size exceeded\n",
+ pr_alert("%s: SGE max packet size exceeded\n",
adapter->name);
}
if (cause & F_PACKET_MISMATCH) {
sge->stats.pkt_mismatch++;
- CH_ALERT("%s: SGE packet mismatch\n", adapter->name);
+ pr_alert("%s: SGE packet mismatch\n", adapter->name);
}
if (cause & SGE_INT_FATAL)
t1_fatal_err(adapter);
@@ -1101,7 +1101,7 @@ static void unexpected_offload(struct adapter *adapter, struct freelQ *fl)
pci_dma_sync_single_for_cpu(adapter->pdev, pci_unmap_addr(ce, dma_addr),
pci_unmap_len(ce, dma_len), PCI_DMA_FROMDEVICE);
- CH_ERR("%s: unexpected offload packet, cmd %u\n",
+ pr_err("%s: unexpected offload packet, cmd %u\n",
adapter->name, *skb->data);
recycle_fl_buf(fl, fl->cidx);
}
@@ -1687,7 +1687,7 @@ static int t1_sge_tx(struct sk_buff *skb, struct adapter *adapter,
netif_stop_queue(dev);
set_bit(dev->if_port, &sge->stopped_tx_queues);
sge->stats.cmdQ_full[2]++;
- CH_ERR("%s: Tx ring full while queue awake!\n",
+ pr_err("%s: Tx ring full while queue awake!\n",
adapter->name);
}
spin_unlock(&q->lock);
diff --git a/drivers/net/chelsio/subr.c b/drivers/net/chelsio/subr.c
index 17720c6e5bf..53bde15fc94 100644
--- a/drivers/net/chelsio/subr.c
+++ b/drivers/net/chelsio/subr.c
@@ -90,7 +90,7 @@ int __t1_tpi_write(adapter_t *adapter, u32 addr, u32 value)
tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
TPI_ATTEMPTS, 3);
if (tpi_busy)
- CH_ALERT("%s: TPI write to 0x%x failed\n",
+ pr_alert("%s: TPI write to 0x%x failed\n",
adapter->name, addr);
return tpi_busy;
}
@@ -118,7 +118,7 @@ int __t1_tpi_read(adapter_t *adapter, u32 addr, u32 *valp)
tpi_busy = t1_wait_op_done(adapter, A_TPI_CSR, F_TPIRDY, 1,
TPI_ATTEMPTS, 3);
if (tpi_busy)
- CH_ALERT("%s: TPI read from 0x%x failed\n",
+ pr_alert("%s: TPI read from 0x%x failed\n",
adapter->name, addr);
else
*valp = readl(adapter->regs + A_TPI_RD_DATA);
@@ -262,7 +262,7 @@ static int mi1_wait_until_ready(adapter_t *adapter, int mi1_reg)
udelay(10);
} while (busy && --attempts);
if (busy)
- CH_ALERT("%s: MDIO operation timed out\n", adapter->name);
+ pr_alert("%s: MDIO operation timed out\n", adapter->name);
return busy;
}
@@ -528,7 +528,7 @@ static const struct board_info t1_board[] = {
};
-struct pci_device_id t1_pci_tbl[] = {
+DEFINE_PCI_DEVICE_TABLE(t1_pci_tbl) = {
CH_DEVICE(8, 0, CH_BRD_T110_1CU),
CH_DEVICE(8, 1, CH_BRD_T110_1CU),
CH_DEVICE(7, 0, CH_BRD_N110_1F),
@@ -581,7 +581,7 @@ int t1_seeprom_read(adapter_t *adapter, u32 addr, __le32 *data)
} while (!(val & F_VPD_OP_FLAG) && --i);
if (!(val & F_VPD_OP_FLAG)) {
- CH_ERR("%s: reading EEPROM address 0x%x failed\n",
+ pr_err("%s: reading EEPROM address 0x%x failed\n",
adapter->name, addr);
return -EIO;
}
@@ -734,8 +734,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
break;
case CHBT_BOARD_8000:
case CHBT_BOARD_CHT110:
- CH_DBG(adapter, INTR, "External interrupt cause 0x%x\n",
- cause);
+ if (netif_msg_intr(adapter))
+ dev_dbg(&adapter->pdev->dev,
+ "External interrupt cause 0x%x\n", cause);
if (cause & ELMER0_GP_BIT1) { /* PMC3393 INTB */
struct cmac *mac = adapter->port[0].mac;
@@ -746,8 +747,9 @@ int t1_elmer0_ext_intr_handler(adapter_t *adapter)
t1_tpi_read(adapter,
A_ELMER0_GPI_STAT, &mod_detect);
- CH_MSG(adapter, INFO, LINK, "XPAK %s\n",
- mod_detect ? "removed" : "inserted");
+ if (netif_msg_link(adapter))
+ dev_info(&adapter->pdev->dev, "XPAK %s\n",
+ mod_detect ? "removed" : "inserted");
}
break;
#ifdef CONFIG_CHELSIO_T1_COUGAR
@@ -1084,7 +1086,7 @@ static void __devinit init_link_config(struct link_config *lc,
#ifdef CONFIG_CHELSIO_T1_COUGAR
if (bi->clock_cspi && !(adapter->cspi = t1_cspi_create(adapter))) {
- CH_ERR("%s: CSPI initialization failed\n",
+ pr_err("%s: CSPI initialization failed\n",
adapter->name);
goto error;
}
@@ -1105,20 +1107,20 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
adapter->sge = t1_sge_create(adapter, &adapter->params.sge);
if (!adapter->sge) {
- CH_ERR("%s: SGE initialization failed\n",
+ pr_err("%s: SGE initialization failed\n",
adapter->name);
goto error;
}
if (bi->espi_nports && !(adapter->espi = t1_espi_create(adapter))) {
- CH_ERR("%s: ESPI initialization failed\n",
+ pr_err("%s: ESPI initialization failed\n",
adapter->name);
goto error;
}
adapter->tp = t1_tp_create(adapter, &adapter->params.tp);
if (!adapter->tp) {
- CH_ERR("%s: TP initialization failed\n",
+ pr_err("%s: TP initialization failed\n",
adapter->name);
goto error;
}
@@ -1138,14 +1140,14 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
adapter->port[i].phy = bi->gphy->create(adapter->port[i].dev,
phy_addr, bi->mdio_ops);
if (!adapter->port[i].phy) {
- CH_ERR("%s: PHY %d initialization failed\n",
+ pr_err("%s: PHY %d initialization failed\n",
adapter->name, i);
goto error;
}
adapter->port[i].mac = mac = bi->gmac->create(adapter, i);
if (!mac) {
- CH_ERR("%s: MAC %d initialization failed\n",
+ pr_err("%s: MAC %d initialization failed\n",
adapter->name, i);
goto error;
}
@@ -1157,7 +1159,7 @@ int __devinit t1_init_sw_modules(adapter_t *adapter,
if (!t1_is_asic(adapter) || bi->chip_mac == CHBT_MAC_DUMMY)
mac->ops->macaddress_get(mac, hw_addr);
else if (vpd_macaddress_get(adapter, i, hw_addr)) {
- CH_ERR("%s: could not read MAC address from VPD ROM\n",
+ pr_err("%s: could not read MAC address from VPD ROM\n",
adapter->port[i].dev->name);
goto error;
}
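Both cas_pci_tbl and t1_pci_tbl switch to DEFINE_PCI_DEVICE_TABLE, which in kernels of roughly this vintage expands to a const struct pci_device_id array placed in __devinitconst. A sketch of declaring such a table; demo_pci_tbl and the single ID entry are placeholders:

#include <linux/module.h>
#include <linux/pci.h>

static DEFINE_PCI_DEVICE_TABLE(demo_pci_tbl) = {
        { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI) },
        { }     /* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, demo_pci_tbl);

t1_pci_tbl itself stays non-static because common.h now declares it extern const, so the declaration and the table definition keep matching qualifiers.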
diff --git a/drivers/net/chelsio/vsc7326.c b/drivers/net/chelsio/vsc7326.c
index 99b51f61fe7..c844111cffe 100644
--- a/drivers/net/chelsio/vsc7326.c
+++ b/drivers/net/chelsio/vsc7326.c
@@ -48,14 +48,14 @@ static void vsc_read(adapter_t *adapter, u32 addr, u32 *val)
i++;
} while (((status & 1) == 0) && (i < 50));
if (i == 50)
- CH_ERR("Invalid tpi read from MAC, breaking loop.\n");
+ pr_err("Invalid tpi read from MAC, breaking loop.\n");
t1_tpi_read(adapter, (REG_LOCAL_DATA << 2) + 4, &vlo);
t1_tpi_read(adapter, REG_LOCAL_DATA << 2, &vhi);
*val = (vhi << 16) | vlo;
- /* CH_ERR("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+ /* pr_err("rd: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
((addr&0xe000)>>13), ((addr&0x1e00)>>9),
((addr&0x01fe)>>1), *val); */
spin_unlock_bh(&adapter->mac_lock);
@@ -66,7 +66,7 @@ static void vsc_write(adapter_t *adapter, u32 addr, u32 data)
spin_lock_bh(&adapter->mac_lock);
t1_tpi_write(adapter, (addr << 2) + 4, data & 0xFFFF);
t1_tpi_write(adapter, addr << 2, (data >> 16) & 0xFFFF);
- /* CH_ERR("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
+ /* pr_err("wr: block: 0x%x sublock: 0x%x reg: 0x%x data: 0x%x\n",
((addr&0xe000)>>13), ((addr&0x1e00)>>9),
((addr&0x01fe)>>1), data); */
spin_unlock_bh(&adapter->mac_lock);
@@ -225,7 +225,7 @@ static void run_table(adapter_t *adapter, struct init_table *ib, int len)
for (i = 0; i < len; i++) {
if (ib[i].addr == INITBLOCK_SLEEP) {
udelay( ib[i].data );
- CH_ERR("sleep %d us\n",ib[i].data);
+ pr_err("sleep %d us\n",ib[i].data);
} else
vsc_write( adapter, ib[i].addr, ib[i].data );
}
@@ -241,7 +241,7 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
(address != 0x2) &&
(address != 0xd) &&
(address != 0xe))
- CH_ERR("No bist address: 0x%x\n", address);
+ pr_err("No bist address: 0x%x\n", address);
data = ((0x00 << 24) | ((address & 0xff) << 16) | (0x00 << 8) |
((moduleid & 0xff) << 0));
@@ -251,9 +251,9 @@ static int bist_rd(adapter_t *adapter, int moduleid, int address)
vsc_read(adapter, REG_RAM_BIST_RESULT, &result);
if ((result & (1 << 9)) != 0x0)
- CH_ERR("Still in bist read: 0x%x\n", result);
+ pr_err("Still in bist read: 0x%x\n", result);
else if ((result & (1 << 8)) != 0x0)
- CH_ERR("bist read error: 0x%x\n", result);
+ pr_err("bist read error: 0x%x\n", result);
return (result & 0xff);
}
@@ -268,10 +268,10 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
(address != 0x2) &&
(address != 0xd) &&
(address != 0xe))
- CH_ERR("No bist address: 0x%x\n", address);
+ pr_err("No bist address: 0x%x\n", address);
if (value > 255)
- CH_ERR("Suspicious write out of range value: 0x%x\n", value);
+ pr_err("Suspicious write out of range value: 0x%x\n", value);
data = ((0x01 << 24) | ((address & 0xff) << 16) | (value << 8) |
((moduleid & 0xff) << 0));
@@ -281,9 +281,9 @@ static int bist_wr(adapter_t *adapter, int moduleid, int address, int value)
vsc_read(adapter, REG_RAM_BIST_CMD, &result);
if ((result & (1 << 27)) != 0x0)
- CH_ERR("Still in bist write: 0x%x\n", result);
+ pr_err("Still in bist write: 0x%x\n", result);
else if ((result & (1 << 26)) != 0x0)
- CH_ERR("bist write error: 0x%x\n", result);
+ pr_err("bist write error: 0x%x\n", result);
return 0;
}
@@ -306,7 +306,7 @@ static int check_bist(adapter_t *adapter, int moduleid)
column = ((bist_rd(adapter,moduleid, 0x0e)<<8) +
(bist_rd(adapter,moduleid, 0x0d)));
if ((result & 3) != 0x3)
- CH_ERR("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
+ pr_err("Result: 0x%x BIST error in ram %d, column: 0x%04x\n",
result, moduleid, column);
return 0;
}
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 4332b3a2faf..9781942992e 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2009 Broadcom Corporation
+ * Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -10,6 +10,8 @@
* Modified and maintained by: Michael Chan <mchan@broadcom.com>
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
@@ -47,7 +49,6 @@
#include "cnic_defs.h"
#define DRV_MODULE_NAME "cnic"
-#define PFX DRV_MODULE_NAME ": "
static char version[] __devinitdata =
"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
@@ -326,6 +327,12 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
if (l5_cid >= MAX_CM_SK_TBL_SZ)
break;
+ rcu_read_lock();
+ if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
+ rc = -ENODEV;
+ rcu_read_unlock();
+ break;
+ }
csk = &cp->csk_tbl[l5_cid];
csk_hold(csk);
if (cnic_in_use(csk)) {
@@ -340,6 +347,7 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
cnic_cm_set_pg(csk);
}
csk_put(csk);
+ rcu_read_unlock();
rc = 0;
}
}
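The cnic hunk above takes rcu_read_lock() and re-checks via rcu_dereference(cp->ulp_ops[CNIC_ULP_L4]) that an L4 ULP is still registered before touching the socket table. A stripped-down sketch of that RCU-guarded ops lookup; demo_ops and demo_ops_ptr are stand-ins for the cnic structures:

#include <linux/errno.h>
#include <linux/rcupdate.h>

struct demo_ops {
        void (*handle)(void);
};

static struct demo_ops *demo_ops_ptr;   /* published via rcu_assign_pointer() */

static int demo_call_ops(void)
{
        struct demo_ops *ops;
        int rc = 0;

        rcu_read_lock();
        ops = rcu_dereference(demo_ops_ptr);    /* NULL once unregistered */
        if (!ops)
                rc = -ENODEV;
        else
                ops->handle();
        rcu_read_unlock();

        return rc;
}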
@@ -409,14 +417,13 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
struct cnic_dev *dev;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
- printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
- ulp_type);
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl[ulp_type]) {
- printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
- "been registered\n", ulp_type);
+ pr_err("%s: Type %d has already been registered\n",
+ __func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
@@ -455,15 +462,14 @@ int cnic_unregister_driver(int ulp_type)
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
- printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
- ulp_type);
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl[ulp_type];
if (!ulp_ops) {
- printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
- "been registered\n", ulp_type);
+ pr_err("%s: Type %d has not been registered\n",
+ __func__, ulp_type);
goto out_unlock;
}
read_lock(&cnic_dev_lock);
@@ -471,8 +477,8 @@ int cnic_unregister_driver(int ulp_type)
struct cnic_local *cp = dev->cnic_priv;
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
- printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
- "still has devices registered\n", ulp_type);
+ pr_err("%s: Type %d still has devices registered\n",
+ __func__, ulp_type);
read_unlock(&cnic_dev_lock);
goto out_unlock;
}
@@ -492,8 +498,7 @@ int cnic_unregister_driver(int ulp_type)
}
if (atomic_read(&ulp_ops->ref_count) != 0)
- printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
- " to zero.\n", dev->netdev->name);
+ netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
return 0;
out_unlock:
@@ -511,20 +516,19 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
struct cnic_ulp_ops *ulp_ops;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
- printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
- ulp_type);
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
if (cnic_ulp_tbl[ulp_type] == NULL) {
- printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
- "has not been registered\n", ulp_type);
+ pr_err("%s: Driver with type %d has not been registered\n",
+ __func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EAGAIN;
}
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
- printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
- "been registered to this device\n", ulp_type);
+ pr_err("%s: Type %d has already been registered to this device\n",
+ __func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EBUSY;
}
@@ -552,8 +556,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
int i = 0;
if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
- printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
- ulp_type);
+ pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
mutex_lock(&cnic_lock);
@@ -561,8 +564,8 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
cnic_put(dev);
} else {
- printk(KERN_ERR PFX "cnic_unregister_device: device not "
- "registered to this ulp type %d\n", ulp_type);
+ pr_err("%s: device not registered to this ulp type %d\n",
+ __func__, ulp_type);
mutex_unlock(&cnic_lock);
return -EINVAL;
}
@@ -576,8 +579,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
i++;
}
if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
- printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
- " to complete.\n", dev->netdev->name);
+ netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
return 0;
}
@@ -898,7 +900,8 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
uinfo->mem[0].memtype = UIO_MEM_PHYS;
if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
- uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
+ uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
+ PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
@@ -1101,10 +1104,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
if (ret)
goto error;
- cp->bnx2x_status_blk = cp->status_blk;
cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
- memset(cp->bnx2x_status_blk, 0, sizeof(struct host_status_block));
+ memset(cp->status_blk.bnx2x, 0, sizeof(*cp->status_blk.bnx2x));
cp->l2_rx_ring_size = 15;
@@ -1865,8 +1867,7 @@ static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
}
if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
- printk(KERN_ERR PFX "%s: conn_buf size too big\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "conn_buf size too big\n");
return -ENOMEM;
}
conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
@@ -2026,13 +2027,13 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
break;
default:
ret = 0;
- printk(KERN_ERR PFX "%s: Unknown type of KWQE(0x%x)\n",
- dev->netdev->name, opcode);
+ netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
+ opcode);
break;
}
if (ret < 0)
- printk(KERN_ERR PFX "%s: KWQE(0x%x) failed\n",
- dev->netdev->name, opcode);
+ netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
+ opcode);
i += work;
}
return 0;
@@ -2074,8 +2075,8 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
goto end;
else {
- printk(KERN_ERR PFX "%s: Unknown type of KCQE(0x%x)\n",
- dev->netdev->name, kcqe_op_flag);
+ netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
+ kcqe_op_flag);
goto end;
}
@@ -2204,7 +2205,7 @@ static void cnic_service_bnx2_msix(unsigned long data)
{
struct cnic_dev *dev = (struct cnic_dev *) data;
struct cnic_local *cp = dev->cnic_priv;
- struct status_block_msix *status_blk = cp->bnx2_status_blk;
+ struct status_block_msix *status_blk = cp->status_blk.bnx2;
u32 status_idx = status_blk->status_idx;
u16 hw_prod, sw_prod;
int kcqe_cnt;
@@ -2250,7 +2251,7 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
if (cp->ack_int)
cp->ack_int(dev);
- prefetch(cp->status_blk);
+ prefetch(cp->status_blk.gen);
prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
@@ -2291,7 +2292,7 @@ static void cnic_service_bnx2x_bh(unsigned long data)
struct cnic_local *cp = dev->cnic_priv;
u16 hw_prod, sw_prod;
struct cstorm_status_block_c *sblk =
- &cp->bnx2x_status_blk->c_status_block;
+ &cp->status_blk.bnx2x->c_status_block;
u32 status_idx = sblk->status_block_index;
int kcqe_cnt;
@@ -2333,7 +2334,7 @@ static int cnic_service_bnx2x(void *data, void *status_blk)
struct cnic_local *cp = dev->cnic_priv;
u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
- prefetch(cp->status_blk);
+ prefetch(cp->status_blk.bnx2x);
prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
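Several cnic hunks switch from separate status-block pointers to one member addressed as cp->status_blk.gen, .bnx2 or .bnx2x. The matching cnic.h change is outside this excerpt; the union below is only an assumed reconstruction inferred from those accesses (and relies on the bnx2/bnx2x status-block types from their respective headers), not the patch's actual definition:

/* Assumed reconstruction of the cnic.h side of this patch, inferred solely
 * from the cp->status_blk.{gen,bnx2,bnx2x} accesses above. */
union demo_status_blk {
        void *gen;                              /* chip-agnostic view */
        struct status_block_msix *bnx2;         /* bnx2 per-vector block */
        struct host_status_block *bnx2x;        /* bnx2x block (holds c_status_block) */
};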
@@ -2513,7 +2514,7 @@ static int cnic_cm_offload_pg(struct cnic_sock *csk)
l4kwqe->sa5 = dev->mac_addr[5];
l4kwqe->etype = ETH_P_IP;
- l4kwqe->ipid_count = DEF_IPID_COUNT;
+ l4kwqe->ipid_start = DEF_IPID_START;
l4kwqe->host_opaque = csk->l5_cid;
if (csk->vlan_id) {
@@ -2859,8 +2860,8 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
{
struct cnic_dev *dev = csk->dev;
struct cnic_local *cp = dev->cnic_priv;
- int is_v6, err, rc = -ENETUNREACH;
- struct dst_entry *dst;
+ int is_v6, rc = 0;
+ struct dst_entry *dst = NULL;
struct net_device *realdev;
u32 local_port;
@@ -2876,39 +2877,31 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
clear_bit(SK_F_IPV6, &csk->flags);
if (is_v6) {
-#if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
set_bit(SK_F_IPV6, &csk->flags);
- err = cnic_get_v6_route(&saddr->remote.v6, &dst);
- if (err)
- return err;
-
- if (!dst || dst->error || !dst->dev)
- goto err_out;
+ cnic_get_v6_route(&saddr->remote.v6, &dst);
memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
sizeof(struct in6_addr));
csk->dst_port = saddr->remote.v6.sin6_port;
local_port = saddr->local.v6.sin6_port;
-#else
- return rc;
-#endif
} else {
- err = cnic_get_v4_route(&saddr->remote.v4, &dst);
- if (err)
- return err;
-
- if (!dst || dst->error || !dst->dev)
- goto err_out;
+ cnic_get_v4_route(&saddr->remote.v4, &dst);
csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
csk->dst_port = saddr->remote.v4.sin_port;
local_port = saddr->local.v4.sin_port;
}
- csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
- if (realdev != dev->netdev)
- goto err_out;
+ csk->vlan_id = 0;
+ csk->mtu = dev->netdev->mtu;
+ if (dst && dst->dev) {
+ u16 vlan = cnic_get_vlan(dst->dev, &realdev);
+ if (realdev == dev->netdev) {
+ csk->vlan_id = vlan;
+ csk->mtu = dst_mtu(dst);
+ }
+ }
if (local_port >= CNIC_LOCAL_PORT_MIN &&
local_port < CNIC_LOCAL_PORT_MAX) {
@@ -2926,9 +2919,6 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
}
csk->src_port = local_port;
- csk->mtu = dst_mtu(dst);
- rc = 0;
-
err_out:
dst_release(dst);
return rc;
@@ -3052,6 +3042,14 @@ static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
goto done;
}
+ /* Possible PG kcqe status: SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
+ if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ cnic_cm_upcall(cp, csk,
+ L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+ goto done;
+ }
+
csk->pg_cid = kcqe->pg_cid;
set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
cnic_cm_conn_req(csk);
@@ -3089,6 +3087,13 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
}
switch (opcode) {
+ case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
+ if (l4kcqe->status != 0) {
+ clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+ cnic_cm_upcall(cp, csk,
+ L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
+ }
+ break;
case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
if (l4kcqe->status == 0)
set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
@@ -3099,7 +3104,10 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
break;
case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
- if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ cnic_cm_upcall(cp, csk, opcode);
+ break;
+ } else if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
csk->state = opcode;
/* fall through */
case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
@@ -3163,6 +3171,16 @@ static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
return 1;
}
+ /* 57710+ only workaround to handle unsolicited RESET_COMP
+ * which will be treated like a RESET RCVD notification
+ * which triggers the clean up procedure
+ */
+ else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
+ if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
+ csk->state = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
+ return 1;
+ }
+ }
return 0;
}
@@ -3172,10 +3190,8 @@ static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
struct cnic_local *cp = dev->cnic_priv;
clear_bit(SK_F_CONNECT_START, &csk->flags);
- if (cnic_ready_to_close(csk, opcode)) {
- cnic_close_conn(csk);
- cnic_cm_upcall(cp, csk, opcode);
- }
+ cnic_close_conn(csk);
+ cnic_cm_upcall(cp, csk, opcode);
}
static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
@@ -3393,8 +3409,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
- cp->bnx2_status_blk = cp->status_blk;
- cp->last_status_idx = cp->bnx2_status_blk->status_idx;
+ cp->last_status_idx = cp->status_blk.bnx2->status_idx;
tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
(unsigned long) dev);
err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
@@ -3403,7 +3418,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
tasklet_disable(&cp->cnic_irq_task);
return err;
}
- while (cp->bnx2_status_blk->status_completion_producer_index &&
+ while (cp->status_blk.bnx2->status_completion_producer_index &&
i < 10) {
CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
1 << (11 + sblk_num));
@@ -3411,13 +3426,13 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
i++;
barrier();
}
- if (cp->bnx2_status_blk->status_completion_producer_index) {
+ if (cp->status_blk.bnx2->status_completion_producer_index) {
cnic_free_irq(dev);
goto failed;
}
} else {
- struct status_block *sblk = cp->status_blk;
+ struct status_block *sblk = cp->status_blk.gen;
u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
int i = 0;
@@ -3435,8 +3450,7 @@ static int cnic_init_bnx2_irq(struct cnic_dev *dev)
return 0;
failed:
- printk(KERN_ERR PFX "%s: " "KCQ index not resetting to 0.\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
return -EBUSY;
}
@@ -3475,7 +3489,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
int i;
struct tx_bd *txbd;
dma_addr_t buf_map;
- struct status_block *s_blk = cp->status_blk;
+ struct status_block *s_blk = cp->status_blk.gen;
sb_id = cp->status_blk_num;
tx_cid = 20;
@@ -3483,7 +3497,7 @@ static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
cnic_init_context(dev, tx_cid + 1);
cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
- struct status_block_msix *sblk = cp->status_blk;
+ struct status_block_msix *sblk = cp->status_blk.bnx2;
tx_cid = TX_TSS_CID + sb_id - 1;
cnic_init_context(dev, tx_cid);
@@ -3539,7 +3553,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
u32 cid_addr, sb_id, val, coal_reg, coal_val;
int i;
struct rx_bd *rxbd;
- struct status_block *s_blk = cp->status_blk;
+ struct status_block *s_blk = cp->status_blk.gen;
sb_id = cp->status_blk_num;
cnic_init_context(dev, 2);
@@ -3547,7 +3561,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
coal_reg = BNX2_HC_COMMAND;
coal_val = CNIC_RD(dev, coal_reg);
if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
- struct status_block_msix *sblk = cp->status_blk;
+ struct status_block_msix *sblk = cp->status_blk.bnx2;
cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
coal_reg = BNX2_HC_COALESCE_NOW;
@@ -3646,7 +3660,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
{
struct cnic_local *cp = dev->cnic_priv;
struct cnic_eth_dev *ethdev = cp->ethdev;
- struct status_block *sblk = cp->status_blk;
+ struct status_block *sblk = cp->status_blk.gen;
u32 val;
int err;
@@ -3758,8 +3772,7 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
err = cnic_init_bnx2_irq(dev);
if (err) {
- printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "cnic_init_irq failed\n");
cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
return err;
@@ -4122,8 +4135,7 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
offsetof(struct cstorm_status_block_c,
index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
if (eq_idx != 0) {
- printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n",
- dev->netdev->name, eq_idx);
+ netdev_err(dev->netdev, "EQ cons index %x != 0\n", eq_idx);
return -EBUSY;
}
ret = cnic_init_bnx2x_irq(dev);
@@ -4208,8 +4220,7 @@ static int cnic_register_netdev(struct cnic_dev *dev)
err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
if (err)
- printk(KERN_ERR PFX "%s: register_cnic failed\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "register_cnic failed\n");
return err;
}
@@ -4238,13 +4249,12 @@ static int cnic_start_hw(struct cnic_dev *dev)
cp->chip_id = ethdev->chip_id;
pci_dev_get(dev->pcidev);
cp->func = PCI_FUNC(dev->pcidev->devfn);
- cp->status_blk = ethdev->irq_arr[0].status_blk;
+ cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
err = cp->alloc_resc(dev);
if (err) {
- printk(KERN_ERR PFX "%s: allocate resource failure\n",
- dev->netdev->name);
+ netdev_err(dev->netdev, "allocate resource failure\n");
goto err1;
}
@@ -4326,10 +4336,9 @@ static void cnic_free_dev(struct cnic_dev *dev)
i++;
}
if (atomic_read(&dev->ref_count) != 0)
- printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
- " to zero.\n", dev->netdev->name);
+ netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
- printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
+ netdev_info(dev->netdev, "Removed CNIC device\n");
dev_put(dev->netdev);
kfree(dev);
}
@@ -4345,8 +4354,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
cdev = kzalloc(alloc_size , GFP_KERNEL);
if (cdev == NULL) {
- printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
- dev->name);
+ netdev_err(dev, "allocate dev struct failure\n");
return NULL;
}
@@ -4364,7 +4372,7 @@ static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
spin_lock_init(&cp->cnic_ulp_lock);
- printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
+ netdev_info(dev, "Added CNIC device\n");
return cdev;
}
@@ -4605,7 +4613,7 @@ static int __init cnic_init(void)
{
int rc = 0;
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
rc = register_netdevice_notifier(&cnic_netdev_notifier);
if (rc) {
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 241d09acc0d..a0d853dff98 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2009 Broadcom Corporation
+ * Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -101,7 +101,7 @@ struct cnic_redirect_entry {
#define BNX2X_KWQ_DATA(cp, x) \
&(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
-#define DEF_IPID_COUNT 0xc001
+#define DEF_IPID_START 0x8000
#define DEF_KA_TIMEOUT 10000
#define DEF_KA_INTERVAL 300000
@@ -224,9 +224,12 @@ struct cnic_local {
u16 kcq_prod_idx;
u32 kcq_io_addr;
- void *status_blk;
- struct status_block_msix *bnx2_status_blk;
- struct host_status_block *bnx2x_status_blk;
+ union {
+ void *gen;
+ struct status_block_msix *bnx2;
+ struct host_status_block *bnx2x;
+ } status_blk;
+
struct host_def_status_block *bnx2x_def_status_blk;
u32 status_blk_num;
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index 9827b278dc7..7ce694d41b6 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2009 Broadcom Corporation
+ * Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 8aaf98bdd4f..110c62072e6 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006 Broadcom Corporation
+ * Copyright (c) 2006-2010 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -12,8 +12,8 @@
#ifndef CNIC_IF_H
#define CNIC_IF_H
-#define CNIC_MODULE_VERSION "2.1.0"
-#define CNIC_MODULE_RELDATE "Oct 10, 2009"
+#define CNIC_MODULE_VERSION "2.1.1"
+#define CNIC_MODULE_RELDATE "Feb 22, 2010"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index bf2072e5420..b85c81f60d1 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -328,7 +328,6 @@ static int cpmac_config(struct net_device *dev, struct ifmap *map)
static void cpmac_set_multicast_list(struct net_device *dev)
{
struct dev_mc_list *iter;
- int i;
u8 tmp;
u32 mbp, bit, hash[2] = { 0, };
struct cpmac_priv *priv = netdev_priv(dev);
@@ -348,8 +347,7 @@ static void cpmac_set_multicast_list(struct net_device *dev)
* cpmac uses some strange mac address hashing
* (not crc32)
*/
- for (i = 0, iter = dev->mc_list; i < dev->mc_count;
- i++, iter = iter->next) {
+ netdev_for_each_mc_addr(iter, dev) {
bit = 0;
tmp = iter->dmi_addr[0];
bit ^= (tmp >> 2) ^ (tmp << 4);
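The conversions above (and in several drivers below) replace open-coded walks of dev->mc_list / dev->mc_count with the netdev_for_each_mc_addr(), netdev_mc_count() and netdev_mc_empty() helpers. A minimal sketch of the new idiom, for illustration only; the example_* names and EXAMPLE_MC_FILTER_LIMIT are hypothetical and not part of this patch:

/*
 * Illustrative sketch only -- not part of this patch.
 */
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

static void example_set_rx_mode(struct net_device *dev)
{
	struct dev_mc_list *dmi;
	u32 hash[2] = { 0, 0 };

	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
	    netdev_mc_count(dev) > EXAMPLE_MC_FILTER_LIMIT) {
		example_hw_accept_all_multicast(dev);	/* hypothetical helper */
		return;
	}

	/* replaces: for (i = 0, dmi = dev->mc_list; i < dev->mc_count; ...) */
	netdev_for_each_mc_addr(dmi, dev) {
		u32 bit = ether_crc_le(ETH_ALEN, dmi->dmi_addr) >> 26;

		hash[bit >> 5] |= 1 << (bit & 0x1f);
	}
	example_hw_write_mc_hash(dev, hash);		/* hypothetical helper */
}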
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index a24be34a3f7..dd24aadb778 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1564,7 +1564,7 @@ static void
set_multicast_list(struct net_device *dev)
{
struct net_local *lp = netdev_priv(dev);
- int num_addr = dev->mc_count;
+ int num_addr = netdev_mc_count(dev);
unsigned long int lo_bits;
unsigned long int hi_bits;
@@ -1596,13 +1596,12 @@ set_multicast_list(struct net_device *dev)
} else {
/* MC mode, receive normal and MC packets */
char hash_ix;
- struct dev_mc_list *dmi = dev->mc_list;
- int i;
+ struct dev_mc_list *dmi;
char *baddr;
lo_bits = 0x00000000ul;
hi_bits = 0x00000000ul;
- for (i = 0; i < num_addr; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
/* Calculate the hash index for the GA registers */
hash_ix = 0;
@@ -1632,7 +1631,6 @@ set_multicast_list(struct net_device *dev)
} else {
lo_bits |= (1 << hash_ix);
}
- dmi = dmi->next;
}
/* Disable individual receive */
SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, individual, discard);
diff --git a/drivers/net/cs89x0.c b/drivers/net/cs89x0.c
index 0e79cef95c0..14624019ce7 100644
--- a/drivers/net/cs89x0.c
+++ b/drivers/net/cs89x0.c
@@ -1785,7 +1785,7 @@ static void set_multicast_list(struct net_device *dev)
{
lp->rx_mode = RX_ALL_ACCEPT;
}
- else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
{
/* The multicast-accept list is initialized to accept-all, and we
rely on higher-level filtering for now. */
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 3e8618b4efb..4cd7f420766 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -264,6 +264,10 @@ struct adapter {
struct work_struct fatal_error_handler_task;
struct work_struct link_fault_handler_task;
+ struct work_struct db_full_task;
+ struct work_struct db_empty_task;
+ struct work_struct db_drop_task;
+
struct dentry *debugfs_root;
struct mutex mdio_lock;
@@ -335,6 +339,7 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
int t3_get_desc(const struct sge_qset *qs, unsigned int qnum, unsigned int idx,
unsigned char *data);
irqreturn_t t3_sge_intr_msix(int irq, void *cookie);
+extern struct workqueue_struct *cxgb3_wq;
int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size);
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 6ff356d4c7a..fe08a004b0d 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -67,32 +67,6 @@
/* Additional NETIF_MSG_* categories */
#define NETIF_MSG_MMIO 0x8000000
-struct t3_rx_mode {
- struct net_device *dev;
- struct dev_mc_list *mclist;
- unsigned int idx;
-};
-
-static inline void init_rx_mode(struct t3_rx_mode *p, struct net_device *dev,
- struct dev_mc_list *mclist)
-{
- p->dev = dev;
- p->mclist = mclist;
- p->idx = 0;
-}
-
-static inline u8 *t3_get_next_mcaddr(struct t3_rx_mode *rm)
-{
- u8 *addr = NULL;
-
- if (rm->mclist && rm->idx < rm->dev->mc_count) {
- addr = rm->mclist->dmi_addr;
- rm->mclist = rm->mclist->next;
- rm->idx++;
- }
- return addr;
-}
-
enum {
MAX_NPORTS = 2, /* max # of ports */
MAX_FRAME_SIZE = 10240, /* max MAC frame size, including header + FCS */
@@ -746,7 +720,7 @@ void t3_mac_enable_exact_filters(struct cmac *mac);
int t3_mac_enable(struct cmac *mac, int which);
int t3_mac_disable(struct cmac *mac, int which);
int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu);
-int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm);
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev);
int t3_mac_set_address(struct cmac *mac, unsigned int idx, u8 addr[6]);
int t3_mac_set_num_ucast(struct cmac *mac, int n);
const struct mac_stats *t3_mac_update_stats(struct cmac *mac);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 89bec9c3c14..3e453e1d97e 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -45,6 +45,7 @@
#include <linux/firmware.h>
#include <linux/log2.h>
#include <linux/stringify.h>
+#include <linux/sched.h>
#include <asm/uaccess.h>
#include "common.h"
@@ -80,7 +81,7 @@ enum {
#define CH_DEVICE(devid, idx) \
{ PCI_VENDOR_ID_CHELSIO, devid, PCI_ANY_ID, PCI_ANY_ID, 0, 0, idx }
-static const struct pci_device_id cxgb3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(cxgb3_pci_tbl) = {
CH_DEVICE(0x20, 0), /* PE9000 */
CH_DEVICE(0x21, 1), /* T302E */
CH_DEVICE(0x22, 2), /* T310E */
@@ -140,7 +141,7 @@ MODULE_PARM_DESC(ofld_disable, "whether to enable offload at init time or not");
* will block keventd as it needs the rtnl lock, and we'll deadlock waiting
* for our work to complete. Get our own work queue to solve this.
*/
-static struct workqueue_struct *cxgb3_wq;
+struct workqueue_struct *cxgb3_wq;
/**
* link_report - show link status and link speed/duplex
@@ -324,11 +325,9 @@ void t3_os_phymod_changed(struct adapter *adap, int port_id)
static void cxgb_set_rxmode(struct net_device *dev)
{
- struct t3_rx_mode rm;
struct port_info *pi = netdev_priv(dev);
- init_rx_mode(&rm, dev, dev->mc_list);
- t3_mac_set_rx_mode(&pi->mac, &rm);
+ t3_mac_set_rx_mode(&pi->mac, dev);
}
/**
@@ -339,17 +338,15 @@ static void cxgb_set_rxmode(struct net_device *dev)
*/
static void link_start(struct net_device *dev)
{
- struct t3_rx_mode rm;
struct port_info *pi = netdev_priv(dev);
struct cmac *mac = &pi->mac;
- init_rx_mode(&rm, dev, dev->mc_list);
t3_mac_reset(mac);
t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
t3_mac_set_mtu(mac, dev->mtu);
t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
- t3_mac_set_rx_mode(mac, &rm);
+ t3_mac_set_rx_mode(mac, dev);
t3_link_start(&pi->phy, mac, &pi->link_config);
t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
@@ -590,6 +587,19 @@ static void setup_rss(struct adapter *adap)
V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map);
}
+static void ring_dbs(struct adapter *adap)
+{
+ int i, j;
+
+ for (i = 0; i < SGE_QSETS; i++) {
+ struct sge_qset *qs = &adap->sge.qs[i];
+
+ if (qs->adap)
+ for (j = 0; j < SGE_TXQ_PER_SET; j++)
+ t3_write_reg(adap, A_SG_KDOORBELL, F_SELEGRCNTX | V_EGRCNTX(qs->txq[j].cntxt_id));
+ }
+}
+
static void init_napi(struct adapter *adap)
{
int i;
@@ -2754,6 +2764,42 @@ static void t3_adap_check_task(struct work_struct *work)
spin_unlock_irq(&adapter->work_lock);
}
+static void db_full_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_full_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_FULL, 0);
+}
+
+static void db_empty_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_empty_task);
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_EMPTY, 0);
+}
+
+static void db_drop_task(struct work_struct *work)
+{
+ struct adapter *adapter = container_of(work, struct adapter,
+ db_drop_task);
+ unsigned long delay = 1000;
+ unsigned short r;
+
+ cxgb3_event_notify(&adapter->tdev, OFFLOAD_DB_DROP, 0);
+
+ /*
+ * Sleep a while before ringing the driver qset dbs.
+ * The delay is between 1000 and 2023 usecs.
+ */
+ get_random_bytes(&r, 2);
+ delay += r & 1023;
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(delay));
+ ring_dbs(adapter);
+}
+
/*
* Processes external (PHY) interrupts in process context.
*/
@@ -3222,6 +3268,11 @@ static int __devinit init_one(struct pci_dev *pdev,
INIT_LIST_HEAD(&adapter->adapter_list);
INIT_WORK(&adapter->ext_intr_handler_task, ext_intr_task);
INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
+
+ INIT_WORK(&adapter->db_full_task, db_full_task);
+ INIT_WORK(&adapter->db_empty_task, db_empty_task);
+ INIT_WORK(&adapter->db_drop_task, db_drop_task);
+
INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index 75064eea1d8..9498361119d 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1252,7 +1252,7 @@ int cxgb3_offload_activate(struct adapter *adapter)
struct mtutab mtutab;
unsigned int l2t_capacity;
- t = kcalloc(1, sizeof(*t), GFP_KERNEL);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
if (!t)
return -ENOMEM;
diff --git a/drivers/net/cxgb3/cxgb3_offload.h b/drivers/net/cxgb3/cxgb3_offload.h
index 670aa62042d..929c298115c 100644
--- a/drivers/net/cxgb3/cxgb3_offload.h
+++ b/drivers/net/cxgb3/cxgb3_offload.h
@@ -73,7 +73,10 @@ enum {
OFFLOAD_STATUS_UP,
OFFLOAD_STATUS_DOWN,
OFFLOAD_PORT_DOWN,
- OFFLOAD_PORT_UP
+ OFFLOAD_PORT_UP,
+ OFFLOAD_DB_FULL,
+ OFFLOAD_DB_EMPTY,
+ OFFLOAD_DB_DROP
};
struct cxgb3_client {
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 1b5327b5a96..cb42353c9fd 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -254,6 +254,22 @@
#define V_LOPIODRBDROPERR(x) ((x) << S_LOPIODRBDROPERR)
#define F_LOPIODRBDROPERR V_LOPIODRBDROPERR(1U)
+#define S_HIPRIORITYDBFULL 7
+#define V_HIPRIORITYDBFULL(x) ((x) << S_HIPRIORITYDBFULL)
+#define F_HIPRIORITYDBFULL V_HIPRIORITYDBFULL(1U)
+
+#define S_HIPRIORITYDBEMPTY 6
+#define V_HIPRIORITYDBEMPTY(x) ((x) << S_HIPRIORITYDBEMPTY)
+#define F_HIPRIORITYDBEMPTY V_HIPRIORITYDBEMPTY(1U)
+
+#define S_LOPRIORITYDBFULL 5
+#define V_LOPRIORITYDBFULL(x) ((x) << S_LOPRIORITYDBFULL)
+#define F_LOPRIORITYDBFULL V_LOPRIORITYDBFULL(1U)
+
+#define S_LOPRIORITYDBEMPTY 4
+#define V_LOPRIORITYDBEMPTY(x) ((x) << S_LOPRIORITYDBEMPTY)
+#define F_LOPRIORITYDBEMPTY V_LOPRIORITYDBEMPTY(1U)
+
#define S_RSPQDISABLED 3
#define V_RSPQDISABLED(x) ((x) << S_RSPQDISABLED)
#define F_RSPQDISABLED V_RSPQDISABLED(1U)
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index 318a018ca7c..78e265b484b 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -42,6 +42,7 @@
#include "sge_defs.h"
#include "t3_cpl.h"
#include "firmware_exports.h"
+#include "cxgb3_offload.h"
#define USE_GTS 0
@@ -480,6 +481,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
if (q->pend_cred >= q->credits / 4) {
q->pend_cred = 0;
+ wmb();
t3_write_reg(adap, A_SG_KDOORBELL, V_EGRCNTX(q->cntxt_id));
}
}
@@ -2286,11 +2288,14 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
while (likely(budget_left && is_new_response(r, q))) {
int packet_complete, eth, ethpad = 2, lro = qs->lro_enabled;
struct sk_buff *skb = NULL;
- u32 len, flags = ntohl(r->flags);
- __be32 rss_hi = *(const __be32 *)r,
- rss_lo = r->rss_hdr.rss_hash_val;
+ u32 len, flags;
+ __be32 rss_hi, rss_lo;
+ rmb();
eth = r->rss_hdr.opcode == CPL_RX_PKT;
+ rss_hi = *(const __be32 *)r;
+ rss_lo = r->rss_hdr.rss_hash_val;
+ flags = ntohl(r->flags);
if (unlikely(flags & F_RSPD_ASYNC_NOTIF)) {
skb = alloc_skb(AN_PKT_SIZE, GFP_ATOMIC);
@@ -2501,7 +2506,10 @@ static int process_pure_responses(struct adapter *adap, struct sge_qset *qs,
refill_rspq(adap, q, q->credits);
q->credits = 0;
}
- } while (is_new_response(r, q) && is_pure_response(r));
+ if (!is_new_response(r, q))
+ break;
+ rmb();
+ } while (is_pure_response(r));
if (sleeping)
check_ring_db(adap, qs, sleeping);
@@ -2535,6 +2543,7 @@ static inline int handle_responses(struct adapter *adap, struct sge_rspq *q)
if (!is_new_response(r, q))
return -1;
+ rmb();
if (is_pure_response(r) && process_pure_responses(adap, qs, r) == 0) {
t3_write_reg(adap, A_SG_GTS, V_RSPQ(q->cntxt_id) |
V_NEWTIMER(q->holdoff_tmr) | V_NEWINDEX(q->cidx));
@@ -2833,8 +2842,13 @@ void t3_sge_err_intr_handler(struct adapter *adapter)
}
if (status & (F_HIPIODRBDROPERR | F_LOPIODRBDROPERR))
- CH_ALERT(adapter, "SGE dropped %s priority doorbell\n",
- status & F_HIPIODRBDROPERR ? "high" : "lo");
+ queue_work(cxgb3_wq, &adapter->db_drop_task);
+
+ if (status & (F_HIPRIORITYDBFULL | F_LOPRIORITYDBFULL))
+ queue_work(cxgb3_wq, &adapter->db_full_task);
+
+ if (status & (F_HIPRIORITYDBEMPTY | F_LOPRIORITYDBEMPTY))
+ queue_work(cxgb3_wq, &adapter->db_empty_task);
t3_write_reg(adapter, A_SG_INT_CAUSE, status);
if (status & SGE_FATALERR)
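The rmb()/wmb() calls added in sge.c order the response-descriptor "new" flag against the descriptor payload reads, and the free-list/descriptor writes against the doorbell write. A minimal sketch of the same ordering pattern, for illustration only; all example_* names and types are hypothetical:

/*
 * Illustrative sketch only -- not part of this patch.
 */
static int example_poll_response(struct example_adapter *adap,
				 struct example_rspq *q)
{
	struct example_rsp_desc *r = &q->desc[q->cidx];
	u32 flags;

	/* Consumer: the device writes the payload first and the "new"
	 * flag last, so order the flag read before the payload reads. */
	if (!example_desc_is_new(r, q))
		return -1;
	rmb();
	flags = ntohl(r->flags);

	return example_handle_response(adap, r, flags);
}

static void example_refill_ring(struct example_adapter *adap,
				struct example_fl *q)
{
	example_post_buffers(adap, q);
	/* Producer: buffer/descriptor writes must be visible before the
	 * doorbell write that tells the device to fetch them. */
	wmb();
	example_ring_doorbell(adap, q->cntxt_id);
}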
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 032cfe06557..95a8ba0759f 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1262,7 +1262,8 @@ void t3_link_changed(struct adapter *adapter, int port_id)
lc->fc = fc;
}
- t3_os_link_changed(adapter, port_id, link_ok, speed, duplex, fc);
+ t3_os_link_changed(adapter, port_id, link_ok && !pi->link_fault,
+ speed, duplex, fc);
}
void t3_link_fault(struct adapter *adapter, int port_id)
@@ -1432,7 +1433,10 @@ static int t3_handle_intr_status(struct adapter *adapter, unsigned int reg,
F_IRPARITYERROR | V_ITPARITYERROR(M_ITPARITYERROR) | \
V_FLPARITYERROR(M_FLPARITYERROR) | F_LODRBPARITYERROR | \
F_HIDRBPARITYERROR | F_LORCQPARITYERROR | \
- F_HIRCQPARITYERROR)
+ F_HIRCQPARITYERROR | F_LOPRIORITYDBFULL | \
+ F_HIPRIORITYDBFULL | F_LOPRIORITYDBEMPTY | \
+ F_HIPRIORITYDBEMPTY | F_HIPIODRBDROPERR | \
+ F_LOPIODRBDROPERR)
#define MC5_INTR_MASK (F_PARITYERR | F_ACTRGNFULL | F_UNKNOWNCMD | \
F_REQQPARERR | F_DISPQPARERR | F_DELACTEMPTY | \
F_NFASRCHFAIL)
diff --git a/drivers/net/cxgb3/xgmac.c b/drivers/net/cxgb3/xgmac.c
index 0109ee4f2f9..c142a2132e9 100644
--- a/drivers/net/cxgb3/xgmac.c
+++ b/drivers/net/cxgb3/xgmac.c
@@ -297,29 +297,30 @@ static int hash_hw_addr(const u8 * addr)
return hash;
}
-int t3_mac_set_rx_mode(struct cmac *mac, struct t3_rx_mode *rm)
+int t3_mac_set_rx_mode(struct cmac *mac, struct net_device *dev)
{
u32 val, hash_lo, hash_hi;
struct adapter *adap = mac->adapter;
unsigned int oft = mac->offset;
val = t3_read_reg(adap, A_XGM_RX_CFG + oft) & ~F_COPYALLFRAMES;
- if (rm->dev->flags & IFF_PROMISC)
+ if (dev->flags & IFF_PROMISC)
val |= F_COPYALLFRAMES;
t3_write_reg(adap, A_XGM_RX_CFG + oft, val);
- if (rm->dev->flags & IFF_ALLMULTI)
+ if (dev->flags & IFF_ALLMULTI)
hash_lo = hash_hi = 0xffffffff;
else {
- u8 *addr;
+ struct dev_mc_list *dmi;
int exact_addr_idx = mac->nucast;
hash_lo = hash_hi = 0;
- while ((addr = t3_get_next_mcaddr(rm)))
+ netdev_for_each_mc_addr(dmi, dev)
if (exact_addr_idx < EXACT_ADDR_FILTERS)
- set_addr_filter(mac, exact_addr_idx++, addr);
+ set_addr_filter(mac, exact_addr_idx++,
+ dmi->dmi_addr);
else {
- int hash = hash_hw_addr(addr);
+ int hash = hash_hw_addr(dmi->dmi_addr);
if (hash < 32)
hash_lo |= (1 << hash);
@@ -353,6 +354,9 @@ int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu)
* packet size register includes header, but not FCS.
*/
mtu += 14;
+ if (mtu > 1536)
+ mtu += 4;
+
if (mtu > MAX_FRAME_SIZE - 4)
return -EINVAL;
t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu);
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index 1605bc225b0..1ac9440eb3f 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -957,19 +957,18 @@ static void emac_dev_mcast_set(struct net_device *ndev)
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > EMAC_DEF_MAX_MULTICAST_ADDRESSES)) {
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
}
- if (ndev->mc_count > 0) {
+ if (!netdev_mc_empty(ndev)) {
struct dev_mc_list *mc_ptr;
mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
/* program multicast address list into EMAC hardware */
- for (mc_ptr = ndev->mc_list; mc_ptr;
- mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, ndev) {
emac_add_mcast(priv, EMAC_MULTICAST_ADD,
- (u8 *)mc_ptr->dmi_addr);
+ (u8 *) mc_ptr->dmi_addr);
}
} else {
mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
@@ -2683,8 +2682,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
size = res->end - res->start + 1;
if (!request_mem_region(res->start, size, ndev->name)) {
- dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() \
- for regs\n");
+ dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n");
rc = -ENXIO;
goto probe_quit;
}
diff --git a/drivers/net/de620.c b/drivers/net/de620.c
index 45794f6cb0f..a0a6830b5e6 100644
--- a/drivers/net/de620.c
+++ b/drivers/net/de620.c
@@ -464,7 +464,7 @@ static int de620_close(struct net_device *dev)
static void de620_set_multicast_list(struct net_device *dev)
{
- if (dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{ /* Enable promiscuous mode */
de620_set_register(dev, W_TCR, (TCR_DEF & ~RXPBM) | RXALL);
}
diff --git a/drivers/net/declance.c b/drivers/net/declance.c
index be9590253aa..8cf3cc6f20e 100644
--- a/drivers/net/declance.c
+++ b/drivers/net/declance.c
@@ -940,9 +940,8 @@ static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
volatile u16 *ib = (volatile u16 *)dev->mem_start;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
/* set all multicast bits */
@@ -960,9 +959,8 @@ static void lance_load_multicast(struct net_device *dev)
*lib_ptr(ib, filter[3], lp->type) = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/defxx.c b/drivers/net/defxx.c
index 6a6ea038d7a..ed53a8d45f4 100644
--- a/drivers/net/defxx.c
+++ b/drivers/net/defxx.c
@@ -1052,12 +1052,9 @@ static int __devinit dfx_driver_init(struct net_device *dev,
board_name = "DEFEA";
if (dfx_bus_pci)
board_name = "DEFPA";
- pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, "
- "Hardware addr = %02X-%02X-%02X-%02X-%02X-%02X\n",
+ pr_info("%s: %s at %saddr = 0x%llx, IRQ = %d, Hardware addr = %pMF\n",
print_name, board_name, dfx_use_mmio ? "" : "I/O ",
- (long long)bar_start, dev->irq,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ (long long)bar_start, dev->irq, dev->dev_addr);
/*
* Get memory for descriptor block, consumer block, and other buffers
@@ -2230,7 +2227,7 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
* perfect filtering will be used.
*/
- if (dev->mc_count > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
+ if (netdev_mc_count(dev) > (PI_CMD_ADDR_FILTER_K_SIZE - bp->uc_count))
{
bp->group_prom = PI_FSTATE_K_PASS; /* Enable LLC group prom mode */
bp->mc_count = 0; /* Don't add mc addrs to CAM */
@@ -2238,17 +2235,16 @@ static void dfx_ctl_set_multicast_list(struct net_device *dev)
else
{
bp->group_prom = PI_FSTATE_K_BLOCK; /* Disable LLC group prom mode */
- bp->mc_count = dev->mc_count; /* Add mc addrs to CAM */
+ bp->mc_count = netdev_mc_count(dev); /* Add mc addrs to CAM */
}
/* Copy addresses to multicast address table, then update adapter CAM */
- dmi = dev->mc_list; /* point to first multicast addr */
- for (i=0; i < bp->mc_count; i++)
- {
- memcpy(&bp->mc_table[i*FDDI_K_ALEN], dmi->dmi_addr, FDDI_K_ALEN);
- dmi = dmi->next; /* point to next multicast addr */
- }
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy(&bp->mc_table[i++ * FDDI_K_ALEN],
+ dmi->dmi_addr, FDDI_K_ALEN);
+
if (dfx_ctl_update_cam(bp) != DFX_K_SUCCESS)
{
DBG_printk("%s: Could not update multicast address table!\n", dev->name);
@@ -3631,7 +3627,7 @@ static int __devinit dfx_pci_register(struct pci_dev *,
const struct pci_device_id *);
static void __devexit dfx_pci_unregister(struct pci_dev *);
-static struct pci_device_id dfx_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(dfx_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_FDDI) },
{ }
};
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 0c1f491d20b..744c1928dfc 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1272,7 +1272,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct depca_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
int i, j, bit, byte;
u16 hashcode;
@@ -1287,9 +1287,8 @@ static void SetMulticastFilter(struct net_device *dev)
lp->init_block.mcast_table[i] = 0;
}
/* Add multicast addresses */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc(ETH_ALEN, addrs);
hashcode = (crc & 1); /* hashcode is 6 LSb of CRC ... */
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 2a8b6a7c0b8..b05bad82982 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -1128,19 +1128,16 @@ set_multicast (struct net_device *dev)
/* Receive all frames promiscuously. */
rx_mode = ReceiveAllFrames;
} else if ((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > multicast_filter_limit)) {
+ (netdev_mc_count(dev) > multicast_filter_limit)) {
/* Receive broadcast and multicast frames */
rx_mode = ReceiveBroadcast | ReceiveMulticast | ReceiveUnicast;
- } else if (dev->mc_count > 0) {
- int i;
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
/* Receive broadcast frames and multicast frames filtering
by Hashtable */
rx_mode =
ReceiveBroadcast | ReceiveMulticastHash | ReceiveUnicast;
- for (i=0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist=mclist->next)
- {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit, index = 0;
int crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
/* The inverted high significant 6 bits of CRC are
diff --git a/drivers/net/dl2k.h b/drivers/net/dl2k.h
index 266ec8777ca..7caab3d26a9 100644
--- a/drivers/net/dl2k.h
+++ b/drivers/net/dl2k.h
@@ -537,7 +537,7 @@ struct netdev_private {
driver_data Data private to the driver.
*/
-static const struct pci_device_id rio_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rio_pci_tbl) = {
{0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
{0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
{ }
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index b3773006568..1c67f1138ca 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -724,8 +724,7 @@ static void
dm9000_hash_table(struct net_device *dev)
{
board_info_t *db = netdev_priv(dev);
- struct dev_mc_list *mcptr = dev->mc_list;
- int mc_cnt = dev->mc_count;
+ struct dev_mc_list *mcptr;
int i, oft;
u32 hash_val;
u16 hash_table[4];
@@ -753,7 +752,7 @@ dm9000_hash_table(struct net_device *dev)
rcr |= RCR_ALL;
/* the multicast address in Hash Table : 64 bits */
- for (i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
hash_val = ether_crc_le(6, mcptr->dmi_addr) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 839fb2b136d..a26ccab057d 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -208,7 +208,7 @@ MODULE_PARM_DESC(use_io, "Force use of i/o access mode");
#define INTEL_8255X_ETHERNET_DEVICE(device_id, ich) {\
PCI_VENDOR_ID_INTEL, device_id, PCI_ANY_ID, PCI_ANY_ID, \
PCI_CLASS_NETWORK_ETHERNET << 8, 0xFFFF00, ich }
-static struct pci_device_id e100_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(e100_id_table) = {
INTEL_8255X_ETHERNET_DEVICE(0x1029, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1030, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1031, 3),
@@ -1537,14 +1537,18 @@ static int e100_hw_init(struct nic *nic)
static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
- struct dev_mc_list *list = netdev->mc_list;
- u16 i, count = min(netdev->mc_count, E100_MAX_MULTICAST_ADDRS);
+ struct dev_mc_list *list;
+ u16 i, count = min(netdev_mc_count(netdev), E100_MAX_MULTICAST_ADDRS);
cb->command = cpu_to_le16(cb_multi);
cb->u.multi.count = cpu_to_le16(count * ETH_ALEN);
- for (i = 0; list && i < count; i++, list = list->next)
- memcpy(&cb->u.multi.addr[i*ETH_ALEN], &list->dmi_addr,
+ i = 0;
+ netdev_for_each_mc_addr(list, netdev) {
+ if (i == count)
+ break;
+ memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &list->dmi_addr,
ETH_ALEN);
+ }
}
static void e100_set_multicast_list(struct net_device *netdev)
@@ -1552,7 +1556,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
struct nic *nic = netdev_priv(netdev);
DPRINTK(HW, DEBUG, "mc_count=%d, flags=0x%04X\n",
- netdev->mc_count, netdev->flags);
+ netdev_mc_count(netdev), netdev->flags);
if (netdev->flags & IFF_PROMISC)
nic->flags |= promiscuous;
@@ -1560,7 +1564,7 @@ static void e100_set_multicast_list(struct net_device *netdev)
nic->flags &= ~promiscuous;
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > E100_MAX_MULTICAST_ADDRS)
+ netdev_mc_count(netdev) > E100_MAX_MULTICAST_ADDRS)
nic->flags |= multicast_all;
else
nic->flags &= ~multicast_all;
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index e8932db7ee7..9902b33b716 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -349,6 +349,7 @@ extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_update_stats(struct e1000_adapter *adapter);
+extern bool e1000_has_link(struct e1000_adapter *adapter);
extern void e1000_power_up_phy(struct e1000_adapter *);
extern void e1000_set_ethtool_ops(struct net_device *netdev);
extern void e1000_check_options(struct e1000_adapter *adapter);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 13e9ece1688..c67e9311727 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -215,6 +215,23 @@ static int e1000_set_settings(struct net_device *netdev,
return 0;
}
+static u32 e1000_get_link(struct net_device *netdev)
+{
+ struct e1000_adapter *adapter = netdev_priv(netdev);
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ adapter->hw.get_link_status = 1;
+
+ return e1000_has_link(adapter);
+}
+
static void e1000_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -1892,7 +1909,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
.get_msglevel = e1000_get_msglevel,
.set_msglevel = e1000_set_msglevel,
.nway_reset = e1000_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = e1000_get_link,
.get_eeprom_len = e1000_get_eeprom_len,
.get_eeprom = e1000_get_eeprom,
.set_eeprom = e1000_set_eeprom,
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 765543663a4..8be6faee43e 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -42,7 +42,7 @@ static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation
* Macro expands to...
* {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
*/
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
INTEL_E1000_ETHERNET_DEVICE(0x1000),
INTEL_E1000_ETHERNET_DEVICE(0x1001),
INTEL_E1000_ETHERNET_DEVICE(0x1004),
@@ -847,6 +847,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
goto err_pci_reg;
pci_set_master(pdev);
+ err = pci_save_state(pdev);
+ if (err)
+ goto err_alloc_etherdev;
err = -ENOMEM;
netdev = alloc_etherdev(sizeof(struct e1000_adapter));
@@ -2127,7 +2130,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
rctl |= E1000_RCTL_VFE;
}
- if (netdev->uc.count > rar_entries - 1) {
+ if (netdev_uc_count(netdev) > rar_entries - 1) {
rctl |= E1000_RCTL_UPE;
} else if (!(netdev->flags & IFF_PROMISC)) {
rctl &= ~E1000_RCTL_UPE;
@@ -2150,7 +2153,7 @@ static void e1000_set_rx_mode(struct net_device *netdev)
*/
i = 1;
if (use_uc)
- list_for_each_entry(ha, &netdev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, netdev) {
if (i == rar_entries)
break;
e1000_rar_set(hw, ha->addr, i++);
@@ -2158,29 +2161,25 @@ static void e1000_set_rx_mode(struct net_device *netdev)
WARN_ON(i == rar_entries);
- mc_ptr = netdev->mc_list;
-
- for (; i < rar_entries; i++) {
- if (mc_ptr) {
- e1000_rar_set(hw, mc_ptr->da_addr, i);
- mc_ptr = mc_ptr->next;
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
+ if (i == rar_entries) {
+ /* load any remaining addresses into the hash table */
+ u32 hash_reg, hash_bit, mta;
+ hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
+ hash_reg = (hash_value >> 5) & 0x7F;
+ hash_bit = hash_value & 0x1F;
+ mta = (1 << hash_bit);
+ mcarray[hash_reg] |= mta;
} else {
- E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
- E1000_WRITE_FLUSH();
- E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
- E1000_WRITE_FLUSH();
+ e1000_rar_set(hw, mc_ptr->da_addr, i++);
}
}
- /* load any remaining addresses into the hash table */
-
- for (; mc_ptr; mc_ptr = mc_ptr->next) {
- u32 hash_reg, hash_bit, mta;
- hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
- hash_reg = (hash_value >> 5) & 0x7F;
- hash_bit = hash_value & 0x1F;
- mta = (1 << hash_bit);
- mcarray[hash_reg] |= mta;
+ for (; i < rar_entries; i++) {
+ E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+ E1000_WRITE_FLUSH();
+ E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+ E1000_WRITE_FLUSH();
}
/* write the hash table completely, write from bottom to avoid
@@ -2246,7 +2245,7 @@ static void e1000_82547_tx_fifo_stall(unsigned long data)
}
}
-static bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
@@ -4613,6 +4612,7 @@ static int e1000_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
if (adapter->need_ioport)
err = pci_enable_device(pdev);
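In the e1000_set_rx_mode() rework above, multicast addresses that do not fit in the receive-address registers fall through to the 128-entry multicast table array. A worked example of the hash indexing, with the hash value chosen purely for illustration:

/*
 * Worked example (illustrative only): suppose e1000_hash_mc_addr()
 * returns hash_value = 0x123 (291) for some multicast address.
 *
 *   hash_reg = (0x123 >> 5) & 0x7F = 9     selects the 10th 32-bit MTA register
 *   hash_bit =  0x123       & 0x1F = 3     selects bit 3 within that register
 *   mta      = 1 << 3              = 0x8
 *
 * so mcarray[9] |= 0x8 marks that address as accepted by the hash filter
 * once the MTA is written back to the hardware.
 */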
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 02d67d047d9..3c95acb3a87 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -267,8 +267,14 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
}
switch (hw->mac.type) {
+ case e1000_82573:
+ func->set_lan_id = e1000_set_lan_id_single_port;
+ func->check_mng_mode = e1000e_check_mng_mode_generic;
+ func->led_on = e1000e_led_on_generic;
+ break;
case e1000_82574:
case e1000_82583:
+ func->set_lan_id = e1000_set_lan_id_single_port;
func->check_mng_mode = e1000_check_mng_mode_82574;
func->led_on = e1000_led_on_82574;
break;
@@ -922,9 +928,12 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- if (hw->mac.type == e1000_82571 &&
- hw->dev_spec.e82571.alt_mac_addr_is_present)
- e1000e_set_laa_state_82571(hw, true);
+ /* Install any alternate MAC address into RAR0 */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ return ret_val;
+
+ e1000e_set_laa_state_82571(hw, true);
/* Reinitialize the 82571 serdes link state machine */
if (hw->phy.media_type == e1000_media_type_internal_serdes)
@@ -1225,32 +1234,6 @@ static s32 e1000_led_on_82574(struct e1000_hw *hw)
}
/**
- * e1000_update_mc_addr_list_82571 - Update Multicast addresses
- * @hw: pointer to the HW structure
- * @mc_addr_list: array of multicast addresses to program
- * @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
- *
- * Updates the Receive Address Registers and Multicast Table Array.
- * The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
- **/
-static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
- u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 rar_used_count,
- u32 rar_count)
-{
- if (e1000e_get_laa_state_82571(hw))
- rar_count--;
-
- e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
- rar_used_count, rar_count);
-}
-
-/**
* e1000_setup_link_82571 - Setup flow control and link settings
* @hw: pointer to the HW structure
*
@@ -1621,6 +1604,29 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
}
/**
+ * e1000_read_mac_addr_82571 - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address, place it in RAR0
+ * so that it will override the Si-installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_power_down_phy_copper_82571 - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -1695,10 +1701,11 @@ static struct e1000_mac_operations e82571_mac_ops = {
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_82571,
.get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
/* .get_link_up_info: media type dependent */
/* .led_on: mac type dependent */
.led_off = e1000e_led_off_generic,
- .update_mc_addr_list = e1000_update_mc_addr_list_82571,
+ .update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.write_vfta = e1000_write_vfta_generic,
.clear_vfta = e1000_clear_vfta_82571,
.reset_hw = e1000_reset_hw_82571,
@@ -1706,6 +1713,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
.setup_link = e1000_setup_link_82571,
/* .setup_physical_interface: media type dependent */
.setup_led = e1000e_setup_led_generic,
+ .read_mac_addr = e1000_read_mac_addr_82571,
};
static struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index e02e38221ed..db05ec35574 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -460,6 +460,8 @@
*/
#define E1000_RAR_ENTRIES 15
#define E1000_RAH_AV 0x80000000 /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
/* Error Codes */
#define E1000_ERR_NVM 1
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index d236efaf747..c2ec095d216 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -459,7 +459,7 @@ extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
extern void e1000e_update_stats(struct e1000_adapter *adapter);
-extern bool e1000_has_link(struct e1000_adapter *adapter);
+extern bool e1000e_has_link(struct e1000_adapter *adapter);
extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
@@ -503,6 +503,8 @@ extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
@@ -517,9 +519,7 @@ extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list,
- u32 mc_addr_count,
- u32 rar_used_count,
- u32 rar_count);
+ u32 mc_addr_count);
extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@@ -530,6 +530,7 @@ extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
extern s32 e1000e_blink_led(struct e1000_hw *hw);
extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
extern void e1000e_reset_adaptive(struct e1000_hw *hw);
extern void e1000e_update_adaptive(struct e1000_hw *hw);
@@ -629,7 +630,15 @@ extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16
extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
extern void e1000e_release_nvm(struct e1000_hw *hw);
extern void e1000e_reload_nvm(struct e1000_hw *hw);
-extern s32 e1000e_read_mac_addr(struct e1000_hw *hw);
+extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+
+static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+{
+ if (hw->mac.ops.read_mac_addr)
+ return hw->mac.ops.read_mac_addr(hw);
+
+ return e1000_read_mac_addr_generic(hw);
+}
static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
{
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index e2aa3b78856..27d21589a69 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -246,6 +246,9 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
break;
}
+ /* set lan id for port to determine which phy lock to use */
+ hw->mac.ops.set_lan_id(hw);
+
return 0;
}
@@ -814,7 +817,9 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
ew32(IMC, 0xffffffff);
icr = er32(ICR);
- return 0;
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+ return ret_val;
}
/**
@@ -1340,6 +1345,29 @@ static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
}
/**
+ * e1000_read_mac_addr_80003es2lan - Read device MAC address
+ * @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+ s32 ret_val = 0;
+
+ /*
+ * If there's an alternate MAC address, place it in RAR0
+ * so that it will override the Si-installed default perm
+ * address.
+ */
+ ret_val = e1000_check_alt_mac_addr_generic(hw);
+ if (ret_val)
+ goto out;
+
+ ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
* @hw: pointer to the HW structure
*
@@ -1403,12 +1431,14 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
}
static struct e1000_mac_operations es2_mac_ops = {
+ .read_mac_addr = e1000_read_mac_addr_80003es2lan,
.id_led_init = e1000e_id_led_init,
.check_mng_mode = e1000e_check_mng_mode_generic,
/* check_for_link dependent on media type */
.cleanup_led = e1000e_cleanup_led_generic,
.clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan,
.get_bus_info = e1000e_get_bus_info_pcie,
+ .set_lan_id = e1000_set_lan_id_multi_port_pcie,
.get_link_up_info = e1000_get_link_up_info_80003es2lan,
.led_on = e1000e_led_on_generic,
.led_off = e1000e_led_off_generic,
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 0aa50c229c7..b33e3cbe9ab 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -202,7 +202,7 @@ static u32 e1000_get_link(struct net_device *netdev)
if (!netif_carrier_ok(netdev))
mac->get_link_status = 1;
- return e1000_has_link(adapter);
+ return e1000e_has_link(adapter);
}
static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index eccf29b75c4..8bdcd5f24ef 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -389,6 +389,9 @@ enum e1e_registers {
#define E1000_FUNC_1 1
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0 0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
+
enum e1000_mac_type {
e1000_82571,
e1000_82572,
@@ -746,16 +749,18 @@ struct e1000_mac_operations {
void (*clear_hw_cntrs)(struct e1000_hw *);
void (*clear_vfta)(struct e1000_hw *);
s32 (*get_bus_info)(struct e1000_hw *);
+ void (*set_lan_id)(struct e1000_hw *);
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
s32 (*led_on)(struct e1000_hw *);
s32 (*led_off)(struct e1000_hw *);
- void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
+ void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
s32 (*setup_physical_interface)(struct e1000_hw *);
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
+ s32 (*read_mac_addr)(struct e1000_hw *);
};
/* Function pointers for the PHY. */
@@ -814,6 +819,10 @@ struct e1000_mac_info {
u16 ifs_ratio;
u16 ifs_step_size;
u16 mta_reg_count;
+
+ /* Maximum size of the MTA register table in all supported adapters */
+ #define MAX_MTA_REG 128
+ u32 mta_shadow[MAX_MTA_REG];
u16 rar_entry_count;
u8 forced_speed_duplex;
@@ -897,7 +906,6 @@ struct e1000_fc_info {
struct e1000_dev_spec_82571 {
bool laa_is_present;
- bool alt_mac_addr_is_present;
u32 smb_counter;
};
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 8b6ecd12788..54d03a0ce3c 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -3368,6 +3368,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
/* cleanup_led dependent on mac type */
.clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan,
.get_bus_info = e1000_get_bus_info_ich8lan,
+ .set_lan_id = e1000_set_lan_id_single_port,
.get_link_up_info = e1000_get_link_up_info_ich8lan,
/* led_on dependent on mac type */
/* led_off dependent on mac type */
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 2fa9b36a2c5..2425ed11d5c 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -51,10 +51,10 @@ enum e1000_mng_mode {
**/
s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
{
+ struct e1000_mac_info *mac = &hw->mac;
struct e1000_bus_info *bus = &hw->bus;
struct e1000_adapter *adapter = hw->adapter;
- u32 status;
- u16 pcie_link_status, pci_header_type, cap_offset;
+ u16 pcie_link_status, cap_offset;
cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (!cap_offset) {
@@ -68,20 +68,46 @@ s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
PCIE_LINK_WIDTH_SHIFT);
}
- pci_read_config_word(adapter->pdev, PCI_HEADER_TYPE_REGISTER,
- &pci_header_type);
- if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
- status = er32(STATUS);
- bus->func = (status & E1000_STATUS_FUNC_MASK)
- >> E1000_STATUS_FUNC_SHIFT;
- } else {
- bus->func = 0;
- }
+ mac->ops.set_lan_id(hw);
return 0;
}
/**
+ * e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *
+ * @hw: pointer to the HW structure
+ *
+ * Determines the LAN function id by reading memory-mapped registers
+ * and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+ u32 reg;
+
+ /*
+ * The status register reports the correct function number
+ * for the device regardless of function swap state.
+ */
+ reg = er32(STATUS);
+ bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ * e1000_set_lan_id_single_port - Set LAN id for a single port device
+ * @hw: pointer to the HW structure
+ *
+ * Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+ struct e1000_bus_info *bus = &hw->bus;
+
+ bus->func = 0;
+}
+
+/**
* e1000_clear_vfta_generic - Clear VLAN filter table
* @hw: pointer to the HW structure
*
@@ -139,6 +165,68 @@ void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
}
/**
+ * e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ * @hw: pointer to the HW structure
+ *
+ * Checks the nvm for an alternate MAC address. An alternate MAC address
+ * can be set up by pre-boot software and must be treated like a permanent
+ * address and must override the actual permanent MAC address. If an
+ * alternate MAC address is found, it is programmed into RAR0, replacing
+ * the permanent address that was installed into RAR0 by the Si on reset.
+ * This function will return SUCCESS unless it encounters an error while
+ * reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+ u32 i;
+ s32 ret_val = 0;
+ u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+ u8 alt_mac_addr[ETH_ALEN];
+
+ ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+ &nvm_alt_mac_addr_offset);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ if (nvm_alt_mac_addr_offset == 0xFFFF) {
+ /* There is no Alternate MAC Address */
+ goto out;
+ }
+
+ if (hw->bus.func == E1000_FUNC_1)
+ nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ offset = nvm_alt_mac_addr_offset + (i >> 1);
+ ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+ if (ret_val) {
+ e_dbg("NVM Read Error\n");
+ goto out;
+ }
+
+ alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+ alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+ }
+
+ /* if multicast bit is set, the alternate address will not be used */
+ if (alt_mac_addr[0] & 0x01) {
+ e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+ goto out;
+ }
+
+ /*
+ * We have a valid alternate MAC address, and we want to treat it the
+ * same as the normal permanent MAC address stored by the HW into the
+ * RAR. Do this by mapping this address into RAR0.
+ */
+ e1000e_rar_set(hw, alt_mac_addr, 0);
+
+out:
+ return ret_val;
+}
+
+/**
* e1000e_rar_set - Set receive address register
* @hw: pointer to the HW structure
* @addr: pointer to the receive address
@@ -252,62 +340,34 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
*
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates the entire Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this.
**/
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count,
- u32 rar_used_count, u32 rar_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
- u32 i;
- u32 *mcarray = kzalloc(hw->mac.mta_reg_count * sizeof(u32), GFP_ATOMIC);
+ u32 hash_value, hash_bit, hash_reg;
+ int i;
- if (!mcarray) {
- printk(KERN_ERR "multicast array memory allocation failed\n");
- return;
- }
+ /* clear mta_shadow */
+ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
- /*
- * Load the first set of multicast addresses into the exact
- * filters (RAR). If there are not enough to fill the RAR
- * array, clear the filters.
- */
- for (i = rar_used_count; i < rar_count; i++) {
- if (mc_addr_count) {
- e1000e_rar_set(hw, mc_addr_list, i);
- mc_addr_count--;
- mc_addr_list += ETH_ALEN;
- } else {
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
- e1e_flush();
- E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
- e1e_flush();
- }
- }
-
- /* Load any remaining multicast addresses into the hash table. */
- for (; mc_addr_count > 0; mc_addr_count--) {
- u32 hash_value, hash_reg, hash_bit, mta;
+ /* update mta_shadow from mc_addr_list */
+ for (i = 0; (u32) i < mc_addr_count; i++) {
hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
- e_dbg("Hash value = 0x%03X\n", hash_value);
+
hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
hash_bit = hash_value & 0x1F;
- mta = (1 << hash_bit);
- mcarray[hash_reg] |= mta;
- mc_addr_list += ETH_ALEN;
- }
- /* write the hash table completely */
- for (i = 0; i < hw->mac.mta_reg_count; i++)
- E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, mcarray[i]);
+ hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+ mc_addr_list += (ETH_ALEN);
+ }
+ /* replace the entire MTA table */
+ for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+ E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
e1e_flush();
- kfree(mcarray);
}
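The shadow-table indexing in e1000e_update_mc_addr_list_generic() splits the multicast hash into a register index and a bit index. A hedged sketch of that arithmetic, assuming a 128-register MTA (the usual value of hw->mac.mta_reg_count; the names below are illustrative):

#include <stdint.h>

#define MTA_REG_COUNT 128   /* assumed value of hw->mac.mta_reg_count */

/* Set the bit for one multicast hash value in a shadow MTA table:
 * bits [11:5] of the hash select the 32-bit register, bits [4:0]
 * select the bit within it. */
static void mta_shadow_set(uint32_t shadow[MTA_REG_COUNT], uint16_t hash_value)
{
        uint32_t hash_reg = (hash_value >> 5) & (MTA_REG_COUNT - 1);
        uint32_t hash_bit = hash_value & 0x1F;

        shadow[hash_reg] |= 1u << hash_bit;
}

/* Example: hash_value 0x0563 -> register 43 (0x2B), bit 3. */

Rebuilding the whole shadow table and then writing it back in one pass, as the new code does, replaces the old per-register read-modify-write style entirely.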
/**
@@ -2072,67 +2132,27 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
}
/**
- * e1000e_read_mac_addr - Read device MAC address
+ * e1000_read_mac_addr_generic - Read device MAC address
* @hw: pointer to the HW structure
*
* Reads the device MAC address from the EEPROM and stores the value.
* Since devices with two ports use the same EEPROM, we increment the
* last bit in the MAC address for the second port.
**/
-s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
{
- s32 ret_val;
- u16 offset, nvm_data, i;
- u16 mac_addr_offset = 0;
-
- if (hw->mac.type == e1000_82571) {
- /* Check for an alternate MAC address. An alternate MAC
- * address can be setup by pre-boot software and must be
- * treated like a permanent address and must override the
- * actual permanent MAC address.*/
- ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
- &mac_addr_offset);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- if (mac_addr_offset == 0xFFFF)
- mac_addr_offset = 0;
-
- if (mac_addr_offset) {
- if (hw->bus.func == E1000_FUNC_1)
- mac_addr_offset += ETH_ALEN/sizeof(u16);
-
- /* make sure we have a valid mac address here
- * before using it */
- ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
- &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- if (nvm_data & 0x0001)
- mac_addr_offset = 0;
- }
+ u32 rar_high;
+ u32 rar_low;
+ u16 i;
- if (mac_addr_offset)
- hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
- }
+ rar_high = er32(RAH(0));
+ rar_low = er32(RAL(0));
- for (i = 0; i < ETH_ALEN; i += 2) {
- offset = mac_addr_offset + (i >> 1);
- ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
- if (ret_val) {
- e_dbg("NVM Read Error\n");
- return ret_val;
- }
- hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
- hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
- }
+ for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
- /* Flip last bit of mac address if we're on second port */
- if (!mac_addr_offset && hw->bus.func == E1000_FUNC_1)
- hw->mac.perm_addr[5] ^= 1;
+ for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+ hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
for (i = 0; i < ETH_ALEN; i++)
hw->mac.addr[i] = hw->mac.perm_addr[i];
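The rewritten e1000_read_mac_addr_generic() recovers the address from RAR0 rather than re-reading the EEPROM word by word. A stand-alone sketch of the byte layout it relies on, with the register reads replaced by plain parameters (E1000_RAL_MAC_ADDR_LEN of 4 and E1000_RAH_MAC_ADDR_LEN of 2 are assumed here):

#include <stdint.h>

/* Recover a 6-byte MAC address from Receive Address Register 0:
 * RAL(0) holds bytes 0-3 and the low 16 bits of RAH(0) hold bytes 4-5,
 * each laid out little-endian within its 32-bit register. */
static void rar0_to_mac(uint32_t ral, uint32_t rah, uint8_t mac[6])
{
        int i;

        for (i = 0; i < 4; i++)
                mac[i] = (uint8_t)(ral >> (i * 8));
        for (i = 0; i < 2; i++)
                mac[i + 4] = (uint8_t)(rah >> (i * 8));
}
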
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 57f149b75fb..88d54d3efce 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2541,22 +2541,14 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
- * @rar_used_count: the first RAR register free to program
- * @rar_count: total number of supported Receive Address Registers
*
- * Updates the Receive Address Registers and Multicast Table Array.
+ * Updates the Multicast Table Array.
* The caller must have a packed mc_addr_list of multicast addresses.
- * The parameter rar_count will usually be hw->mac.rar_entry_count
- * unless there are workarounds that change this. Currently no func pointer
- * exists and all implementations are handled in the generic version of this
- * function.
**/
static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
- u32 mc_addr_count, u32 rar_used_count,
- u32 rar_count)
+ u32 mc_addr_count)
{
- hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count,
- rar_used_count, rar_count);
+ hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
}
/**
@@ -2572,7 +2564,6 @@ static void e1000_set_multi(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct e1000_mac_info *mac = &hw->mac;
struct dev_mc_list *mc_ptr;
u8 *mta_list;
u32 rctl;
@@ -2598,31 +2589,25 @@ static void e1000_set_multi(struct net_device *netdev)
ew32(RCTL, rctl);
- if (netdev->mc_count) {
- mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return;
/* prepare a packed array of only addresses. */
- mc_ptr = netdev->mc_list;
-
- for (i = 0; i < netdev->mc_count; i++) {
- if (!mc_ptr)
- break;
- memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
- ETH_ALEN);
- mc_ptr = mc_ptr->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN),
+ mc_ptr->dmi_addr, ETH_ALEN);
- e1000_update_mc_addr_list(hw, mta_list, i, 1,
- mac->rar_entry_count);
+ e1000_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
} else {
/*
* if we're called from probe, we might not have
* anything to do here, so clear out the list
*/
- e1000_update_mc_addr_list(hw, NULL, 0, 1, mac->rar_entry_count);
+ e1000_update_mc_addr_list(hw, NULL, 0);
}
}
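The conversion in e1000_set_multi() above is the pattern repeated throughout this patch: rather than walking dev->mc_list by hand, netdev_for_each_mc_addr() iterates the device's multicast list while the addresses are packed into one flat buffer for the hardware-update helper. A generic, self-contained sketch of that packing step (the list type and field names below are illustrative, not the kernel's):

#include <stdint.h>
#include <string.h>

#define ETH_ALEN 6

struct mc_entry {
        uint8_t addr[ETH_ALEN];
        struct mc_entry *next;
};

/* Pack a linked list of multicast addresses into one flat buffer,
 * mirroring how e1000_set_multi() builds mta_list before handing it
 * to e1000_update_mc_addr_list(). Returns the number packed. */
static int pack_mc_list(const struct mc_entry *head, uint8_t *buf, int max)
{
        int i = 0;

        for (; head && i < max; head = head->next)
                memcpy(buf + (i++ * ETH_ALEN), head->addr, ETH_ALEN);
        return i;
}
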
@@ -3482,7 +3467,7 @@ static void e1000_print_link_info(struct e1000_adapter *adapter)
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None" )));
}
-bool e1000_has_link(struct e1000_adapter *adapter)
+bool e1000e_has_link(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = 0;
@@ -3563,7 +3548,7 @@ static void e1000_watchdog_task(struct work_struct *work)
u32 link, tctl;
int tx_pending = 0;
- link = e1000_has_link(adapter);
+ link = e1000e_has_link(adapter);
if ((netif_carrier_ok(netdev)) && link) {
e1000e_enable_receives(adapter);
goto link_up;
@@ -5134,7 +5119,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
e1000_eeprom_checks(adapter);
- /* copy the MAC address out of the NVM */
+ /* copy the MAC address */
if (e1000e_read_mac_addr(&adapter->hw))
e_err("NVM Read Error while reading MAC address\n");
@@ -5326,7 +5311,7 @@ static struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static struct pci_device_id e1000_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index 94c59498cdb..1b05bdf62c3 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -1287,9 +1287,10 @@ set_multicast_list(struct net_device *dev)
struct eepro_local *lp = netdev_priv(dev);
short ioaddr = dev->base_addr;
unsigned short mode;
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
+ int mc_count = netdev_mc_count(dev);
- if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || dev->mc_count > 63)
+ if (dev->flags&(IFF_ALLMULTI|IFF_PROMISC) || mc_count > 63)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
@@ -1299,7 +1300,7 @@ set_multicast_list(struct net_device *dev)
eepro_sw2bank0(ioaddr); /* Return to BANK 0 now */
}
- else if (dev->mc_count==0 )
+ else if (mc_count == 0)
{
eepro_sw2bank2(ioaddr); /* be CAREFUL, BANK 2 now */
mode = inb(ioaddr + REG2);
@@ -1329,12 +1330,10 @@ set_multicast_list(struct net_device *dev)
outw(MC_SETUP, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
outw(0, ioaddr + IO_PORT);
- outw(6*(dev->mc_count + 1), ioaddr + IO_PORT);
+ outw(6 * (mc_count + 1), ioaddr + IO_PORT);
- for (i = 0; i < dev->mc_count; i++)
- {
- eaddrs=(unsigned short *)dmi->dmi_addr;
- dmi=dmi->next;
+ netdev_for_each_mc_addr(dmi, dev) {
+ eaddrs = (unsigned short *) dmi->dmi_addr;
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
outw(*eaddrs++, ioaddr + IO_PORT);
@@ -1348,7 +1347,7 @@ set_multicast_list(struct net_device *dev)
outb(MC_SETUP, ioaddr);
/* Update the transmit queue */
- i = lp->tx_end + XMT_HEADER + 6*(dev->mc_count + 1);
+ i = lp->tx_end + XMT_HEADER + 6 * (mc_count + 1);
if (lp->tx_start != lp->tx_end)
{
@@ -1380,8 +1379,8 @@ set_multicast_list(struct net_device *dev)
break;
} else if ((i & 0x0f) == 0x03) { /* MC-Done */
printk(KERN_DEBUG "%s: set Rx mode to %d address%s.\n",
- dev->name, dev->mc_count,
- dev->mc_count > 1 ? "es":"");
+ dev->name, mc_count,
+ mc_count > 1 ? "es":"");
break;
}
}
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 6fbfc8eee63..7013dc8a6cb 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1578,7 +1578,7 @@ static void eexp_setup_filter(struct net_device *dev)
{
struct dev_mc_list *dmi;
unsigned short ioaddr = dev->base_addr;
- int count = dev->mc_count;
+ int count = netdev_mc_count(dev);
int i;
if (count > 8) {
printk(KERN_INFO "%s: too many multicast addresses (%d)\n",
@@ -1588,23 +1588,19 @@ static void eexp_setup_filter(struct net_device *dev)
outw(CONF_NR_MULTICAST & ~31, ioaddr+SM_PTR);
outw(6*count, ioaddr+SHADOW(CONF_NR_MULTICAST));
- for (i = 0, dmi = dev->mc_list; i < count; i++, dmi = dmi->next) {
- unsigned short *data;
- if (!dmi) {
- printk(KERN_INFO "%s: too few multicast addresses\n", dev->name);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev) {
+ unsigned short *data = (unsigned short *) dmi->dmi_addr;
+
+ if (i == count)
break;
- }
- if (dmi->dmi_addrlen != ETH_ALEN) {
- printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
- continue;
- }
- data = (unsigned short *)dmi->dmi_addr;
outw((CONF_MULTICAST+(6*i)) & ~31, ioaddr+SM_PTR);
outw(data[0], ioaddr+SHADOW(CONF_MULTICAST+(6*i)));
outw((CONF_MULTICAST+(6*i)+2) & ~31, ioaddr+SM_PTR);
outw(data[1], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+2));
outw((CONF_MULTICAST+(6*i)+4) & ~31, ioaddr+SM_PTR);
outw(data[2], ioaddr+SHADOW(CONF_MULTICAST+(6*i)+4));
+ i++;
}
}
@@ -1627,9 +1623,9 @@ eexp_set_multicast(struct net_device *dev)
}
if (!(dev->flags & IFF_PROMISC)) {
eexp_setup_filter(dev);
- if (lp->old_mc_count != dev->mc_count) {
+ if (lp->old_mc_count != netdev_mc_count(dev)) {
kick = 1;
- lp->old_mc_count = dev->mc_count;
+ lp->old_mc_count = netdev_mc_count(dev);
}
}
if (kick) {
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 7b62336e673..b004eaba3d7 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1967,7 +1967,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
{
struct ehea_port *port = netdev_priv(dev);
struct dev_mc_list *k_mcl_entry;
- int ret, i;
+ int ret;
if (dev->flags & IFF_PROMISC) {
ehea_promiscuous(dev, 1);
@@ -1981,7 +1981,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
ehea_allmulti(dev, 0);
- if (dev->mc_count) {
+ if (!netdev_mc_empty(dev)) {
ret = ehea_drop_multicast_list(dev);
if (ret) {
/* Dropping the current multicast list failed.
@@ -1990,15 +1990,14 @@ static void ehea_set_multicast_list(struct net_device *dev)
ehea_allmulti(dev, 1);
}
- if (dev->mc_count > port->adapter->max_mc_mac) {
+ if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
ehea_info("Mcast registration limit reached (0x%llx). "
"Use ALLMULTI!",
port->adapter->max_mc_mac);
goto out;
}
- for (i = 0, k_mcl_entry = dev->mc_list; i < dev->mc_count; i++,
- k_mcl_entry = k_mcl_entry->next)
+ netdev_for_each_mc_addr(k_mcl_entry, dev)
ehea_add_multicast_entry(port, k_mcl_entry->dmi_addr);
}
diff --git a/drivers/net/enc28j60.c b/drivers/net/enc28j60.c
index 66813c91a72..3ee32e58c7e 100644
--- a/drivers/net/enc28j60.c
+++ b/drivers/net/enc28j60.c
@@ -1413,7 +1413,7 @@ static void enc28j60_set_multicast_list(struct net_device *dev)
if (netif_msg_link(priv))
dev_info(&dev->dev, "promiscuous mode\n");
priv->rxfilter = RXFILTER_PROMISC;
- } else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count) {
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
if (netif_msg_link(priv))
dev_info(&dev->dev, "%smulticast mode\n",
(dev->flags & IFF_ALLMULTI) ? "all-" : "");
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index e1c2076228b..ee01f5a6d0d 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -34,7 +34,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco 10G Ethernet Driver"
-#define DRV_VERSION "1.1.0.100"
+#define DRV_VERSION "1.1.0.241a"
#define DRV_COPYRIGHT "Copyright 2008-2009 Cisco Systems, Inc"
#define PFX DRV_NAME ": "
@@ -89,9 +89,12 @@ struct enic {
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
+ unsigned int flags;
unsigned int mc_count;
int csum_rx_enabled;
u32 port_mtu;
+ u32 rx_coalesce_usecs;
+ u32 tx_coalesce_usecs;
/* work queue cache line section */
____cacheline_aligned struct vnic_wq wq[ENIC_WQ_MAX];
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f875751af15..cf098bb636b 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -51,7 +51,7 @@
#define PCI_DEVICE_ID_CISCO_VIC_ENET 0x0043 /* ethernet vnic */
/* Supported devices */
-static struct pci_device_id enic_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
{ PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
{ 0, } /* end of table */
};
@@ -261,6 +261,62 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value)
enic->msg_enable = value;
}
+static int enic_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
+ ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+
+ return 0;
+}
+
+static int enic_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ecmd)
+{
+ struct enic *enic = netdev_priv(netdev);
+ u32 tx_coalesce_usecs;
+ u32 rx_coalesce_usecs;
+
+ tx_coalesce_usecs = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ ecmd->tx_coalesce_usecs);
+ rx_coalesce_usecs = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ ecmd->rx_coalesce_usecs);
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_INTX_WQ_RQ],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ break;
+ case VNIC_DEV_INTR_MODE_MSI:
+ if (tx_coalesce_usecs != rx_coalesce_usecs)
+ return -EINVAL;
+
+ vnic_intr_coalescing_timer_set(&enic->intr[0],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_WQ],
+ INTR_COALESCE_USEC_TO_HW(tx_coalesce_usecs));
+ vnic_intr_coalescing_timer_set(&enic->intr[ENIC_MSIX_RQ],
+ INTR_COALESCE_USEC_TO_HW(rx_coalesce_usecs));
+ break;
+ default:
+ break;
+ }
+
+ enic->tx_coalesce_usecs = tx_coalesce_usecs;
+ enic->rx_coalesce_usecs = rx_coalesce_usecs;
+
+ return 0;
+}
+
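enic_set_coalesce() clamps each requested interval to the largest value the coalescing timer can represent before programming it. Sketched in plain C below, with VNIC_INTR_TIMER_MAX assumed to be 0xffff purely for illustration and the 1.5 us-per-tick conversion added later in this patch reproduced inline:

#include <stdint.h>

#define VNIC_INTR_TIMER_MAX  0xffffu         /* assumed for illustration */
#define HW_TO_USEC(hw)       ((hw) * 3 / 2)   /* one hw unit is 1.5 us */

static uint32_t min_u32(uint32_t a, uint32_t b)
{
        return a < b ? a : b;
}

/* Clamp a requested coalescing interval (in usec) to what the timer
 * can express, mirroring the min_t() calls in enic_set_coalesce(). */
static uint32_t clamp_coalesce_usecs(uint32_t requested)
{
        return min_u32(HW_TO_USEC(VNIC_INTR_TIMER_MAX), requested);
}
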
static const struct ethtool_ops enic_ethtool_ops = {
.get_settings = enic_get_settings,
.get_drvinfo = enic_get_drvinfo,
@@ -278,6 +334,8 @@ static const struct ethtool_ops enic_ethtool_ops = {
.set_sg = ethtool_op_set_sg,
.get_tso = ethtool_op_get_tso,
.set_tso = enic_set_tso,
+ .get_coalesce = enic_get_coalesce,
+ .set_coalesce = enic_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ethtool_op_set_flags,
};
@@ -363,12 +421,12 @@ static void enic_mtu_check(struct enic *enic)
u32 mtu = vnic_dev_mtu(enic->vdev);
if (mtu && mtu != enic->port_mtu) {
+ enic->port_mtu = mtu;
if (mtu < enic->netdev->mtu)
printk(KERN_WARNING PFX
"%s: interface MTU (%d) set higher "
"than switch port MTU (%d)\n",
enic->netdev->name, enic->netdev->mtu, mtu);
- enic->port_mtu = mtu;
}
}
@@ -673,7 +731,7 @@ static inline void enic_queue_wq_skb(struct enic *enic,
/* netif_tx_lock held, process context with BHs disabled, or BH */
static netdev_tx_t enic_hard_start_xmit(struct sk_buff *skb,
- struct net_device *netdev)
+ struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
struct vnic_wq *wq = &enic->wq[0];
@@ -764,15 +822,16 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
static void enic_set_multicast_list(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
- struct dev_mc_list *list = netdev->mc_list;
+ struct dev_mc_list *list;
int directed = 1;
int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
+ unsigned int mc_count = netdev_mc_count(netdev);
int allmulti = (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > ENIC_MULTICAST_PERFECT_FILTERS);
+ mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
+ unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int mc_count = netdev->mc_count;
unsigned int i, j;
if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS)
@@ -780,8 +839,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
spin_lock(&enic->devcmd_lock);
- vnic_dev_packet_filter(enic->vdev, directed,
- multicast, broadcast, promisc, allmulti);
+ if (enic->flags != flags) {
+ enic->flags = flags;
+ vnic_dev_packet_filter(enic->vdev, directed,
+ multicast, broadcast, promisc, allmulti);
+ }
/* Is there an easier way? Trying to minimize to
* calls to add/del multicast addrs. We keep the
@@ -789,9 +851,11 @@ static void enic_set_multicast_list(struct net_device *netdev)
* look for changes to add/del.
*/
- for (i = 0; list && i < mc_count; i++) {
- memcpy(mc_addr[i], list->dmi_addr, ETH_ALEN);
- list = list->next;
+ i = 0;
+ netdev_for_each_mc_addr(list, netdev) {
+ if (i == mc_count)
+ break;
+ memcpy(mc_addr[i++], list->dmi_addr, ETH_ALEN);
}
for (i = 0; i < enic->mc_count; i++) {
@@ -1084,34 +1148,6 @@ static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
return 0;
}
-static void enic_rq_drop_buf(struct vnic_rq *rq,
- struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
- int skipped, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(rq->vdev);
- struct sk_buff *skb = buf->os_buf;
-
- if (skipped)
- return;
-
- pci_unmap_single(enic->pdev, buf->dma_addr,
- buf->len, PCI_DMA_FROMDEVICE);
-
- dev_kfree_skb_any(skb);
-}
-
-static int enic_rq_service_drop(struct vnic_dev *vdev, struct cq_desc *cq_desc,
- u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
- struct enic *enic = vnic_dev_priv(vdev);
-
- vnic_rq_service(&enic->rq[q_number], cq_desc,
- completed_index, VNIC_RQ_RETURN_DESC,
- enic_rq_drop_buf, opaque);
-
- return 0;
-}
-
static int enic_poll(struct napi_struct *napi, int budget)
{
struct enic *enic = container_of(napi, struct enic, napi);
@@ -1119,6 +1155,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
unsigned int rq_work_to_do = budget;
unsigned int wq_work_to_do = -1; /* no limit */
unsigned int work_done, rq_work_done, wq_work_done;
+ int err;
/* Service RQ (first) and WQ
*/
@@ -1142,16 +1179,19 @@ static int enic_poll(struct napi_struct *napi, int budget)
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- if (rq_work_done > 0) {
+ err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
- /* Replenish RQ
- */
+ /* Buffer allocation failed. Stay in polling
+ * mode so we can try to fill the ring again.
+ */
- vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+ if (err)
+ rq_work_done = rq_work_to_do;
- } else {
+ if (rq_work_done < rq_work_to_do) {
- /* If no work done, flush all LROs and exit polling
+ /* Some work done, but not enough to stay in polling,
+ * flush all LROs and exit polling
*/
if (netdev->features & NETIF_F_LRO)
@@ -1170,6 +1210,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
struct net_device *netdev = enic->netdev;
unsigned int work_to_do = budget;
unsigned int work_done;
+ int err;
/* Service RQ
*/
@@ -1177,25 +1218,30 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
work_to_do, enic_rq_service, NULL);
- if (work_done > 0) {
-
- /* Replenish RQ
- */
-
- vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
-
- /* Return intr event credits for this polling
- * cycle. An intr event is the completion of a
- * RQ packet.
- */
+ /* Return intr event credits for this polling
+ * cycle. An intr event is the completion of a
+ * RQ packet.
+ */
+ if (work_done > 0)
vnic_intr_return_credits(&enic->intr[ENIC_MSIX_RQ],
work_done,
0 /* don't unmask intr */,
0 /* don't reset intr timer */);
- } else {
- /* If no work done, flush all LROs and exit polling
+ err = vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);
+
+ /* Buffer allocation failed. Stay in polling mode
+ * so we can try to fill the ring again.
+ */
+
+ if (err)
+ work_done = work_to_do;
+
+ if (work_done < work_to_do) {
+
+ /* Some work done, but not enough to stay in polling,
+ * flush all LROs and exit polling
*/
if (netdev->features & NETIF_F_LRO)
@@ -1304,6 +1350,24 @@ static int enic_request_intr(struct enic *enic)
return err;
}
+static void enic_synchronize_irqs(struct enic *enic)
+{
+ unsigned int i;
+
+ switch (vnic_dev_get_intr_mode(enic->vdev)) {
+ case VNIC_DEV_INTR_MODE_INTX:
+ case VNIC_DEV_INTR_MODE_MSI:
+ synchronize_irq(enic->pdev->irq);
+ break;
+ case VNIC_DEV_INTR_MODE_MSIX:
+ for (i = 0; i < enic->intr_count; i++)
+ synchronize_irq(enic->msix_entry[i].vector);
+ break;
+ default:
+ break;
+ }
+}
+
static int enic_notify_set(struct enic *enic)
{
int err;
@@ -1360,11 +1424,13 @@ static int enic_open(struct net_device *netdev)
}
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
- if (err) {
+ vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
+ /* Need at least one buffer on ring to get going */
+ if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
printk(KERN_ERR PFX
"%s: Unable to alloc receive buffers.\n",
netdev->name);
+ err = -ENOMEM;
goto err_out_notify_unset;
}
}
@@ -1409,16 +1475,19 @@ static int enic_stop(struct net_device *netdev)
unsigned int i;
int err;
+ for (i = 0; i < enic->intr_count; i++)
+ vnic_intr_mask(&enic->intr[i]);
+
+ enic_synchronize_irqs(enic);
+
del_timer_sync(&enic->notify_timer);
spin_lock(&enic->devcmd_lock);
vnic_dev_disable(enic->vdev);
spin_unlock(&enic->devcmd_lock);
napi_disable(&enic->napi);
- netif_stop_queue(netdev);
-
- for (i = 0; i < enic->intr_count; i++)
- vnic_intr_mask(&enic->intr[i]);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
for (i = 0; i < enic->wq_count; i++) {
err = vnic_wq_disable(&enic->wq[i]);
@@ -1436,11 +1505,6 @@ static int enic_stop(struct net_device *netdev)
spin_unlock(&enic->devcmd_lock);
enic_free_intr(enic);
- (void)vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
- -1, enic_rq_service_drop, NULL);
- (void)vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
- -1, enic_wq_service, NULL);
-
for (i = 0; i < enic->wq_count; i++)
vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
for (i = 0; i < enic->rq_count; i++)
@@ -1762,7 +1826,8 @@ int enic_dev_init(struct enic *enic)
err = enic_set_intr_mode(enic);
if (err) {
printk(KERN_ERR PFX
- "Failed to set intr mode, aborting.\n");
+ "Failed to set intr mode based on resource "
+ "counts and system capabilities, aborting.\n");
return err;
}
@@ -1986,6 +2051,9 @@ static int __devinit enic_probe(struct pci_dev *pdev,
goto err_out_dev_deinit;
}
+ enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+ enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
+
netdev->netdev_ops = &enic_netdev_ops;
netdev->watchdog_timeo = 2 * HZ;
netdev->ethtool_ops = &enic_ethtool_ops;
diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c
index 32111144efc..02839bf0fe8 100644
--- a/drivers/net/enic/enic_res.c
+++ b/drivers/net/enic/enic_res.c
@@ -66,21 +66,21 @@ int enic_get_vnic_config(struct enic *enic)
GET_CONFIG(wq_desc_count);
GET_CONFIG(rq_desc_count);
GET_CONFIG(mtu);
- GET_CONFIG(intr_timer);
GET_CONFIG(intr_timer_type);
GET_CONFIG(intr_mode);
+ GET_CONFIG(intr_timer_usec);
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
c->wq_desc_count));
- c->wq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+ c->wq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
c->rq_desc_count =
min_t(u32, ENIC_MAX_RQ_DESCS,
max_t(u32, ENIC_MIN_RQ_DESCS,
c->rq_desc_count));
- c->rq_desc_count &= 0xfffffff0; /* must be aligned to groups of 16 */
+ c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */
if (c->mtu == 0)
c->mtu = 1500;
@@ -88,15 +88,17 @@ int enic_get_vnic_config(struct enic *enic)
max_t(u16, ENIC_MIN_MTU,
c->mtu));
- c->intr_timer = min_t(u16, VNIC_INTR_TIMER_MAX, c->intr_timer);
+ c->intr_timer_usec = min_t(u32,
+ INTR_COALESCE_HW_TO_USEC(VNIC_INTR_TIMER_MAX),
+ c->intr_timer_usec);
printk(KERN_INFO PFX "vNIC MAC addr %pM wq/rq %d/%d\n",
enic->mac_addr, c->wq_desc_count, c->rq_desc_count);
printk(KERN_INFO PFX "vNIC mtu %d csum tx/rx %d/%d tso/lro %d/%d "
- "intr timer %d\n",
+ "intr timer %d usec\n",
c->mtu, ENIC_SETTING(enic, TXCSUM),
ENIC_SETTING(enic, RXCSUM), ENIC_SETTING(enic, TSO),
- ENIC_SETTING(enic, LRO), c->intr_timer);
+ ENIC_SETTING(enic, LRO), c->intr_timer_usec);
return 0;
}
@@ -303,7 +305,7 @@ void enic_init_vnic_resources(struct enic *enic)
for (i = 0; i < enic->intr_count; i++) {
vnic_intr_init(&enic->intr[i],
- enic->config.intr_timer,
+ INTR_COALESCE_USEC_TO_HW(enic->config.intr_timer_usec),
enic->config.intr_timer_type,
mask_on_assertion);
}
diff --git a/drivers/net/enic/vnic_dev.c b/drivers/net/enic/vnic_dev.c
index 29a48e8b59d..69b9b70c7da 100644
--- a/drivers/net/enic/vnic_dev.c
+++ b/drivers/net/enic/vnic_dev.c
@@ -36,7 +36,6 @@ struct vnic_res {
};
#define VNIC_DEV_CAP_INIT 0x0001
-#define VNIC_DEV_CAP_PERBI 0x0002
struct vnic_dev {
void *priv;
diff --git a/drivers/net/enic/vnic_enet.h b/drivers/net/enic/vnic_enet.h
index 6332ac9391b..8eeb6758491 100644
--- a/drivers/net/enic/vnic_enet.h
+++ b/drivers/net/enic/vnic_enet.h
@@ -20,6 +20,10 @@
#ifndef _VNIC_ENIC_H_
#define _VNIC_ENIC_H_
+/* Hardware intr coalesce timer is in units of 1.5us */
+#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2/3)
+#define INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3/2)
+
/* Device-specific region: enet configuration */
struct vnic_enet_config {
u32 flags;
@@ -30,6 +34,7 @@ struct vnic_enet_config {
u8 intr_timer_type;
u8 intr_mode;
char devname[16];
+ u32 intr_timer_usec;
};
#define VENETF_TSO 0x1 /* TSO enabled */
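A quick numeric check of the new conversion macros (a stand-alone sketch; the integer division truncates exactly as in the driver, so round-tripping is only exact for multiples of 3 us):

#include <stdio.h>

/* Same arithmetic as INTR_COALESCE_USEC_TO_HW()/INTR_COALESCE_HW_TO_USEC():
 * one hardware unit is 1.5 us, so usec -> hw multiplies by 2/3 and
 * hw -> usec multiplies by 3/2, both in truncating integer math. */
#define USEC_TO_HW(usec)  ((usec) * 2 / 3)
#define HW_TO_USEC(hw)    ((hw) * 3 / 2)

int main(void)
{
        printf("30 us  -> %d hw units\n", USEC_TO_HW(30));  /* 20 */
        printf("20 hw  -> %d us\n", HW_TO_USEC(20));        /* 30 */
        printf("125 us -> %d hw units\n", USEC_TO_HW(125)); /* 83, truncated */
        return 0;
}
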
diff --git a/drivers/net/enic/vnic_intr.c b/drivers/net/enic/vnic_intr.c
index 1f8786d7195..3934309a949 100644
--- a/drivers/net/enic/vnic_intr.c
+++ b/drivers/net/enic/vnic_intr.c
@@ -50,12 +50,18 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion)
{
- iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+ vnic_intr_coalescing_timer_set(intr, coalescing_timer);
iowrite32(coalescing_type, &intr->ctrl->coalescing_type);
iowrite32(mask_on_assertion, &intr->ctrl->mask_on_assertion);
iowrite32(0, &intr->ctrl->int_credits);
}
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ unsigned int coalescing_timer)
+{
+ iowrite32(coalescing_timer, &intr->ctrl->coalescing_timer);
+}
+
void vnic_intr_clean(struct vnic_intr *intr)
{
iowrite32(0, &intr->ctrl->int_credits);
diff --git a/drivers/net/enic/vnic_intr.h b/drivers/net/enic/vnic_intr.h
index 9a53604edce..2fe6c6339e3 100644
--- a/drivers/net/enic/vnic_intr.h
+++ b/drivers/net/enic/vnic_intr.h
@@ -61,6 +61,7 @@ static inline void vnic_intr_unmask(struct vnic_intr *intr)
static inline void vnic_intr_mask(struct vnic_intr *intr)
{
iowrite32(1, &intr->ctrl->mask);
+ (void)ioread32(&intr->ctrl->mask);
}
static inline void vnic_intr_return_credits(struct vnic_intr *intr,
@@ -101,6 +102,8 @@ int vnic_intr_alloc(struct vnic_dev *vdev, struct vnic_intr *intr,
unsigned int index);
void vnic_intr_init(struct vnic_intr *intr, unsigned int coalescing_timer,
unsigned int coalescing_type, unsigned int mask_on_assertion);
+void vnic_intr_coalescing_timer_set(struct vnic_intr *intr,
+ unsigned int coalescing_timer);
void vnic_intr_clean(struct vnic_intr *intr);
#endif /* _VNIC_INTR_H_ */
diff --git a/drivers/net/enic/vnic_nic.h b/drivers/net/enic/vnic_nic.h
index eeaf329945d..cf80ab46d58 100644
--- a/drivers/net/enic/vnic_nic.h
+++ b/drivers/net/enic/vnic_nic.h
@@ -41,12 +41,12 @@
#define NIC_CFG_IG_VLAN_STRIP_EN_MASK_FIELD 1UL
#define NIC_CFG_IG_VLAN_STRIP_EN_SHIFT 24
-#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 0)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 1)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 2)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 3)
-#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 4)
-#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_IPV4 (1 << 1)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV4 (1 << 2)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6 (1 << 3)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6 (1 << 4)
+#define NIC_CFG_RSS_HASH_TYPE_IPV6_EX (1 << 5)
+#define NIC_CFG_RSS_HASH_TYPE_TCP_IPV6_EX (1 << 6)
static inline void vnic_set_nic_cfg(u32 *nic_cfg,
u8 rss_default_cpu, u8 rss_hash_type,
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 41494f7b2ec..39c271b6be4 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -167,7 +167,7 @@ static const struct epic_chip_info pci_id_tbl[] = {
};
-static struct pci_device_id epic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(epic_pci_tbl) = {
{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
@@ -1390,21 +1390,20 @@ static void set_rx_mode(struct net_device *dev)
outl(0x002C, ioaddr + RxCtrl);
/* Unconditionally log net taps. */
memset(mc_filter, 0xff, sizeof(mc_filter));
- } else if ((dev->mc_count > 0) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
/* There is apparently a chip bug, so the multicast filter
is never enabled. */
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outl(0x000C, ioaddr + RxCtrl);
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
outl(0x0004, ioaddr + RxCtrl);
return;
} else { /* Never executed, for now. */
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
unsigned int bit_nr =
ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit_nr >> 3] |= (1 << bit_nr);
diff --git a/drivers/net/eth16i.c b/drivers/net/eth16i.c
index 71bfeec33a0..d3abeee3f11 100644
--- a/drivers/net/eth16i.c
+++ b/drivers/net/eth16i.c
@@ -1359,7 +1359,7 @@ static void eth16i_multicast(struct net_device *dev)
{
int ioaddr = dev->base_addr;
- if(dev->mc_count || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
+ if (!netdev_mc_empty(dev) || dev->flags&(IFF_ALLMULTI|IFF_PROMISC))
{
outb(3, ioaddr + RECEIVE_MODE_REG);
} else {
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index bd1db92aec1..209742304e2 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -755,7 +755,7 @@ static void ethoc_set_multicast_list(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
u32 mode = ethoc_read(priv, MODER);
- struct dev_mc_list *mc = NULL;
+ struct dev_mc_list *mc;
u32 hash[2] = { 0, 0 };
/* set loopback mode if requested */
@@ -783,8 +783,8 @@ static void ethoc_set_multicast_list(struct net_device *dev)
hash[0] = 0xffffffff;
hash[1] = 0xffffffff;
} else {
- for (mc = dev->mc_list; mc; mc = mc->next) {
- u32 crc = ether_crc(mc->dmi_addrlen, mc->dmi_addr);
+ netdev_for_each_mc_addr(mc, dev) {
+ u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);
int bit = (crc >> 26) & 0x3f;
hash[bit >> 5] |= 1 << (bit & 0x1f);
}
@@ -904,7 +904,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
mmio = devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, res->name);
+ resource_size(res), res->name);
if (!mmio) {
dev_err(&pdev->dev, "cannot request I/O memory space\n");
ret = -ENXIO;
@@ -917,7 +917,7 @@ static int ethoc_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
if (res) {
mem = devm_request_mem_region(&pdev->dev, res->start,
- res->end - res->start + 1, res->name);
+ resource_size(res), res->name);
if (!mem) {
dev_err(&pdev->dev, "cannot request memory space\n");
ret = -ENXIO;
@@ -945,7 +945,7 @@ static int ethoc_probe(struct platform_device *pdev)
priv->dma_alloc = 0;
priv->iobase = devm_ioremap_nocache(&pdev->dev, netdev->base_addr,
- mmio->end - mmio->start + 1);
+ resource_size(mmio));
if (!priv->iobase) {
dev_err(&pdev->dev, "cannot remap I/O memory space\n");
ret = -ENXIO;
@@ -954,7 +954,7 @@ static int ethoc_probe(struct platform_device *pdev)
if (netdev->mem_end) {
priv->membase = devm_ioremap_nocache(&pdev->dev,
- netdev->mem_start, mem->end - mem->start + 1);
+ netdev->mem_start, resource_size(mem));
if (!priv->membase) {
dev_err(&pdev->dev, "cannot remap memory space\n");
ret = -ENXIO;
diff --git a/drivers/net/ewrk3.c b/drivers/net/ewrk3.c
index dd4ba01fd92..91e59f3a9d6 100644
--- a/drivers/net/ewrk3.c
+++ b/drivers/net/ewrk3.c
@@ -1169,7 +1169,7 @@ static void set_multicast_list(struct net_device *dev)
static void SetMulticastFilter(struct net_device *dev)
{
struct ewrk3_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
u_long iobase = dev->base_addr;
int i;
char *addrs, bit, byte;
@@ -1213,9 +1213,8 @@ static void SetMulticastFilter(struct net_device *dev)
}
/* Update table */
- for (i = 0; i < dev->mc_count; i++) { /* for each address in the list */
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & ((1 << 9) - 1); /* hashcode is 9 LSb of CRC */
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index dac4e595589..9d5ad08a119 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1786,18 +1786,16 @@ static void __set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_PROM | CR_W_AB | CR_W_AM;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = CR_W_AB | CR_W_AM;
} else {
struct dev_mc_list *mclist;
- int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
unsigned int bit;
bit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
mc_filter[bit >> 5] |= (1 << bit);
@@ -1941,7 +1939,7 @@ static int netdev_close(struct net_device *dev)
return 0;
}
-static struct pci_device_id fealnx_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(fealnx_pci_tbl) = {
{0x1516, 0x0800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1516, 0x0803, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
{0x1516, 0x0891, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index 16a1d58419d..9f98c1c4a34 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -1128,6 +1128,26 @@ static phy_info_t phy_info_dp83848= {
},
};
+static phy_info_t phy_info_lan8700 = {
+ 0x0007C0C,
+ "LAN8700",
+ (const phy_cmd_t []) { /* config */
+ { mk_mii_read(MII_REG_CR), mii_parse_cr },
+ { mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* startup */
+ { mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+ { mk_mii_read(MII_REG_SR), mii_parse_sr },
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* act_int */
+ { mk_mii_end, }
+ },
+ (const phy_cmd_t []) { /* shutdown */
+ { mk_mii_end, }
+ },
+};
/* ------------------------------------------------------------------------- */
static phy_info_t const * const phy_info[] = {
@@ -1137,6 +1157,7 @@ static phy_info_t const * const phy_info[] = {
&phy_info_am79c874,
&phy_info_ks8721bl,
&phy_info_dp83848,
+ &phy_info_lan8700,
NULL
};
@@ -1554,7 +1575,7 @@ static void set_multicast_list(struct net_device *dev)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct dev_mc_list *dmi;
- unsigned int i, j, bit, data, crc, tmp;
+ unsigned int i, bit, data, crc, tmp;
unsigned char hash;
if (dev->flags & IFF_PROMISC) {
@@ -1583,9 +1604,7 @@ static void set_multicast_list(struct net_device *dev)
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
- dmi = dev->mc_list;
-
- for (j = 0; j < dev->mc_count; j++, dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
/* Only support group multicast for now */
if (!(dmi->dmi_addr[0] & 1))
continue;
@@ -1658,6 +1677,7 @@ static int fec_enet_init(struct net_device *dev, int index)
{
struct fec_enet_private *fep = netdev_priv(dev);
struct bufdesc *cbd_base;
+ struct bufdesc *bdp;
int i;
/* Allocate memory for buffer descriptors. */
@@ -1710,6 +1730,34 @@ static int fec_enet_init(struct net_device *dev, int index)
/* Set MII speed to 2.5 MHz */
fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
/ 2500000) / 2) & 0x3F) << 1;
+
+ /* Initialize the receive buffer descriptors. */
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* ...and the same for transmit */
+ bdp = fep->tx_bd_base;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp++;
+ }
+
+ /* Set the last buffer to wrap */
+ bdp--;
+ bdp->cbd_sc |= BD_SC_WRAP;
+
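The descriptor-ring setup moved into fec_enet_init() follows the usual wrap-bit convention: clear every descriptor, then flag the last one so the controller loops back to entry 0. A generic sketch of that pattern (ring length, field and flag names are illustrative, not the FEC's):

#include <stdint.h>

#define RING_SIZE  64       /* illustrative ring length */
#define DESC_WRAP  0x2000   /* illustrative "wrap to start" status bit */

struct ring_desc {
        uint16_t status;
        uint32_t bufaddr;
};

/* Clear every descriptor and mark the last one so the controller wraps
 * back to descriptor 0, as fec_enet_init() now does for both rings. */
static void ring_init(struct ring_desc *ring)
{
        int i;

        for (i = 0; i < RING_SIZE; i++) {
                ring[i].status = 0;
                ring[i].bufaddr = 0;
        }
        ring[RING_SIZE - 1].status |= DESC_WRAP;
}
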
fec_restart(dev, 0);
/* Queue up command to detect the PHY and initialize the
@@ -1730,7 +1778,6 @@ static void
fec_restart(struct net_device *dev, int duplex)
{
struct fec_enet_private *fep = netdev_priv(dev);
- struct bufdesc *bdp;
int i;
/* Whack a reset. We should wait for this. */
@@ -1768,33 +1815,6 @@ fec_restart(struct net_device *dev, int duplex)
}
}
- /* Initialize the receive buffer descriptors. */
- bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- bdp++;
- }
-
- /* Set the last buffer to wrap */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
- /* ...and the same for transmit */
- bdp = fep->tx_bd_base;
- for (i = 0; i < TX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- bdp->cbd_bufaddr = 0;
- bdp++;
- }
-
- /* Set the last buffer to wrap */
- bdp--;
- bdp->cbd_sc |= BD_SC_WRAP;
-
/* Enable MII mode */
if (duplex) {
/* MII enable / FD enable */
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 848e8407ea8..0dbd7219bbd 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -575,19 +575,16 @@ static void mpc52xx_fec_set_multicast_list(struct net_device *dev)
out_be32(&fec->gaddr2, 0xffffffff);
} else {
u32 crc;
- int i;
struct dev_mc_list *dmi;
u32 gaddr1 = 0x00000000;
u32 gaddr2 = 0x00000000;
- dmi = dev->mc_list;
- for (i=0; i<dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
if (crc >= 32)
gaddr1 |= 1 << (crc-32);
else
gaddr2 |= 1 << crc;
- dmi = dmi->next;
}
out_be32(&fec->gaddr1, gaddr1);
out_be32(&fec->gaddr2, gaddr2);
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 3c340489804..ca05e566202 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -3095,7 +3095,7 @@ static void nv_set_multicast(struct net_device *dev)
} else {
pff |= NVREG_PFF_MYADDR;
- if (dev->flags & IFF_ALLMULTI || dev->mc_list) {
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
u32 alwaysOff[2];
u32 alwaysOn[2];
@@ -3105,8 +3105,7 @@ static void nv_set_multicast(struct net_device *dev)
} else {
struct dev_mc_list *walk;
- walk = dev->mc_list;
- while (walk != NULL) {
+ netdev_for_each_mc_addr(walk, dev) {
u32 a, b;
a = le32_to_cpu(*(__le32 *) walk->dmi_addr);
b = le16_to_cpu(*(__le16 *) (&walk->dmi_addr[4]));
@@ -3114,7 +3113,6 @@ static void nv_set_multicast(struct net_device *dev)
alwaysOff[0] &= ~a;
alwaysOn[1] &= b;
alwaysOff[1] &= ~b;
- walk = walk->next;
}
}
addr[0] = alwaysOn[0];
@@ -6198,7 +6196,7 @@ static void nv_shutdown(struct pci_dev *pdev)
#define nv_resume NULL
#endif /* CONFIG_PM */
-static struct pci_device_id pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
{ /* nForce Ethernet Controller */
PCI_DEVICE(0x10DE, 0x01C3),
.driver_data = DEV_NEED_TIMERIRQ|DEV_NEED_LINKTIMER,
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
index 562ea68ed99..fc073b5a38c 100644
--- a/drivers/net/fs_enet/Kconfig
+++ b/drivers/net/fs_enet/Kconfig
@@ -1,9 +1,13 @@
config FS_ENET
tristate "Freescale Ethernet Driver"
- depends on CPM1 || CPM2
+ depends on CPM1 || CPM2 || PPC_MPC512x
select MII
select PHYLIB
+config FS_ENET_MPC5121_FEC
+ def_bool y if (FS_ENET && PPC_MPC512x)
+ select FS_ENET_HAS_FEC
+
config FS_ENET_HAS_SCC
bool "Chip has an SCC usable for ethernet"
depends on FS_ENET && (CPM1 || CPM2)
@@ -16,13 +20,13 @@ config FS_ENET_HAS_FCC
config FS_ENET_HAS_FEC
bool "Chip has an FEC usable for ethernet"
- depends on FS_ENET && CPM1
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
select FS_ENET_MDIO_FEC
default y
config FS_ENET_MDIO_FEC
tristate "MDIO driver for FEC"
- depends on FS_ENET && CPM1
+ depends on FS_ENET && (CPM1 || FS_ENET_MPC5121_FEC)
config FS_ENET_MDIO_FCC
tristate "MDIO driver for FCC"
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index ec2f5034457..0770e2f6da6 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -108,9 +108,7 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
* the last indicator should be set.
*/
if ((sc & BD_ENET_RX_LAST) == 0)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s rcv is not +last\n",
- dev->name);
+ dev_warn(fep->dev, "rcv is not +last\n");
/*
* Check for errors.
@@ -178,9 +176,8 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
received++;
netif_receive_skb(skb);
} else {
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s Memory squeeze, dropping packet.\n",
- dev->name);
+ dev_warn(fep->dev,
+ "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -242,9 +239,7 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
* the last indicator should be set.
*/
if ((sc & BD_ENET_RX_LAST) == 0)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s rcv is not +last\n",
- dev->name);
+ dev_warn(fep->dev, "rcv is not +last\n");
/*
* Check for errors.
@@ -313,9 +308,8 @@ static int fs_enet_rx_non_napi(struct net_device *dev)
received++;
netif_rx(skb);
} else {
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s Memory squeeze, dropping packet.\n",
- dev->name);
+ dev_warn(fep->dev,
+ "Memory squeeze, dropping packet.\n");
fep->stats.rx_dropped++;
skbn = skb;
}
@@ -388,10 +382,10 @@ static void fs_enet_tx(struct net_device *dev)
} else
fep->stats.tx_packets++;
- if (sc & BD_ENET_TX_READY)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s HEY! Enet xmit interrupt and TX_READY.\n",
- dev->name);
+ if (sc & BD_ENET_TX_READY) {
+ dev_warn(fep->dev,
+ "HEY! Enet xmit interrupt and TX_READY.\n");
+ }
/*
* Deferred means some collisions occurred during transmit,
@@ -511,9 +505,8 @@ void fs_init_bds(struct net_device *dev)
for (i = 0, bdp = fep->rx_bd_base; i < fep->rx_ring; i++, bdp++) {
skb = dev_alloc_skb(ENET_RX_FRSIZE);
if (skb == NULL) {
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s Memory squeeze, unable to allocate skb\n",
- dev->name);
+ dev_warn(fep->dev,
+ "Memory squeeze, unable to allocate skb\n");
break;
}
skb_align(skb, ENET_RX_ALIGN);
@@ -587,6 +580,40 @@ void fs_cleanup_bds(struct net_device *dev)
/**********************************************************************************/
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+/*
+ * MPC5121 FEC requires 4-byte alignment for TX data buffer!
+ */
+static struct sk_buff *tx_skb_align_workaround(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct sk_buff *new_skb;
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ /* Alloc new skb */
+ new_skb = dev_alloc_skb(skb->len + 4);
+ if (!new_skb) {
+ if (net_ratelimit()) {
+ dev_warn(fep->dev,
+ "Memory squeeze, dropping tx packet.\n");
+ }
+ return NULL;
+ }
+
+ /* Make sure new skb is properly aligned */
+ skb_align(new_skb, 4);
+
+ /* Copy data to new skb ... */
+ skb_copy_from_linear_data(skb, new_skb->data, skb->len);
+ skb_put(new_skb, skb->len);
+
+ /* ... and free the old one */
+ dev_kfree_skb_any(skb);
+
+ return new_skb;
+}
+#endif
+
static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
@@ -595,6 +622,19 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
u16 sc;
unsigned long flags;
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ if (((unsigned long)skb->data) & 0x3) {
+ skb = tx_skb_align_workaround(dev, skb);
+ if (!skb) {
+ /*
+ * We have lost the packet due to a memory allocation error
+ * in tx_skb_align_workaround(). Hopefully the original
+ * skb is still valid, so try to transmit it later.
+ */
+ return NETDEV_TX_BUSY;
+ }
+ }
+#endif
spin_lock_irqsave(&fep->tx_lock, flags);
/*
@@ -610,8 +650,7 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
* Ooops. All transmit buffers are full. Bail out.
* This should not happen, since the tx queue should be stopped.
*/
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s tx queue full!.\n", dev->name);
+ dev_warn(fep->dev, "tx queue full!.\n");
return NETDEV_TX_BUSY;
}
@@ -788,8 +827,7 @@ static int fs_enet_open(struct net_device *dev)
r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
"fs_enet-mac", dev);
if (r != 0) {
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s Could not allocate FS_ENET IRQ!", dev->name);
+ dev_err(fep->dev, "Could not allocate FS_ENET IRQ!");
if (fep->fpi->use_napi)
napi_disable(&fep->napi);
return -EINVAL;
@@ -1053,7 +1091,7 @@ static int __devinit fs_enet_probe(struct of_device *ofdev,
if (ret)
goto out_free_bd;
- printk(KERN_INFO "%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
+ pr_info("%s: fs_enet: %pM\n", ndev->name, ndev->dev_addr);
return 0;
@@ -1103,11 +1141,18 @@ static struct of_device_id fs_enet_match[] = {
},
#endif
#ifdef CONFIG_FS_ENET_HAS_FEC
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ {
+ .compatible = "fsl,mpc5121-fec",
+ .data = (void *)&fs_fec_ops,
+ },
+#else
{
.compatible = "fsl,pq1-fec-enet",
.data = (void *)&fs_fec_ops,
},
#endif
+#endif
{}
};
MODULE_DEVICE_TABLE(of, fs_enet_match);
diff --git a/drivers/net/fs_enet/fs_enet.h b/drivers/net/fs_enet/fs_enet.h
index ef01e09781a..1ece4b1a689 100644
--- a/drivers/net/fs_enet/fs_enet.h
+++ b/drivers/net/fs_enet/fs_enet.h
@@ -13,9 +13,56 @@
#ifdef CONFIG_CPM1
#include <asm/cpm1.h>
+#endif
+
+#if defined(CONFIG_FS_ENET_HAS_FEC)
+#include <asm/cpm.h>
+
+#if defined(CONFIG_FS_ENET_MPC5121_FEC)
+/* MPC5121 FEC has different register layout */
+struct fec {
+ u32 fec_reserved0;
+ u32 fec_ievent; /* Interrupt event reg */
+ u32 fec_imask; /* Interrupt mask reg */
+ u32 fec_reserved1;
+ u32 fec_r_des_active; /* Receive descriptor reg */
+ u32 fec_x_des_active; /* Transmit descriptor reg */
+ u32 fec_reserved2[3];
+ u32 fec_ecntrl; /* Ethernet control reg */
+ u32 fec_reserved3[6];
+ u32 fec_mii_data; /* MII manage frame reg */
+ u32 fec_mii_speed; /* MII speed control reg */
+ u32 fec_reserved4[7];
+ u32 fec_mib_ctrlstat; /* MIB control/status reg */
+ u32 fec_reserved5[7];
+ u32 fec_r_cntrl; /* Receive control reg */
+ u32 fec_reserved6[15];
+ u32 fec_x_cntrl; /* Transmit Control reg */
+ u32 fec_reserved7[7];
+ u32 fec_addr_low; /* Low 32bits MAC address */
+ u32 fec_addr_high; /* High 16bits MAC address */
+ u32 fec_opd; /* Opcode + Pause duration */
+ u32 fec_reserved8[10];
+ u32 fec_hash_table_high; /* High 32bits hash table */
+ u32 fec_hash_table_low; /* Low 32bits hash table */
+ u32 fec_grp_hash_table_high; /* High 32bits hash table */
+ u32 fec_grp_hash_table_low; /* Low 32bits hash table */
+ u32 fec_reserved9[7];
+ u32 fec_x_wmrk; /* FIFO transmit water mark */
+ u32 fec_reserved10;
+ u32 fec_r_bound; /* FIFO receive bound reg */
+ u32 fec_r_fstart; /* FIFO receive start reg */
+ u32 fec_reserved11[11];
+ u32 fec_r_des_start; /* Receive descriptor ring */
+ u32 fec_x_des_start; /* Transmit descriptor ring */
+ u32 fec_r_buff_size; /* Maximum receive buff size */
+ u32 fec_reserved12[26];
+ u32 fec_dma_control; /* DMA Endian and other ctrl */
+};
+#endif
struct fec_info {
- fec_t __iomem *fecp;
+ struct fec __iomem *fecp;
u32 mii_speed;
};
#endif
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index 22e5a847a58..cf4f674f9e2 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -218,7 +218,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FCC_MAX_MULTICAST_ADDRS) {
W32(ep, fen_gaddrh, 0xffffffff);
W32(ep, fen_gaddrl, 0xffffffff);
@@ -235,7 +235,7 @@ static void set_multicast_list(struct net_device *dev)
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
+ netdev_for_each_mc_addr(pmc, dev)
set_multicast_one(dev, pmc->dmi_addr);
set_multicast_finish(dev);
} else
@@ -476,8 +476,9 @@ static void clear_int_events(struct net_device *dev, u32 int_events)
static void ev_error(struct net_device *dev, u32 int_events)
{
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s FS_ENET ERROR(s) 0x%x\n", dev->name, int_events);
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FS_ENET ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index ca7bcb8ab3a..cd2c6cca5f2 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -80,7 +80,7 @@
*/
#define FEC_RESET_DELAY 50
-static int whack_reset(fec_t __iomem *fecp)
+static int whack_reset(struct fec __iomem *fecp)
{
int i;
@@ -168,7 +168,7 @@ static void cleanup_data(struct net_device *dev)
static void set_promiscuous_mode(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FS(fecp, r_cntrl, FEC_RCNTRL_PROM);
}
@@ -216,11 +216,11 @@ static void set_multicast_one(struct net_device *dev, const u8 *mac)
static void set_multicast_finish(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > FEC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > FEC_MAX_MULTICAST_ADDRS) {
fep->fec.hthi = 0xffffffffU;
fep->fec.htlo = 0xffffffffU;
}
@@ -236,7 +236,7 @@ static void set_multicast_list(struct net_device *dev)
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
+ netdev_for_each_mc_addr(pmc, dev)
set_multicast_one(dev, pmc->dmi_addr);
set_multicast_finish(dev);
} else
@@ -246,7 +246,7 @@ static void set_multicast_list(struct net_device *dev)
static void restart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
const struct fs_platform_info *fpi = fep->fpi;
dma_addr_t rx_bd_base_phys, tx_bd_base_phys;
int r;
@@ -257,8 +257,7 @@ static void restart(struct net_device *dev)
r = whack_reset(fep->fec.fecp);
if (r != 0)
- printk(KERN_ERR DRV_MODULE_NAME
- ": %s FEC Reset FAILED!\n", dev->name);
+ dev_err(fep->dev, "FEC Reset FAILED!\n");
/*
* Set station address.
*/
@@ -281,7 +280,11 @@ static void restart(struct net_device *dev)
* Set maximum receive buffer size.
*/
FW(fecp, r_buff_size, PKT_MAXBLR_SIZE);
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FW(fecp, r_cntrl, PKT_MAXBUF_SIZE << 16);
+#else
FW(fecp, r_hash, PKT_MAXBUF_SIZE);
+#endif
/* get physical address */
rx_bd_base_phys = fep->ring_mem_addr;
@@ -298,7 +301,11 @@ static void restart(struct net_device *dev)
/*
* Enable big endian and don't care about SDMA FC.
*/
+#ifdef CONFIG_FS_ENET_MPC5121_FEC
+ FS(fecp, dma_control, 0xC0000000);
+#else
FW(fecp, fun_code, 0x78000000);
+#endif
/*
* Set MII speed.
@@ -309,9 +316,17 @@ static void restart(struct net_device *dev)
* Clear any outstanding interrupt.
*/
FW(fecp, ievent, 0xffc0);
+#ifndef CONFIG_FS_ENET_MPC5121_FEC
FW(fecp, ivec, (virq_to_hw(fep->interrupt) / 2) << 29);
FW(fecp, r_cntrl, FEC_RCNTRL_MII_MODE); /* MII enable */
+#else
+ /*
+ * Only set MII mode - do not touch maximum frame length
+ * configured before.
+ */
+ FS(fecp, r_cntrl, FEC_RCNTRL_MII_MODE);
+#endif
/*
* adjust to duplex mode
*/
@@ -340,7 +355,7 @@ static void stop(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
const struct fs_platform_info *fpi = fep->fpi;
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
struct fec_info* feci= fep->phydev->bus->priv;
@@ -355,9 +370,7 @@ static void stop(struct net_device *dev)
udelay(1);
if (i == FEC_RESET_DELAY)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s FEC timeout on graceful transmit stop\n",
- dev->name);
+ dev_warn(fep->dev, "FEC timeout on graceful transmit stop\n");
/*
* Disable FEC. Let only MII interrupts.
*/
@@ -378,7 +391,7 @@ static void stop(struct net_device *dev)
static void napi_clear_rx_event(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, ievent, FEC_NAPI_RX_EVENT_MSK);
}
@@ -386,7 +399,7 @@ static void napi_clear_rx_event(struct net_device *dev)
static void napi_enable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FS(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
}
@@ -394,7 +407,7 @@ static void napi_enable_rx(struct net_device *dev)
static void napi_disable_rx(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FC(fecp, imask, FEC_NAPI_RX_EVENT_MSK);
}
@@ -402,7 +415,7 @@ static void napi_disable_rx(struct net_device *dev)
static void rx_bd_done(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, r_des_active, 0x01000000);
}
@@ -410,7 +423,7 @@ static void rx_bd_done(struct net_device *dev)
static void tx_kickstart(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, x_des_active, 0x01000000);
}
@@ -418,7 +431,7 @@ static void tx_kickstart(struct net_device *dev)
static u32 get_int_events(struct net_device *dev)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
return FR(fecp, ievent) & FR(fecp, imask);
}
@@ -426,32 +439,33 @@ static u32 get_int_events(struct net_device *dev)
static void clear_int_events(struct net_device *dev, u32 int_events)
{
struct fs_enet_private *fep = netdev_priv(dev);
- fec_t __iomem *fecp = fep->fec.fecp;
+ struct fec __iomem *fecp = fep->fec.fecp;
FW(fecp, ievent, int_events);
}
static void ev_error(struct net_device *dev, u32 int_events)
{
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s FEC ERROR(s) 0x%x\n", dev->name, int_events);
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "FEC ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
{
struct fs_enet_private *fep = netdev_priv(dev);
- if (*sizep < sizeof(fec_t))
+ if (*sizep < sizeof(struct fec))
return -EINVAL;
- memcpy_fromio(p, fep->fec.fecp, sizeof(fec_t));
+ memcpy_fromio(p, fep->fec.fecp, sizeof(struct fec));
return 0;
}
static int get_regs_len(struct net_device *dev)
{
- return sizeof(fec_t);
+ return sizeof(struct fec);
}
static void tx_restart(struct net_device *dev)
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index 008cdd9cc53..c490a466cae 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -213,7 +213,7 @@ static void set_multicast_finish(struct net_device *dev)
/* if all multi or too many multicasts; just enable all */
if ((dev->flags & IFF_ALLMULTI) != 0 ||
- dev->mc_count > SCC_MAX_MULTICAST_ADDRS) {
+ netdev_mc_count(dev) > SCC_MAX_MULTICAST_ADDRS) {
W16(ep, sen_gaddr1, 0xffff);
W16(ep, sen_gaddr2, 0xffff);
@@ -228,7 +228,7 @@ static void set_multicast_list(struct net_device *dev)
if ((dev->flags & IFF_PROMISC) == 0) {
set_multicast_start(dev);
- for (pmc = dev->mc_list; pmc != NULL; pmc = pmc->next)
+ netdev_for_each_mc_addr(pmc, dev)
set_multicast_one(dev, pmc->dmi_addr);
set_multicast_finish(dev);
} else
@@ -367,9 +367,7 @@ static void stop(struct net_device *dev)
udelay(1);
if (i == SCC_RESET_DELAY)
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s SCC timeout on graceful transmit stop\n",
- dev->name);
+ dev_warn(fep->dev, "SCC timeout on graceful transmit stop\n");
W16(sccp, scc_sccm, 0);
C32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
@@ -429,8 +427,9 @@ static void clear_int_events(struct net_device *dev, u32 int_events)
static void ev_error(struct net_device *dev, u32 int_events)
{
- printk(KERN_WARNING DRV_MODULE_NAME
- ": %s SCC ERROR(s) 0x%x\n", dev->name, int_events);
+ struct fs_enet_private *fep = netdev_priv(dev);
+
+ dev_warn(fep->dev, "SCC ERROR(s) 0x%x\n", int_events);
}
static int get_regs(struct net_device *dev, void *p, int *sizep)
diff --git a/drivers/net/fs_enet/mii-fec.c b/drivers/net/fs_enet/mii-fec.c
index 96eba4280c5..5944b65082c 100644
--- a/drivers/net/fs_enet/mii-fec.c
+++ b/drivers/net/fs_enet/mii-fec.c
@@ -52,7 +52,7 @@
static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
{
struct fec_info* fec = bus->priv;
- fec_t __iomem *fecp = fec->fecp;
+ struct fec __iomem *fecp = fec->fecp;
int i, ret = -1;
BUG_ON((in_be32(&fecp->fec_r_cntrl) & FEC_RCNTRL_MII_MODE) == 0);
@@ -75,7 +75,7 @@ static int fs_enet_fec_mii_read(struct mii_bus *bus , int phy_id, int location)
static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, u16 val)
{
struct fec_info* fec = bus->priv;
- fec_t __iomem *fecp = fec->fecp;
+ struct fec __iomem *fecp = fec->fecp;
int i;
/* this must never happen */
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 8bd3c9f1753..61a7b4351e7 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -998,7 +998,7 @@ static int gfar_probe(struct of_device *ofdev,
}
/* Need to reverse the bit maps as bit_map's MSB is q0
- * but, for_each_bit parses from right to left, which
+ * but, for_each_set_bit parses from right to left, which
* basically reverses the queue numbers */
for (i = 0; i< priv->num_grps; i++) {
priv->gfargrp[i].tx_bit_map = reverse_bitmap(
@@ -1011,7 +1011,7 @@ static int gfar_probe(struct of_device *ofdev,
* also assign queues to groups */
for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
priv->gfargrp[grp_idx].num_rx_queues = 0x0;
- for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
priv->num_rx_queues) {
priv->gfargrp[grp_idx].num_rx_queues++;
priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1019,7 +1019,7 @@ static int gfar_probe(struct of_device *ofdev,
rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
}
priv->gfargrp[grp_idx].num_tx_queues = 0x0;
- for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
+ for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
priv->num_tx_queues) {
priv->gfargrp[grp_idx].num_tx_queues++;
priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
@@ -1709,7 +1709,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
if (priv->mode == MQ_MG_MODE) {
baddr = &regs->txic0;
- for_each_bit (i, &tx_mask, priv->num_tx_queues) {
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
if (likely(priv->tx_queue[i]->txcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, priv->tx_queue[i]->txic);
@@ -1717,7 +1717,7 @@ void gfar_configure_coalescing(struct gfar_private *priv,
}
baddr = &regs->rxic0;
- for_each_bit (i, &rx_mask, priv->num_rx_queues) {
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
if (likely(priv->rx_queue[i]->rxcoalescing)) {
gfar_write(baddr + i, 0);
gfar_write(baddr + i, priv->rx_queue[i]->rxic);
@@ -2607,7 +2607,7 @@ static int gfar_poll(struct napi_struct *napi, int budget)
budget_per_queue = left_over_budget/num_queues;
left_over_budget = 0;
- for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
+ for_each_set_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
if (test_bit(i, &serviced_queues))
continue;
rx_queue = priv->rx_queue[i];
@@ -2863,11 +2863,11 @@ static void gfar_set_multi(struct net_device *dev)
em_num = 0;
}
- if (dev->mc_count == 0)
+ if (netdev_mc_empty(dev))
return;
/* Parse the list, and set the appropriate bits */
- for(mc_ptr = dev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
+ netdev_for_each_mc_addr(mc_ptr, dev) {
if (idx < em_num) {
gfar_set_mac_for_addr(dev, idx,
mc_ptr->dmi_addr);
diff --git a/drivers/net/greth.c b/drivers/net/greth.c
new file mode 100644
index 00000000000..2b9c1cbc9ec
--- /dev/null
+++ b/drivers/net/greth.c
@@ -0,0 +1,1634 @@
+/*
+ * Aeroflex Gaisler GRETH 10/100/1G Ethernet MAC.
+ *
+ * 2005-2009 (c) Aeroflex Gaisler AB
+ *
+ * This driver supports GRETH 10/100 and GRETH 10/100/1G Ethernet MACs
+ * available in the GRLIB VHDL IP core library.
+ *
+ * Full documentation of both cores can be found here:
+ * http://www.gaisler.com/products/grlib/grip.pdf
+ *
+ * The Gigabit version supports scatter/gather DMA, any alignment of
+ * buffers and checksum offloading.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * Contributors: Kristoffer Glembo
+ * Daniel Hellstrom
+ * Marko Isomaki
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <asm/cacheflush.h>
+#include <asm/byteorder.h>
+
+#ifdef CONFIG_SPARC
+#include <asm/idprom.h>
+#endif
+
+#include "greth.h"
+
+#define GRETH_DEF_MSG_ENABLE \
+ (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+
+static int greth_debug = -1; /* -1 == use GRETH_DEF_MSG_ENABLE as value */
+module_param(greth_debug, int, 0);
+MODULE_PARM_DESC(greth_debug, "GRETH bitmapped debugging message enable value");
+
+/* Accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
+static int macaddr[6];
+module_param_array(macaddr, int, NULL, 0);
+MODULE_PARM_DESC(macaddr, "GRETH Ethernet MAC address");
+
+static int greth_edcl = 1;
+module_param(greth_edcl, int, 0);
+MODULE_PARM_DESC(greth_edcl, "GRETH EDCL usage indicator. Set to 1 if EDCL is used.");
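+/*
+ * EDCL is the GRETH core's built-in Ethernet debug link. When the core has
+ * EDCL the driver disables the EDCL speed/duplex FSM, and with greth_edcl=1
+ * autonegotiation is completed already at probe time.
+ */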
+
+static int greth_open(struct net_device *dev);
+static netdev_tx_t greth_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static netdev_tx_t greth_start_xmit_gbit(struct sk_buff *skb,
+ struct net_device *dev);
+static int greth_rx(struct net_device *dev, int limit);
+static int greth_rx_gbit(struct net_device *dev, int limit);
+static void greth_clean_tx(struct net_device *dev);
+static void greth_clean_tx_gbit(struct net_device *dev);
+static irqreturn_t greth_interrupt(int irq, void *dev_id);
+static int greth_close(struct net_device *dev);
+static int greth_set_mac_add(struct net_device *dev, void *p);
+static void greth_set_multicast_list(struct net_device *dev);
+
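+/*
+ * Register and descriptor accessors: the GRETH core is big-endian, so
+ * values are swapped explicitly with cpu_to_be32()/be32_to_cpu() and
+ * accessed through __raw_readl()/__raw_writel() so that the I/O accessors
+ * apply no additional byte swapping of their own.
+ */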
+#define GRETH_REGLOAD(a) (be32_to_cpu(__raw_readl(&(a))))
+#define GRETH_REGSAVE(a, v) (__raw_writel(cpu_to_be32(v), &(a)))
+#define GRETH_REGORIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) | (v))))
+#define GRETH_REGANDIN(a, v) (GRETH_REGSAVE(a, (GRETH_REGLOAD(a) & (v))))
+
+#define NEXT_TX(N) (((N) + 1) & GRETH_TXBD_NUM_MASK)
+#define SKIP_TX(N, C) (((N) + C) & GRETH_TXBD_NUM_MASK)
+#define NEXT_RX(N) (((N) + 1) & GRETH_RXBD_NUM_MASK)
+
+static void greth_print_rx_packet(void *addr, int len)
+{
+ print_hex_dump(KERN_DEBUG, "RX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ addr, len, true);
+}
+
+static void greth_print_tx_packet(struct sk_buff *skb)
+{
+ int i;
+ int length;
+
+ if (skb_shinfo(skb)->nr_frags == 0)
+ length = skb->len;
+ else
+ length = skb_headlen(skb);
+
+ print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, length, true);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+
+ print_hex_dump(KERN_DEBUG, "TX: ", DUMP_PREFIX_OFFSET, 16, 1,
+ phys_to_virt(page_to_phys(skb_shinfo(skb)->frags[i].page)) +
+ skb_shinfo(skb)->frags[i].page_offset,
+ length, true);
+ }
+}
+
+static inline void greth_enable_tx(struct greth_private *greth)
+{
+ wmb();
+ GRETH_REGORIN(greth->regs->control, GRETH_TXEN);
+}
+
+static inline void greth_disable_tx(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~GRETH_TXEN);
+}
+
+static inline void greth_enable_rx(struct greth_private *greth)
+{
+ wmb();
+ GRETH_REGORIN(greth->regs->control, GRETH_RXEN);
+}
+
+static inline void greth_disable_rx(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~GRETH_RXEN);
+}
+
+static inline void greth_enable_irqs(struct greth_private *greth)
+{
+ GRETH_REGORIN(greth->regs->control, GRETH_RXI | GRETH_TXI);
+}
+
+static inline void greth_disable_irqs(struct greth_private *greth)
+{
+ GRETH_REGANDIN(greth->regs->control, ~(GRETH_RXI|GRETH_TXI));
+}
+
+static inline void greth_write_bd(u32 *bd, u32 val)
+{
+ __raw_writel(cpu_to_be32(val), bd);
+}
+
+static inline u32 greth_read_bd(u32 *bd)
+{
+ return be32_to_cpu(__raw_readl(bd));
+}
+
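+/*
+ * Free all RX/TX buffers and drop their DMA mappings; called when ring
+ * setup fails and when the interface is taken down.
+ */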
+static void greth_clean_rings(struct greth_private *greth)
+{
+ int i;
+ struct greth_bd *rx_bdp = greth->rx_bd_base;
+ struct greth_bd *tx_bdp = greth->tx_bd_base;
+
+ if (greth->gbit_mac) {
+
+ /* Free and unmap RX buffers */
+ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
+ if (greth->rx_skbuff[i] != NULL) {
+ dev_kfree_skb(greth->rx_skbuff[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&rx_bdp->addr),
+ MAX_FRAME_SIZE+NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+ }
+ }
+
+ /* TX buffers */
+ while (greth->tx_free < GRETH_TXBD_NUM) {
+
+ struct sk_buff *skb = greth->tx_skbuff[greth->tx_last];
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ tx_bdp = greth->tx_bd_base + greth->tx_last;
+ greth->tx_last = NEXT_TX(greth->tx_last);
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ tx_bdp = greth->tx_bd_base + greth->tx_last;
+
+ dma_unmap_page(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ frag->size,
+ DMA_TO_DEVICE);
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ }
+ greth->tx_free += nr_frags+1;
+ dev_kfree_skb(skb);
+ }
+
+
+ } else { /* 10/100 Mbps MAC */
+
+ for (i = 0; i < GRETH_RXBD_NUM; i++, rx_bdp++) {
+ kfree(greth->rx_bufs[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&rx_bdp->addr),
+ MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ }
+ for (i = 0; i < GRETH_TXBD_NUM; i++, tx_bdp++) {
+ kfree(greth->tx_bufs[i]);
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&tx_bdp->addr),
+ MAX_FRAME_SIZE,
+ DMA_TO_DEVICE);
+ }
+ }
+}
+
+static int greth_init_rings(struct greth_private *greth)
+{
+ struct sk_buff *skb;
+ struct greth_bd *rx_bd, *tx_bd;
+ u32 dma_addr;
+ int i;
+
+ rx_bd = greth->rx_bd_base;
+ tx_bd = greth->tx_bd_base;
+
+ /* Initialize descriptor rings and buffers */
+ if (greth->gbit_mac) {
+
+ for (i = 0; i < GRETH_RXBD_NUM; i++) {
+ skb = netdev_alloc_skb(greth->netdev, MAX_FRAME_SIZE+NET_IP_ALIGN);
+ if (skb == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+ skb_reserve(skb, NET_IP_ALIGN);
+ dma_addr = dma_map_single(greth->dev,
+ skb->data,
+ MAX_FRAME_SIZE+NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth->rx_skbuff[i] = skb;
+ greth_write_bd(&rx_bd[i].addr, dma_addr);
+ greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
+ }
+
+ } else {
+
+ /* 10/100 MAC uses a fixed set of buffers and copy to/from SKBs */
+ for (i = 0; i < GRETH_RXBD_NUM; i++) {
+
+ greth->rx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
+
+ if (greth->rx_bufs[i] == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+
+ dma_addr = dma_map_single(greth->dev,
+ greth->rx_bufs[i],
+ MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth_write_bd(&rx_bd[i].addr, dma_addr);
+ greth_write_bd(&rx_bd[i].stat, GRETH_BD_EN | GRETH_BD_IE);
+ }
+ for (i = 0; i < GRETH_TXBD_NUM; i++) {
+
+ greth->tx_bufs[i] = kmalloc(MAX_FRAME_SIZE, GFP_KERNEL);
+
+ if (greth->tx_bufs[i] == NULL) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Error allocating DMA ring.\n");
+ goto cleanup;
+ }
+
+ dma_addr = dma_map_single(greth->dev,
+ greth->tx_bufs[i],
+ MAX_FRAME_SIZE,
+ DMA_TO_DEVICE);
+
+ if (dma_mapping_error(greth->dev, dma_addr)) {
+ if (netif_msg_ifup(greth))
+ dev_err(greth->dev, "Could not create initial DMA mapping\n");
+ goto cleanup;
+ }
+ greth_write_bd(&tx_bd[i].addr, dma_addr);
+ greth_write_bd(&tx_bd[i].stat, 0);
+ }
+ }
+ greth_write_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat,
+ greth_read_bd(&rx_bd[GRETH_RXBD_NUM - 1].stat) | GRETH_BD_WR);
+
+ /* Initialize pointers. */
+ greth->rx_cur = 0;
+ greth->tx_next = 0;
+ greth->tx_last = 0;
+ greth->tx_free = GRETH_TXBD_NUM;
+
+ /* Initialize descriptor base address */
+ GRETH_REGSAVE(greth->regs->tx_desc_p, greth->tx_bd_base_phys);
+ GRETH_REGSAVE(greth->regs->rx_desc_p, greth->rx_bd_base_phys);
+
+ return 0;
+
+cleanup:
+ greth_clean_rings(greth);
+ return -ENOMEM;
+}
+
+static int greth_open(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ int err;
+
+ err = greth_init_rings(greth);
+ if (err) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "Could not allocate memory for DMA rings\n");
+ return err;
+ }
+
+ err = request_irq(greth->irq, greth_interrupt, 0, "eth", (void *) dev);
+ if (err) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "Could not allocate interrupt %d\n", dev->irq);
+ greth_clean_rings(greth);
+ return err;
+ }
+
+ if (netif_msg_ifup(greth))
+ dev_dbg(&dev->dev, " starting queue\n");
+ netif_start_queue(dev);
+
+ napi_enable(&greth->napi);
+
+ greth_enable_irqs(greth);
+ greth_enable_tx(greth);
+ greth_enable_rx(greth);
+ return 0;
+
+}
+
+static int greth_close(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ napi_disable(&greth->napi);
+
+ greth_disable_tx(greth);
+
+ netif_stop_queue(dev);
+
+ free_irq(greth->irq, (void *) dev);
+
+ greth_clean_rings(greth);
+
+ return 0;
+}
+
+static netdev_tx_t
+greth_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_bd *bdp;
+ int err = NETDEV_TX_OK;
+ u32 status, dma_addr;
+
+ bdp = greth->tx_bd_base + greth->tx_next;
+
+ if (unlikely(greth->tx_free <= 0)) {
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (netif_msg_pktdata(greth))
+ greth_print_tx_packet(skb);
+
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+ goto out;
+ }
+
+ dma_addr = greth_read_bd(&bdp->addr);
+
+ memcpy((unsigned char *) phys_to_virt(dma_addr), skb->data, skb->len);
+
+ dma_sync_single_for_device(greth->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+
+ status = GRETH_BD_EN | (skb->len & GRETH_BD_LEN);
+
+ /* Wrap around descriptor ring */
+ if (greth->tx_next == GRETH_TXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ greth->tx_next = NEXT_TX(greth->tx_next);
+ greth->tx_free--;
+
+ /* No more descriptors */
+ if (unlikely(greth->tx_free == 0)) {
+
+ /* Free transmitted descriptors */
+ greth_clean_tx(dev);
+
+ /* If nothing was cleaned, stop queue & wait for irq */
+ if (unlikely(greth->tx_free == 0)) {
+ status |= GRETH_BD_IE;
+ netif_stop_queue(dev);
+ }
+ }
+
+ /* Write descriptor control word and enable transmission */
+ greth_write_bd(&bdp->stat, status);
+ greth_enable_tx(greth);
+
+out:
+ dev_kfree_skb(skb);
+ return err;
+}
+
+
+static netdev_tx_t
+greth_start_xmit_gbit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_bd *bdp;
+ u32 status = 0, dma_addr;
+ int curr_tx, nr_frags, i, err = NETDEV_TX_OK;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (greth->tx_free < nr_frags + 1) {
+ netif_stop_queue(dev);
+ err = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ if (netif_msg_pktdata(greth))
+ greth_print_tx_packet(skb);
+
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ dev->stats.tx_errors++;
+ goto out;
+ }
+
+ /* Save skb pointer. */
+ greth->tx_skbuff[greth->tx_next] = skb;
+
+ /* Linear buf */
+ if (nr_frags != 0)
+ status = GRETH_TXBD_MORE;
+
+ status |= GRETH_TXBD_CSALL;
+ status |= skb_headlen(skb) & GRETH_BD_LEN;
+ if (greth->tx_next == GRETH_TXBD_NUM_MASK)
+ status |= GRETH_BD_WR;
+
+
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, status);
+ dma_addr = dma_map_single(greth->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
+ goto map_error;
+
+ greth_write_bd(&bdp->addr, dma_addr);
+
+ curr_tx = NEXT_TX(greth->tx_next);
+
+ /* Frags */
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ greth->tx_skbuff[curr_tx] = NULL;
+ bdp = greth->tx_bd_base + curr_tx;
+
+ status = GRETH_TXBD_CSALL;
+ status |= frag->size & GRETH_BD_LEN;
+
+ /* Wrap around descriptor ring */
+ if (curr_tx == GRETH_TXBD_NUM_MASK)
+ status |= GRETH_BD_WR;
+
+ /* More fragments left */
+ if (i < nr_frags - 1)
+ status |= GRETH_TXBD_MORE;
+
+ /* ... last fragment, check if out of descriptors */
+ else if (greth->tx_free - nr_frags - 1 < (MAX_SKB_FRAGS + 1)) {
+
+ /* Enable interrupts and stop queue */
+ status |= GRETH_BD_IE;
+ netif_stop_queue(dev);
+ }
+
+ greth_write_bd(&bdp->stat, status);
+
+ dma_addr = dma_map_page(greth->dev,
+ frag->page,
+ frag->page_offset,
+ frag->size,
+ DMA_TO_DEVICE);
+
+ if (unlikely(dma_mapping_error(greth->dev, dma_addr)))
+ goto frag_map_error;
+
+ greth_write_bd(&bdp->addr, dma_addr);
+
+ curr_tx = NEXT_TX(curr_tx);
+ }
+
+ wmb();
+
+ /* Enable the descriptors that we configured ... */
+ for (i = 0; i < nr_frags + 1; i++) {
+ bdp = greth->tx_bd_base + greth->tx_next;
+ greth_write_bd(&bdp->stat, greth_read_bd(&bdp->stat) | GRETH_BD_EN);
+ greth->tx_next = NEXT_TX(greth->tx_next);
+ greth->tx_free--;
+ }
+
+ greth_enable_tx(greth);
+
+ return NETDEV_TX_OK;
+
+frag_map_error:
+ /* Unmap SKB mappings that succeeded */
+ for (i = 0; greth->tx_next + i != curr_tx; i++) {
+ bdp = greth->tx_bd_base + greth->tx_next + i;
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ greth_read_bd(&bdp->stat) & GRETH_BD_LEN,
+ DMA_TO_DEVICE);
+ }
+map_error:
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create TX DMA mapping\n");
+ dev_kfree_skb(skb);
+out:
+ return err;
+}
+
+
+static irqreturn_t greth_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct greth_private *greth;
+ u32 status;
+ irqreturn_t retval = IRQ_NONE;
+
+ greth = netdev_priv(dev);
+
+ spin_lock(&greth->devlock);
+
+ /* Get the interrupt events that caused us to be here. */
+ status = GRETH_REGLOAD(greth->regs->status);
+
+ /* Handle rx and tx interrupts through poll */
+ if (status & (GRETH_INT_RX | GRETH_INT_TX)) {
+
+ /* Clear interrupt status */
+ GRETH_REGORIN(greth->regs->status,
+ status & (GRETH_INT_RX | GRETH_INT_TX));
+
+ retval = IRQ_HANDLED;
+
+ /* Disable interrupts and schedule poll() */
+ greth_disable_irqs(greth);
+ napi_schedule(&greth->napi);
+ }
+
+ mmiowb();
+ spin_unlock(&greth->devlock);
+
+ return retval;
+}
+
+static void greth_clean_tx(struct net_device *dev)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ u32 stat;
+
+ greth = netdev_priv(dev);
+
+ while (1) {
+ bdp = greth->tx_bd_base + greth->tx_last;
+ stat = greth_read_bd(&bdp->stat);
+
+ if (unlikely(stat & GRETH_BD_EN))
+ break;
+
+ if (greth->tx_free == GRETH_TXBD_NUM)
+ break;
+
+ /* Check status for errors */
+ if (unlikely(stat & GRETH_TXBD_STATUS)) {
+ dev->stats.tx_errors++;
+ if (stat & GRETH_TXBD_ERR_AL)
+ dev->stats.tx_aborted_errors++;
+ if (stat & GRETH_TXBD_ERR_UE)
+ dev->stats.tx_fifo_errors++;
+ }
+ dev->stats.tx_packets++;
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ greth->tx_free++;
+ }
+
+ if (greth->tx_free > 0) {
+ netif_wake_queue(dev);
+ }
+
+}
+
+static inline void greth_update_tx_stats(struct net_device *dev, u32 stat)
+{
+ /* Check status for errors */
+ if (unlikely(stat & GRETH_TXBD_STATUS)) {
+ dev->stats.tx_errors++;
+ if (stat & GRETH_TXBD_ERR_AL)
+ dev->stats.tx_aborted_errors++;
+ if (stat & GRETH_TXBD_ERR_UE)
+ dev->stats.tx_fifo_errors++;
+ if (stat & GRETH_TXBD_ERR_LC)
+ dev->stats.tx_aborted_errors++;
+ }
+ dev->stats.tx_packets++;
+}
+
+static void greth_clean_tx_gbit(struct net_device *dev)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp, *bdp_last_frag;
+ struct sk_buff *skb;
+ u32 stat;
+ int nr_frags, i;
+
+ greth = netdev_priv(dev);
+
+ while (greth->tx_free < GRETH_TXBD_NUM) {
+
+ skb = greth->tx_skbuff[greth->tx_last];
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ /* We only clean fully completed SKBs */
+ bdp_last_frag = greth->tx_bd_base + SKIP_TX(greth->tx_last, nr_frags);
+ stat = bdp_last_frag->stat;
+
+ if (stat & GRETH_BD_EN)
+ break;
+
+ greth->tx_skbuff[greth->tx_last] = NULL;
+
+ greth_update_tx_stats(dev, stat);
+
+ bdp = greth->tx_bd_base + greth->tx_last;
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ skb_headlen(skb),
+ DMA_TO_DEVICE);
+
+ for (i = 0; i < nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ bdp = greth->tx_bd_base + greth->tx_last;
+
+ dma_unmap_page(greth->dev,
+ greth_read_bd(&bdp->addr),
+ frag->size,
+ DMA_TO_DEVICE);
+
+ greth->tx_last = NEXT_TX(greth->tx_last);
+ }
+ greth->tx_free += nr_frags+1;
+ dev_kfree_skb(skb);
+ }
+ if (greth->tx_free > (MAX_SKB_FRAGS + 1)) {
+ netif_wake_queue(dev);
+ }
+}
+
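+/*
+ * Return 1 when the current RX descriptor has been handed back to the CPU,
+ * i.e. a received frame is waiting to be processed, otherwise 0.
+ */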
+static int greth_pending_packets(struct greth_private *greth)
+{
+ struct greth_bd *bdp;
+ u32 status;
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ status = greth_read_bd(&bdp->stat);
+ if (status & GRETH_BD_EN)
+ return 0;
+ else
+ return 1;
+}
+
+static int greth_rx(struct net_device *dev, int limit)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ struct sk_buff *skb;
+ int pkt_len;
+ int bad, count;
+ u32 status, dma_addr;
+
+ greth = netdev_priv(dev);
+
+ for (count = 0; count < limit; ++count) {
+
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ status = greth_read_bd(&bdp->stat);
+ dma_addr = greth_read_bd(&bdp->addr);
+ bad = 0;
+
+ if (unlikely(status & GRETH_BD_EN)) {
+ break;
+ }
+
+ /* Check status for errors. */
+ if (unlikely(status & GRETH_RXBD_STATUS)) {
+ if (status & GRETH_RXBD_ERR_FT) {
+ dev->stats.rx_length_errors++;
+ bad = 1;
+ }
+ if (status & (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE)) {
+ dev->stats.rx_frame_errors++;
+ bad = 1;
+ }
+ if (status & GRETH_RXBD_ERR_CRC) {
+ dev->stats.rx_crc_errors++;
+ bad = 1;
+ }
+ }
+ if (unlikely(bad)) {
+ dev->stats.rx_errors++;
+
+ } else {
+
+ pkt_len = status & GRETH_BD_LEN;
+
+ skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
+
+ if (unlikely(skb == NULL)) {
+
+ if (net_ratelimit())
+ dev_warn(&dev->dev, "low on memory - " "packet dropped\n");
+
+ dev->stats.rx_dropped++;
+
+ } else {
+ skb_reserve(skb, NET_IP_ALIGN);
+ skb->dev = dev;
+
+ dma_sync_single_for_cpu(greth->dev,
+ dma_addr,
+ pkt_len,
+ DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(greth))
+ greth_print_rx_packet(phys_to_virt(dma_addr), pkt_len);
+
+ memcpy(skb_put(skb, pkt_len), phys_to_virt(dma_addr), pkt_len);
+
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ netif_receive_skb(skb);
+ }
+ }
+
+ status = GRETH_BD_EN | GRETH_BD_IE;
+ if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ wmb();
+ greth_write_bd(&bdp->stat, status);
+
+ dma_sync_single_for_device(greth->dev, dma_addr, MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+
+ greth_enable_rx(greth);
+
+ greth->rx_cur = NEXT_RX(greth->rx_cur);
+ }
+
+ return count;
+}
+
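+/*
+ * The hardware checksum result is only trusted when the frame is not an IP
+ * fragment and none of the IP/UDP/TCP checksum error bits are set.
+ */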
+static inline int hw_checksummed(u32 status)
+{
+
+ if (status & GRETH_RXBD_IP_FRAG)
+ return 0;
+
+ if (status & GRETH_RXBD_IP && status & GRETH_RXBD_IP_CSERR)
+ return 0;
+
+ if (status & GRETH_RXBD_UDP && status & GRETH_RXBD_UDP_CSERR)
+ return 0;
+
+ if (status & GRETH_RXBD_TCP && status & GRETH_RXBD_TCP_CSERR)
+ return 0;
+
+ return 1;
+}
+
+static int greth_rx_gbit(struct net_device *dev, int limit)
+{
+ struct greth_private *greth;
+ struct greth_bd *bdp;
+ struct sk_buff *skb, *newskb;
+ int pkt_len;
+ int bad, count = 0;
+ u32 status, dma_addr;
+
+ greth = netdev_priv(dev);
+
+ for (count = 0; count < limit; ++count) {
+
+ bdp = greth->rx_bd_base + greth->rx_cur;
+ skb = greth->rx_skbuff[greth->rx_cur];
+ status = greth_read_bd(&bdp->stat);
+ bad = 0;
+
+ if (status & GRETH_BD_EN)
+ break;
+
+ /* Check status for errors. */
+ if (unlikely(status & GRETH_RXBD_STATUS)) {
+
+ if (status & GRETH_RXBD_ERR_FT) {
+ dev->stats.rx_length_errors++;
+ bad = 1;
+ } else if (status &
+ (GRETH_RXBD_ERR_AE | GRETH_RXBD_ERR_OE | GRETH_RXBD_ERR_LE)) {
+ dev->stats.rx_frame_errors++;
+ bad = 1;
+ } else if (status & GRETH_RXBD_ERR_CRC) {
+ dev->stats.rx_crc_errors++;
+ bad = 1;
+ }
+ }
+
+ /* Allocate new skb to replace current */
+ newskb = netdev_alloc_skb(dev, MAX_FRAME_SIZE + NET_IP_ALIGN);
+
+ if (!bad && newskb) {
+ skb_reserve(newskb, NET_IP_ALIGN);
+
+ dma_addr = dma_map_single(greth->dev,
+ newskb->data,
+ MAX_FRAME_SIZE + NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (!dma_mapping_error(greth->dev, dma_addr)) {
+ /* Process the incoming frame. */
+ pkt_len = status & GRETH_BD_LEN;
+
+ dma_unmap_single(greth->dev,
+ greth_read_bd(&bdp->addr),
+ MAX_FRAME_SIZE + NET_IP_ALIGN,
+ DMA_FROM_DEVICE);
+
+ if (netif_msg_pktdata(greth))
+ greth_print_rx_packet(phys_to_virt(greth_read_bd(&bdp->addr)), pkt_len);
+
+ skb_put(skb, pkt_len);
+
+ if (greth->flags & GRETH_FLAG_RX_CSUM && hw_checksummed(status))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb->dev = dev;
+ skb->protocol = eth_type_trans(skb, dev);
+ dev->stats.rx_packets++;
+ netif_receive_skb(skb);
+
+ greth->rx_skbuff[greth->rx_cur] = newskb;
+ greth_write_bd(&bdp->addr, dma_addr);
+ } else {
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not create DMA mapping, dropping packet\n");
+ dev_kfree_skb(newskb);
+ dev->stats.rx_dropped++;
+ }
+ } else {
+ if (net_ratelimit())
+ dev_warn(greth->dev, "Could not allocate SKB, dropping packet\n");
+ dev->stats.rx_dropped++;
+ }
+
+ status = GRETH_BD_EN | GRETH_BD_IE;
+ if (greth->rx_cur == GRETH_RXBD_NUM_MASK) {
+ status |= GRETH_BD_WR;
+ }
+
+ wmb();
+ greth_write_bd(&bdp->stat, status);
+ greth_enable_rx(greth);
+ greth->rx_cur = NEXT_RX(greth->rx_cur);
+ }
+
+ return count;
+
+}
+
+static int greth_poll(struct napi_struct *napi, int budget)
+{
+ struct greth_private *greth;
+ int work_done = 0;
+ greth = container_of(napi, struct greth_private, napi);
+
+ if (greth->gbit_mac) {
+ greth_clean_tx_gbit(greth->netdev);
+ } else {
+ greth_clean_tx(greth->netdev);
+ }
+
+restart_poll:
+ if (greth->gbit_mac) {
+ work_done += greth_rx_gbit(greth->netdev, budget - work_done);
+ } else {
+ work_done += greth_rx(greth->netdev, budget - work_done);
+ }
+
+ if (work_done < budget) {
+
+ napi_complete(napi);
+
+ if (greth_pending_packets(greth)) {
+ napi_reschedule(napi);
+ goto restart_poll;
+ }
+ }
+
+ greth_enable_irqs(greth);
+ return work_done;
+}
+
+static int greth_set_mac_add(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct greth_private *greth;
+ struct greth_regs *regs;
+
+ greth = netdev_priv(dev);
+ regs = (struct greth_regs *) greth->regs;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+ GRETH_REGSAVE(regs->esa_msb, addr->sa_data[0] << 8 | addr->sa_data[1]);
+	GRETH_REGSAVE(regs->esa_lsb,
+		      addr->sa_data[2] << 24 | addr->sa_data[3] << 16 |
+		      addr->sa_data[4] << 8 | addr->sa_data[5]);
+ return 0;
+}
+
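+/*
+ * Six bits of the Ethernet CRC of the address select one of the 64 bits in
+ * the hardware multicast hash filter.
+ */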
+static u32 greth_hash_get_index(__u8 *addr)
+{
+ return (ether_crc(6, addr)) & 0x3F;
+}
+
+static void greth_set_hash_filter(struct net_device *dev)
+{
+ struct dev_mc_list *curr;
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_regs *regs = (struct greth_regs *) greth->regs;
+ u32 mc_filter[2];
+ unsigned int bitnr;
+
+ mc_filter[0] = mc_filter[1] = 0;
+
+ netdev_for_each_mc_addr(curr, dev) {
+ bitnr = greth_hash_get_index(curr->dmi_addr);
+ mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
+ }
+
+ GRETH_REGSAVE(regs->hash_msb, mc_filter[1]);
+ GRETH_REGSAVE(regs->hash_lsb, mc_filter[0]);
+}
+
+static void greth_set_multicast_list(struct net_device *dev)
+{
+ int cfg;
+ struct greth_private *greth = netdev_priv(dev);
+ struct greth_regs *regs = (struct greth_regs *) greth->regs;
+
+ cfg = GRETH_REGLOAD(regs->control);
+ if (dev->flags & IFF_PROMISC)
+ cfg |= GRETH_CTRL_PR;
+ else
+ cfg &= ~GRETH_CTRL_PR;
+
+ if (greth->multicast) {
+ if (dev->flags & IFF_ALLMULTI) {
+ GRETH_REGSAVE(regs->hash_msb, -1);
+ GRETH_REGSAVE(regs->hash_lsb, -1);
+ cfg |= GRETH_CTRL_MCEN;
+ GRETH_REGSAVE(regs->control, cfg);
+ return;
+ }
+
+ if (netdev_mc_empty(dev)) {
+ cfg &= ~GRETH_CTRL_MCEN;
+ GRETH_REGSAVE(regs->control, cfg);
+ return;
+ }
+
+ /* Setup multicast filter */
+ greth_set_hash_filter(dev);
+ cfg |= GRETH_CTRL_MCEN;
+ }
+ GRETH_REGSAVE(regs->control, cfg);
+}
+
+static u32 greth_get_msglevel(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ return greth->msg_enable;
+}
+
+static void greth_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ greth->msg_enable = value;
+}
+static int greth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = greth->phy;
+
+ if (!phy)
+ return -ENODEV;
+
+ return phy_ethtool_gset(phy, cmd);
+}
+
+static int greth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = greth->phy;
+
+ if (!phy)
+ return -ENODEV;
+
+ return phy_ethtool_sset(phy, cmd);
+}
+
+static int greth_get_regs_len(struct net_device *dev)
+{
+ return sizeof(struct greth_regs);
+}
+
+static void greth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ strncpy(info->driver, dev_driver_string(greth->dev), 32);
+ strncpy(info->version, "revision: 1.0", 32);
+ strncpy(info->bus_info, greth->dev->bus->name, 32);
+ strncpy(info->fw_version, "N/A", 32);
+ info->eedump_len = 0;
+ info->regdump_len = sizeof(struct greth_regs);
+}
+
+static void greth_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ int i;
+ struct greth_private *greth = netdev_priv(dev);
+ u32 __iomem *greth_regs = (u32 __iomem *) greth->regs;
+ u32 *buff = p;
+
+ for (i = 0; i < sizeof(struct greth_regs) / sizeof(u32); i++)
+ buff[i] = greth_read_bd(&greth_regs[i]);
+}
+
+static u32 greth_get_rx_csum(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ return (greth->flags & GRETH_FLAG_RX_CSUM) != 0;
+}
+
+static int greth_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct greth_private *greth = netdev_priv(dev);
+
+ spin_lock_bh(&greth->devlock);
+
+ if (data)
+ greth->flags |= GRETH_FLAG_RX_CSUM;
+ else
+ greth->flags &= ~GRETH_FLAG_RX_CSUM;
+
+ spin_unlock_bh(&greth->devlock);
+
+ return 0;
+}
+
+static u32 greth_get_tx_csum(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_IP_CSUM) != 0;
+}
+
+static int greth_set_tx_csum(struct net_device *dev, u32 data)
+{
+ netif_tx_lock_bh(dev);
+ ethtool_op_set_tx_csum(dev, data);
+ netif_tx_unlock_bh(dev);
+ return 0;
+}
+
+static const struct ethtool_ops greth_ethtool_ops = {
+ .get_msglevel = greth_get_msglevel,
+ .set_msglevel = greth_set_msglevel,
+ .get_settings = greth_get_settings,
+ .set_settings = greth_set_settings,
+ .get_drvinfo = greth_get_drvinfo,
+ .get_regs_len = greth_get_regs_len,
+ .get_regs = greth_get_regs,
+ .get_rx_csum = greth_get_rx_csum,
+ .set_rx_csum = greth_set_rx_csum,
+ .get_tx_csum = greth_get_tx_csum,
+ .set_tx_csum = greth_set_tx_csum,
+ .get_link = ethtool_op_get_link,
+};
+
+static struct net_device_ops greth_netdev_ops = {
+ .ndo_open = greth_open,
+ .ndo_stop = greth_close,
+ .ndo_start_xmit = greth_start_xmit,
+ .ndo_set_mac_address = greth_set_mac_add,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
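+/*
+ * Busy-wait until the MDIO interface is idle; returns 0 if it is still busy
+ * after roughly 40 ms, 1 otherwise.
+ */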
+static inline int wait_for_mdio(struct greth_private *greth)
+{
+ unsigned long timeout = jiffies + 4*HZ/100;
+ while (GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_BUSY) {
+ if (time_after(jiffies, timeout))
+ return 0;
+ }
+ return 1;
+}
+
+static int greth_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct greth_private *greth = bus->priv;
+ int data;
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ GRETH_REGSAVE(greth->regs->mdio, ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 2);
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ if (!(GRETH_REGLOAD(greth->regs->mdio) & GRETH_MII_NVALID)) {
+ data = (GRETH_REGLOAD(greth->regs->mdio) >> 16) & 0xFFFF;
+ return data;
+
+ } else {
+ return -1;
+ }
+}
+
+static int greth_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct greth_private *greth = bus->priv;
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ GRETH_REGSAVE(greth->regs->mdio,
+ ((val & 0xFFFF) << 16) | ((phy & 0x1F) << 11) | ((reg & 0x1F) << 6) | 1);
+
+ if (!wait_for_mdio(greth))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int greth_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+static void greth_link_change(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phydev = greth->phy;
+ unsigned long flags;
+
+ int status_change = 0;
+
+ spin_lock_irqsave(&greth->devlock, flags);
+
+ if (phydev->link) {
+
+ if ((greth->speed != phydev->speed) || (greth->duplex != phydev->duplex)) {
+
+ GRETH_REGANDIN(greth->regs->control,
+ ~(GRETH_CTRL_FD | GRETH_CTRL_SP | GRETH_CTRL_GB));
+
+ if (phydev->duplex)
+ GRETH_REGORIN(greth->regs->control, GRETH_CTRL_FD);
+
+ if (phydev->speed == SPEED_100) {
+
+ GRETH_REGORIN(greth->regs->control, GRETH_CTRL_SP);
+ }
+
+ else if (phydev->speed == SPEED_1000)
+ GRETH_REGORIN(greth->regs->control, GRETH_CTRL_GB);
+
+ greth->speed = phydev->speed;
+ greth->duplex = phydev->duplex;
+ status_change = 1;
+ }
+ }
+
+ if (phydev->link != greth->link) {
+ if (!phydev->link) {
+ greth->speed = 0;
+ greth->duplex = -1;
+ }
+ greth->link = phydev->link;
+
+ status_change = 1;
+ }
+
+ spin_unlock_irqrestore(&greth->devlock, flags);
+
+ if (status_change) {
+ if (phydev->link)
+ pr_debug("%s: link up (%d/%s)\n",
+ dev->name, phydev->speed,
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ else
+ pr_debug("%s: link down\n", dev->name);
+ }
+}
+
+static int greth_mdio_probe(struct net_device *dev)
+{
+ struct greth_private *greth = netdev_priv(dev);
+ struct phy_device *phy = NULL;
+ int ret;
+
+ /* Find the first PHY */
+ phy = phy_find_first(greth->mdio);
+
+ if (!phy) {
+ if (netif_msg_probe(greth))
+ dev_err(&dev->dev, "no PHY found\n");
+ return -ENXIO;
+ }
+
+ ret = phy_connect_direct(dev, phy, &greth_link_change,
+ 0, greth->gbit_mac ?
+ PHY_INTERFACE_MODE_GMII :
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
+ if (netif_msg_ifup(greth))
+ dev_err(&dev->dev, "could not attach to PHY\n");
+ return ret;
+ }
+
+ if (greth->gbit_mac)
+ phy->supported &= PHY_GBIT_FEATURES;
+ else
+ phy->supported &= PHY_BASIC_FEATURES;
+
+ phy->advertising = phy->supported;
+
+ greth->link = 0;
+ greth->speed = 0;
+ greth->duplex = -1;
+ greth->phy = phy;
+
+ return 0;
+}
+
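+/*
+ * Poll the PHY's BMSR register to check whether autonegotiation has
+ * completed; returns a negative errno if the read fails.
+ */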
+static inline int phy_aneg_done(struct phy_device *phydev)
+{
+ int retval;
+
+ retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+
+static int greth_mdio_init(struct greth_private *greth)
+{
+ int ret, phy;
+ unsigned long timeout;
+
+ greth->mdio = mdiobus_alloc();
+ if (!greth->mdio) {
+ return -ENOMEM;
+ }
+
+ greth->mdio->name = "greth-mdio";
+ snprintf(greth->mdio->id, MII_BUS_ID_SIZE, "%s-%d", greth->mdio->name, greth->irq);
+ greth->mdio->read = greth_mdio_read;
+ greth->mdio->write = greth_mdio_write;
+ greth->mdio->reset = greth_mdio_reset;
+ greth->mdio->priv = greth;
+
+ greth->mdio->irq = greth->mdio_irqs;
+
+ for (phy = 0; phy < PHY_MAX_ADDR; phy++)
+ greth->mdio->irq[phy] = PHY_POLL;
+
+ ret = mdiobus_register(greth->mdio);
+ if (ret) {
+ goto error;
+ }
+
+ ret = greth_mdio_probe(greth->netdev);
+ if (ret) {
+ if (netif_msg_probe(greth))
+ dev_err(&greth->netdev->dev, "failed to probe MDIO bus\n");
+ goto unreg_mdio;
+ }
+
+ phy_start(greth->phy);
+
+	/* If the Ethernet debug link (EDCL) is in use, make autoneg happen right away */
+ if (greth->edcl && greth_edcl == 1) {
+ phy_start_aneg(greth->phy);
+ timeout = jiffies + 6*HZ;
+ while (!phy_aneg_done(greth->phy) && time_before(jiffies, timeout)) {
+ }
+ genphy_read_status(greth->phy);
+ greth_link_change(greth->netdev);
+ }
+
+ return 0;
+
+unreg_mdio:
+ mdiobus_unregister(greth->mdio);
+error:
+ mdiobus_free(greth->mdio);
+ return ret;
+}
+
+/* Initialize the GRETH MAC */
+static int __devinit greth_of_probe(struct of_device *ofdev, const struct of_device_id *match)
+{
+ struct net_device *dev;
+ struct greth_private *greth;
+ struct greth_regs *regs;
+
+ int i;
+ int err;
+ int tmp;
+ unsigned long timeout;
+
+ dev = alloc_etherdev(sizeof(struct greth_private));
+
+ if (dev == NULL)
+ return -ENOMEM;
+
+ greth = netdev_priv(dev);
+ greth->netdev = dev;
+ greth->dev = &ofdev->dev;
+
+ if (greth_debug > 0)
+ greth->msg_enable = greth_debug;
+ else
+ greth->msg_enable = GRETH_DEF_MSG_ENABLE;
+
+ spin_lock_init(&greth->devlock);
+
+ greth->regs = of_ioremap(&ofdev->resource[0], 0,
+ resource_size(&ofdev->resource[0]),
+ "grlib-greth regs");
+
+ if (greth->regs == NULL) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "ioremap failure.\n");
+ err = -EIO;
+ goto error1;
+ }
+
+ regs = (struct greth_regs *) greth->regs;
+ greth->irq = ofdev->irqs[0];
+
+ dev_set_drvdata(greth->dev, dev);
+ SET_NETDEV_DEV(dev, greth->dev);
+
+ if (netif_msg_probe(greth))
+ dev_dbg(greth->dev, "reseting controller.\n");
+
+ /* Reset the controller. */
+ GRETH_REGSAVE(regs->control, GRETH_RESET);
+
+ /* Wait for MAC to reset itself */
+ timeout = jiffies + HZ/100;
+ while (GRETH_REGLOAD(regs->control) & GRETH_RESET) {
+ if (time_after(jiffies, timeout)) {
+ err = -EIO;
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "timeout when waiting for reset.\n");
+ goto error2;
+ }
+ }
+
+ /* Get default PHY address */
+ greth->phyaddr = (GRETH_REGLOAD(regs->mdio) >> 11) & 0x1F;
+
+ /* Check if we have GBIT capable MAC */
+ tmp = GRETH_REGLOAD(regs->control);
+ greth->gbit_mac = (tmp >> 27) & 1;
+
+ /* Check for multicast capability */
+ greth->multicast = (tmp >> 25) & 1;
+
+ greth->edcl = (tmp >> 31) & 1;
+
+ /* If we have EDCL we disable the EDCL speed-duplex FSM so
+ * it doesn't interfere with the software */
+ if (greth->edcl != 0)
+ GRETH_REGORIN(regs->control, GRETH_CTRL_DISDUPLEX);
+
+ /* Check if MAC can handle MDIO interrupts */
+ greth->mdio_int_en = (tmp >> 26) & 1;
+
+ err = greth_mdio_init(greth);
+ if (err) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "failed to register MDIO bus\n");
+ goto error2;
+ }
+
+ /* Allocate TX descriptor ring in coherent memory */
+ greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
+ 1024,
+ &greth->tx_bd_base_phys,
+ GFP_KERNEL);
+
+ if (!greth->tx_bd_base) {
+ if (netif_msg_probe(greth))
+ dev_err(&dev->dev, "could not allocate descriptor memory.\n");
+ err = -ENOMEM;
+ goto error3;
+ }
+
+ memset(greth->tx_bd_base, 0, 1024);
+
+ /* Allocate RX descriptor ring in coherent memory */
+ greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev,
+ 1024,
+ &greth->rx_bd_base_phys,
+ GFP_KERNEL);
+
+ if (!greth->rx_bd_base) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "could not allocate descriptor memory.\n");
+ err = -ENOMEM;
+ goto error4;
+ }
+
+ memset(greth->rx_bd_base, 0, 1024);
+
+ /* Get MAC address from: module param, OF property or ID prom */
+ for (i = 0; i < 6; i++) {
+ if (macaddr[i] != 0)
+ break;
+ }
+ if (i == 6) {
+ const unsigned char *addr;
+ int len;
+ addr = of_get_property(ofdev->node, "local-mac-address", &len);
+ if (addr != NULL && len == 6) {
+ for (i = 0; i < 6; i++)
+ macaddr[i] = (unsigned int) addr[i];
+ } else {
+#ifdef CONFIG_SPARC
+ for (i = 0; i < 6; i++)
+ macaddr[i] = (unsigned int) idprom->id_ethaddr[i];
+#endif
+ }
+ }
+
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = macaddr[i];
+
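+	/* Bump the module parameter so the next device probed gets a distinct
+	 * MAC address when several controllers are present. */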
+ macaddr[5]++;
+
+ if (!is_valid_ether_addr(&dev->dev_addr[0])) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "no valid ethernet address, aborting.\n");
+ err = -EINVAL;
+ goto error5;
+ }
+
+ GRETH_REGSAVE(regs->esa_msb, dev->dev_addr[0] << 8 | dev->dev_addr[1]);
+ GRETH_REGSAVE(regs->esa_lsb, dev->dev_addr[2] << 24 | dev->dev_addr[3] << 16 |
+ dev->dev_addr[4] << 8 | dev->dev_addr[5]);
+
+ /* Clear all pending interrupts except PHY irq */
+ GRETH_REGSAVE(regs->status, 0xFF);
+
+ if (greth->gbit_mac) {
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HIGHDMA;
+ greth_netdev_ops.ndo_start_xmit = greth_start_xmit_gbit;
+ greth->flags = GRETH_FLAG_RX_CSUM;
+ }
+
+ if (greth->multicast) {
+ greth_netdev_ops.ndo_set_multicast_list = greth_set_multicast_list;
+ dev->flags |= IFF_MULTICAST;
+ } else {
+ dev->flags &= ~IFF_MULTICAST;
+ }
+
+ dev->netdev_ops = &greth_netdev_ops;
+ dev->ethtool_ops = &greth_ethtool_ops;
+
+ if (register_netdev(dev)) {
+ if (netif_msg_probe(greth))
+ dev_err(greth->dev, "netdevice registration failed.\n");
+ err = -ENOMEM;
+ goto error5;
+ }
+
+ /* setup NAPI */
+ memset(&greth->napi, 0, sizeof(greth->napi));
+ netif_napi_add(dev, &greth->napi, greth_poll, 64);
+
+ return 0;
+
+error5:
+ dma_free_coherent(greth->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
+error4:
+ dma_free_coherent(greth->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
+error3:
+ mdiobus_unregister(greth->mdio);
+error2:
+ of_iounmap(&ofdev->resource[0], greth->regs, resource_size(&ofdev->resource[0]));
+error1:
+ free_netdev(dev);
+ return err;
+}
+
+static int __devexit greth_of_remove(struct of_device *of_dev)
+{
+ struct net_device *ndev = dev_get_drvdata(&of_dev->dev);
+ struct greth_private *greth = netdev_priv(ndev);
+
+ /* Free descriptor areas */
+ dma_free_coherent(&of_dev->dev, 1024, greth->rx_bd_base, greth->rx_bd_base_phys);
+
+ dma_free_coherent(&of_dev->dev, 1024, greth->tx_bd_base, greth->tx_bd_base_phys);
+
+ dev_set_drvdata(&of_dev->dev, NULL);
+
+ if (greth->phy)
+ phy_stop(greth->phy);
+ mdiobus_unregister(greth->mdio);
+
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+
+ of_iounmap(&of_dev->resource[0], greth->regs, resource_size(&of_dev->resource[0]));
+
+ return 0;
+}
+
+static struct of_device_id greth_of_match[] = {
+ {
+ .name = "GAISLER_ETHMAC",
+ },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, greth_of_match);
+
+static struct of_platform_driver greth_of_driver = {
+ .name = "grlib-greth",
+ .match_table = greth_of_match,
+ .probe = greth_of_probe,
+ .remove = __devexit_p(greth_of_remove),
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "grlib-greth",
+ },
+};
+
+static int __init greth_init(void)
+{
+ return of_register_platform_driver(&greth_of_driver);
+}
+
+static void __exit greth_cleanup(void)
+{
+ of_unregister_platform_driver(&greth_of_driver);
+}
+
+module_init(greth_init);
+module_exit(greth_cleanup);
+
+MODULE_AUTHOR("Aeroflex Gaisler AB.");
+MODULE_DESCRIPTION("Aeroflex Gaisler Ethernet MAC driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/greth.h b/drivers/net/greth.h
new file mode 100644
index 00000000000..973388d6abc
--- /dev/null
+++ b/drivers/net/greth.h
@@ -0,0 +1,143 @@
+#ifndef GRETH_H
+#define GRETH_H
+
+#include <linux/phy.h>
+
+/* Register bits and masks */
+#define GRETH_RESET 0x40
+#define GRETH_MII_BUSY 0x8
+#define GRETH_MII_NVALID 0x10
+
+#define GRETH_CTRL_FD 0x10
+#define GRETH_CTRL_PR 0x20
+#define GRETH_CTRL_SP 0x80
+#define GRETH_CTRL_GB 0x100
+#define GRETH_CTRL_PSTATIEN 0x400
+#define GRETH_CTRL_MCEN 0x800
+#define GRETH_CTRL_DISDUPLEX 0x1000
+#define GRETH_STATUS_PHYSTAT 0x100
+
+#define GRETH_BD_EN 0x800
+#define GRETH_BD_WR 0x1000
+#define GRETH_BD_IE 0x2000
+#define GRETH_BD_LEN 0x7FF
+
+#define GRETH_TXEN 0x1
+#define GRETH_INT_TX 0x8
+#define GRETH_TXI 0x4
+#define GRETH_TXBD_STATUS 0x0001C000
+#define GRETH_TXBD_MORE 0x20000
+#define GRETH_TXBD_IPCS 0x40000
+#define GRETH_TXBD_TCPCS 0x80000
+#define GRETH_TXBD_UDPCS 0x100000
+#define GRETH_TXBD_CSALL (GRETH_TXBD_IPCS | GRETH_TXBD_TCPCS | GRETH_TXBD_UDPCS)
+#define GRETH_TXBD_ERR_LC 0x10000
+#define GRETH_TXBD_ERR_UE 0x4000
+#define GRETH_TXBD_ERR_AL 0x8000
+
+#define GRETH_INT_RX 0x4
+#define GRETH_RXEN 0x2
+#define GRETH_RXI 0x8
+#define GRETH_RXBD_STATUS 0xFFFFC000
+#define GRETH_RXBD_ERR_AE 0x4000
+#define GRETH_RXBD_ERR_FT 0x8000
+#define GRETH_RXBD_ERR_CRC 0x10000
+#define GRETH_RXBD_ERR_OE 0x20000
+#define GRETH_RXBD_ERR_LE 0x40000
+#define GRETH_RXBD_IP 0x80000
+#define GRETH_RXBD_IP_CSERR 0x100000
+#define GRETH_RXBD_UDP 0x200000
+#define GRETH_RXBD_UDP_CSERR 0x400000
+#define GRETH_RXBD_TCP 0x800000
+#define GRETH_RXBD_TCP_CSERR 0x1000000
+#define GRETH_RXBD_IP_FRAG 0x2000000
+#define GRETH_RXBD_MCAST 0x4000000
+
+/* Descriptor parameters */
+#define GRETH_TXBD_NUM 128
+#define GRETH_TXBD_NUM_MASK (GRETH_TXBD_NUM-1)
+#define GRETH_TX_BUF_SIZE 2048
+#define GRETH_RXBD_NUM 128
+#define GRETH_RXBD_NUM_MASK (GRETH_RXBD_NUM-1)
+#define GRETH_RX_BUF_SIZE 2048
+
+/* Buffers per page */
+#define GRETH_RX_BUF_PPGAE (PAGE_SIZE/GRETH_RX_BUF_SIZE)
+#define GRETH_TX_BUF_PPGAE (PAGE_SIZE/GRETH_TX_BUF_SIZE)
+
+/* How many pages are needed for buffers */
+#define GRETH_RX_BUF_PAGE_NUM (GRETH_RXBD_NUM/GRETH_RX_BUF_PPGAE)
+#define GRETH_TX_BUF_PAGE_NUM (GRETH_TXBD_NUM/GRETH_TX_BUF_PPGAE)
+
+/* Buffer size.
+ * Gbit MAC uses tagged maximum frame size which is 1518 excluding CRC.
+ * Set to 1520 to make all buffers word aligned for non-gbit MAC.
+ */
+#define MAX_FRAME_SIZE 1520
+
+/* Flags */
+#define GRETH_FLAG_RX_CSUM 0x1
+
+/* GRETH APB registers */
+struct greth_regs {
+ u32 control;
+ u32 status;
+ u32 esa_msb;
+ u32 esa_lsb;
+ u32 mdio;
+ u32 tx_desc_p;
+ u32 rx_desc_p;
+ u32 edclip;
+ u32 hash_msb;
+ u32 hash_lsb;
+};
+
+/* GRETH buffer descriptor */
+struct greth_bd {
+ u32 stat;
+ u32 addr;
+};
+
+struct greth_private {
+ struct sk_buff *rx_skbuff[GRETH_RXBD_NUM];
+ struct sk_buff *tx_skbuff[GRETH_TXBD_NUM];
+
+ unsigned char *tx_bufs[GRETH_TXBD_NUM];
+ unsigned char *rx_bufs[GRETH_RXBD_NUM];
+
+ u16 tx_next;
+ u16 tx_last;
+ u16 tx_free;
+ u16 rx_cur;
+
+ struct greth_regs *regs; /* Address of controller registers. */
+ struct greth_bd *rx_bd_base; /* Address of Rx BDs. */
+ struct greth_bd *tx_bd_base; /* Address of Tx BDs. */
+ dma_addr_t rx_bd_base_phys;
+ dma_addr_t tx_bd_base_phys;
+
+ int irq;
+
+ struct device *dev; /* Pointer to of_device->dev */
+ struct net_device *netdev;
+ struct napi_struct napi;
+ spinlock_t devlock;
+
+ struct phy_device *phy;
+ struct mii_bus *mdio;
+ int mdio_irqs[PHY_MAX_ADDR];
+ unsigned int link;
+ unsigned int speed;
+ unsigned int duplex;
+
+ u32 msg_enable;
+ u32 flags;
+
+ u8 phyaddr;
+ u8 multicast;
+ u8 gbit_mac;
+ u8 mdio_int_en;
+ u8 edcl;
+};
+
+#endif
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index ea85075a89a..373546dd083 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1854,17 +1854,18 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
writew(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 63) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 63) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
writew(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the CAM filter. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the CAM filter. */
struct dev_mc_list *mclist;
- int i;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ int i = 0;
+
+ netdev_for_each_mc_addr(mclist, dev) {
writel(*(u32*)(mclist->dmi_addr), ioaddr + 0x100 + i*8);
writel(0x20000 | (*(u16*)&mclist->dmi_addr[4]),
ioaddr + 0x104 + i*8);
+ i++;
}
/* Clear remaining entries. */
for (; i < 64; i++)
@@ -1990,7 +1991,7 @@ static void __devexit hamachi_remove_one (struct pci_dev *pdev)
}
}
-static struct pci_device_id hamachi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hamachi_pci_tbl) = {
{ 0x1318, 0x0911, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
diff --git a/drivers/net/hp100.c b/drivers/net/hp100.c
index 90f890e7c5e..b766a69bf0c 100644
--- a/drivers/net/hp100.c
+++ b/drivers/net/hp100.c
@@ -210,7 +210,7 @@ MODULE_DEVICE_TABLE(eisa, hp100_eisa_tbl);
#endif
#ifdef CONFIG_PCI
-static struct pci_device_id hp100_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(hp100_pci_tbl) = {
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585A, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2585B, PCI_ANY_ID, PCI_ANY_ID,},
{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_J2970A, PCI_ANY_ID, PCI_ANY_ID,},
@@ -2090,7 +2090,7 @@ static void hp100_set_multicast_list(struct net_device *dev)
lp->mac2_mode = HP100_MAC2MODE6; /* promiscuous mode = get all good */
lp->mac1_mode = HP100_MAC1MODE6; /* packets on the net */
memset(&lp->hash_bytes, 0xff, 8);
- } else if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ } else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
lp->mac2_mode = HP100_MAC2MODE5; /* multicast mode = get packets for */
lp->mac1_mode = HP100_MAC1MODE5; /* me, broadcasts and all multicasts */
#ifdef HP100_MULTICAST_FILTER /* doesn't work!!! */
@@ -2098,22 +2098,23 @@ static void hp100_set_multicast_list(struct net_device *dev)
/* set hash filter to receive all multicast packets */
memset(&lp->hash_bytes, 0xff, 8);
} else {
- int i, j, idx;
+ int i, idx;
u_char *addrs;
struct dev_mc_list *dmi;
memset(&lp->hash_bytes, 0x00, 8);
#ifdef HP100_DEBUG
- printk("hp100: %s: computing hash filter - mc_count = %i\n", dev->name, dev->mc_count);
+ printk("hp100: %s: computing hash filter - mc_count = %i\n",
+ dev->name, netdev_mc_count(dev));
#endif
- for (i = 0, dmi = dev->mc_list; i < dev->mc_count; i++, dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
if ((*addrs & 0x01) == 0x01) { /* multicast address? */
#ifdef HP100_DEBUG
printk("hp100: %s: multicast = %pM, ",
dev->name, addrs);
#endif
- for (j = idx = 0; j < 6; j++) {
+ for (i = idx = 0; i < 6; i++) {
idx ^= *addrs++ & 0x3f;
printk(":%02x:", idx);
}
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index fb5e019169e..fb0ac6d7c04 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -391,11 +391,11 @@ static void emac_hash_mc(struct emac_instance *dev)
struct dev_mc_list *dmi;
int i;
- DBG(dev, "hash_mc %d" NL, dev->ndev->mc_count);
+ DBG(dev, "hash_mc %d" NL, netdev_mc_count(dev->ndev));
memset(gaht_temp, 0, sizeof (gaht_temp));
- for (dmi = dev->ndev->mc_list; dmi; dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev->ndev) {
int slot, reg, mask;
DBG2(dev, "mc %pM" NL, dmi->dmi_addr);
@@ -425,9 +425,9 @@ static inline u32 emac_iff2rmr(struct net_device *ndev)
if (ndev->flags & IFF_PROMISC)
r |= EMAC_RMR_PME;
else if (ndev->flags & IFF_ALLMULTI ||
- (ndev->mc_count > EMAC_XAHT_SLOTS(dev)))
+ (netdev_mc_count(ndev) > EMAC_XAHT_SLOTS(dev)))
r |= EMAC_RMR_PMME;
- else if (ndev->mc_count > 0)
+ else if (!netdev_mc_empty(ndev))
r |= EMAC_RMR_MAE;
return r;
diff --git a/drivers/net/ibmlana.c b/drivers/net/ibmlana.c
index 052c74091d9..b5d0f4e973f 100644
--- a/drivers/net/ibmlana.c
+++ b/drivers/net/ibmlana.c
@@ -420,7 +420,7 @@ static void InitBoard(struct net_device *dev)
/* start putting the multicast addresses into the CAM list. Stop if
it is full. */
- for (mcptr = dev->mc_list; mcptr != NULL; mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
putcam(cams, &camcnt, mcptr->dmi_addr);
if (camcnt == 16)
break;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index a86693906ac..f2b93796695 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -1062,7 +1062,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
struct ibmveth_adapter *adapter = netdev_priv(netdev);
unsigned long lpar_rc;
- if((netdev->flags & IFF_PROMISC) || (netdev->mc_count > adapter->mcastFilterSize)) {
+ if ((netdev->flags & IFF_PROMISC) ||
+ (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
IbmVethMcastDisableFiltering,
@@ -1071,8 +1072,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
}
} else {
- struct dev_mc_list *mclist = netdev->mc_list;
- int i;
+ struct dev_mc_list *mclist;
/* clear the filter table & disable filtering */
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastEnableRecv |
@@ -1083,7 +1083,7 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
}
/* add the addresses to the filter table */
- for(i = 0; i < netdev->mc_count; ++i, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, netdev) {
// add the multicast address to the filter table
unsigned long mcast_addr = 0;
memcpy(((char *)&mcast_addr)+2, mclist->dmi_addr, 6);
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index c505b50d1fa..9d7fa2fb85e 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -727,6 +727,34 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
}
/**
+ * igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ * @hw: pointer to the HW structure
+ **/
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+ u32 reg;
+
+
+ if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+ !igb_sgmii_active_82575(hw))
+ return;
+
+ /* Enable PCS to turn on link */
+ reg = rd32(E1000_PCS_CFG0);
+ reg |= E1000_PCS_CFG_PCS_EN;
+ wr32(E1000_PCS_CFG0, reg);
+
+ /* Power up the laser */
+ reg = rd32(E1000_CTRL_EXT);
+ reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+ wr32(E1000_CTRL_EXT, reg);
+
+ /* flush the write to verify completion */
+ wrfl();
+ msleep(1);
+}
+
+/**
* igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
* @hw: pointer to the HW structure
* @speed: stores the current speed
@@ -791,27 +819,12 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
{
u32 reg;
- u16 eeprom_data = 0;
- if (hw->phy.media_type != e1000_media_type_internal_serdes ||
+ if (hw->phy.media_type != e1000_media_type_internal_serdes &&
igb_sgmii_active_82575(hw))
return;
- if (hw->bus.func == E1000_FUNC_0)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
- else if (hw->mac.type == e1000_82580)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
- NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
- &eeprom_data);
- else if (hw->bus.func == E1000_FUNC_1)
- hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
-
- /*
- * If APM is not enabled in the EEPROM and management interface is
- * not enabled, then power down.
- */
- if (!(eeprom_data & E1000_NVM_APME_82575) &&
- !igb_enable_mng_pass_thru(hw)) {
+ if (!igb_enable_mng_pass_thru(hw)) {
/* Disable PCS to turn off link */
reg = rd32(E1000_PCS_CFG0);
reg &= ~E1000_PCS_CFG_PCS_EN;
@@ -826,8 +839,6 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
wrfl();
msleep(1);
}
-
- return;
}
/**
@@ -1183,6 +1194,22 @@ out:
}
/**
+ * igb_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or when wake on lan is not enabled, remove the link.
+ **/
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+ /* If the management interface is not enabled, then power down */
+ if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
+ igb_power_down_phy_copper(hw);
+
+ return;
+}
+
+/**
* igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index d51c9927c81..fbe1c99c193 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -29,6 +29,8 @@
#define _E1000_82575_H_
extern void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+extern void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+extern void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
@@ -219,6 +221,9 @@ struct e1000_adv_tx_context_desc {
#define E1000_VLVF_LVLAN 0x00100000
#define E1000_VLVF_VLANID_ENABLE 0x80000000
+#define E1000_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */
+
#define E1000_IOVCTL 0x05BBC
#define E1000_IOVCTL_REUSE_VFQ 0x00000001
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 6e036ae3138..fe6cf1b696c 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -313,12 +313,6 @@
#define E1000_PBA_34K 0x0022
#define E1000_PBA_64K 0x0040 /* 64KB */
-#define IFS_MAX 80
-#define IFS_MIN 40
-#define IFS_RATIO 4
-#define IFS_STEP 10
-#define MIN_NUM_XMITS 1000
-
/* SW Semaphore Register */
#define E1000_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
#define E1000_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */
@@ -481,6 +475,7 @@
/* PHY Control Register */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index dbaeb5f5e0c..448005276b2 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -339,19 +339,12 @@ struct e1000_mac_info {
enum e1000_mac_type type;
- u32 collision_delta;
u32 ledctl_default;
u32 ledctl_mode1;
u32 ledctl_mode2;
u32 mc_filter_type;
- u32 tx_packet_delta;
u32 txcw;
- u16 current_ifs_val;
- u16 ifs_max_val;
- u16 ifs_min_val;
- u16 ifs_ratio;
- u16 ifs_step_size;
u16 mta_reg_count;
u16 uta_reg_count;
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 2ad358a240b..2a8a886b37e 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1304,76 +1304,6 @@ out:
}
/**
- * igb_reset_adaptive - Reset Adaptive Interframe Spacing
- * @hw: pointer to the HW structure
- *
- * Reset the Adaptive Interframe Spacing throttle to default values.
- **/
-void igb_reset_adaptive(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
-
- if (!mac->adaptive_ifs) {
- hw_dbg("Not in Adaptive IFS mode!\n");
- goto out;
- }
-
- if (!mac->ifs_params_forced) {
- mac->current_ifs_val = 0;
- mac->ifs_min_val = IFS_MIN;
- mac->ifs_max_val = IFS_MAX;
- mac->ifs_step_size = IFS_STEP;
- mac->ifs_ratio = IFS_RATIO;
- }
-
- mac->in_ifs_mode = false;
- wr32(E1000_AIT, 0);
-out:
- return;
-}
-
-/**
- * igb_update_adaptive - Update Adaptive Interframe Spacing
- * @hw: pointer to the HW structure
- *
- * Update the Adaptive Interframe Spacing Throttle value based on the
- * time between transmitted packets and time between collisions.
- **/
-void igb_update_adaptive(struct e1000_hw *hw)
-{
- struct e1000_mac_info *mac = &hw->mac;
-
- if (!mac->adaptive_ifs) {
- hw_dbg("Not in Adaptive IFS mode!\n");
- goto out;
- }
-
- if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
- if (mac->tx_packet_delta > MIN_NUM_XMITS) {
- mac->in_ifs_mode = true;
- if (mac->current_ifs_val < mac->ifs_max_val) {
- if (!mac->current_ifs_val)
- mac->current_ifs_val = mac->ifs_min_val;
- else
- mac->current_ifs_val +=
- mac->ifs_step_size;
- wr32(E1000_AIT,
- mac->current_ifs_val);
- }
- }
- } else {
- if (mac->in_ifs_mode &&
- (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
- mac->current_ifs_val = 0;
- mac->in_ifs_mode = false;
- wr32(E1000_AIT, 0);
- }
- }
-out:
- return;
-}
-
-/**
* igb_validate_mdi_setting - Verify MDI/MDIx settings
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/igb/e1000_mac.h b/drivers/net/igb/e1000_mac.h
index bca17d88241..601be99711c 100644
--- a/drivers/net/igb/e1000_mac.h
+++ b/drivers/net/igb/e1000_mac.h
@@ -67,8 +67,6 @@ void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
void igb_put_hw_semaphore(struct e1000_hw *hw);
void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
s32 igb_check_alt_mac_addr(struct e1000_hw *hw);
-void igb_reset_adaptive(struct e1000_hw *hw);
-void igb_update_adaptive(struct e1000_hw *hw);
bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index 3670a66401b..cf1f3230092 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1931,6 +1931,41 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
}
/**
+ * igb_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, restore the link to previous settings.
+ **/
+void igb_power_up_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg &= ~MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * igb_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down PHY to save power when interface is down and wake on lan
+ * is not enabled.
+ **/
+void igb_power_down_phy_copper(struct e1000_hw *hw)
+{
+ u16 mii_reg = 0;
+
+ /* The PHY will retain its settings across a power down/up cycle */
+ hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+ mii_reg |= MII_CR_POWER_DOWN;
+ hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+ msleep(1);
+}
+
+/**
* igb_check_polarity_82580 - Checks the polarity.
* @hw: pointer to the HW structure
*
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index 555eb54bb6e..565a6dbb371 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -60,6 +60,8 @@ s32 igb_setup_copper_link(struct e1000_hw *hw);
s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
u32 usec_interval, bool *success);
+void igb_power_up_phy_copper(struct e1000_hw *hw);
+void igb_power_down_phy_copper(struct e1000_hw *hw);
s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
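
Note: the MII_CR_POWER_DOWN bit added in e1000_defines.h is the standard power-down bit of the MII control register (bit 11, the bit <linux/mii.h> calls BMCR_PDOWN), so the new igb_power_up_phy_copper()/igb_power_down_phy_copper() pair is just a read-modify-write of PHY_CONTROL. A combined sketch of both directions; example_phy_set_power() is illustrative only:

static void example_phy_set_power(struct e1000_hw *hw, bool up)
{
        u16 mii_reg = 0;

        hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
        if (up)
                mii_reg &= ~MII_CR_POWER_DOWN;  /* resume: clear bit 11 */
        else
                mii_reg |= MII_CR_POWER_DOWN;   /* power down: set bit 11 */
        hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
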
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index dd4e6ffd29f..abb7333a1fb 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -310,6 +310,7 @@
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
* Filter - RW */
+#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
#define wr32(reg, value) (writel(value, hw->hw_addr + reg))
#define rd32(reg) (readl(hw->hw_addr + reg))
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index b1c1eb88893..a1775705b24 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -75,11 +75,14 @@ struct vf_data_storage {
u16 vlans_enabled;
u32 flags;
unsigned long last_nack;
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
};
#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC 0x00000008 /* PF has set MAC address */
/* RX descriptor control thresholds.
* PTHRESH - MAC will consider prefetch if it has fewer than this number of
@@ -92,13 +95,13 @@ struct vf_data_storage {
* descriptors until either it has this many to write back, or the
* ITR timer expires.
*/
-#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
+#define IGB_RX_PTHRESH 8
#define IGB_RX_HTHRESH 8
#define IGB_RX_WTHRESH 1
#define IGB_TX_PTHRESH 8
#define IGB_TX_HTHRESH 1
#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
- adapter->msix_entries) ? 0 : 16)
+ adapter->msix_entries) ? 1 : 16)
/* this is the size past which hardware will drop packets when setting LPE=0 */
#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
@@ -138,6 +141,7 @@ struct igb_buffer {
u16 length;
u16 next_to_watch;
u16 mapped_as_page;
+ u16 gso_segs;
};
/* RX */
struct {
@@ -173,7 +177,6 @@ struct igb_q_vector {
u16 itr_val;
u8 set_itr;
- u8 itr_shift;
void __iomem *itr_register;
char name[IFNAMSIZ + 9];
@@ -238,7 +241,6 @@ static inline int igb_desc_unused(struct igb_ring *ring)
}
/* board specific private data structure */
-
struct igb_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
@@ -264,12 +266,12 @@ struct igb_adapter {
unsigned long led_status;
/* TX */
- struct igb_ring *tx_ring; /* One per active queue */
+ struct igb_ring *tx_ring[16];
unsigned long tx_queue_len;
u32 tx_timeout_count;
/* RX */
- struct igb_ring *rx_ring; /* One per active queue */
+ struct igb_ring *rx_ring[16];
int num_tx_queues;
int num_rx_queues;
@@ -354,7 +356,9 @@ extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
struct igb_buffer *);
extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
extern void igb_update_stats(struct igb_adapter *);
+extern bool igb_has_link(struct igb_adapter *adapter);
extern void igb_set_ethtool_ops(struct net_device *);
+extern void igb_power_up_link(struct igb_adapter *);
static inline s32 igb_reset_phy(struct e1000_hw *hw)
{
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index f771a6c0877..a4cead12fd9 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -234,6 +234,24 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return 0;
}
+static u32 igb_get_link(struct net_device *netdev)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct e1000_mac_info *mac = &adapter->hw.mac;
+
+ /*
+ * If the link is not reported up to netdev, interrupts are disabled,
+ * and so the physical link state may have changed since we last
+ * looked. Set get_link_status to make sure that the true link
+ * state is interrogated, rather than pulling a cached and possibly
+ * stale link state from the driver.
+ */
+ if (!netif_carrier_ok(netdev))
+ mac->get_link_status = 1;
+
+ return igb_has_link(adapter);
+}
+
static void igb_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
@@ -296,7 +314,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
static u32 igb_get_rx_csum(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
+ return !!(adapter->rx_ring[0]->flags & IGB_RING_FLAG_RX_CSUM);
}
static int igb_set_rx_csum(struct net_device *netdev, u32 data)
@@ -306,9 +324,9 @@ static int igb_set_rx_csum(struct net_device *netdev, u32 data)
for (i = 0; i < adapter->num_rx_queues; i++) {
if (data)
- adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
+ adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
else
- adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
+ adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
}
return 0;
@@ -771,9 +789,9 @@ static int igb_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
goto clear_reset;
@@ -797,10 +815,10 @@ static int igb_set_ringparam(struct net_device *netdev,
* to the tx and rx ring structs.
*/
if (new_tx_count != adapter->tx_ring_count) {
- memcpy(temp_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct igb_ring));
-
for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->tx_ring[i],
+ sizeof(struct igb_ring));
+
temp_ring[i].count = new_tx_count;
err = igb_setup_tx_resources(&temp_ring[i]);
if (err) {
@@ -812,20 +830,21 @@ static int igb_set_ringparam(struct net_device *netdev,
}
}
- for (i = 0; i < adapter->num_tx_queues; i++)
- igb_free_tx_resources(&adapter->tx_ring[i]);
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ igb_free_tx_resources(adapter->tx_ring[i]);
- memcpy(adapter->tx_ring, temp_ring,
- adapter->num_tx_queues * sizeof(struct igb_ring));
+ memcpy(adapter->tx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
adapter->tx_ring_count = new_tx_count;
}
- if (new_rx_count != adapter->rx_ring->count) {
- memcpy(temp_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct igb_ring));
-
+ if (new_rx_count != adapter->rx_ring_count) {
for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_ring[i], adapter->rx_ring[i],
+ sizeof(struct igb_ring));
+
temp_ring[i].count = new_rx_count;
err = igb_setup_rx_resources(&temp_ring[i]);
if (err) {
@@ -838,11 +857,12 @@ static int igb_set_ringparam(struct net_device *netdev,
}
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_free_rx_resources(&adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ igb_free_rx_resources(adapter->rx_ring[i]);
- memcpy(adapter->rx_ring, temp_ring,
- adapter->num_rx_queues * sizeof(struct igb_ring));
+ memcpy(adapter->rx_ring[i], &temp_ring[i],
+ sizeof(struct igb_ring));
+ }
adapter->rx_ring_count = new_rx_count;
}
@@ -1704,6 +1724,9 @@ static void igb_diag_test(struct net_device *netdev,
dev_info(&adapter->pdev->dev, "offline testing starting\n");
+ /* power up link for link test */
+ igb_power_up_link(adapter);
+
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
if (igb_link_test(adapter, &data[4]))
@@ -1727,6 +1750,8 @@ static void igb_diag_test(struct net_device *netdev,
eth_test->flags |= ETH_TEST_FL_FAILED;
igb_reset(adapter);
+ /* power up link for loopback test */
+ igb_power_up_link(adapter);
if (igb_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1745,9 +1770,14 @@ static void igb_diag_test(struct net_device *netdev,
dev_open(netdev);
} else {
dev_info(&adapter->pdev->dev, "online testing starting\n");
- /* Online tests */
- if (igb_link_test(adapter, &data[4]))
- eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* PHY is powered down when interface is down */
+ if (!netif_carrier_ok(netdev)) {
+ data[4] = 0;
+ } else {
+ if (igb_link_test(adapter, &data[4]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ }
/* Online tests aren't run; pass by default */
data[0] = 0;
@@ -1812,7 +1842,8 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
struct igb_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC |
+ WAKE_PHY;
wol->wolopts = 0;
/* this function will set ->supported = 0 and return 1 if wol is not
@@ -1835,15 +1866,15 @@ static void igb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & E1000_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
-
- return;
+ if (adapter->wol & E1000_WUFC_LNKC)
+ wol->wolopts |= WAKE_PHY;
}
static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct igb_adapter *adapter = netdev_priv(netdev);
- if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+ if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
if (igb_wol_exclusion(adapter, wol) ||
@@ -1861,6 +1892,8 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
adapter->wol |= E1000_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= E1000_WUFC_MAG;
+ if (wol->wolopts & WAKE_PHY)
+ adapter->wol |= E1000_WUFC_LNKC;
device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
return 0;
@@ -2005,12 +2038,12 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->tx_stats;
for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
data[i] = queue_stat[k];
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->rx_stats;
for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
data[i] = queue_stat[k];
}
@@ -2074,7 +2107,7 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_msglevel = igb_get_msglevel,
.set_msglevel = igb_set_msglevel,
.nway_reset = igb_nway_reset,
- .get_link = ethtool_op_get_link,
+ .get_link = igb_get_link,
.get_eeprom_len = igb_get_eeprom_len,
.get_eeprom = igb_get_eeprom,
.set_eeprom = igb_set_eeprom,
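
Note: swapping ethtool_op_get_link for a driver-private igb_get_link matters because the generic helper only reports the cached carrier state, roughly:

u32 ethtool_op_get_link(struct net_device *dev)
{
        return netif_carrier_ok(dev) ? 1 : 0;
}

With the PHY now powered down while the interface is closed, that cached value can be stale, so igb_get_link forces get_link_status and queries the hardware through igb_has_link() instead.
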
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index c881347cb26..583a21c1def 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -60,7 +60,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static struct pci_device_id igb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@ -133,6 +133,12 @@ static void igb_msg_task(struct igb_adapter *);
static void igb_vmm_control(struct igb_adapter *);
static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos);
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
+ struct ifla_vf_info *ivi);
#ifdef CONFIG_PM
static int igb_suspend(struct pci_dev *, pm_message_t);
@@ -312,31 +318,35 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
*/
if (adapter->vfs_allocated_count) {
for (; i < adapter->rss_queues; i++)
- adapter->rx_ring[i].reg_idx = rbase_offset +
- Q_IDX_82576(i);
+ adapter->rx_ring[i]->reg_idx = rbase_offset +
+ Q_IDX_82576(i);
for (; j < adapter->rss_queues; j++)
- adapter->tx_ring[j].reg_idx = rbase_offset +
- Q_IDX_82576(j);
+ adapter->tx_ring[j]->reg_idx = rbase_offset +
+ Q_IDX_82576(j);
}
case e1000_82575:
case e1000_82580:
default:
for (; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = rbase_offset + i;
+ adapter->rx_ring[i]->reg_idx = rbase_offset + i;
for (; j < adapter->num_tx_queues; j++)
- adapter->tx_ring[j].reg_idx = rbase_offset + j;
+ adapter->tx_ring[j]->reg_idx = rbase_offset + j;
break;
}
}
static void igb_free_queues(struct igb_adapter *adapter)
{
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
-
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ int i;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
adapter->num_rx_queues = 0;
adapter->num_tx_queues = 0;
}
@@ -350,20 +360,13 @@ static void igb_free_queues(struct igb_adapter *adapter)
**/
static int igb_alloc_queues(struct igb_adapter *adapter)
{
+ struct igb_ring *ring;
int i;
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct igb_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err;
-
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct igb_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err;
-
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct igb_ring *ring = &(adapter->tx_ring[i]);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ if (!ring)
+ goto err;
ring->count = adapter->tx_ring_count;
ring->queue_index = i;
ring->pdev = adapter->pdev;
@@ -371,10 +374,13 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
/* For 82575, context index must be unique per ring. */
if (adapter->hw.mac.type == e1000_82575)
ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
+ adapter->tx_ring[i] = ring;
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igb_ring *ring = &(adapter->rx_ring[i]);
+ ring = kzalloc(sizeof(struct igb_ring), GFP_KERNEL);
+ if (!ring)
+ goto err;
ring->count = adapter->rx_ring_count;
ring->queue_index = i;
ring->pdev = adapter->pdev;
@@ -384,6 +390,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
/* set flag indicating ring supports SCTP checksum offload */
if (adapter->hw.mac.type >= e1000_82576)
ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
+ adapter->rx_ring[i] = ring;
}
igb_cache_ring_register(adapter);
@@ -498,6 +505,12 @@ static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
BUG();
break;
}
+
+ /* add q_vector eims value to global eims_enable_mask */
+ adapter->eims_enable_mask |= q_vector->eims_value;
+
+ /* configure q_vector to set itr on first interrupt */
+ q_vector->set_itr = 1;
}
/**
@@ -555,11 +568,8 @@ static void igb_configure_msix(struct igb_adapter *adapter)
adapter->eims_enable_mask |= adapter->eims_other;
- for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- igb_assign_vector(q_vector, vector++);
- adapter->eims_enable_mask |= q_vector->eims_value;
- }
+ for (i = 0; i < adapter->num_q_vectors; i++)
+ igb_assign_vector(adapter->q_vector[i], vector++);
wrfl();
}
@@ -639,6 +649,8 @@ static void igb_free_q_vectors(struct igb_adapter *adapter)
for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
adapter->q_vector[v_idx] = NULL;
+ if (!q_vector)
+ continue;
netif_napi_del(&q_vector->napi);
kfree(q_vector);
}
@@ -750,33 +762,24 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
- q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
q_vector->itr_val = IGB_START_ITR;
- q_vector->set_itr = 1;
netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
adapter->q_vector[v_idx] = q_vector;
}
return 0;
err_out:
- while (v_idx) {
- v_idx--;
- q_vector = adapter->q_vector[v_idx];
- netif_napi_del(&q_vector->napi);
- kfree(q_vector);
- adapter->q_vector[v_idx] = NULL;
- }
+ igb_free_q_vectors(adapter);
return -ENOMEM;
}
static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
int ring_idx, int v_idx)
{
- struct igb_q_vector *q_vector;
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- q_vector = adapter->q_vector[v_idx];
- q_vector->rx_ring = &adapter->rx_ring[ring_idx];
+ q_vector->rx_ring = adapter->rx_ring[ring_idx];
q_vector->rx_ring->q_vector = q_vector;
q_vector->itr_val = adapter->rx_itr_setting;
if (q_vector->itr_val && q_vector->itr_val <= 3)
@@ -786,10 +789,9 @@ static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
int ring_idx, int v_idx)
{
- struct igb_q_vector *q_vector;
+ struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
- q_vector = adapter->q_vector[v_idx];
- q_vector->tx_ring = &adapter->tx_ring[ring_idx];
+ q_vector->tx_ring = adapter->tx_ring[ring_idx];
q_vector->tx_ring->q_vector = q_vector;
q_vector->itr_val = adapter->tx_itr_setting;
if (q_vector->itr_val && q_vector->itr_val <= 3)
@@ -1099,7 +1101,7 @@ static void igb_configure(struct igb_adapter *adapter)
* at least 1 descriptor unused to make sure
* next_to_use != next_to_clean */
for (i = 0; i < adapter->num_rx_queues; i++) {
- struct igb_ring *ring = &adapter->rx_ring[i];
+ struct igb_ring *ring = adapter->rx_ring[i];
igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
}
@@ -1107,6 +1109,29 @@ static void igb_configure(struct igb_adapter *adapter)
adapter->tx_queue_len = netdev->tx_queue_len;
}
+/**
+ * igb_power_up_link - Power up the phy/serdes link
+ * @adapter: address of board private structure
+ **/
+void igb_power_up_link(struct igb_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+ igb_power_up_phy_copper(&adapter->hw);
+ else
+ igb_power_up_serdes_link_82575(&adapter->hw);
+}
+
+/**
+ * igb_power_down_link - Power down the phy/serdes link
+ * @adapter: address of board private structure
+ */
+static void igb_power_down_link(struct igb_adapter *adapter)
+{
+ if (adapter->hw.phy.media_type == e1000_media_type_copper)
+ igb_power_down_phy_copper_82575(&adapter->hw);
+ else
+ igb_shutdown_serdes_link_82575(&adapter->hw);
+}
/**
* igb_up - Open the interface and prepare it to handle traffic
@@ -1328,12 +1353,14 @@ void igb_reset(struct igb_adapter *adapter)
wr32(E1000_PCIEMISC,
reg & ~E1000_PCIEMISC_LX_DECISION);
}
+ if (!netif_running(adapter->netdev))
+ igb_power_down_link(adapter);
+
igb_update_mng_vlan(adapter);
/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
- igb_reset_adaptive(hw);
igb_get_phy_info(hw);
}
@@ -1352,6 +1379,10 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_vlan_rx_register = igb_vlan_rx_register,
.ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
+ .ndo_set_vf_mac = igb_ndo_set_vf_mac,
+ .ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
+ .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = igb_netpoll,
#endif
@@ -1472,7 +1503,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
igb_get_bus_info_pcie(hw);
hw->phy.autoneg_wait_to_complete = false;
- hw->mac.adaptive_ifs = true;
/* Copper options */
if (hw->phy.media_type == e1000_media_type_copper) {
@@ -1706,9 +1736,6 @@ static void __devexit igb_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
- if (!igb_check_reset_block(hw))
- igb_reset_phy(hw);
-
igb_clear_interrupt_scheme(adapter);
#ifdef CONFIG_PCI_IOV
@@ -1984,7 +2011,7 @@ static int igb_open(struct net_device *netdev)
if (err)
goto err_setup_rx;
- /* e1000_power_up_phy(adapter); */
+ igb_power_up_link(adapter);
/* before we allocate an interrupt, we must be ready to handle it.
* Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
@@ -2026,7 +2053,7 @@ static int igb_open(struct net_device *netdev)
err_req_irq:
igb_release_hw_control(adapter);
- /* e1000_power_down_phy(adapter); */
+ igb_power_down_link(adapter);
igb_free_all_rx_resources(adapter);
err_setup_rx:
igb_free_all_tx_resources(adapter);
@@ -2114,19 +2141,19 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = igb_setup_tx_resources(&adapter->tx_ring[i]);
+ err = igb_setup_tx_resources(adapter->tx_ring[i]);
if (err) {
dev_err(&pdev->dev,
"Allocation for Tx Queue %u failed\n", i);
for (i--; i >= 0; i--)
- igb_free_tx_resources(&adapter->tx_ring[i]);
+ igb_free_tx_resources(adapter->tx_ring[i]);
break;
}
}
for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
int r_idx = i % adapter->num_tx_queues;
- adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
+ adapter->multi_tx_table[i] = adapter->tx_ring[r_idx];
}
return err;
}
@@ -2209,7 +2236,7 @@ static void igb_configure_tx(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
+ igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
}
/**
@@ -2267,12 +2294,12 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = igb_setup_rx_resources(&adapter->rx_ring[i]);
+ err = igb_setup_rx_resources(adapter->rx_ring[i]);
if (err) {
dev_err(&pdev->dev,
"Allocation for Rx Queue %u failed\n", i);
for (i--; i >= 0; i--)
- igb_free_rx_resources(&adapter->rx_ring[i]);
+ igb_free_rx_resources(adapter->rx_ring[i]);
break;
}
}
@@ -2479,7 +2506,8 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
wr32(E1000_RLPML, max_frame_size);
}
-static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
+static inline void igb_set_vmolr(struct igb_adapter *adapter,
+ int vfn, bool aupe)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -2492,8 +2520,11 @@ static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
return;
vmolr = rd32(E1000_VMOLR(vfn));
- vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
- E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+ if (aupe)
+ vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+ else
+ vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
/* clear all bits that might not be set */
vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
@@ -2560,11 +2591,14 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
E1000_SRRCTL_BSIZEPKT_SHIFT;
srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
}
+ /* Only set Drop Enable if we are supporting multiple queues */
+ if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
+ srrctl |= E1000_SRRCTL_DROP_EN;
wr32(E1000_SRRCTL(reg_idx), srrctl);
/* set filtering for VMDQ pools */
- igb_set_vmolr(adapter, reg_idx & 0x7);
+ igb_set_vmolr(adapter, reg_idx & 0x7, true);
/* enable receive descriptor fetching */
rxdctl = rd32(E1000_RXDCTL(reg_idx));
@@ -2596,7 +2630,7 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
+ igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
@@ -2633,7 +2667,7 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- igb_free_tx_resources(&adapter->tx_ring[i]);
+ igb_free_tx_resources(adapter->tx_ring[i]);
}
void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
@@ -2700,7 +2734,7 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- igb_clean_tx_ring(&adapter->tx_ring[i]);
+ igb_clean_tx_ring(adapter->tx_ring[i]);
}
/**
@@ -2737,7 +2771,7 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- igb_free_rx_resources(&adapter->rx_ring[i]);
+ igb_free_rx_resources(adapter->rx_ring[i]);
}
/**
@@ -2801,7 +2835,7 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- igb_clean_rx_ring(&adapter->rx_ring[i]);
+ igb_clean_rx_ring(adapter->rx_ring[i]);
}
/**
@@ -2843,38 +2877,30 @@ static int igb_write_mc_addr_list(struct net_device *netdev)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct dev_mc_list *mc_ptr = netdev->mc_list;
+ struct dev_mc_list *mc_ptr;
u8 *mta_list;
- u32 vmolr = 0;
int i;
- if (!netdev->mc_count) {
+ if (netdev_mc_empty(netdev)) {
/* nothing to program, so clear mc list */
igb_update_mc_addr_list(hw, NULL, 0);
igb_restore_vf_multicasts(adapter);
return 0;
}
- mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list)
return -ENOMEM;
- /* set vmolr receive overflow multicast bit */
- vmolr |= E1000_VMOLR_ROMPE;
-
/* The shared function expects a packed array of only addresses. */
- mc_ptr = netdev->mc_list;
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
- for (i = 0; i < netdev->mc_count; i++) {
- if (!mc_ptr)
- break;
- memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
- mc_ptr = mc_ptr->next;
- }
igb_update_mc_addr_list(hw, mta_list, i);
kfree(mta_list);
- return netdev->mc_count;
+ return netdev_mc_count(netdev);
}
/**
@@ -2895,12 +2921,13 @@ static int igb_write_uc_addr_list(struct net_device *netdev)
int count = 0;
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev->uc.count > rar_entries)
+ if (netdev_uc_count(netdev) > rar_entries)
return -ENOMEM;
- if (netdev->uc.count && rar_entries) {
+ if (!netdev_uc_empty(netdev) && rar_entries) {
struct netdev_hw_addr *ha;
- list_for_each_entry(ha, &netdev->uc.list, list) {
+
+ netdev_for_each_uc_addr(ha, netdev) {
if (!rar_entries)
break;
igb_rar_set_qsel(adapter, ha->addr,
@@ -3004,7 +3031,7 @@ static void igb_update_phy_info(unsigned long data)
* igb_has_link - check shared code for link and determine up/down
* @adapter: pointer to driver private info
**/
-static bool igb_has_link(struct igb_adapter *adapter)
+bool igb_has_link(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
bool link_active = false;
@@ -3121,10 +3148,9 @@ static void igb_watchdog_task(struct work_struct *work)
}
igb_update_stats(adapter);
- igb_update_adaptive(hw);
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct igb_ring *tx_ring = &adapter->tx_ring[i];
+ struct igb_ring *tx_ring = adapter->tx_ring[i];
if (!netif_carrier_ok(netdev)) {
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
@@ -3225,6 +3251,10 @@ static void igb_update_ring_itr(struct igb_q_vector *q_vector)
else
new_val = avg_wire_size / 2;
+ /* when in itr mode 3 do not exceed 20K ints/sec */
+ if (adapter->rx_itr_setting == 3 && new_val < 196)
+ new_val = 196;
+
set_itr_val:
if (new_val != q_vector->itr_val) {
q_vector->itr_val = new_val;
@@ -3320,13 +3350,13 @@ static void igb_set_itr(struct igb_adapter *adapter)
adapter->rx_itr = igb_update_itr(adapter,
adapter->rx_itr,
- adapter->rx_ring->total_packets,
- adapter->rx_ring->total_bytes);
+ q_vector->rx_ring->total_packets,
+ q_vector->rx_ring->total_bytes);
adapter->tx_itr = igb_update_itr(adapter,
adapter->tx_itr,
- adapter->tx_ring->total_packets,
- adapter->tx_ring->total_bytes);
+ q_vector->tx_ring->total_packets,
+ q_vector->tx_ring->total_bytes);
current_itr = max(adapter->rx_itr, adapter->tx_itr);
/* conservative mode (itr 3) eliminates the lowest_latency setting */
@@ -3349,10 +3379,10 @@ static void igb_set_itr(struct igb_adapter *adapter)
}
set_itr_now:
- adapter->rx_ring->total_bytes = 0;
- adapter->rx_ring->total_packets = 0;
- adapter->tx_ring->total_bytes = 0;
- adapter->tx_ring->total_packets = 0;
+ q_vector->rx_ring->total_bytes = 0;
+ q_vector->rx_ring->total_packets = 0;
+ q_vector->tx_ring->total_bytes = 0;
+ q_vector->tx_ring->total_packets = 0;
if (new_itr != q_vector->itr_val) {
/* this attempts to bias the interrupt rate towards Bulk
@@ -3392,8 +3422,8 @@ static inline int igb_tso_adv(struct igb_ring *tx_ring,
int err;
struct igb_buffer *buffer_info;
u32 info = 0, tu_cmd = 0;
- u32 mss_l4len_idx, l4len;
- *hdr_len = 0;
+ u32 mss_l4len_idx;
+ u8 l4len;
if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
@@ -3599,6 +3629,7 @@ static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
}
tx_ring->buffer_info[i].skb = skb;
+ tx_ring->buffer_info[i].gso_segs = skb_shinfo(skb)->gso_segs ?: 1;
tx_ring->buffer_info[first].next_to_watch = i;
return ++count;
@@ -3612,14 +3643,12 @@ dma_error:
buffer_info->length = 0;
buffer_info->next_to_watch = 0;
buffer_info->mapped_as_page = false;
- count--;
/* clear timestamp and dma mappings for remaining portion of packet */
- while (count >= 0) {
- count--;
+ while (count--) {
+ if (i == 0)
+ i = tx_ring->count;
i--;
- if (i < 0)
- i += tx_ring->count;
buffer_info = &tx_ring->buffer_info[i];
igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
}
@@ -3628,7 +3657,7 @@ dma_error:
}
static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
- int tx_flags, int count, u32 paylen,
+ u32 tx_flags, int count, u32 paylen,
u8 hdr_len)
{
union e1000_adv_tx_desc *tx_desc;
@@ -3716,7 +3745,7 @@ static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
return 0;
}
-static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
{
if (igb_desc_unused(tx_ring) >= size)
return 0;
@@ -3727,10 +3756,10 @@ netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
struct igb_ring *tx_ring)
{
struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
- unsigned int first;
- unsigned int tx_flags = 0;
- u8 hdr_len = 0;
int tso = 0, count;
+ u32 tx_flags = 0;
+ u16 first;
+ u8 hdr_len = 0;
union skb_shared_tx *shtx = skb_tx(skb);
/* need: 1 descriptor per page,
@@ -3911,7 +3940,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
+ adapter->rx_ring[i]->rx_buffer_len = rx_buffer_len;
if (netif_running(netdev))
igb_up(adapter);
@@ -3933,7 +3962,7 @@ void igb_update_stats(struct igb_adapter *adapter)
struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- u32 rnbc;
+ u32 rnbc, reg;
u16 phy_tmp;
int i;
u64 bytes, packets;
@@ -3953,10 +3982,11 @@ void igb_update_stats(struct igb_adapter *adapter)
packets = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
- adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
+ struct igb_ring *ring = adapter->rx_ring[i];
+ ring->rx_stats.drops += rqdpc_tmp;
net_stats->rx_fifo_errors += rqdpc_tmp;
- bytes += adapter->rx_ring[i].rx_stats.bytes;
- packets += adapter->rx_ring[i].rx_stats.packets;
+ bytes += ring->rx_stats.bytes;
+ packets += ring->rx_stats.packets;
}
net_stats->rx_bytes = bytes;
@@ -3965,8 +3995,9 @@ void igb_update_stats(struct igb_adapter *adapter)
bytes = 0;
packets = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- bytes += adapter->tx_ring[i].tx_stats.bytes;
- packets += adapter->tx_ring[i].tx_stats.packets;
+ struct igb_ring *ring = adapter->tx_ring[i];
+ bytes += ring->tx_stats.bytes;
+ packets += ring->tx_stats.packets;
}
net_stats->tx_bytes = bytes;
net_stats->tx_packets = packets;
@@ -4024,15 +4055,17 @@ void igb_update_stats(struct igb_adapter *adapter)
adapter->stats.mptc += rd32(E1000_MPTC);
adapter->stats.bptc += rd32(E1000_BPTC);
- /* used for adaptive IFS */
- hw->mac.tx_packet_delta = rd32(E1000_TPT);
- adapter->stats.tpt += hw->mac.tx_packet_delta;
- hw->mac.collision_delta = rd32(E1000_COLC);
- adapter->stats.colc += hw->mac.collision_delta;
+ adapter->stats.tpt += rd32(E1000_TPT);
+ adapter->stats.colc += rd32(E1000_COLC);
adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
- adapter->stats.rxerrc += rd32(E1000_RXERRC);
- adapter->stats.tncrs += rd32(E1000_TNCRS);
+ /* read internal phy specific stats */
+ reg = rd32(E1000_CTRL_EXT);
+ if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
+ adapter->stats.rxerrc += rd32(E1000_RXERRC);
+ adapter->stats.tncrs += rd32(E1000_TNCRS);
+ }
+
adapter->stats.tsctc += rd32(E1000_TSCTC);
adapter->stats.tsctfc += rd32(E1000_TSCTFC);
@@ -4095,6 +4128,9 @@ static irqreturn_t igb_msix_other(int irq, void *data)
u32 icr = rd32(E1000_ICR);
/* reading ICR causes bit 31 of EICR to be cleared */
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4124,6 +4160,7 @@ static irqreturn_t igb_msix_other(int irq, void *data)
static void igb_write_itr(struct igb_q_vector *q_vector)
{
+ struct igb_adapter *adapter = q_vector->adapter;
u32 itr_val = q_vector->itr_val & 0x7FFC;
if (!q_vector->set_itr)
@@ -4132,8 +4169,8 @@ static void igb_write_itr(struct igb_q_vector *q_vector)
if (!itr_val)
itr_val = 0x4;
- if (q_vector->itr_shift)
- itr_val |= itr_val << q_vector->itr_shift;
+ if (adapter->hw.mac.type == e1000_82575)
+ itr_val |= itr_val << 16;
else
itr_val |= 0x8000000;
@@ -4210,9 +4247,8 @@ static void igb_setup_dca(struct igb_adapter *adapter)
wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
for (i = 0; i < adapter->num_q_vectors; i++) {
- struct igb_q_vector *q_vector = adapter->q_vector[i];
- q_vector->cpu = -1;
- igb_update_dca(q_vector);
+ adapter->q_vector[i]->cpu = -1;
+ igb_update_dca(adapter->q_vector[i]);
}
}
@@ -4486,10 +4522,57 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
reg |= size;
wr32(E1000_VMOLR(vf), reg);
}
- return 0;
}
}
- return -1;
+ return 0;
+}
+
+static void igb_set_vmvir(struct igb_adapter *adapter, u32 vid, u32 vf)
+{
+ struct e1000_hw *hw = &adapter->hw;
+
+ if (vid)
+ wr32(E1000_VMVIR(vf), (vid | E1000_VMVIR_VLANA_DEFAULT));
+ else
+ wr32(E1000_VMVIR(vf), 0);
+}
+
+static int igb_ndo_set_vf_vlan(struct net_device *netdev,
+ int vf, u16 vlan, u8 qos)
+{
+ int err = 0;
+ struct igb_adapter *adapter = netdev_priv(netdev);
+
+ if ((vf >= adapter->vfs_allocated_count) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+ if (vlan || qos) {
+ err = igb_vlvf_set(adapter, vlan, !!vlan, vf);
+ if (err)
+ goto out;
+ igb_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
+ igb_set_vmolr(adapter, vf, !vlan);
+ adapter->vf_data[vf].pf_vlan = vlan;
+ adapter->vf_data[vf].pf_qos = qos;
+ dev_info(&adapter->pdev->dev,
+ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf);
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev,
+ "The VF VLAN has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev,
+ "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ } else {
+ igb_vlvf_set(adapter, adapter->vf_data[vf].pf_vlan,
+ false, vf);
+ igb_set_vmvir(adapter, vlan, vf);
+ igb_set_vmolr(adapter, vf, true);
+ adapter->vf_data[vf].pf_vlan = 0;
+ adapter->vf_data[vf].pf_qos = 0;
+ }
+out:
+ return err;
}
static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
@@ -4502,15 +4585,21 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
{
- /* clear all flags */
- adapter->vf_data[vf].flags = 0;
+ /* clear flags */
+ adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC);
adapter->vf_data[vf].last_nack = jiffies;
/* reset offloads to defaults */
- igb_set_vmolr(adapter, vf);
+ igb_set_vmolr(adapter, vf, true);
/* reset vlans for device */
igb_clear_vf_vfta(adapter, vf);
+ if (adapter->vf_data[vf].pf_vlan)
+ igb_ndo_set_vf_vlan(adapter->netdev, vf,
+ adapter->vf_data[vf].pf_vlan,
+ adapter->vf_data[vf].pf_qos);
+ else
+ igb_clear_vf_vfta(adapter, vf);
/* reset multicast table array for vf */
adapter->vf_data[vf].num_vf_mc_hashes = 0;
@@ -4524,7 +4613,8 @@ static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
/* generate a new mac address as we were hotplug removed/added */
- random_ether_addr(vf_mac);
+ if (!(adapter->vf_data[vf].flags & IGB_VF_FLAG_PF_SET_MAC))
+ random_ether_addr(vf_mac);
/* process remaining reset events */
igb_vf_reset(adapter, vf);
@@ -4637,7 +4727,10 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
break;
case E1000_VF_SET_VLAN:
- retval = igb_set_vf_vlan(adapter, msgbuf, vf);
+ if (adapter->vf_data[vf].pf_vlan)
+ retval = -1;
+ else
+ retval = igb_set_vf_vlan(adapter, msgbuf, vf);
break;
default:
dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
@@ -4718,6 +4811,9 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
igb_write_itr(q_vector);
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4757,6 +4853,9 @@ static irqreturn_t igb_intr(int irq, void *data)
if (!(icr & E1000_ICR_INT_ASSERTED))
return IRQ_NONE;
+ if (icr & E1000_ICR_DRSTA)
+ schedule_work(&adapter->reset_task);
+
if (icr & E1000_ICR_DOUTSYNC) {
/* HW is reporting DMA is out of sync */
adapter->stats.doosync++;
@@ -4920,7 +5019,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
if (skb) {
unsigned int segs, bytecount;
/* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
+ segs = buffer_info->gso_segs;
/* multiply data chunks by size of headers */
bytecount = ((segs - 1) * skb_headlen(skb)) +
skb->len;
@@ -5738,7 +5837,9 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = wufc || adapter->en_mng_pt;
if (!*enable_wake)
- igb_shutdown_serdes_link_82575(hw);
+ igb_power_down_link(adapter);
+ else
+ igb_power_up_link(adapter);
/* Release control of h/w to f/w. If f/w is AMT enabled, this
* would have already happened in close and is redundant. */
@@ -5778,6 +5879,7 @@ static int igb_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
+ pci_save_state(pdev);
err = pci_enable_device_mem(pdev);
if (err) {
@@ -5795,8 +5897,6 @@ static int igb_resume(struct pci_dev *pdev)
return -ENOMEM;
}
- /* e1000_power_up_phy(adapter); */
-
igb_reset(adapter);
/* let the f/w know that the h/w is now under the control of the
@@ -5905,6 +6005,7 @@ static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
} else {
pci_set_master(pdev);
pci_restore_state(pdev);
+ pci_save_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
@@ -5993,6 +6094,43 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
return 0;
}
+static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
+ return -EINVAL;
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
+ dev_info(&adapter->pdev->dev, "Reload the VF driver to make this"
+ " change effective.");
+ if (test_bit(__IGB_DOWN, &adapter->state)) {
+ dev_warn(&adapter->pdev->dev, "The VF MAC address has been set,"
+ " but the PF device is not up.\n");
+ dev_warn(&adapter->pdev->dev, "Bring the PF device up before"
+ " attempting to use the VF device.\n");
+ }
+ return igb_set_vf_mac(adapter, vf, mac);
+}
+
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+{
+ return -EOPNOTSUPP;
+}
+
+static int igb_ndo_get_vf_config(struct net_device *netdev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ if (vf >= adapter->vfs_allocated_count)
+ return -EINVAL;
+ ivi->vf = vf;
+ memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
+ ivi->tx_rate = 0;
+ ivi->vlan = adapter->vf_data[vf].pf_vlan;
+ ivi->qos = adapter->vf_data[vf].pf_qos;
+ return 0;
+}
+
static void igb_vmm_control(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
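
Note: turning tx_ring/rx_ring into fixed arrays of pointers means each queue is now a separate kzalloc() and the error path has to unwind whatever was already allocated (hence the new NULL check in igb_free_q_vectors()). A condensed sketch of the allocate-and-unwind shape, with example_* names standing in for the igb structures:

#include <linux/slab.h>

struct example_ring { u16 count; };
struct example_adapter {
        int num_tx_queues;
        u16 tx_ring_count;
        struct example_ring *tx_ring[16];
};

static int example_alloc_tx_rings(struct example_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++) {
                struct example_ring *ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
                        goto err;
                ring->count = adapter->tx_ring_count;
                adapter->tx_ring[i] = ring;
        }
        return 0;
err:
        while (i--) {                   /* free only what was allocated */
                kfree(adapter->tx_ring[i]);
                adapter->tx_ring[i] = NULL;
        }
        return -ENOMEM;
}
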
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 2aa71a766c3..a77afd8a14b 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1403,8 +1403,8 @@ static void igbvf_set_multi(struct net_device *netdev)
u8 *mta_list = NULL;
int i;
- if (netdev->mc_count) {
- mta_list = kmalloc(netdev->mc_count * 6, GFP_ATOMIC);
+ if (!netdev_mc_empty(netdev)) {
+ mta_list = kmalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
if (!mta_list) {
dev_err(&adapter->pdev->dev,
"failed to allocate multicast filter list\n");
@@ -1413,15 +1413,9 @@ static void igbvf_set_multi(struct net_device *netdev)
}
/* prepare a packed array of only addresses. */
- mc_ptr = netdev->mc_list;
-
- for (i = 0; i < netdev->mc_count; i++) {
- if (!mc_ptr)
- break;
- memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr,
- ETH_ALEN);
- mc_ptr = mc_ptr->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
kfree(mta_list);
@@ -2609,11 +2603,7 @@ static void igbvf_print_device_info(struct igbvf_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
dev_info(&pdev->dev, "Intel(R) 82576 Virtual Function\n");
- dev_info(&pdev->dev, "Address: %02x:%02x:%02x:%02x:%02x:%02x\n",
- /* MAC address */
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
dev_info(&pdev->dev, "MAC: %d\n", hw->mac.type);
}
@@ -2779,11 +2769,8 @@ static int __devinit igbvf_probe(struct pci_dev *pdev,
memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- dev_err(&pdev->dev, "Invalid MAC Address: "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
+ netdev->dev_addr);
err = -EIO;
goto err_hw_init;
}
@@ -2885,7 +2872,7 @@ static struct pci_error_handlers igbvf_err_handler = {
.resume = igbvf_io_resume,
};
-static struct pci_device_id igbvf_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
{ } /* terminate list */
};
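
Note: the printk cleanups above rely on the %pM extension, which formats a six-byte buffer as a colon-separated MAC address, so a single specifier replaces the six %02x arguments; e.g.:

/* prints "Address: aa:bb:cc:dd:ee:ff" for a u8 dev_addr[ETH_ALEN] */
dev_info(&pdev->dev, "Address: %pM\n", netdev->dev_addr);
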
diff --git a/drivers/net/ioc3-eth.c b/drivers/net/ioc3-eth.c
index 8ec15ab8c8c..70871b9b045 100644
--- a/drivers/net/ioc3-eth.c
+++ b/drivers/net/ioc3-eth.c
@@ -1383,7 +1383,7 @@ static void __devexit ioc3_remove_one (struct pci_dev *pdev)
*/
}
-static struct pci_device_id ioc3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ioc3_pci_tbl) = {
{ PCI_VENDOR_ID_SGI, PCI_DEVICE_ID_SGI_IOC3, PCI_ANY_ID, PCI_ANY_ID },
{ 0 }
};
@@ -1664,11 +1664,10 @@ static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
static void ioc3_set_multicast_list(struct net_device *dev)
{
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
struct ioc3_private *ip = netdev_priv(dev);
struct ioc3 *ioc3 = ip->regs;
u64 ehar = 0;
- int i;
netif_stop_queue(dev); /* Lock out others. */
@@ -1681,16 +1680,16 @@ static void ioc3_set_multicast_list(struct net_device *dev)
ioc3_w_emcr(ip->emcr); /* Clear promiscuous. */
(void) ioc3_r_emcr();
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 64)) {
/* Too many for hashing to make sense or we want all
multicast packets anyway, so skip computing all the
hashes and just accept all packets. */
ip->ehar_h = 0xffffffff;
ip->ehar_l = 0xffffffff;
} else {
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
char *addr = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addr & 1))
continue;
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index ba8d246d05a..150415e83f6 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -88,17 +88,15 @@ static const char *ipg_brand_name[] = {
"Sundance Technology ST2021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
"Tamarack Microelectronics TC9020/9021 based NIC",
- "D-Link NIC",
"D-Link NIC IP1000A"
};
-static struct pci_device_id ipg_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipg_pci_tbl) = {
{ PCI_VDEVICE(SUNDANCE, 0x1023), 0 },
{ PCI_VDEVICE(SUNDANCE, 0x2021), 1 },
{ PCI_VDEVICE(SUNDANCE, 0x1021), 2 },
{ PCI_VDEVICE(DLINK, 0x9021), 3 },
- { PCI_VDEVICE(DLINK, 0x4000), 4 },
- { PCI_VDEVICE(DLINK, 0x4020), 5 },
+ { PCI_VDEVICE(DLINK, 0x4020), 4 },
{ 0, }
};
@@ -585,11 +583,11 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
receivemode = IPG_RM_RECEIVEALLFRAMES;
} else if ((dev->flags & IFF_ALLMULTI) ||
((dev->flags & IFF_MULTICAST) &&
- (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
+ (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
/* NIC to be configured to receive all multicast
* frames. */
receivemode |= IPG_RM_RECEIVEMULTICAST;
- } else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
+ } else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
/* NIC to be configured to receive selected
* multicast addresses. */
receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
@@ -610,8 +608,7 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
hashtable[1] = 0x00000000;
/* Cycle through all multicast addresses to filter. */
- for (mc_list_ptr = dev->mc_list;
- mc_list_ptr != NULL; mc_list_ptr = mc_list_ptr->next) {
+ netdev_for_each_mc_addr(mc_list_ptr, dev) {
/* Calculate CRC result for each multicast address. */
hashindex = crc32_le(0xffffffff, mc_list_ptr->dmi_addr,
ETH_ALEN);
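
Note: the ipg (and ioc3) hunks keep the usual hash-filter scheme: a CRC is computed over each multicast address and a few of its bits select one bit in the hardware hash table; only the list iteration changed. A sketch of the surrounding logic, with the 6-bit index width chosen purely for illustration:

#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>

static void example_build_mc_hash(struct net_device *dev, u32 hashtable[2])
{
        struct dev_mc_list *dmi;

        hashtable[0] = hashtable[1] = 0;
        netdev_for_each_mc_addr(dmi, dev) {
                u32 crc = crc32_le(0xffffffff, dmi->dmi_addr, ETH_ALEN);
                unsigned int bit = crc & 0x3f;  /* low 6 bits -> 64 entries */

                hashtable[bit >> 5] |= 1 << (bit & 0x1f);
        }
}
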
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index f7638422142..af10e97345c 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -64,6 +64,16 @@ endchoice
comment "Dongle support"
+config SH_SIR
+ tristate "SuperH SIR on UART"
+ depends on IRDA && SUPERH && \
+ (CPU_SUBTYPE_SH7722 || CPU_SUBTYPE_SH7723 || \
+ CPU_SUBTYPE_SH7724)
+ default n
+ help
+ Say Y here if you want to enable SIR function on SuperH UART
+ devices.
+
config DONGLE
bool "Serial dongle support"
depends on IRTTY_SIR
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index d82e1e3bd8c..e030d47e279 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_AU1000_FIR) += au1k_ir.o
# SIR drivers
obj-$(CONFIG_IRTTY_SIR) += irtty-sir.o sir-dev.o
obj-$(CONFIG_BFIN_SIR) += bfin_sir.o
+obj-$(CONFIG_SH_SIR) += sh_sir.o
# dongle drivers for SIR drivers
obj-$(CONFIG_ESI_DONGLE) += esi-sir.o
obj-$(CONFIG_TEKRAM_DONGLE) += tekram-sir.o
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index 2d7b5c1d557..b7e6625ca75 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -184,7 +184,7 @@
#define CONFIG0H_DMA_ON_NORX CONFIG0H_DMA_OFF| OBOE_CONFIG0H_ENDMAC
#define CONFIG0H_DMA_ON CONFIG0H_DMA_ON_NORX | OBOE_CONFIG0H_ENRX
-static struct pci_device_id toshoboe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(toshoboe_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIR701, PCI_ANY_ID, PCI_ANY_ID, },
{ PCI_VENDOR_ID_TOSHIBA, PCI_DEVICE_ID_FIRD01, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
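
Note: DEFINE_PCI_DEVICE_TABLE(), used for the ID tables throughout this series, is a convenience macro from <linux/pci.h>; at this point in the tree it expands to roughly the following, i.e. it mainly adds const and the __devinitconst annotation to the table definition:

/* approximate expansion, for reference */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
        const struct pci_device_id _table[] __devinitconst
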
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
new file mode 100644
index 00000000000..d7c983dc91a
--- /dev/null
+++ b/drivers/net/irda/sh_sir.c
@@ -0,0 +1,823 @@
+/*
+ * SuperH IrDA Driver
+ *
+ * Copyright (C) 2009 Renesas Solutions Corp.
+ * Kuninori Morimoto <morimoto.kuninori@renesas.com>
+ *
+ * Based on bfin_sir.c
+ * Copyright 2006-2009 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <net/irda/wrapper.h>
+#include <net/irda/irda_device.h>
+#include <asm/clock.h>
+
+#define DRIVER_NAME "sh_sir"
+
+#define RX_PHASE (1 << 0)
+#define TX_PHASE (1 << 1)
+#define TX_COMP_PHASE (1 << 2) /* tx complete */
+#define NONE_PHASE (1 << 31)
+
+#define IRIF_RINTCLR 0x0016 /* DMA rx interrupt source clear */
+#define IRIF_TINTCLR 0x0018 /* DMA tx interrupt source clear */
+#define IRIF_SIR0 0x0020 /* IrDA-SIR10 control */
+#define IRIF_SIR1 0x0022 /* IrDA-SIR10 baudrate error correction */
+#define IRIF_SIR2 0x0024 /* IrDA-SIR10 baudrate count */
+#define IRIF_SIR3 0x0026 /* IrDA-SIR10 status */
+#define IRIF_SIR_FRM 0x0028 /* Hardware frame processing set */
+#define IRIF_SIR_EOF 0x002A /* EOF value */
+#define IRIF_SIR_FLG 0x002C /* Flag clear */
+#define IRIF_UART_STS2 0x002E /* UART status 2 */
+#define IRIF_UART0 0x0030 /* UART control */
+#define IRIF_UART1 0x0032 /* UART status */
+#define IRIF_UART2 0x0034 /* UART mode */
+#define IRIF_UART3 0x0036 /* UART transmit data */
+#define IRIF_UART4 0x0038 /* UART receive data */
+#define IRIF_UART5 0x003A /* UART interrupt mask */
+#define IRIF_UART6 0x003C /* UART baud rate error correction */
+#define IRIF_UART7 0x003E /* UART baud rate count set */
+#define IRIF_CRC0 0x0040 /* CRC engine control */
+#define IRIF_CRC1 0x0042 /* CRC engine input data */
+#define IRIF_CRC2 0x0044 /* CRC engine calculation */
+#define IRIF_CRC3 0x0046 /* CRC engine output data 1 */
+#define IRIF_CRC4 0x0048 /* CRC engine output data 2 */
+
+/* IRIF_SIR0 */
+#define IRTPW (1 << 1) /* transmit pulse width select */
+#define IRERRC (1 << 0) /* Clear receive pulse width error */
+
+/* IRIF_SIR3 */
+#define IRERR (1 << 0) /* received pulse width Error */
+
+/* IRIF_SIR_FRM */
+#define EOFD (1 << 9) /* EOF detection flag */
+#define FRER (1 << 8) /* Frame Error bit */
+#define FRP (1 << 0) /* Frame processing set */
+
+/* IRIF_UART_STS2 */
+#define IRSME (1 << 6) /* Receive Sum Error flag */
+#define IROVE (1 << 5) /* Receive Overrun Error flag */
+#define IRFRE (1 << 4) /* Receive Framing Error flag */
+#define IRPRE (1 << 3) /* Receive Parity Error flag */
+
+/* IRIF_UART0 */
+#define TBEC (1 << 2) /* Transmit Data Clear */
+#define RIE (1 << 1) /* Receive Enable */
+#define TIE (1 << 0) /* Transmit Enable */
+
+/* IRIF_UART1 */
+#define URSME (1 << 6) /* Receive Sum Error Flag */
+#define UROVE (1 << 5) /* Receive Overrun Error Flag */
+#define URFRE (1 << 4) /* Receive Framing Error Flag */
+#define URPRE (1 << 3) /* Receive Parity Error Flag */
+#define RBF (1 << 2) /* Receive Buffer Full Flag */
+#define TSBE (1 << 1) /* Transmit Shift Buffer Empty Flag */
+#define TBE (1 << 0) /* Transmit Buffer Empty flag */
+#define TBCOMP (TSBE | TBE)
+
+/* IRIF_UART5 */
+#define RSEIM (1 << 6) /* Receive Sum Error Flag IRQ Mask */
+#define RBFIM (1 << 2) /* Receive Buffer Full Flag IRQ Mask */
+#define TSBEIM (1 << 1) /* Transmit Shift Buffer Empty Flag IRQ Mask */
+#define TBEIM (1 << 0) /* Transmit Buffer Empty Flag IRQ Mask */
+#define RX_MASK (RSEIM | RBFIM)
+
+/* IRIF_CRC0 */
+#define CRC_RST (1 << 15) /* CRC Engine Reset */
+#define CRC_CT_MASK 0x0FFF
+
+/************************************************************************
+
+
+ structure
+
+
+************************************************************************/
+struct sh_sir_self {
+ void __iomem *membase;
+ unsigned int irq;
+ struct clk *clk;
+
+ struct net_device *ndev;
+
+ struct irlap_cb *irlap;
+ struct qos_info qos;
+
+ iobuff_t tx_buff;
+ iobuff_t rx_buff;
+};
+
+/************************************************************************
+
+
+ common function
+
+
+************************************************************************/
+static void sh_sir_write(struct sh_sir_self *self, u32 offset, u16 data)
+{
+ iowrite16(data, self->membase + offset);
+}
+
+static u16 sh_sir_read(struct sh_sir_self *self, u32 offset)
+{
+ return ioread16(self->membase + offset);
+}
+
+static void sh_sir_update_bits(struct sh_sir_self *self, u32 offset,
+ u16 mask, u16 data)
+{
+ u16 old, new;
+
+ old = sh_sir_read(self, offset);
+ new = (old & ~mask) | data;
+ if (old != new)
+ sh_sir_write(self, offset, new);
+}
+
+/************************************************************************
+
+
+ CRC function
+
+
+************************************************************************/
+static void sh_sir_crc_reset(struct sh_sir_self *self)
+{
+ sh_sir_write(self, IRIF_CRC0, CRC_RST);
+}
+
+static void sh_sir_crc_add(struct sh_sir_self *self, u8 data)
+{
+ sh_sir_write(self, IRIF_CRC1, (u16)data);
+}
+
+static u16 sh_sir_crc_cnt(struct sh_sir_self *self)
+{
+ return CRC_CT_MASK & sh_sir_read(self, IRIF_CRC0);
+}
+
+static u16 sh_sir_crc_out(struct sh_sir_self *self)
+{
+ return sh_sir_read(self, IRIF_CRC4);
+}
+
+static int sh_sir_crc_init(struct sh_sir_self *self)
+{
+ struct device *dev = &self->ndev->dev;
+ int ret = -EIO;
+ u16 val;
+
+ sh_sir_crc_reset(self);
+
+ sh_sir_crc_add(self, 0xCC);
+ sh_sir_crc_add(self, 0xF5);
+ sh_sir_crc_add(self, 0xF1);
+ sh_sir_crc_add(self, 0xA7);
+
+ val = sh_sir_crc_cnt(self);
+ if (4 != val) {
+ dev_err(dev, "CRC count error %x\n", val);
+ goto crc_init_out;
+ }
+
+ val = sh_sir_crc_out(self);
+ if (0x51DF != val) {
+ dev_err(dev, "CRC result error %x\n", val);
+ goto crc_init_out;
+ }
+
+ ret = 0;
+
+crc_init_out:
+
+ sh_sir_crc_reset(self);
+ return ret;
+}
+
+/************************************************************************
+
+
+ baud rate functions
+
+
+************************************************************************/
+#define SCLK_BASE 1843200 /* 1.8432MHz */
+
+static u32 sh_sir_find_sclk(struct clk *irda_clk)
+{
+ struct cpufreq_frequency_table *freq_table = irda_clk->freq_table;
+ struct clk *pclk = clk_get(NULL, "peripheral_clk");
+ u32 limit, min = 0xffffffff, tmp;
+ int i, index = 0;
+
+ limit = clk_get_rate(pclk);
+ clk_put(pclk);
+
+ /* the IrDA clock cannot be set above peripheral_clk */
+ for (i = 0;
+ freq_table[i].frequency != CPUFREQ_TABLE_END;
+ i++) {
+ u32 freq = freq_table[i].frequency;
+
+ if (freq == CPUFREQ_ENTRY_INVALID)
+ continue;
+
+ /* skip frequencies above peripheral_clk */
+ if (freq > limit)
+ continue;
+
+ tmp = freq % SCLK_BASE;
+ if (tmp < min) {
+ min = tmp;
+ index = i;
+ }
+ }
+
+ return freq_table[index].frequency;
+}
+
+#define ERR_ROUNDING(a) (((a) + 5000) / 10000)
+static int sh_sir_set_baudrate(struct sh_sir_self *self, u32 baudrate)
+{
+ struct clk *clk;
+ struct device *dev = &self->ndev->dev;
+ u32 rate;
+ u16 uabca, uabc;
+ u16 irbca, irbc;
+ u32 min, rerr, tmp;
+ int i;
+
+ /* Baud Rate Error Correction x 10000 */
+ u32 rate_err_array[] = {
+ 0000, 0625, 1250, 1875,
+ 2500, 3125, 3750, 4375,
+ 5000, 5625, 6250, 6875,
+ 7500, 8125, 8750, 9375,
+ };
+
+ /*
+ * FIXME
+ *
+ * only 9600 bps is supported for now
+ */
+ switch (baudrate) {
+ case 9600:
+ break;
+ default:
+ dev_err(dev, "unsupported baudrate %d\n", baudrate);
+ return -EIO;
+ }
+
+ clk = clk_get(NULL, "irda_clk");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "cannot get irda_clk\n");
+ return -EIO;
+ }
+
+ clk_set_rate(clk, sh_sir_find_sclk(clk));
+ rate = clk_get_rate(clk);
+ clk_put(clk);
+
+ dev_dbg(dev, "selected sclk = %d\n", rate);
+
+ /*
+ * CALCULATION
+ *
+ * 1843200 = system rate / (irbca + (irbc + 1))
+ */
+
+ irbc = rate / SCLK_BASE;
+
+ tmp = rate - (SCLK_BASE * irbc);
+ tmp *= 10000;
+
+ rerr = tmp / SCLK_BASE;
+
+ min = 0xffffffff;
+ irbca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ irbca = i;
+ }
+ }
+
+ tmp = rate / (irbc + ERR_ROUNDING(rate_err_array[irbca]));
+ if ((SCLK_BASE / 100) < abs(tmp - SCLK_BASE))
+ dev_warn(dev, "IrDA freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, infrared = %d.%d\n",
+ SCLK_BASE, tmp, irbc, rate_err_array[irbca]);
+
+ irbca = (irbca & 0xF) << 4;
+ irbc = (irbc - 1) & 0xF;
+
+ if (!irbc) {
+ dev_err(dev, "sh_sir can not set 0 in IRIF_SIR2\n");
+ return -EIO;
+ }
+
+ sh_sir_write(self, IRIF_SIR0, IRTPW | IRERRC);
+ sh_sir_write(self, IRIF_SIR1, irbca);
+ sh_sir_write(self, IRIF_SIR2, irbc);
+
+ /*
+ * CALCULATION
+ *
+ * BaudRate[bps] = system rate / (uabca + (uabc + 1) x 16)
+ */
+
+ uabc = rate / baudrate;
+ uabc = (uabc / 16) - 1;
+ uabc = (uabc + 1) * 16;
+
+ tmp = rate - (uabc * baudrate);
+ tmp *= 10000;
+
+ rerr = tmp / baudrate;
+
+ min = 0xffffffff;
+ uabca = 0;
+ for (i = 0; i < ARRAY_SIZE(rate_err_array); i++) {
+ tmp = abs(rate_err_array[i] - rerr);
+ if (min > tmp) {
+ min = tmp;
+ uabca = i;
+ }
+ }
+
+ tmp = rate / (uabc + ERR_ROUNDING(rate_err_array[uabca]));
+ if ((baudrate / 100) < abs(tmp - baudrate))
+ dev_warn(dev, "UART freq error margin over %d\n", tmp);
+
+ dev_dbg(dev, "target = %d, result = %d, uart = %d.%d\n",
+ baudrate, tmp,
+ uabc, rate_err_array[uabca]);
+
+ uabca = (uabca & 0xF) << 4;
+ uabc = (uabc / 16) - 1;
+
+ sh_sir_write(self, IRIF_UART6, uabca);
+ sh_sir_write(self, IRIF_UART7, uabc);
+
+ return 0;
+}
+
+/************************************************************************
+
+
+ iobuf function
+
+
+************************************************************************/
+static int __sh_sir_init_iobuf(iobuff_t *io, int size)
+{
+ io->head = kmalloc(size, GFP_KERNEL);
+ if (!io->head)
+ return -ENOMEM;
+
+ io->truesize = size;
+ io->in_frame = FALSE;
+ io->state = OUTSIDE_FRAME;
+ io->data = io->head;
+
+ return 0;
+}
+
+static void sh_sir_remove_iobuf(struct sh_sir_self *self)
+{
+ kfree(self->rx_buff.head);
+ kfree(self->tx_buff.head);
+
+ self->rx_buff.head = NULL;
+ self->tx_buff.head = NULL;
+}
+
+static int sh_sir_init_iobuf(struct sh_sir_self *self, int rxsize, int txsize)
+{
+ int err = -ENOMEM;
+
+ if (self->rx_buff.head ||
+ self->tx_buff.head) {
+ dev_err(&self->ndev->dev, "iobuf already exists\n");
+ return err;
+ }
+
+ err = __sh_sir_init_iobuf(&self->rx_buff, rxsize);
+ if (err)
+ goto iobuf_err;
+
+ err = __sh_sir_init_iobuf(&self->tx_buff, txsize);
+
+iobuf_err:
+ if (err)
+ sh_sir_remove_iobuf(self);
+
+ return err;
+}
+
+/************************************************************************
+
+
+ status function
+
+
+************************************************************************/
+static void sh_sir_clear_all_err(struct sh_sir_self *self)
+{
+ /* Clear error flag for receive pulse width */
+ sh_sir_update_bits(self, IRIF_SIR0, IRERRC, IRERRC);
+
+ /* Clear frame / EOF error flag */
+ sh_sir_write(self, IRIF_SIR_FLG, 0xffff);
+
+ /* Clear all status error */
+ sh_sir_write(self, IRIF_UART_STS2, 0);
+}
+
+static void sh_sir_set_phase(struct sh_sir_self *self, int phase)
+{
+ u16 uart5 = 0;
+ u16 uart0 = 0;
+
+ switch (phase) {
+ case TX_PHASE:
+ uart5 = TBEIM;
+ uart0 = TBEC | TIE;
+ break;
+ case TX_COMP_PHASE:
+ uart5 = TSBEIM;
+ uart0 = TIE;
+ break;
+ case RX_PHASE:
+ uart5 = RX_MASK;
+ uart0 = RIE;
+ break;
+ default:
+ break;
+ }
+
+ sh_sir_write(self, IRIF_UART5, uart5);
+ sh_sir_write(self, IRIF_UART0, uart0);
+}
+
+static int sh_sir_is_which_phase(struct sh_sir_self *self)
+{
+ u16 val = sh_sir_read(self, IRIF_UART5);
+
+ if (val & TBEIM)
+ return TX_PHASE;
+
+ if (val & TSBEIM)
+ return TX_COMP_PHASE;
+
+ if (val & RX_MASK)
+ return RX_PHASE;
+
+ return NONE_PHASE;
+}
+
+static void sh_sir_tx(struct sh_sir_self *self, int phase)
+{
+ switch (phase) {
+ case TX_PHASE:
+ if (0 >= self->tx_buff.len) {
+ sh_sir_set_phase(self, TX_COMP_PHASE);
+ } else {
+ sh_sir_write(self, IRIF_UART3, self->tx_buff.data[0]);
+ self->tx_buff.len--;
+ self->tx_buff.data++;
+ }
+ break;
+ case TX_COMP_PHASE:
+ sh_sir_set_phase(self, RX_PHASE);
+ netif_wake_queue(self->ndev);
+ break;
+ default:
+ dev_err(&self->ndev->dev, "should not happen\n");
+ break;
+ }
+}
+
+static int sh_sir_read_data(struct sh_sir_self *self)
+{
+ u16 val;
+ int timeout = 1024;
+
+ while (timeout--) {
+ val = sh_sir_read(self, IRIF_UART1);
+
+ /* data get */
+ if (val & RBF) {
+ if (val & (URSME | UROVE | URFRE | URPRE))
+ break;
+
+ return (int)sh_sir_read(self, IRIF_UART4);
+ }
+
+ udelay(1);
+ }
+
+ dev_err(&self->ndev->dev, "UART1 %04x : STATUS %04x\n",
+ val, sh_sir_read(self, IRIF_UART_STS2));
+
+ /* read the data register to clear the error condition */
+ sh_sir_read(self, IRIF_UART4);
+
+ return -1;
+}
+
+static void sh_sir_rx(struct sh_sir_self *self)
+{
+ int timeout = 1024;
+ int data;
+
+ while (timeout--) {
+ data = sh_sir_read_data(self);
+ if (data < 0)
+ break;
+
+ async_unwrap_char(self->ndev, &self->ndev->stats,
+ &self->rx_buff, (u8)data);
+ self->ndev->last_rx = jiffies;
+
+ if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
+ continue;
+
+ break;
+ }
+}
+
+static irqreturn_t sh_sir_irq(int irq, void *dev_id)
+{
+ struct sh_sir_self *self = dev_id;
+ struct device *dev = &self->ndev->dev;
+ int phase = sh_sir_is_which_phase(self);
+
+ switch (phase) {
+ case TX_COMP_PHASE:
+ case TX_PHASE:
+ sh_sir_tx(self, phase);
+ break;
+ case RX_PHASE:
+ if (sh_sir_read(self, IRIF_SIR3))
+ dev_err(dev, "rcv pulse width error occurred\n");
+
+ sh_sir_rx(self);
+ sh_sir_clear_all_err(self);
+ break;
+ default:
+ dev_err(dev, "unknown interrupt\n");
+ }
+
+ return IRQ_HANDLED;
+}
+
+/************************************************************************
+
+
+ net_device_ops function
+
+
+************************************************************************/
+static int sh_sir_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int speed = irda_get_next_speed(skb);
+
+ if ((0 < speed) &&
+ (9600 != speed)) {
+ dev_err(&ndev->dev, "only 9600 bps is supported (%d)\n", speed);
+ return -EIO;
+ }
+
+ netif_stop_queue(ndev);
+
+ self->tx_buff.data = self->tx_buff.head;
+ self->tx_buff.len = 0;
+ if (skb->len)
+ self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
+ self->tx_buff.truesize);
+
+ sh_sir_set_phase(self, TX_PHASE);
+ dev_kfree_skb(skb);
+
+ return 0;
+}
+
+static int sh_sir_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
+{
+ /*
+ * FIXME
+ *
+ * This function is required by the IrDA framework,
+ * but there is nothing to do here yet.
+ */
+ return 0;
+}
+
+static struct net_device_stats *sh_sir_stats(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ return &self->ndev->stats;
+}
+
+static int sh_sir_open(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+ int err;
+
+ clk_enable(self->clk);
+ err = sh_sir_crc_init(self);
+ if (err)
+ goto open_err;
+
+ sh_sir_set_baudrate(self, 9600);
+
+ self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
+ if (!self->irlap)
+ goto open_err;
+
+ /*
+ * Now enable the interrupt then start the queue
+ */
+ sh_sir_update_bits(self, IRIF_SIR_FRM, FRP, FRP);
+ sh_sir_read(self, IRIF_UART1); /* flag clear */
+ sh_sir_read(self, IRIF_UART4); /* flag clear */
+ sh_sir_set_phase(self, RX_PHASE);
+
+ netif_start_queue(ndev);
+
+ dev_info(&self->ndev->dev, "opened\n");
+
+ return 0;
+
+open_err:
+ clk_disable(self->clk);
+
+ return err;
+}
+
+static int sh_sir_stop(struct net_device *ndev)
+{
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ /* Stop IrLAP */
+ if (self->irlap) {
+ irlap_close(self->irlap);
+ self->irlap = NULL;
+ }
+
+ netif_stop_queue(ndev);
+
+ dev_info(&ndev->dev, "stopped\n");
+
+ return 0;
+}
+
+static const struct net_device_ops sh_sir_ndo = {
+ .ndo_open = sh_sir_open,
+ .ndo_stop = sh_sir_stop,
+ .ndo_start_xmit = sh_sir_hard_xmit,
+ .ndo_do_ioctl = sh_sir_ioctl,
+ .ndo_get_stats = sh_sir_stats,
+};
+
+/************************************************************************
+
+
+ platform_driver function
+
+
+************************************************************************/
+static int __devinit sh_sir_probe(struct platform_device *pdev)
+{
+ struct net_device *ndev;
+ struct sh_sir_self *self;
+ struct resource *res;
+ char clk_name[8];
+ void __iomem *base;
+ int irq;
+ int err = -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq = platform_get_irq(pdev, 0);
+ if (!res || irq < 0) {
+ dev_err(&pdev->dev, "Not enough platform resources.\n");
+ goto exit;
+ }
+
+ ndev = alloc_irdadev(sizeof(*self));
+ if (!ndev)
+ goto exit;
+
+ base = ioremap_nocache(res->start, resource_size(res));
+ if (!base) {
+ err = -ENXIO;
+ dev_err(&pdev->dev, "Unable to ioremap.\n");
+ goto err_mem_1;
+ }
+
+ self = netdev_priv(ndev);
+ err = sh_sir_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
+ if (err)
+ goto err_mem_2;
+
+ snprintf(clk_name, sizeof(clk_name), "irda%d", pdev->id);
+ self->clk = clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(self->clk)) {
+ dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
+ goto err_mem_3;
+ }
+
+ irda_init_max_qos_capabilies(&self->qos);
+
+ ndev->netdev_ops = &sh_sir_ndo;
+ ndev->irq = irq;
+
+ self->membase = base;
+ self->ndev = ndev;
+ self->qos.baud_rate.bits &= IR_9600; /* FIXME */
+ self->qos.min_turn_time.bits = 1; /* 10 ms or more */
+
+ irda_qos_bits_to_value(&self->qos);
+
+ err = register_netdev(ndev);
+ if (err)
+ goto err_mem_4;
+
+ platform_set_drvdata(pdev, ndev);
+
+ err = request_irq(irq, sh_sir_irq, IRQF_DISABLED, "sh_sir", self);
+ if (err) {
+ dev_warn(&pdev->dev, "Unable to attach sh_sir interrupt\n");
+ unregister_netdev(ndev);
+ goto err_mem_4;
+ }
+
+ dev_info(&pdev->dev, "SuperH IrDA probed\n");
+
+ goto exit;
+
+err_mem_4:
+ clk_put(self->clk);
+err_mem_3:
+ sh_sir_remove_iobuf(self);
+err_mem_2:
+ iounmap(self->membase);
+err_mem_1:
+ free_netdev(ndev);
+exit:
+ return err;
+}
+
+static int __devexit sh_sir_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sh_sir_self *self = netdev_priv(ndev);
+
+ if (!self)
+ return 0;
+
+ unregister_netdev(ndev);
+ clk_put(self->clk);
+ sh_sir_remove_iobuf(self);
+ iounmap(self->membase);
+ free_netdev(ndev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static struct platform_driver sh_sir_driver = {
+ .probe = sh_sir_probe,
+ .remove = __devexit_p(sh_sir_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+};
+
+static int __init sh_sir_init(void)
+{
+ return platform_driver_register(&sh_sir_driver);
+}
+
+static void __exit sh_sir_exit(void)
+{
+ platform_driver_unregister(&sh_sir_driver);
+}
+
+module_init(sh_sir_init);
+module_exit(sh_sir_exit);
+
+MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
+MODULE_DESCRIPTION("SuperH IrDA driver");
+MODULE_LICENSE("GPL");
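
sh_sir binds purely by platform device name and expects a memory resource covering the IRIF register block, an IRQ, and a clock named "irda<id>". Below is a hedged sketch of the board-side registration such a driver would need; the base address and IRQ number are invented, and only the standard platform-device API and the "sh_sir" name come from the driver above.

static struct resource example_sh_sir_resources[] = {
	[0] = {
		.start	= 0xa4500000,		/* made-up IRIF base address */
		.end	= 0xa4500000 + 0x49,	/* covers up to IRIF_CRC4 */
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 20,			/* made-up IRQ number */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_sh_sir_device = {
	.name		= "sh_sir",	/* must match DRIVER_NAME above */
	.id		= 0,		/* probe() then looks up clock "irda0" */
	.resource	= example_sh_sir_resources,
	.num_resources	= ARRAY_SIZE(example_sh_sir_resources),
};

/* from the board setup code: */
platform_device_register(&example_sh_sir_device);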
diff --git a/drivers/net/irda/via-ircc.c b/drivers/net/irda/via-ircc.c
index fddb4efd545..6533c010cf5 100644
--- a/drivers/net/irda/via-ircc.c
+++ b/drivers/net/irda/via-ircc.c
@@ -121,7 +121,7 @@ static void iodelay(int udelay)
}
}
-static struct pci_device_id via_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(via_pci_tbl) = {
{ PCI_VENDOR_ID_VIA, 0x8231, PCI_ANY_ID, PCI_ANY_ID,0,0,0 },
{ PCI_VENDOR_ID_VIA, 0x3109, PCI_ANY_ID, PCI_ANY_ID,0,0,1 },
{ PCI_VENDOR_ID_VIA, 0x3074, PCI_ANY_ID, PCI_ANY_ID,0,0,2 },
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index bd3c6b5ee76..209d4bcface 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -59,7 +59,7 @@ MODULE_LICENSE("GPL");
static /* const */ char drivername[] = DRIVER_NAME;
-static struct pci_device_id vlsi_irda_table [] = {
+static DEFINE_PCI_DEVICE_TABLE(vlsi_irda_table) = {
{
.class = PCI_CLASS_WIRELESS_IRDA << 8,
.class_mask = PCI_CLASS_SUBCLASS_MASK << 8,
diff --git a/drivers/net/isa-skeleton.c b/drivers/net/isa-skeleton.c
deleted file mode 100644
index 04d0502726c..00000000000
--- a/drivers/net/isa-skeleton.c
+++ /dev/null
@@ -1,718 +0,0 @@
-/* isa-skeleton.c: A network driver outline for linux.
- *
- * Written 1993-94 by Donald Becker.
- *
- * Copyright 1993 United States Government as represented by the
- * Director, National Security Agency.
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- * The author may be reached as becker@scyld.com, or C/O
- * Scyld Computing Corporation
- * 410 Severn Ave., Suite 210
- * Annapolis MD 21403
- *
- * This file is an outline for writing a network device driver for the
- * the Linux operating system.
- *
- * To write (or understand) a driver, have a look at the "loopback.c" file to
- * get a feel of what is going on, and then use the code below as a skeleton
- * for the new driver.
- *
- */
-
-static const char *version =
- "isa-skeleton.c:v1.51 9/24/94 Donald Becker (becker@cesdis.gsfc.nasa.gov)\n";
-
-/*
- * Sources:
- * List your sources of programming information to document that
- * the driver is your own creation, and give due credit to others
- * that contributed to the work. Remember that GNU project code
- * cannot use proprietary or trade secret information. Interface
- * definitions are generally considered non-copyrightable to the
- * extent that the same names and structures must be used to be
- * compatible.
- *
- * Finally, keep in mind that the Linux kernel is has an API, not
- * ABI. Proprietary object-code-only distributions are not permitted
- * under the GPL.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/fcntl.h>
-#include <linux/interrupt.h>
-#include <linux/ioport.h>
-#include <linux/in.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/spinlock.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <linux/bitops.h>
-
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/dma.h>
-
-/*
- * The name of the card. Is used for messages and in the requests for
- * io regions, irqs and dma channels
- */
-static const char* cardname = "netcard";
-
-/* First, a few definitions that the brave might change. */
-
-/* A zero-terminated list of I/O addresses to be probed. */
-static unsigned int netcard_portlist[] __initdata =
- { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x320, 0x340, 0};
-
-/* use 0 for production, 1 for verification, >2 for debug */
-#ifndef NET_DEBUG
-#define NET_DEBUG 2
-#endif
-static unsigned int net_debug = NET_DEBUG;
-
-/* The number of low I/O ports used by the ethercard. */
-#define NETCARD_IO_EXTENT 32
-
-#define MY_TX_TIMEOUT ((400*HZ)/1000)
-
-/* Information that need to be kept for each board. */
-struct net_local {
- struct net_device_stats stats;
- long open_time; /* Useless example local info. */
-
- /* Tx control lock. This protects the transmit buffer ring
- * state along with the "tx full" state of the driver. This
- * means all netif_queue flow control actions are protected
- * by this lock as well.
- */
- spinlock_t lock;
-};
-
-/* The station (ethernet) address prefix, used for IDing the board. */
-#define SA_ADDR0 0x00
-#define SA_ADDR1 0x42
-#define SA_ADDR2 0x65
-
-/* Index to functions, as function prototypes. */
-
-static int netcard_probe1(struct net_device *dev, int ioaddr);
-static int net_open(struct net_device *dev);
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev);
-static irqreturn_t net_interrupt(int irq, void *dev_id);
-static void net_rx(struct net_device *dev);
-static int net_close(struct net_device *dev);
-static struct net_device_stats *net_get_stats(struct net_device *dev);
-static void set_multicast_list(struct net_device *dev);
-static void net_tx_timeout(struct net_device *dev);
-
-
-/* Example routines you must write ;->. */
-#define tx_done(dev) 1
-static void hardware_send_packet(short ioaddr, char *buf, int length);
-static void chipset_init(struct net_device *dev, int startp);
-
-/*
- * Check for a network adaptor of this type, and return '0' iff one exists.
- * If dev->base_addr == 0, probe all likely locations.
- * If dev->base_addr == 1, always return failure.
- * If dev->base_addr == 2, allocate space for the device and return success
- * (detachable devices only).
- */
-static int __init do_netcard_probe(struct net_device *dev)
-{
- int i;
- int base_addr = dev->base_addr;
- int irq = dev->irq;
-
- if (base_addr > 0x1ff) /* Check a single specified location. */
- return netcard_probe1(dev, base_addr);
- else if (base_addr != 0) /* Don't probe at all. */
- return -ENXIO;
-
- for (i = 0; netcard_portlist[i]; i++) {
- int ioaddr = netcard_portlist[i];
- if (netcard_probe1(dev, ioaddr) == 0)
- return 0;
- dev->irq = irq;
- }
-
- return -ENODEV;
-}
-
-static void cleanup_card(struct net_device *dev)
-{
-#ifdef jumpered_dma
- free_dma(dev->dma);
-#endif
-#ifdef jumpered_interrupts
- free_irq(dev->irq, dev);
-#endif
- release_region(dev->base_addr, NETCARD_IO_EXTENT);
-}
-
-#ifndef MODULE
-struct net_device * __init netcard_probe(int unit)
-{
- struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
- int err;
-
- if (!dev)
- return ERR_PTR(-ENOMEM);
-
- sprintf(dev->name, "eth%d", unit);
- netdev_boot_setup_check(dev);
-
- err = do_netcard_probe(dev);
- if (err)
- goto out;
- return dev;
-out:
- free_netdev(dev);
- return ERR_PTR(err);
-}
-#endif
-
-static const struct net_device_ops netcard_netdev_ops = {
- .ndo_open = net_open,
- .ndo_stop = net_close,
- .ndo_start_xmit = net_send_packet,
- .ndo_get_stats = net_get_stats,
- .ndo_set_multicast_list = set_multicast_list,
- .ndo_tx_timeout = net_tx_timeout,
- .ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
- .ndo_change_mtu = eth_change_mtu,
-};
-
-/*
- * This is the real probe routine. Linux has a history of friendly device
- * probes on the ISA bus. A good device probes avoids doing writes, and
- * verifies that the correct device exists and functions.
- */
-static int __init netcard_probe1(struct net_device *dev, int ioaddr)
-{
- struct net_local *np;
- static unsigned version_printed;
- int i;
- int err = -ENODEV;
-
- /* Grab the region so that no one else tries to probe our ioports. */
- if (!request_region(ioaddr, NETCARD_IO_EXTENT, cardname))
- return -EBUSY;
-
- /*
- * For ethernet adaptors the first three octets of the station address
- * contains the manufacturer's unique code. That might be a good probe
- * method. Ideally you would add additional checks.
- */
- if (inb(ioaddr + 0) != SA_ADDR0 ||
- inb(ioaddr + 1) != SA_ADDR1 ||
- inb(ioaddr + 2) != SA_ADDR2)
- goto out;
-
- if (net_debug && version_printed++ == 0)
- printk(KERN_DEBUG "%s", version);
-
- printk(KERN_INFO "%s: %s found at %#3x, ", dev->name, cardname, ioaddr);
-
- /* Fill in the 'dev' fields. */
- dev->base_addr = ioaddr;
-
- /* Retrieve and print the ethernet address. */
- for (i = 0; i < 6; i++)
- dev->dev_addr[i] = inb(ioaddr + i);
-
- printk("%pM", dev->dev_addr);
-
- err = -EAGAIN;
-#ifdef jumpered_interrupts
- /*
- * If this board has jumpered interrupts, allocate the interrupt
- * vector now. There is no point in waiting since no other device
- * can use the interrupt, and this marks the irq as busy. Jumpered
- * interrupts are typically not reported by the boards, and we must
- * used autoIRQ to find them.
- */
-
- if (dev->irq == -1)
- ; /* Do nothing: a user-level program will set it. */
- else if (dev->irq < 2) { /* "Auto-IRQ" */
- unsigned long irq_mask = probe_irq_on();
- /* Trigger an interrupt here. */
-
- dev->irq = probe_irq_off(irq_mask);
- if (net_debug >= 2)
- printk(" autoirq is %d", dev->irq);
- } else if (dev->irq == 2)
- /*
- * Fixup for users that don't know that IRQ 2 is really
- * IRQ9, or don't know which one to set.
- */
- dev->irq = 9;
-
- {
- int irqval = request_irq(dev->irq, net_interrupt, 0, cardname, dev);
- if (irqval) {
- printk("%s: unable to get IRQ %d (irqval=%d).\n",
- dev->name, dev->irq, irqval);
- goto out;
- }
- }
-#endif /* jumpered interrupt */
-#ifdef jumpered_dma
- /*
- * If we use a jumpered DMA channel, that should be probed for and
- * allocated here as well. See lance.c for an example.
- */
- if (dev->dma == 0) {
- if (request_dma(dev->dma, cardname)) {
- printk("DMA %d allocation failed.\n", dev->dma);
- goto out1;
- } else
- printk(", assigned DMA %d.\n", dev->dma);
- } else {
- short dma_status, new_dma_status;
-
- /* Read the DMA channel status registers. */
- dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
- (inb(DMA2_STAT_REG) & 0xf0);
- /* Trigger a DMA request, perhaps pause a bit. */
- outw(0x1234, ioaddr + 8);
- /* Re-read the DMA status registers. */
- new_dma_status = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
- (inb(DMA2_STAT_REG) & 0xf0);
- /*
- * Eliminate the old and floating requests,
- * and DMA4 the cascade.
- */
- new_dma_status ^= dma_status;
- new_dma_status &= ~0x10;
- for (i = 7; i > 0; i--)
- if (test_bit(i, &new_dma_status)) {
- dev->dma = i;
- break;
- }
- if (i <= 0) {
- printk("DMA probe failed.\n");
- goto out1;
- }
- if (request_dma(dev->dma, cardname)) {
- printk("probed DMA %d allocation failed.\n", dev->dma);
- goto out1;
- }
- }
-#endif /* jumpered DMA */
-
- np = netdev_priv(dev);
- spin_lock_init(&np->lock);
-
- dev->netdev_ops = &netcard_netdev_ops;
- dev->watchdog_timeo = MY_TX_TIMEOUT;
-
- err = register_netdev(dev);
- if (err)
- goto out2;
- return 0;
-out2:
-#ifdef jumpered_dma
- free_dma(dev->dma);
-#endif
-out1:
-#ifdef jumpered_interrupts
- free_irq(dev->irq, dev);
-#endif
-out:
- release_region(base_addr, NETCARD_IO_EXTENT);
- return err;
-}
-
-static void net_tx_timeout(struct net_device *dev)
-{
- struct net_local *np = netdev_priv(dev);
-
- printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name,
- tx_done(dev) ? "IRQ conflict" : "network cable problem");
-
- /* Try to restart the adaptor. */
- chipset_init(dev, 1);
-
- np->stats.tx_errors++;
-
- /* If we have space available to accept new transmit
- * requests, wake up the queueing layer. This would
- * be the case if the chipset_init() call above just
- * flushes out the tx queue and empties it.
- *
- * If instead, the tx queue is retained then the
- * netif_wake_queue() call should be placed in the
- * TX completion interrupt handler of the driver instead
- * of here.
- */
- if (!tx_full(dev))
- netif_wake_queue(dev);
-}
-
-/*
- * Open/initialize the board. This is called (in the current kernel)
- * sometime after booting when the 'ifconfig' program is run.
- *
- * This routine should set everything up anew at each open, even
- * registers that "should" only need to be set once at boot, so that
- * there is non-reboot way to recover if something goes wrong.
- */
-static int
-net_open(struct net_device *dev)
-{
- struct net_local *np = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- /*
- * This is used if the interrupt line can turned off (shared).
- * See 3c503.c for an example of selecting the IRQ at config-time.
- */
- if (request_irq(dev->irq, net_interrupt, 0, cardname, dev)) {
- return -EAGAIN;
- }
- /*
- * Always allocate the DMA channel after the IRQ,
- * and clean up on failure.
- */
- if (request_dma(dev->dma, cardname)) {
- free_irq(dev->irq, dev);
- return -EAGAIN;
- }
-
- /* Reset the hardware here. Don't forget to set the station address. */
- chipset_init(dev, 1);
- outb(0x00, ioaddr);
- np->open_time = jiffies;
-
- /* We are now ready to accept transmit requeusts from
- * the queueing layer of the networking.
- */
- netif_start_queue(dev);
-
- return 0;
-}
-
-/* This will only be invoked if your driver is _not_ in XOFF state.
- * What this means is that you need not check it, and that this
- * invariant will hold if you make sure that the netif_*_queue()
- * calls are done at the proper times.
- */
-static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
-{
- struct net_local *np = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- short length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
- unsigned char *buf = skb->data;
-
- /* If some error occurs while trying to transmit this
- * packet, you should return '1' from this function.
- * In such a case you _may not_ do anything to the
- * SKB, it is still owned by the network queueing
- * layer when an error is returned. This means you
- * may not modify any SKB fields, you may not free
- * the SKB, etc.
- */
-
-#if TX_RING
- /* This is the most common case for modern hardware.
- * The spinlock protects this code from the TX complete
- * hardware interrupt handler. Queue flow control is
- * thus managed under this lock as well.
- */
- unsigned long flags;
- spin_lock_irqsave(&np->lock, flags);
-
- add_to_tx_ring(np, skb, length);
- dev->trans_start = jiffies;
-
- /* If we just used up the very last entry in the
- * TX ring on this device, tell the queueing
- * layer to send no more.
- */
- if (tx_full(dev))
- netif_stop_queue(dev);
-
- /* When the TX completion hw interrupt arrives, this
- * is when the transmit statistics are updated.
- */
-
- spin_unlock_irqrestore(&np->lock, flags);
-#else
- /* This is the case for older hardware which takes
- * a single transmit buffer at a time, and it is
- * just written to the device via PIO.
- *
- * No spin locking is needed since there is no TX complete
- * event. If by chance your card does have a TX complete
- * hardware IRQ then you may need to utilize np->lock here.
- */
- hardware_send_packet(ioaddr, buf, length);
- np->stats.tx_bytes += skb->len;
-
- dev->trans_start = jiffies;
-
- /* You might need to clean up and record Tx statistics here. */
- if (inw(ioaddr) == /*RU*/81)
- np->stats.tx_aborted_errors++;
- dev_kfree_skb (skb);
-#endif
-
- return NETDEV_TX_OK;
-}
-
-#if TX_RING
-/* This handles TX complete events posted by the device
- * via interrupts.
- */
-void net_tx(struct net_device *dev)
-{
- struct net_local *np = netdev_priv(dev);
- int entry;
-
- /* This protects us from concurrent execution of
- * our dev->hard_start_xmit function above.
- */
- spin_lock(&np->lock);
-
- entry = np->tx_old;
- while (tx_entry_is_sent(np, entry)) {
- struct sk_buff *skb = np->skbs[entry];
-
- np->stats.tx_bytes += skb->len;
- dev_kfree_skb_irq (skb);
-
- entry = next_tx_entry(np, entry);
- }
- np->tx_old = entry;
-
- /* If we had stopped the queue due to a "tx full"
- * condition, and space has now been made available,
- * wake up the queue.
- */
- if (netif_queue_stopped(dev) && ! tx_full(dev))
- netif_wake_queue(dev);
-
- spin_unlock(&np->lock);
-}
-#endif
-
-/*
- * The typical workload of the driver:
- * Handle the network interface interrupts.
- */
-static irqreturn_t net_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct net_local *np;
- int ioaddr, status;
- int handled = 0;
-
- ioaddr = dev->base_addr;
-
- np = netdev_priv(dev);
- status = inw(ioaddr + 0);
-
- if (status == 0)
- goto out;
- handled = 1;
-
- if (status & RX_INTR) {
- /* Got a packet(s). */
- net_rx(dev);
- }
-#if TX_RING
- if (status & TX_INTR) {
- /* Transmit complete. */
- net_tx(dev);
- np->stats.tx_packets++;
- netif_wake_queue(dev);
- }
-#endif
- if (status & COUNTERS_INTR) {
- /* Increment the appropriate 'localstats' field. */
- np->stats.tx_window_errors++;
- }
-out:
- return IRQ_RETVAL(handled);
-}
-
-/* We have a good packet(s), get it/them out of the buffers. */
-static void
-net_rx(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
- int boguscount = 10;
-
- do {
- int status = inw(ioaddr);
- int pkt_len = inw(ioaddr);
-
- if (pkt_len == 0) /* Read all the frames? */
- break; /* Done for now */
-
- if (status & 0x40) { /* There was an error. */
- lp->stats.rx_errors++;
- if (status & 0x20) lp->stats.rx_frame_errors++;
- if (status & 0x10) lp->stats.rx_over_errors++;
- if (status & 0x08) lp->stats.rx_crc_errors++;
- if (status & 0x04) lp->stats.rx_fifo_errors++;
- } else {
- /* Malloc up new buffer. */
- struct sk_buff *skb;
-
- lp->stats.rx_bytes+=pkt_len;
-
- skb = dev_alloc_skb(pkt_len);
- if (skb == NULL) {
- printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
- dev->name);
- lp->stats.rx_dropped++;
- break;
- }
- skb->dev = dev;
-
- /* 'skb->data' points to the start of sk_buff data area. */
- memcpy(skb_put(skb,pkt_len), (void*)dev->rmem_start,
- pkt_len);
- /* or */
- insw(ioaddr, skb->data, (pkt_len + 1) >> 1);
-
- netif_rx(skb);
- lp->stats.rx_packets++;
- lp->stats.rx_bytes += pkt_len;
- }
- } while (--boguscount);
-
- return;
-}
-
-/* The inverse routine to net_open(). */
-static int
-net_close(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- int ioaddr = dev->base_addr;
-
- lp->open_time = 0;
-
- netif_stop_queue(dev);
-
- /* Flush the Tx and disable Rx here. */
-
- disable_dma(dev->dma);
-
- /* If not IRQ or DMA jumpered, free up the line. */
- outw(0x00, ioaddr+0); /* Release the physical interrupt line. */
-
- free_irq(dev->irq, dev);
- free_dma(dev->dma);
-
- /* Update the statistics here. */
-
- return 0;
-
-}
-
-/*
- * Get the current statistics.
- * This may be called with the card open or closed.
- */
-static struct net_device_stats *net_get_stats(struct net_device *dev)
-{
- struct net_local *lp = netdev_priv(dev);
- short ioaddr = dev->base_addr;
-
- /* Update the statistics from the device registers. */
- lp->stats.rx_missed_errors = inw(ioaddr+1);
- return &lp->stats;
-}
-
-/*
- * Set or clear the multicast filter for this adaptor.
- * num_addrs == -1 Promiscuous mode, receive all packets
- * num_addrs == 0 Normal mode, clear multicast list
- * num_addrs > 0 Multicast mode, receive normal and MC packets,
- * and do best-effort filtering.
- */
-static void
-set_multicast_list(struct net_device *dev)
-{
- short ioaddr = dev->base_addr;
- if (dev->flags&IFF_PROMISC)
- {
- /* Enable promiscuous mode */
- outw(MULTICAST|PROMISC, ioaddr);
- }
- else if((dev->flags&IFF_ALLMULTI) || dev->mc_count > HW_MAX_ADDRS)
- {
- /* Disable promiscuous mode, use normal mode. */
- hardware_set_filter(NULL);
-
- outw(MULTICAST, ioaddr);
- }
- else if(dev->mc_count)
- {
- /* Walk the address list, and load the filter */
- hardware_set_filter(dev->mc_list);
-
- outw(MULTICAST, ioaddr);
- }
- else
- outw(0, ioaddr);
-}
-
-#ifdef MODULE
-
-static struct net_device *this_device;
-static int io = 0x300;
-static int irq;
-static int dma;
-static int mem;
-MODULE_LICENSE("GPL");
-
-int init_module(void)
-{
- struct net_device *dev;
- int result;
-
- if (io == 0)
- printk(KERN_WARNING "%s: You shouldn't use auto-probing with insmod!\n",
- cardname);
- dev = alloc_etherdev(sizeof(struct net_local));
- if (!dev)
- return -ENOMEM;
-
- /* Copy the parameters from insmod into the device structure. */
- dev->base_addr = io;
- dev->irq = irq;
- dev->dma = dma;
- dev->mem_start = mem;
- if (do_netcard_probe(dev) == 0) {
- this_device = dev;
- return 0;
- }
- free_netdev(dev);
- return -ENXIO;
-}
-
-void
-cleanup_module(void)
-{
- unregister_netdev(this_device);
- cleanup_card(this_device);
- free_netdev(this_device);
-}
-
-#endif /* MODULE */
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 16c91910d6c..966de5d6952 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -958,18 +958,17 @@ static void veth_set_multicast_list(struct net_device *dev)
write_lock_irqsave(&port->mcast_gate, flags);
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > VETH_MAX_MCAST)) {
+ (netdev_mc_count(dev) > VETH_MAX_MCAST)) {
port->promiscuous = 1;
} else {
- struct dev_mc_list *dmi = dev->mc_list;
- int i;
+ struct dev_mc_list *dmi;
port->promiscuous = 0;
/* Update table */
port->num_mcast = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
u8 *addr = dmi->dmi_addr;
u64 xaddr = 0;
@@ -978,7 +977,6 @@ static void veth_set_multicast_list(struct net_device *dev)
port->mcast_addr[port->num_mcast] = xaddr;
port->num_mcast++;
}
- dmi = dmi->next;
}
}
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index 5257ae08b9f..92d2e71d0c8 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -75,19 +75,14 @@ struct ixgb_adapter;
#include "ixgb_ee.h"
#include "ixgb_ids.h"
+#define PFX "ixgb: "
+
#ifdef _DEBUG_DRIVER_
-#define IXGB_DBG(args...) printk(KERN_DEBUG "ixgb: " args)
+#define IXGB_DBG(args...) printk(KERN_DEBUG PFX args)
#else
#define IXGB_DBG(args...)
#endif
-#define PFX "ixgb: "
-#define DPRINTK(nlevel, klevel, fmt, args...) \
- (void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
- printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
- __func__ , ## args))
-
-
/* TX/RX descriptor defines */
#define DEFAULT_TXD 256
#define MAX_TXD 4096
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 593d1a4f217..c9fef65cb98 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -50,7 +50,7 @@ MODULE_PARM_DESC(copybreak,
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgb_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgb_pci_tbl) = {
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{INTEL_VENDOR_ID, IXGB_DEVICE_ID_82597EX_CX4,
@@ -238,8 +238,8 @@ ixgb_up(struct ixgb_adapter *adapter)
if (err) {
if (adapter->have_msi)
pci_disable_msi(adapter->pdev);
- DPRINTK(PROBE, ERR,
- "Unable to allocate interrupt Error: %d\n", err);
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate interrupt Error: %d\n", err);
return err;
}
@@ -310,7 +310,7 @@ ixgb_reset(struct ixgb_adapter *adapter)
ixgb_adapter_stop(hw);
if (!ixgb_init_hw(hw))
- DPRINTK(PROBE, ERR, "ixgb_init_hw failed.\n");
+ netif_err(adapter, probe, adapter->netdev, "ixgb_init_hw failed\n");
/* restore frame size information */
IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
@@ -447,7 +447,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make sure the EEPROM is good */
if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
}
@@ -456,7 +457,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+ netif_err(adapter, probe, adapter->netdev, "Invalid MAC Address\n");
err = -EIO;
goto err_eeprom;
}
@@ -477,7 +478,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* carrier off reporting is important to ethtool even BEFORE open */
netif_carrier_off(netdev);
- DPRINTK(PROBE, INFO, "Intel(R) PRO/10GbE Network Connection\n");
+ netif_info(adapter, probe, adapter->netdev,
+ "Intel(R) PRO/10GbE Network Connection\n");
ixgb_check_options(adapter);
/* reset the hardware with the new settings */
@@ -552,14 +554,14 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
hw->max_frame_size = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
adapter->rx_buffer_len = hw->max_frame_size + 8; /* + 8 for errata */
- if ((hw->device_id == IXGB_DEVICE_ID_82597EX)
- || (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4)
- || (hw->device_id == IXGB_DEVICE_ID_82597EX_LR)
- || (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
+ if ((hw->device_id == IXGB_DEVICE_ID_82597EX) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_CX4) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_LR) ||
+ (hw->device_id == IXGB_DEVICE_ID_82597EX_SR))
hw->mac_type = ixgb_82597;
else {
/* should never have loaded on this device */
- DPRINTK(PROBE, ERR, "unsupported device id\n");
+ netif_err(adapter, probe, adapter->netdev, "unsupported device id\n");
}
/* enable flow control to be programmed */
@@ -661,8 +663,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * txdr->count;
txdr->buffer_info = vmalloc(size);
if (!txdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate transmit descriptor ring memory\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate transmit descriptor ring memory\n");
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
@@ -675,8 +677,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
if (!txdr->desc) {
vfree(txdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate transmit descriptor memory\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate transmit descriptor memory\n");
return -ENOMEM;
}
memset(txdr->desc, 0, txdr->size);
@@ -750,8 +752,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
size = sizeof(struct ixgb_buffer) * rxdr->count;
rxdr->buffer_info = vmalloc(size);
if (!rxdr->buffer_info) {
- DPRINTK(PROBE, ERR,
- "Unable to allocate receive descriptor ring\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate receive descriptor ring\n");
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
@@ -765,8 +767,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
if (!rxdr->desc) {
vfree(rxdr->buffer_info);
- DPRINTK(PROBE, ERR,
- "Unable to allocate receive descriptors\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Unable to allocate receive descriptors\n");
return -ENOMEM;
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1077,7 +1079,7 @@ ixgb_set_multi(struct net_device *netdev)
rctl |= IXGB_RCTL_VFE;
}
- if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
+ if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
@@ -1086,13 +1088,12 @@ ixgb_set_multi(struct net_device *netdev)
IXGB_WRITE_REG(hw, RCTL, rctl);
- for (i = 0, mc_ptr = netdev->mc_list;
- mc_ptr;
- i++, mc_ptr = mc_ptr->next)
- memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ memcpy(&mta[i++ * IXGB_ETH_LENGTH_OF_ADDRESS],
mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
- ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
+ ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
}
}
@@ -1580,7 +1581,8 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
/* MTU < 68 is an error for IPv4 traffic, just don't allow it */
if ((new_mtu < 68) ||
(max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
- DPRINTK(PROBE, ERR, "Invalid MTU setting %d\n", new_mtu);
+ netif_err(adapter, probe, adapter->netdev,
+ "Invalid MTU setting %d\n", new_mtu);
return -EINVAL;
}
@@ -1616,7 +1618,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
return;
if ((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
+ (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
@@ -1854,24 +1856,25 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
&& !(IXGB_READ_REG(&adapter->hw, STATUS) &
IXGB_STATUS_TXOFF)) {
/* detected Tx unit hang */
- DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
- " TDH <%x>\n"
- " TDT <%x>\n"
- " next_to_use <%x>\n"
- " next_to_clean <%x>\n"
- "buffer_info[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " next_to_watch <%x>\n"
- " jiffies <%lx>\n"
- " next_to_watch.status <%x>\n",
- IXGB_READ_REG(&adapter->hw, TDH),
- IXGB_READ_REG(&adapter->hw, TDT),
- tx_ring->next_to_use,
- tx_ring->next_to_clean,
- tx_ring->buffer_info[eop].time_stamp,
- eop,
- jiffies,
- eop_desc->status);
+ netif_err(adapter, drv, adapter->netdev,
+ "Detected Tx Unit Hang\n"
+ " TDH <%x>\n"
+ " TDT <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " next_to_watch <%x>\n"
+ " jiffies <%lx>\n"
+ " next_to_watch.status <%x>\n",
+ IXGB_READ_REG(&adapter->hw, TDH),
+ IXGB_READ_REG(&adapter->hw, TDT),
+ tx_ring->next_to_use,
+ tx_ring->next_to_clean,
+ tx_ring->buffer_info[eop].time_stamp,
+ eop,
+ jiffies,
+ eop_desc->status);
netif_stop_queue(netdev);
}
}
@@ -2269,7 +2272,8 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
struct ixgb_adapter *adapter = netdev_priv(netdev);
if (pci_enable_device(pdev)) {
- DPRINTK(PROBE, ERR, "Cannot re-enable PCI device after reset.\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "Cannot re-enable PCI device after reset\n");
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -2285,14 +2289,16 @@ static pci_ers_result_t ixgb_io_slot_reset(struct pci_dev *pdev)
/* Make sure the EEPROM is good */
if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
- DPRINTK(PROBE, ERR, "After reset, the EEPROM checksum is not valid.\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "After reset, the EEPROM checksum is not valid\n");
return PCI_ERS_RESULT_DISCONNECT;
}
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
if (!is_valid_ether_addr(netdev->perm_addr)) {
- DPRINTK(PROBE, ERR, "After reset, invalid MAC address.\n");
+ netif_err(adapter, probe, adapter->netdev,
+ "After reset, invalid MAC address\n");
return PCI_ERS_RESULT_DISCONNECT;
}
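
The ixgb hunks above replace the driver-private DPRINTK() macro with the generic netif_err()/netif_info() helpers from <linux/netdevice.h>, which gate output on the NETIF_MSG_* bits in adapter->msg_enable just as the removed macro did. A minimal sketch of the pattern follows; the adapter structure shown is a made-up stand-in carrying only the two fields the helpers rely on.

#include <linux/netdevice.h>

struct example_adapter {
	struct net_device *netdev;
	u32 msg_enable;		/* NETIF_MSG_* bitmask, e.g. set via ethtool */
};

static void example_report_probe_error(struct example_adapter *adapter, int err)
{
	/* prints only when NETIF_MSG_PROBE is set in adapter->msg_enable */
	netif_err(adapter, probe, adapter->netdev,
		  "Unable to allocate interrupt Error: %d\n", err);
}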
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index bfef0ebcba9..8f81efb4916 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -33,7 +33,8 @@
obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o
+ ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
+ ixgbe_mbx.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 303e7bd39b6..19e94ee155a 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -98,6 +98,22 @@
#define IXGBE_MAX_RSC_INT_RATE 162760
+#define IXGBE_MAX_VF_MC_ENTRIES 30
+#define IXGBE_MAX_VF_FUNCTIONS 64
+#define IXGBE_MAX_VFTA_ENTRIES 128
+#define MAX_EMULATION_MAC_ADDRS 16
+#define VMDQ_P(p) ((p) + adapter->num_vfs)
+
+struct vf_data_storage {
+ unsigned char vf_mac_addresses[ETH_ALEN];
+ u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
+ u16 num_vf_mc_hashes;
+ u16 default_vf_vlan_id;
+ u16 vlans_enabled;
+ bool clear_to_send;
+ int rar;
+};
+
/* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
@@ -159,6 +175,7 @@ struct ixgbe_ring {
struct ixgbe_queue_stats stats;
unsigned long reinit_state;
+ int numa_node;
u64 rsc_count; /* stat for coalesced packets */
u64 rsc_flush; /* stats for flushed packets */
u32 restart_queue; /* track tx queue restarts */
@@ -171,7 +188,7 @@ struct ixgbe_ring {
enum ixgbe_ring_f_enum {
RING_F_NONE = 0,
RING_F_DCB,
- RING_F_VMDQ,
+ RING_F_VMDQ, /* SR-IOV uses the same ring feature */
RING_F_RSS,
RING_F_FDIR,
#ifdef IXGBE_FCOE
@@ -183,7 +200,7 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_DCB_INDICES 8
#define IXGBE_MAX_RSS_INDICES 16
-#define IXGBE_MAX_VMDQ_INDICES 16
+#define IXGBE_MAX_VMDQ_INDICES 64
#define IXGBE_MAX_FDIR_INDICES 64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
@@ -277,7 +294,7 @@ struct ixgbe_adapter {
u16 eitr_high;
/* TX */
- struct ixgbe_ring *tx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
int num_tx_queues;
u32 tx_timeout_count;
bool detect_tx_hung;
@@ -286,8 +303,10 @@ struct ixgbe_adapter {
u64 lsc_int;
/* RX */
- struct ixgbe_ring *rx_ring ____cacheline_aligned_in_smp; /* One per active queue */
+ struct ixgbe_ring *rx_ring[MAX_RX_QUEUES] ____cacheline_aligned_in_smp;
int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
u64 hw_csum_rx_error;
u64 hw_rx_no_dma_resources;
u64 non_eop_descs;
@@ -323,13 +342,14 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 19)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 20)
#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 22)
-#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 23)
-#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 24)
-#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 25)
-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 26)
-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 27)
-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 28)
-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 29)
+#define IXGBE_FLAG_IN_SFP_LINK_TASK (u32)(1 << 23)
+#define IXGBE_FLAG_IN_SFP_MOD_TASK (u32)(1 << 24)
+#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 25)
+#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 26)
+#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 27)
+#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 28)
+#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 29)
+#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 30)
u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1)
@@ -379,6 +399,13 @@ struct ixgbe_adapter {
u64 rsc_total_flush;
u32 wol;
u16 eeprom_version;
+
+ int node;
+
+ /* SR-IOV */
+ DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
+ unsigned int num_vfs;
+ struct vf_data_storage *vfinfo;
};
enum ixbge_state_t {
@@ -426,6 +453,10 @@ extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
struct ixgbe_atr_input *input,
u8 queue);
+extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue);
extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input,
u16 vlan_id);
extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input,
@@ -440,6 +471,7 @@ extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
u16 flex_byte);
extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
u8 l4type);
+extern void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_adapter *adapter,
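
ixgbe.h now tracks SR-IOV state with a per-VF vf_data_storage array and an active_vfs bitmap sized by IXGBE_MAX_VF_FUNCTIONS. A hedged sketch of walking that bitmap with the stock bitops helpers is below; example_notify_vf() is an invented placeholder, not a function from this patch.

#include <linux/bitops.h>

static void example_for_each_active_vf(struct ixgbe_adapter *adapter)
{
	unsigned int vf;

	for (vf = 0; vf < IXGBE_MAX_VF_FUNCTIONS; vf++) {
		if (!test_bit(vf, adapter->active_vfs))
			continue;

		/* e.g. poke the per-VF mailbox; placeholder call */
		example_notify_vf(adapter, vf);
	}
}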
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index b49bd6b9feb..1f30e163bd9 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -31,6 +31,7 @@
#include "ixgbe.h"
#include "ixgbe_phy.h"
+#include "ixgbe_mbx.h"
#define IXGBE_82599_MAX_TX_QUEUES 128
#define IXGBE_82599_MAX_RX_QUEUES 128
@@ -889,7 +890,7 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
s32 status = 0;
- u32 ctrl, ctrl_ext;
+ u32 ctrl;
u32 i;
u32 autoc;
u32 autoc2;
@@ -944,15 +945,9 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
status = IXGBE_ERR_RESET_FAILED;
hw_dbg(hw, "Reset polling failed to complete.\n");
}
- /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
- ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
- ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
- IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
msleep(50);
-
-
/*
* Store the original AUTOC/AUTOC2 values if they have not been
* stored off yet. Otherwise restore the stored original
@@ -1095,9 +1090,11 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
bool vlan_on)
{
u32 regindex;
+ u32 vlvf_index;
u32 bitindex;
u32 bits;
u32 first_empty_slot;
+ u32 vt_ctl;
if (vlan > 4095)
return IXGBE_ERR_PARAM;
@@ -1124,76 +1121,84 @@ static s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
/* Part 2
- * If the vind is set
+ * If VT mode is set
* Either vlan_on
* make sure the vlan is in VLVF
* set the vind bit in the matching VLVFB
* Or !vlan_on
* clear the pool bit and possibly the vind
*/
- if (vind) {
- /* find the vlanid or the first empty slot */
- first_empty_slot = 0;
-
- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
- bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
- if (!bits && !first_empty_slot)
- first_empty_slot = regindex;
- else if ((bits & 0x0FFF) == vlan)
- break;
- }
+ vt_ctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ if (!(vt_ctl & IXGBE_VT_CTL_VT_ENABLE))
+ goto out;
- if (regindex >= IXGBE_VLVF_ENTRIES) {
- if (first_empty_slot)
- regindex = first_empty_slot;
- else {
- hw_dbg(hw, "No space in VLVF.\n");
- goto out;
- }
+ /* find the vlanid or the first empty slot */
+ first_empty_slot = 0;
+
+ for (vlvf_index = 1; vlvf_index < IXGBE_VLVF_ENTRIES; vlvf_index++) {
+ bits = IXGBE_READ_REG(hw, IXGBE_VLVF(vlvf_index));
+ if (!bits && !first_empty_slot)
+ first_empty_slot = vlvf_index;
+ else if ((bits & 0x0FFF) == vlan)
+ break;
+ }
+
+ if (vlvf_index >= IXGBE_VLVF_ENTRIES) {
+ if (first_empty_slot)
+ vlvf_index = first_empty_slot;
+ else {
+ hw_dbg(hw, "No space in VLVF.\n");
+ goto out;
}
+ }
- if (vlan_on) {
- /* set the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits |= (1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- }
+ if (vlan_on) {
+ /* set the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
+ bits |= (1 << vind);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
} else {
- /* clear the pool bit */
- if (vind < 32) {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits |= (1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ }
+ } else {
+ /* clear the pool bit */
+ if (vind < 32) {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB(regindex * 2), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- } else {
- bits = IXGBE_READ_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1));
- bits &= ~(1 << vind);
- IXGBE_WRITE_REG(hw,
- IXGBE_VLVFB((regindex * 2) + 1), bits);
- bits |= IXGBE_READ_REG(hw,
- IXGBE_VLVFB(regindex * 2));
- }
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ } else {
+ bits = IXGBE_READ_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1));
+ bits &= ~(1 << (vind - 32));
+ IXGBE_WRITE_REG(hw,
+ IXGBE_VLVFB((vlvf_index * 2) + 1), bits);
+ bits |= IXGBE_READ_REG(hw,
+ IXGBE_VLVFB(vlvf_index * 2));
}
+ }
- if (bits)
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
- (IXGBE_VLVF_VIEN | vlan));
- else
- IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
+ if (bits) {
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index),
+ (IXGBE_VLVF_VIEN | vlan));
+ /* if bits is non-zero then some pools/VFs are still
+ * using this VLAN ID. Force the VFTA entry to on */
+ bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
+ bits |= (1 << bitindex);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);
}
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
out:
return 0;
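
The rewritten "Part 2" above addresses the pool-enable bits as two 32-bit words per VLVF entry. A minimal sketch of that addressing, with the helper and parameter names invented here purely for illustration:

	/* Sketch only: each VLVF entry i owns two pool-enable words,
	 * VLVFB(2*i) for pools 0-31 and VLVFB(2*i + 1) for pools 32-63.
	 */
	static inline void vlvfb_locate(u32 vlvf_index, u32 vind,
					u32 *reg, u32 *bit)
	{
		if (vind < 32) {
			*reg = vlvf_index * 2;
			*bit = 1 << vind;
		} else {
			*reg = (vlvf_index * 2) + 1;
			*bit = 1 << (vind - 32);
		}
	}
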
@@ -1434,6 +1439,9 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
/* Send interrupt when 64 filters are left */
fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT;
+ /* Initialize the drop queue to Rx queue 127 */
+ fdirctrl |= (127 << IXGBE_FDIRCTRL_DROP_Q_SHIFT);
+
switch (pballoc) {
case IXGBE_FDIR_PBALLOC_64K:
/* 2k - 1 perfect filters */
@@ -1675,8 +1683,8 @@ s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
* @src_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
- u32 src_addr_1, u32 src_addr_2,
- u32 src_addr_3, u32 src_addr_4)
+ u32 src_addr_1, u32 src_addr_2,
+ u32 src_addr_3, u32 src_addr_4)
{
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
@@ -1718,8 +1726,8 @@ s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
* @dst_addr_4: the fourth 4 bytes of the IP address to load
**/
s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
- u32 dst_addr_1, u32 dst_addr_2,
- u32 dst_addr_3, u32 dst_addr_4)
+ u32 dst_addr_1, u32 dst_addr_2,
+ u32 dst_addr_3, u32 dst_addr_4)
{
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
@@ -1797,7 +1805,7 @@ s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
* @vm_pool: the Virtual Machine pool to load
**/
s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input,
- u8 vm_pool)
+ u8 vm_pool)
{
input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool;
@@ -1821,8 +1829,7 @@ s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
* @input: input stream to search
* @vlan: the VLAN id to load
**/
-static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input,
- u16 *vlan)
+static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
{
*vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
*vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
@@ -2078,23 +2085,26 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
* ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
* @hw: pointer to hardware structure
* @input: input bitstream
+ * @input_masks: bitwise masks for relevant fields
+ * @soft_id: software index into the silicon hash tables for filter storage
* @queue: queue index to direct traffic to
*
* Note that the caller to this function must lock before calling, since the
* hardware writes must be protected from one another.
**/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
- struct ixgbe_atr_input *input,
- u16 soft_id,
- u8 queue)
+ struct ixgbe_atr_input *input,
+ struct ixgbe_atr_input_masks *input_masks,
+ u16 soft_id, u8 queue)
{
u32 fdircmd = 0;
u32 fdirhash;
- u32 src_ipv4, dst_ipv4;
+ u32 src_ipv4 = 0, dst_ipv4 = 0;
u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
u16 src_port, dst_port, vlan_id, flex_bytes;
u16 bucket_hash;
u8 l4type;
+ u8 fdirm = 0;
/* Get our input values */
ixgbe_atr_get_l4type_82599(input, &l4type);
@@ -2149,7 +2159,6 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
/* IPv4 */
ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
-
}
ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
@@ -2158,7 +2167,78 @@ s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
(flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
- (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+ (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
+
+ /*
+ * Program the relevant mask registers. If src/dst_port or src/dst_addr
+ * are zero, then assume a full mask for that field. Also assume that
+ * a VLAN of 0 is unspecified, so mask that out as well. L4type
+ * cannot be masked out in this implementation.
+ *
+ * This also assumes IPv4 only. IPv6 masking isn't supported at this
+ * point in time.
+ */
+ if (src_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask);
+
+ if (dst_ipv4 == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, 0xffffffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask);
+
+ switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
+ case IXGBE_ATR_L4TYPE_TCP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) |
+ (input_masks->dst_port_mask << 16)));
+ break;
+ case IXGBE_ATR_L4TYPE_UDP:
+ if (src_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xffff);
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ input_masks->src_port_mask);
+
+ if (dst_port == 0)
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (0xffff << 16)));
+ else
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM,
+ (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
+ (input_masks->dst_port_mask << 16)));
+ break;
+ default:
+ /* this already would have failed above */
+ break;
+ }
+
+ /* Program the last mask register, FDIRM */
+ if (input_masks->vlan_id_mask || !vlan_id)
+ /* Mask both VLAN and VLANP - bits 0 and 1 */
+ fdirm |= 0x3;
+
+ if (input_masks->data_mask || !flex_bytes)
+ /* Flex bytes need masking, so mask the whole thing - bit 4 */
+ fdirm |= 0x10;
+
+ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
+ fdirm |= 0x24;
+
+ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
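
The FDIRM programming above reduces to one decision per field: a field with a user-supplied mask, or with no value at all, is excluded from perfect-filter matching. A sketch of that computation (the function name is ours; bit values follow the hunk):

	/* Sketch only: FDIRM bits 0/1 mask VLAN id and priority, bit 2 the
	 * destination IPv6 address, bit 4 the flex bytes, bit 5 the VM pool.
	 */
	static u8 fdirm_for_filter(bool mask_vlan, bool mask_flex)
	{
		u8 fdirm = 0;

		if (mask_vlan)
			fdirm |= 0x3;	/* VLAN and VLANP */
		if (mask_flex)
			fdirm |= 0x10;	/* flex bytes */
		fdirm |= 0x24;		/* VM pool and dst IPv6, always masked */

		return fdirm;
	}
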
@@ -2655,4 +2735,5 @@ struct ixgbe_info ixgbe_82599_info = {
.mac_ops = &mac_ops_82599,
.eeprom_ops = &eeprom_ops_82599,
.phy_ops = &phy_ops_82599,
+ .mbx_ops = &mbx_ops_82599,
};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index 21f158f79dd..eb49020903c 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -28,7 +28,6 @@
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/sched.h>
-#include <linux/list.h>
#include <linux/netdevice.h>
#include "ixgbe.h"
@@ -1278,19 +1277,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw)
/* Get the MAC address from the RAR0 for later reference */
hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
- hw_dbg(hw, " Keeping Current RAR0 Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr);
} else {
/* Setup the receive address. */
hw_dbg(hw, "Overriding MAC Address in RAR[0]\n");
- hw_dbg(hw, " New MAC Addr =%.2X %.2X %.2X ",
- hw->mac.addr[0], hw->mac.addr[1],
- hw->mac.addr[2]);
- hw_dbg(hw, "%.2X %.2X %.2X\n", hw->mac.addr[3],
- hw->mac.addr[4], hw->mac.addr[5]);
+ hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr);
hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
}
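
The collapsed hw_dbg() calls rely on the kernel's %pM extension, which formats a six-byte Ethernet address in a single conversion; a minimal fragment as a sketch:

	/* Sketch only: %pM prints mac as "00:1b:21:12:34:56". */
	u8 mac[6] = { 0x00, 0x1b, 0x21, 0x12, 0x34, 0x56 };

	printk(KERN_DEBUG " Keeping Current RAR0 Addr =%pM\n", mac);
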
@@ -1355,7 +1346,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
/**
* ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses
* @hw: pointer to hardware structure
- * @uc_list: the list of new addresses
+ * @netdev: pointer to net device structure
*
* The given list replaces any existing list. Clears the secondary addrs from
* receive address registers. Uses unused receive address registers for the
@@ -1365,7 +1356,7 @@ static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
* manually putting the device into promiscuous mode.
**/
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct list_head *uc_list)
+ struct net_device *netdev)
{
u32 i;
u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc;
@@ -1389,7 +1380,7 @@ s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
}
/* Add the new addresses */
- list_for_each_entry(ha, uc_list, list) {
+ netdev_for_each_uc_addr(ha, netdev) {
hw_dbg(hw, " Adding the secondary addresses:\n");
ixgbe_add_uc_addr(hw, ha->addr, 0);
}
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index dfff0ffaa50..13606d4809c 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -60,7 +60,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list,
u32 mc_addr_count,
ixgbe_mc_addr_itr func);
s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw,
- struct list_head *uc_list);
+ struct net_device *netdev);
s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval);
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index d77961fc75f..7949a446e4c 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -441,10 +441,8 @@ static int ixgbe_set_tso(struct net_device *netdev, u32 data)
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
} else {
- netif_tx_stop_all_queues(netdev);
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
- netif_tx_start_all_queues(netdev);
}
return 0;
}
@@ -834,8 +832,8 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_ring *tx_ring = adapter->tx_ring;
- struct ixgbe_ring *rx_ring = adapter->rx_ring;
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
ring->rx_max_pending = IXGBE_MAX_RXD;
ring->tx_max_pending = IXGBE_MAX_TXD;
@@ -867,8 +865,8 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
- if ((new_tx_count == adapter->tx_ring->count) &&
- (new_rx_count == adapter->rx_ring->count)) {
+ if ((new_tx_count == adapter->tx_ring[0]->count) &&
+ (new_rx_count == adapter->rx_ring[0]->count)) {
/* nothing to do */
return 0;
}
@@ -878,25 +876,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].count = new_tx_count;
+ adapter->tx_ring[i]->count = new_tx_count;
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].count = new_rx_count;
+ adapter->rx_ring[i]->count = new_rx_count;
adapter->tx_ring_count = new_tx_count;
adapter->rx_ring_count = new_rx_count;
- goto err_setup;
+ goto clear_reset;
}
- temp_tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
+ temp_tx_ring = vmalloc(adapter->num_tx_queues * sizeof(struct ixgbe_ring));
if (!temp_tx_ring) {
err = -ENOMEM;
- goto err_setup;
+ goto clear_reset;
}
if (new_tx_count != adapter->tx_ring_count) {
- memcpy(temp_tx_ring, adapter->tx_ring,
- adapter->num_tx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_tx_queues; i++) {
+ memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_tx_ring[i].count = new_tx_count;
err = ixgbe_setup_tx_resources(adapter,
&temp_tx_ring[i]);
@@ -904,28 +901,24 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
while (i) {
i--;
ixgbe_free_tx_resources(adapter,
- &temp_tx_ring[i]);
+ &temp_tx_ring[i]);
}
- goto err_setup;
+ goto clear_reset;
}
}
need_update = true;
}
- temp_rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if ((!temp_rx_ring) && (need_update)) {
- for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_free_tx_resources(adapter, &temp_tx_ring[i]);
- kfree(temp_tx_ring);
+ temp_rx_ring = vmalloc(adapter->num_rx_queues * sizeof(struct ixgbe_ring));
+ if (!temp_rx_ring) {
err = -ENOMEM;
goto err_setup;
}
if (new_rx_count != adapter->rx_ring_count) {
- memcpy(temp_rx_ring, adapter->rx_ring,
- adapter->num_rx_queues * sizeof(struct ixgbe_ring));
for (i = 0; i < adapter->num_rx_queues; i++) {
+ memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
+ sizeof(struct ixgbe_ring));
temp_rx_ring[i].count = new_rx_count;
err = ixgbe_setup_rx_resources(adapter,
&temp_rx_ring[i]);
@@ -947,22 +940,32 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
/* tx */
if (new_tx_count != adapter->tx_ring_count) {
- kfree(adapter->tx_ring);
- adapter->tx_ring = temp_tx_ring;
- temp_tx_ring = NULL;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ ixgbe_free_tx_resources(adapter,
+ adapter->tx_ring[i]);
+ memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->tx_ring_count = new_tx_count;
}
/* rx */
if (new_rx_count != adapter->rx_ring_count) {
- kfree(adapter->rx_ring);
- adapter->rx_ring = temp_rx_ring;
- temp_rx_ring = NULL;
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ ixgbe_free_rx_resources(adapter,
+ adapter->rx_ring[i]);
+ memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
+ sizeof(struct ixgbe_ring));
+ }
adapter->rx_ring_count = new_rx_count;
}
ixgbe_up(adapter);
}
+
+ vfree(temp_rx_ring);
err_setup:
+ vfree(temp_tx_ring);
+clear_reset:
clear_bit(__IXGBE_RESETTING, &adapter->state);
return err;
}
@@ -974,6 +977,9 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
return IXGBE_TEST_LEN;
case ETH_SS_STATS:
return IXGBE_STATS_LEN;
+ case ETH_SS_NTUPLE_FILTERS:
+ return (ETHTOOL_MAX_NTUPLE_LIST_ENTRY *
+ ETHTOOL_MAX_NTUPLE_STRING_PER_ENTRY);
default:
return -EOPNOTSUPP;
}
@@ -1007,13 +1013,13 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < adapter->num_tx_queues; j++) {
- queue_stat = (u64 *)&adapter->tx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->tx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
}
for (j = 0; j < adapter->num_rx_queues; j++) {
- queue_stat = (u64 *)&adapter->rx_ring[j].stats;
+ queue_stat = (u64 *)&adapter->rx_ring[j]->stats;
for (k = 0; k < stat_count; k++)
data[i + k] = queue_stat[k];
i += k;
@@ -1627,7 +1633,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
reg_data |= IXGBE_RXDCTL_ENABLE;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(0), reg_data);
if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- int j = adapter->rx_ring[0].reg_idx;
+ int j = adapter->rx_ring[0]->reg_idx;
u32 k;
for (k = 0; k < 10; k++) {
if (IXGBE_READ_REG(&adapter->hw,
@@ -1867,11 +1873,22 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (ixgbe_intr_test(adapter, &data[2]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ /* If SRIOV or VMDq is enabled then skip MAC
+ * loopback diagnostic. */
+ if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+ IXGBE_FLAG_VMDQ_ENABLED)) {
+ DPRINTK(HW, INFO, "Skip MAC loopback diagnostic in VT "
+ "mode\n");
+ data[3] = 0;
+ goto skip_loopback;
+ }
+
ixgbe_reset(adapter);
DPRINTK(HW, INFO, "loopback testing starting\n");
if (ixgbe_loopback_test(adapter, &data[3]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+skip_loopback:
ixgbe_reset(adapter);
clear_bit(__IXGBE_TESTING, &adapter->state);
@@ -2000,7 +2017,7 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;
+ ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0]->work_limit;
/* only valid if in constant ITR mode */
switch (adapter->rx_itr_setting) {
@@ -2053,7 +2070,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
return -EINVAL;
if (ec->tx_max_coalesced_frames_irq)
- adapter->tx_ring[0].work_limit = ec->tx_max_coalesced_frames_irq;
+ adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
if (ec->rx_coalesce_usecs > 1) {
/* check the limits */
@@ -2134,23 +2151,124 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
static int ixgbe_set_flags(struct net_device *netdev, u32 data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ bool need_reset = false;
ethtool_op_set_flags(netdev, data);
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
- return 0;
-
/* if state changes we need to update adapter->flags and reset */
if ((!!(data & ETH_FLAG_LRO)) !=
(!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
+ need_reset = true;
+ }
+
+ /*
+ * Check if Flow Director n-tuple support was enabled or disabled. If
+ * the state changed, we need to reset.
+ */
+ if ((adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) &&
+ (!(data & ETH_FLAG_NTUPLE))) {
+ /* turn off Flow Director perfect, set hash and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ need_reset = true;
+ } else if ((!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) &&
+ (data & ETH_FLAG_NTUPLE)) {
+ /* turn off Flow Director hash, enable perfect and reset */
+ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ need_reset = true;
+ } else {
+ /* no state change */
+ }
+
+ if (need_reset) {
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
else
ixgbe_reset(adapter);
}
+
return 0;
+}
+static int ixgbe_set_rx_ntuple(struct net_device *dev,
+ struct ethtool_rx_ntuple *cmd)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(dev);
+ struct ethtool_rx_ntuple_flow_spec fs = cmd->fs;
+ struct ixgbe_atr_input input_struct;
+ struct ixgbe_atr_input_masks input_masks;
+ int target_queue;
+
+ if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+ return -EOPNOTSUPP;
+
+ /*
+ * Don't allow programming if the action is a queue greater than
+ * the number of online Tx queues.
+ */
+ if ((fs.action >= adapter->num_tx_queues) ||
+ (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP))
+ return -EINVAL;
+
+ memset(&input_struct, 0, sizeof(struct ixgbe_atr_input));
+ memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
+
+ input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src;
+ input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst;
+ input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc;
+ input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst;
+ input_masks.vlan_id_mask = fs.vlan_tag_mask;
+ /* only use the lowest 2 bytes for flex bytes */
+ input_masks.data_mask = (fs.data_mask & 0xffff);
+
+ switch (fs.flow_type) {
+ case TCP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP);
+ break;
+ case UDP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP);
+ break;
+ case SCTP_V4_FLOW:
+ ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP);
+ break;
+ default:
+ return -1;
+ }
+
+ /* Mask bits from the inputs based on user-supplied mask */
+ ixgbe_atr_set_src_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src));
+ ixgbe_atr_set_dst_ipv4_82599(&input_struct,
+ (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst));
+ /* 82599 expects these to be byte-swapped for perfect filtering */
+ ixgbe_atr_set_src_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc));
+ ixgbe_atr_set_dst_port_82599(&input_struct,
+ ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst));
+
+ /* VLAN and Flex bytes are either completely masked or not */
+ if (!fs.vlan_tag_mask)
+ ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag);
+
+ if (!input_masks.data_mask)
+ /* make sure we only use the first 2 bytes of user data */
+ ixgbe_atr_set_flex_byte_82599(&input_struct,
+ (fs.data & 0xffff));
+
+ /* determine if we need to drop or route the packet */
+ if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
+ target_queue = MAX_RX_QUEUES - 1;
+ else
+ target_queue = fs.action;
+
+ spin_lock(&adapter->fdir_perfect_lock);
+ ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct,
+ &input_masks, 0, target_queue);
+ spin_unlock(&adapter->fdir_perfect_lock);
+
+ return 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
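
The new set_rx_ntuple path, together with the drop queue programmed into FDIRCTRL earlier in this series, reduces the action handling to the sketch below (the helper name is ours; the constants are the ones used in the hunks above):

	/* Sketch only: a DROP rule is steered to the last Rx queue, which
	 * ixgbe_init_fdir_perfect_82599() designated as the drop queue;
	 * any other action is taken as a direct queue index.
	 */
	static int ntuple_target_queue(s32 action)
	{
		if (action == ETHTOOL_RXNTUPLE_ACTION_DROP)
			return MAX_RX_QUEUES - 1;	/* queue 127 on 82599 */
		return action;
	}
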
@@ -2188,6 +2306,7 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
.set_coalesce = ixgbe_set_coalesce,
.get_flags = ethtool_op_get_flags,
.set_flags = ixgbe_set_flags,
+ .set_rx_ntuple = ixgbe_set_rx_ntuple,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index e9a20c88c15..4123dec0dfb 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -525,7 +525,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
fcoe_i = f->mask + i % f->indices;
fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
@@ -533,7 +533,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
- fcoe_q = adapter->rx_ring[fcoe_i].reg_idx;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
IXGBE_ETQS_QUEUE_EN |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 951b73cf5ca..684af371462 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -45,12 +45,13 @@
#include "ixgbe.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb_82599.h"
+#include "ixgbe_sriov.h"
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
"Intel(R) 10 Gigabit PCI Express Network Driver";
-#define DRV_VERSION "2.0.44-k2"
+#define DRV_VERSION "2.0.62-k2"
const char ixgbe_driver_version[] = DRV_VERSION;
static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
@@ -67,7 +68,7 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = {
* { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
* Class, Class Mask, private data (not used) }
*/
-static struct pci_device_id ixgbe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598),
board_82598 },
{PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT),
@@ -124,6 +125,13 @@ static struct notifier_block dca_notifier = {
};
#endif
+#ifdef CONFIG_PCI_IOV
+static unsigned int max_vfs;
+module_param(max_vfs, uint, 0);
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
+ "per physical function");
+#endif /* CONFIG_PCI_IOV */
+
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
@@ -131,6 +139,41 @@ MODULE_VERSION(DRV_VERSION);
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+static inline void ixgbe_disable_sriov(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 gcr;
+ u32 gpie;
+ u32 vmdctl;
+
+#ifdef CONFIG_PCI_IOV
+ /* disable iov and allow time for transactions to clear */
+ pci_disable_sriov(adapter->pdev);
+#endif
+
+ /* turn off device IOV mode */
+ gcr = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ gcr &= ~(IXGBE_GCR_EXT_SRIOV);
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr);
+ gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
+
+ /* set default pool back to 0 */
+ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK;
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+
+ /* take a breather then clean up driver data */
+ msleep(100);
+ if (adapter->vfinfo)
+ kfree(adapter->vfinfo);
+ adapter->vfinfo = NULL;
+
+ adapter->num_vfs = 0;
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+}
+
static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
{
u32 ctrl_ext;
@@ -451,7 +494,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
{
u32 rxctrl;
int cpu = get_cpu();
- int q = rx_ring - adapter->rx_ring;
+ int q = rx_ring->reg_idx;
if (rx_ring->cpu != cpu) {
rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
@@ -479,7 +522,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
{
u32 txctrl;
int cpu = get_cpu();
- int q = tx_ring - adapter->tx_ring;
+ int q = tx_ring->reg_idx;
struct ixgbe_hw *hw = &adapter->hw;
if (tx_ring->cpu != cpu) {
@@ -513,12 +556,12 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].cpu = -1;
- ixgbe_update_tx_dca(adapter, &adapter->tx_ring[i]);
+ adapter->tx_ring[i]->cpu = -1;
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
}
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].cpu = -1;
- ixgbe_update_rx_dca(adapter, &adapter->rx_ring[i]);
+ adapter->rx_ring[i]->cpu = -1;
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
}
}
@@ -775,6 +818,12 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
return skb;
}
+struct ixgbe_rsc_cb {
+ dma_addr_t dma;
+};
+
+#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+
static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
@@ -806,6 +855,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
break;
(*work_done)++;
+ rmb(); /* read descriptor and rx_buffer_info after status DD */
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
@@ -823,9 +873,21 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
rx_buffer_info->skb = NULL;
if (rx_buffer_info->dma) {
- pci_unmap_single(pdev, rx_buffer_info->dma,
- rx_ring->rx_buf_len,
- PCI_DMA_FROMDEVICE);
+ if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
+ (!(staterr & IXGBE_RXD_STAT_EOP)) &&
+ (!(skb->prev)))
+ /*
+ * When HWRSC is enabled, delay unmapping
+ * of the first packet. It carries the
+ * header information; the HW may still
+ * access the header after the writeback.
+ * Only unmap it when EOP is reached
+ */
+ IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
+ else
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
rx_buffer_info->dma = 0;
skb_put(skb, len);
}
@@ -873,6 +935,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
if (skb->prev)
skb = ixgbe_transform_rsc_queue(skb, &(rx_ring->rsc_count));
if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+ if (IXGBE_RSC_CB(skb)->dma)
+ pci_unmap_single(pdev, IXGBE_RSC_CB(skb)->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
rx_ring->rsc_count += skb_shinfo(skb)->nr_frags;
else
@@ -984,12 +1050,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
*/
for (v_idx = 0; v_idx < q_vectors; v_idx++) {
q_vector = adapter->q_vector[v_idx];
- /* XXX for_each_bit(...) */
+ /* XXX for_each_set_bit(...) */
r_idx = find_first_bit(q_vector->rxr_idx,
adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- j = adapter->rx_ring[r_idx].reg_idx;
+ j = adapter->rx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 0, j, v_idx);
r_idx = find_next_bit(q_vector->rxr_idx,
adapter->num_rx_queues,
@@ -999,7 +1065,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- j = adapter->tx_ring[r_idx].reg_idx;
+ j = adapter->tx_ring[r_idx]->reg_idx;
ixgbe_set_ivar(adapter, 1, j, v_idx);
r_idx = find_next_bit(q_vector->txr_idx,
adapter->num_tx_queues,
@@ -1025,7 +1091,12 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
/* set up to autoclear timer, and the vectors */
mask = IXGBE_EIMS_ENABLE_MASK;
- mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
+ if (adapter->num_vfs)
+ mask &= ~(IXGBE_EIMS_OTHER |
+ IXGBE_EIMS_MAILBOX |
+ IXGBE_EIMS_LSC);
+ else
+ mask &= ~(IXGBE_EIMS_OTHER | IXGBE_EIMS_LSC);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIAC, mask);
}
@@ -1134,7 +1205,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->tx_itr,
tx_ring->total_packets,
@@ -1149,7 +1220,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
q_vector->rx_itr,
rx_ring->total_packets,
@@ -1254,6 +1325,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
if (eicr & IXGBE_EICR_LSC)
ixgbe_check_lsc(adapter);
+ if (eicr & IXGBE_EICR_MAILBOX)
+ ixgbe_msg_task(adapter);
+
if (hw->mac.type == ixgbe_mac_82598EB)
ixgbe_check_fan_failure(adapter, eicr);
@@ -1268,7 +1342,7 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
netif_tx_stop_all_queues(netdev);
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
- &adapter->tx_ring[i];
+ adapter->tx_ring[i];
if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
&tx_ring->reinit_state))
schedule_work(&adapter->fdir_reinit_task);
@@ -1327,7 +1401,7 @@ static irqreturn_t ixgbe_msix_clean_tx(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
tx_ring->total_bytes = 0;
tx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1355,7 +1429,7 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1385,7 +1459,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
@@ -1394,7 +1468,7 @@ static irqreturn_t ixgbe_msix_clean_many(int irq, void *data)
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
ring->total_bytes = 0;
ring->total_packets = 0;
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
@@ -1425,7 +1499,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, rx_ring);
@@ -1466,7 +1540,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
- ring = &(adapter->tx_ring[r_idx]);
+ ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, ring);
@@ -1482,7 +1556,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
budget = max(budget, 1);
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_rx_dca(adapter, ring);
@@ -1493,7 +1567,7 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
}
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- ring = &(adapter->rx_ring[r_idx]);
+ ring = adapter->rx_ring[r_idx];
/* If all Rx work done, exit the polling mode */
if (work_done < budget) {
napi_complete(napi);
@@ -1526,7 +1600,7 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
long r_idx;
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_tx_dca(adapter, tx_ring);
@@ -1711,8 +1785,8 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
u8 current_itr;
u32 new_itr = q_vector->eitr;
- struct ixgbe_ring *rx_ring = &adapter->rx_ring[0];
- struct ixgbe_ring *tx_ring = &adapter->tx_ring[0];
+ struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
+ struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
q_vector->tx_itr,
@@ -1768,6 +1842,8 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
mask |= IXGBE_EIMS_ECC;
mask |= IXGBE_EIMS_GPI_SDP1;
mask |= IXGBE_EIMS_GPI_SDP2;
+ if (adapter->num_vfs)
+ mask |= IXGBE_EIMS_MAILBOX;
}
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -1776,6 +1852,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
ixgbe_irq_enable_queues(adapter, ~0);
IXGBE_WRITE_FLUSH(&adapter->hw);
+
+ if (adapter->num_vfs > 32) {
+ u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel);
+ }
}
/**
@@ -1817,10 +1898,10 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_fan_failure(adapter, eicr);
if (napi_schedule_prep(&(q_vector->napi))) {
- adapter->tx_ring[0].total_packets = 0;
- adapter->tx_ring[0].total_bytes = 0;
- adapter->rx_ring[0].total_packets = 0;
- adapter->rx_ring[0].total_bytes = 0;
+ adapter->tx_ring[0]->total_packets = 0;
+ adapter->tx_ring[0]->total_bytes = 0;
+ adapter->rx_ring[0]->total_packets = 0;
+ adapter->rx_ring[0]->total_bytes = 0;
/* would disable interrupts here but EIAM disabled it */
__napi_schedule(&(q_vector->napi));
}
@@ -1905,6 +1986,8 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
+ if (adapter->num_vfs > 32)
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
}
IXGBE_WRITE_FLUSH(&adapter->hw);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -1950,7 +2033,7 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
/* Setup the HW Tx Head and Tail descriptor pointers */
for (i = 0; i < adapter->num_tx_queues; i++) {
- struct ixgbe_ring *ring = &adapter->tx_ring[i];
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
j = ring->reg_idx;
tdba = ring->dma;
tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -1960,8 +2043,8 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_TDLEN(j), tdlen);
IXGBE_WRITE_REG(hw, IXGBE_TDH(j), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(j), 0);
- adapter->tx_ring[i].head = IXGBE_TDH(j);
- adapter->tx_ring[i].tail = IXGBE_TDT(j);
+ adapter->tx_ring[i]->head = IXGBE_TDH(j);
+ adapter->tx_ring[i]->tail = IXGBE_TDT(j);
/*
* Disable Tx Head Writeback RO bit, since this hoses
* bookkeeping if things aren't delivered in order.
@@ -1989,18 +2072,32 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter)
if (hw->mac.type == ixgbe_mac_82599EB) {
u32 rttdcs;
+ u32 mask;
/* disable the arbiter while setting MTQC */
rttdcs = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
rttdcs |= IXGBE_RTTDCS_ARBDIS;
IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs);
- /* We enable 8 traffic classes, DCB only */
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- IXGBE_WRITE_REG(hw, IXGBE_MTQC, (IXGBE_MTQC_RT_ENA |
- IXGBE_MTQC_8TC_8TQ));
- else
+ /* set transmit pool layout */
+ mask = (IXGBE_FLAG_SRIOV_ENABLED | IXGBE_FLAG_DCB_ENABLED);
+ switch (adapter->flags & mask) {
+
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_VT_ENA | IXGBE_MTQC_64VF));
+ break;
+
+ case (IXGBE_FLAG_DCB_ENABLED):
+ /* We enable 8 traffic classes, DCB only */
+ IXGBE_WRITE_REG(hw, IXGBE_MTQC,
+ (IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ));
+ break;
+
+ default:
IXGBE_WRITE_REG(hw, IXGBE_MTQC, IXGBE_MTQC_64Q_1PB);
+ break;
+ }
 /* re-enable the arbiter */
rttdcs &= ~IXGBE_RTTDCS_ARBDIS;
@@ -2059,12 +2156,16 @@ static u32 ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
#ifdef CONFIG_IXGBE_DCB
| IXGBE_FLAG_DCB_ENABLED
#endif
+ | IXGBE_FLAG_SRIOV_ENABLED
);
switch (mask) {
case (IXGBE_FLAG_RSS_ENABLED):
mrqc = IXGBE_MRQC_RSSEN;
break;
+ case (IXGBE_FLAG_SRIOV_ENABLED):
+ mrqc = IXGBE_MRQC_VMDQEN;
+ break;
#ifdef CONFIG_IXGBE_DCB
case (IXGBE_FLAG_DCB_ENABLED):
mrqc = IXGBE_MRQC_RT8TCEN;
@@ -2090,7 +2191,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, int index)
u32 rscctrl;
int rx_buf_len;
- rx_ring = &adapter->rx_ring[index];
+ rx_ring = adapter->rx_ring[index];
j = rx_ring->reg_idx;
rx_buf_len = rx_ring->rx_buf_len;
rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(j));
@@ -2145,7 +2246,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
int rx_buf_len;
/* Decide whether to use packet split mode or not */
- adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ /* Do not use packet split if we're in SR-IOV Mode */
+ if (!adapter->num_vfs)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
/* Set the RX buffer length according to the mode */
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
@@ -2157,7 +2260,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_PSRTYPE_IPV4HDR |
IXGBE_PSRTYPE_IPV6HDR |
IXGBE_PSRTYPE_L2HDR;
- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(0), psrtype);
+ IXGBE_WRITE_REG(hw,
+ IXGBE_PSRTYPE(adapter->num_vfs),
+ psrtype);
}
} else {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
@@ -2184,7 +2289,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
#endif
IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0);
- rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ rdlen = adapter->rx_ring[0]->count * sizeof(union ixgbe_adv_rx_desc);
/* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -2194,7 +2299,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
* the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) {
- rx_ring = &adapter->rx_ring[i];
+ rx_ring = adapter->rx_ring[i];
rdba = rx_ring->dma;
j = rx_ring->reg_idx;
IXGBE_WRITE_REG(hw, IXGBE_RDBAL(j), (rdba & DMA_BIT_MASK(32)));
@@ -2243,6 +2348,30 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ u32 vt_reg_bits;
+ u32 reg_offset, vf_shift;
+ u32 vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL);
+ vt_reg_bits = IXGBE_VMD_CTL_VMDQ_EN
+ | IXGBE_VT_CTL_REPLEN;
+ vt_reg_bits |= (adapter->num_vfs <<
+ IXGBE_VT_CTL_POOL_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl | vt_reg_bits);
+ IXGBE_WRITE_REG(hw, IXGBE_MRQC, 0);
+
+ vf_shift = adapter->num_vfs % 32;
+ reg_offset = adapter->num_vfs / 32;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+ /* Enable only the PF's pool for Tx/Rx */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (1 << vf_shift));
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
+ ixgbe_set_vmolr(hw, adapter->num_vfs);
+ }
+
/* Program MRQC for the distribution of queues */
mrqc = ixgbe_setup_mrqc(adapter);
@@ -2274,6 +2403,20 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
+ if (adapter->num_vfs) {
+ u32 reg;
+
+ /* Map PF MAC address in RAR Entry 0 to first pool
+ * following VFs */
+ hw->mac.ops.set_vmdq(hw, 0, adapter->num_vfs);
+
+ /* Set up VF register offsets for selected VT Mode, i.e.
+ * 64 VFs for SR-IOV */
+ reg = IXGBE_READ_REG(hw, IXGBE_GCR_EXT);
+ reg |= IXGBE_GCR_EXT_SRIOV;
+ IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, reg);
+ }
+
rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED ||
@@ -2312,15 +2455,17 @@ static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
/* add VID to filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, true);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, true);
}
static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
+ int pool_ndx = adapter->num_vfs;
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_disable(adapter);
@@ -2331,7 +2476,7 @@ static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
ixgbe_irq_enable(adapter);
/* remove VID from filter table */
- hw->mac.ops.set_vfta(&adapter->hw, vid, 0, false);
+ hw->mac.ops.set_vfta(&adapter->hw, vid, pool_ndx, false);
}
static void ixgbe_vlan_rx_register(struct net_device *netdev,
@@ -2361,7 +2506,7 @@ static void ixgbe_vlan_rx_register(struct net_device *netdev,
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
for (i = 0; i < adapter->num_rx_queues; i++) {
u32 ctrl;
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXDCTL(j));
ctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXDCTL(j), ctrl);
@@ -2414,7 +2559,7 @@ static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
* responsible for configuring the hardware for proper unicast, multicast and
* promiscuous mode.
**/
-static void ixgbe_set_rx_mode(struct net_device *netdev)
+void ixgbe_set_rx_mode(struct net_device *netdev)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -2446,14 +2591,16 @@ static void ixgbe_set_rx_mode(struct net_device *netdev)
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
/* reprogram secondary unicast list */
- hw->mac.ops.update_uc_addr_list(hw, &netdev->uc.list);
+ hw->mac.ops.update_uc_addr_list(hw, netdev);
/* reprogram multicast list */
- addr_count = netdev->mc_count;
+ addr_count = netdev_mc_count(netdev);
if (addr_count)
addr_list = netdev->mc_list->dmi_addr;
hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
ixgbe_addr_list_itr);
+ if (adapter->num_vfs)
+ ixgbe_restore_vf_multicasts(adapter);
}
static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
@@ -2522,7 +2669,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* PThresh workaround for Tx hang with DFP enabled. */
txdctl |= 32;
@@ -2539,7 +2686,7 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
for (i = 0; i < adapter->num_rx_queues; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
vlnctrl |= IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl);
@@ -2579,7 +2726,7 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].atr_sample_rate =
+ adapter->tx_ring[i]->atr_sample_rate =
adapter->atr_sample_rate;
ixgbe_init_fdir_signature_82599(hw, adapter->fdir_pballoc);
} else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
@@ -2589,8 +2736,8 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter)
ixgbe_configure_tx(adapter);
ixgbe_configure_rx(adapter);
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_alloc_rx_buffers(adapter, &adapter->rx_ring[i],
- (adapter->rx_ring[i].count - 1));
+ ixgbe_alloc_rx_buffers(adapter, adapter->rx_ring[i],
+ (adapter->rx_ring[i]->count - 1));
}
static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
@@ -2673,7 +2820,7 @@ link_cfg_out:
static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
int rxr)
{
- int j = adapter->rx_ring[rxr].reg_idx;
+ int j = adapter->rx_ring[rxr]->reg_idx;
int k;
for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
@@ -2687,8 +2834,8 @@ static inline void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
DPRINTK(DRV, ERR, "RXDCTL.ENABLE on Rx queue %d "
"not set within the polling period\n", rxr);
}
- ixgbe_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
- (adapter->rx_ring[rxr].count - 1));
+ ixgbe_release_rx_desc(&adapter->hw, adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr]->count - 1));
}
static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
@@ -2702,6 +2849,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
u32 txdctl, rxdctl, mhadd;
u32 dmatxctl;
u32 gpie;
+ u32 ctrl_ext;
ixgbe_get_hw_control(adapter);
@@ -2714,6 +2862,10 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
/* MSI only */
gpie = 0;
}
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ gpie &= ~IXGBE_GPIE_VTMODE_MASK;
+ gpie |= IXGBE_GPIE_VTMODE_64;
+ }
/* XXX: to interrupt immediately for EICS writes, enable this */
/* gpie |= IXGBE_GPIE_EIMEN; */
IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie);
@@ -2770,7 +2922,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
/* enable WTHRESH=8 descriptors, to encourage burst writeback */
txdctl |= (8 << 16);
@@ -2784,14 +2936,26 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, dmatxctl);
}
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
txdctl |= IXGBE_TXDCTL_ENABLE;
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
+ if (hw->mac.type == ixgbe_mac_82599EB) {
+ int wait_loop = 10;
+ /* poll for Tx Enable ready */
+ do {
+ msleep(1);
+ txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
+ } while (--wait_loop &&
+ !(txdctl & IXGBE_TXDCTL_ENABLE));
+ if (!wait_loop)
+ DPRINTK(DRV, ERR, "Could not enable "
+ "Tx Queue %d\n", j);
+ }
}
for (i = 0; i < num_rx_rings; i++) {
- j = adapter->rx_ring[i].reg_idx;
+ j = adapter->rx_ring[i]->reg_idx;
rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
/* enable PTHRESH=32 descriptors (half the internal cache)
* and HTHRESH=0 descriptors (to minimize latency on fetch),
@@ -2865,7 +3029,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
/* enable transmits */
netif_tx_start_all_queues(netdev);
@@ -2875,6 +3039,12 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
adapter->link_check_timeout = jiffies;
mod_timer(&adapter->watchdog_timer, jiffies);
+
+ /* Set PF Reset Done bit so PF/VF Mail Ops can work */
+ ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
+ ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
+ IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
+
return 0;
}
@@ -2923,7 +3093,8 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
/* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
}
/**
@@ -2955,6 +3126,10 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
rx_buffer_info->skb = NULL;
do {
struct sk_buff *this = skb;
+ if (IXGBE_RSC_CB(this)->dma)
+ pci_unmap_single(pdev, IXGBE_RSC_CB(this)->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
skb = skb->prev;
dev_kfree_skb(this);
} while (skb);
@@ -3029,7 +3204,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- ixgbe_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+ ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
}
/**
@@ -3041,7 +3216,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- ixgbe_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+ ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
}
void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3055,6 +3230,17 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* signal that we are down to the interrupt handler */
set_bit(__IXGBE_DOWN, &adapter->state);
+ /* disable receive for all VFs and wait one second */
+ if (adapter->num_vfs) {
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
+
+ /* ping all the active vfs to let them know we are going down */
+ ixgbe_ping_all_vfs(adapter);
+ /* Disable all VFTE/VFRE TX/RX */
+ ixgbe_disable_tx_rx(adapter);
+ }
+
/* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
@@ -3081,7 +3267,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* disable transmits in the hardware now that interrupts are off */
for (i = 0; i < adapter->num_tx_queues; i++) {
- j = adapter->tx_ring[i].reg_idx;
+ j = adapter->tx_ring[i]->reg_idx;
txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
(txdctl & ~IXGBE_TXDCTL_ENABLE));
@@ -3094,6 +3280,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
netif_carrier_off(netdev);
+ /* clear n-tuple filters that are cached */
+ ethtool_ntuple_flush(netdev);
+
if (!pci_channel_offline(adapter->pdev))
ixgbe_reset(adapter);
ixgbe_clean_all_tx_rings(adapter);
@@ -3121,13 +3310,13 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring);
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
}
#endif
- tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring);
- ixgbe_clean_rx_irq(q_vector, adapter->rx_ring, &work_done, budget);
+ tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
+ ixgbe_clean_rx_irq(q_vector, adapter->rx_ring[0], &work_done, budget);
if (!tx_clean_complete)
work_done = budget;
@@ -3291,6 +3480,19 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
}
#endif /* IXGBE_FCOE */
+/**
+ * ixgbe_set_sriov_queues: Allocate queues for IOV use
+ * @adapter: board private structure to initialize
+ *
+ * IOV doesn't actually use anything, so just NAK the
+ * request for now and let the other queue routines
+ * figure out what to do.
+ */
+static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
+{
+ return false;
+}
+
/*
 * ixgbe_set_num_queues: Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
@@ -3304,6 +3506,15 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
**/
static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+
+ if (ixgbe_set_sriov_queues(adapter))
+ return;
+
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
@@ -3393,9 +3604,9 @@ static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
} else {
ret = false;
@@ -3422,8 +3633,8 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
/* the number of queues is assumed to be symmetric */
for (i = 0; i < dcb_i; i++) {
- adapter->rx_ring[i].reg_idx = i << 3;
- adapter->tx_ring[i].reg_idx = i << 2;
+ adapter->rx_ring[i]->reg_idx = i << 3;
+ adapter->tx_ring[i]->reg_idx = i << 2;
}
ret = true;
} else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
@@ -3441,18 +3652,18 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
* Rx TC0-TC7 are offset by 16 queues each
*/
for (i = 0; i < 3; i++) {
- adapter->tx_ring[i].reg_idx = i << 5;
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->tx_ring[i]->reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < 5; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 2) << 4);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
for ( ; i < dcb_i; i++) {
- adapter->tx_ring[i].reg_idx =
+ adapter->tx_ring[i]->reg_idx =
((i + 8) << 3);
- adapter->rx_ring[i].reg_idx = i << 4;
+ adapter->rx_ring[i]->reg_idx = i << 4;
}
ret = true;
@@ -3465,12 +3676,12 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
*
* Rx TC0-TC3 are offset by 32 queues each
*/
- adapter->tx_ring[0].reg_idx = 0;
- adapter->tx_ring[1].reg_idx = 64;
- adapter->tx_ring[2].reg_idx = 96;
- adapter->tx_ring[3].reg_idx = 112;
+ adapter->tx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[1]->reg_idx = 64;
+ adapter->tx_ring[2]->reg_idx = 96;
+ adapter->tx_ring[3]->reg_idx = 112;
for (i = 0 ; i < dcb_i; i++)
- adapter->rx_ring[i].reg_idx = i << 5;
+ adapter->rx_ring[i]->reg_idx = i << 5;
ret = true;
} else {
@@ -3503,9 +3714,9 @@ static bool inline ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))) {
for (i = 0; i < adapter->num_rx_queues; i++)
- adapter->rx_ring[i].reg_idx = i;
+ adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
- adapter->tx_ring[i].reg_idx = i;
+ adapter->tx_ring[i]->reg_idx = i;
ret = true;
}
@@ -3533,8 +3744,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
ixgbe_cache_ring_dcb(adapter);
/* find out queues in TC for FCoE */
- fcoe_rx_i = adapter->rx_ring[fcoe->tc].reg_idx + 1;
- fcoe_tx_i = adapter->tx_ring[fcoe->tc].reg_idx + 1;
+ fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+ fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
/*
* In 82599, the number of Tx queues for each traffic
* class for both 8-TC and 4-TC modes are:
@@ -3565,8 +3776,8 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
- adapter->rx_ring[f->mask + i].reg_idx = fcoe_rx_i;
- adapter->tx_ring[f->mask + i].reg_idx = fcoe_tx_i;
+ adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+ adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
}
ret = true;
}
@@ -3575,6 +3786,24 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
#endif /* IXGBE_FCOE */
/**
+ * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
+ * @adapter: board private structure to initialize
+ *
+ * SR-IOV doesn't use any descriptor rings but changes the default if
+ * no other mapping is used.
+ *
+ */
+static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
+{
+ adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
+ if (adapter->num_vfs)
+ return true;
+ else
+ return false;
+}
+
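As a rough illustration of the mapping above (the factor of two appears to reflect the pair of queue registers each VF occupies in this mode; that reading is an assumption, not stated in the patch): with num_vfs = 8 the PF's single ring pair lands at register index 16, just past the range used by the VFs, while with num_vfs = 0 the function leaves the default index 0 in place and returns false.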
+/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
*
@@ -3588,8 +3817,11 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
/* start with default case */
- adapter->rx_ring[0].reg_idx = 0;
- adapter->tx_ring[0].reg_idx = 0;
+ adapter->rx_ring[0]->reg_idx = 0;
+ adapter->tx_ring[0]->reg_idx = 0;
+
+ if (ixgbe_cache_ring_sriov(adapter))
+ return;
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
@@ -3619,33 +3851,63 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
{
int i;
-
- adapter->tx_ring = kcalloc(adapter->num_tx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->tx_ring)
- goto err_tx_ring_allocation;
-
- adapter->rx_ring = kcalloc(adapter->num_rx_queues,
- sizeof(struct ixgbe_ring), GFP_KERNEL);
- if (!adapter->rx_ring)
- goto err_rx_ring_allocation;
+ int orig_node = adapter->node;
for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i].count = adapter->tx_ring_count;
- adapter->tx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->tx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_tx_ring_allocation;
+ ring->count = adapter->tx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->tx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i].count = adapter->rx_ring_count;
- adapter->rx_ring[i].queue_index = i;
+ struct ixgbe_ring *ring = adapter->rx_ring[i];
+ if (orig_node == -1) {
+ int cur_node = next_online_node(adapter->node);
+ if (cur_node == MAX_NUMNODES)
+ cur_node = first_online_node;
+ adapter->node = cur_node;
+ }
+ ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
+ adapter->node);
+ if (!ring)
+ ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+ if (!ring)
+ goto err_rx_ring_allocation;
+ ring->count = adapter->rx_ring_count;
+ ring->queue_index = i;
+ ring->numa_node = adapter->node;
+
+ adapter->rx_ring[i] = ring;
}
+ /* Restore the adapter's original node */
+ adapter->node = orig_node;
+
ixgbe_cache_ring_register(adapter);
return 0;
err_rx_ring_allocation:
- kfree(adapter->tx_ring);
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ kfree(adapter->tx_ring[i]);
err_tx_ring_allocation:
return -ENOMEM;
}
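A short worked example of the allocation loops above, assuming two online NUMA nodes and adapter->node == -1 (no recorded affinity): next_online_node() advances the target node for each ring and wraps through first_online_node, so four Tx rings are placed on nodes 0, 1, 0, 1, and the Rx rings repeat the same pattern because adapter->node is restored to its original value between the two loops.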
@@ -3700,6 +3962,9 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
adapter->atr_sample_rate = 0;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_set_num_queues(adapter);
err = pci_enable_msi(adapter->pdev);
@@ -3741,7 +4006,11 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
}
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
- q_vector = kzalloc(sizeof(struct ixgbe_q_vector), GFP_KERNEL);
+ q_vector = kzalloc_node(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL, adapter->node);
+ if (!q_vector)
+ q_vector = kzalloc(sizeof(struct ixgbe_q_vector),
+ GFP_KERNEL);
if (!q_vector)
goto err_out;
q_vector->adapter = adapter;
@@ -3868,10 +4137,16 @@ err_set_interrupt:
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
- kfree(adapter->tx_ring);
- kfree(adapter->rx_ring);
- adapter->tx_ring = NULL;
- adapter->rx_ring = NULL;
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ kfree(adapter->tx_ring[i]);
+ adapter->tx_ring[i] = NULL;
+ }
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ kfree(adapter->rx_ring[i]);
+ adapter->rx_ring[i] = NULL;
+ }
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
@@ -3942,6 +4217,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
+ struct net_device *dev = adapter->netdev;
unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
int j;
@@ -3969,10 +4245,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ if (dev->features & NETIF_F_NTUPLE) {
+ /* Flow Director perfect filter enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
+ adapter->atr_sample_rate = 0;
+ spin_lock_init(&adapter->fdir_perfect_lock);
+ } else {
+ /* Flow Director hash filters enabled */
+ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+ adapter->atr_sample_rate = 20;
+ }
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
- adapter->atr_sample_rate = 20;
adapter->fdir_pballoc = 0;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
@@ -4041,6 +4325,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
/* enable rx csum by default */
adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ /* get assigned NUMA node */
+ adapter->node = dev_to_node(&pdev->dev);
+
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
@@ -4060,7 +4347,9 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
- tx_ring->tx_buffer_info = vmalloc(size);
+ tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node);
+ if (!tx_ring->tx_buffer_info)
+ tx_ring->tx_buffer_info = vmalloc(size);
if (!tx_ring->tx_buffer_info)
goto err;
memset(tx_ring->tx_buffer_info, 0, size);
@@ -4102,7 +4391,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_tx_queues; i++) {
- err = ixgbe_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Tx Queue %u failed\n", i);
@@ -4126,7 +4415,9 @@ int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
int size;
size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
- rx_ring->rx_buffer_info = vmalloc(size);
+ rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+ if (!rx_ring->rx_buffer_info)
+ rx_ring->rx_buffer_info = vmalloc(size);
if (!rx_ring->rx_buffer_info) {
DPRINTK(PROBE, ERR,
"vmalloc allocation failed for the rx desc ring\n");
@@ -4172,7 +4463,7 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->num_rx_queues; i++) {
- err = ixgbe_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
if (!err)
continue;
DPRINTK(PROBE, ERR, "Allocation for Rx Queue %u failed\n", i);
@@ -4215,8 +4506,8 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_tx_queues; i++)
- if (adapter->tx_ring[i].desc)
- ixgbe_free_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (adapter->tx_ring[i]->desc)
+ ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
}
/**
@@ -4252,8 +4543,8 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
int i;
for (i = 0; i < adapter->num_rx_queues; i++)
- if (adapter->rx_ring[i].desc)
- ixgbe_free_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (adapter->rx_ring[i]->desc)
+ ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
}
/**
@@ -4530,8 +4821,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
adapter->hw_rx_no_dma_resources +=
IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
for (i = 0; i < adapter->num_rx_queues; i++) {
- rsc_count += adapter->rx_ring[i].rsc_count;
- rsc_flush += adapter->rx_ring[i].rsc_flush;
+ rsc_count += adapter->rx_ring[i]->rsc_count;
+ rsc_flush += adapter->rx_ring[i]->rsc_flush;
}
adapter->rsc_total_count = rsc_count;
adapter->rsc_total_flush = rsc_flush;
@@ -4539,11 +4830,11 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
/* gather some stats to the adapter struct that are per queue */
for (i = 0; i < adapter->num_tx_queues; i++)
- restart_queue += adapter->tx_ring[i].restart_queue;
+ restart_queue += adapter->tx_ring[i]->restart_queue;
adapter->restart_queue = restart_queue;
for (i = 0; i < adapter->num_rx_queues; i++)
- non_eop_descs += adapter->rx_ring[i].non_eop_descs;
+ non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
adapter->non_eop_descs = non_eop_descs;
adapter->stats.crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
@@ -4782,7 +5073,7 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i].reinit_state));
+ &(adapter->tx_ring[i]->reinit_state));
} else {
DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters \n");
@@ -4791,6 +5082,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
netif_tx_start_all_queues(adapter->netdev);
}
+static DEFINE_MUTEX(ixgbe_watchdog_lock);
+
/**
* ixgbe_watchdog_task - worker thread to bring link up
* @work: pointer to work_struct containing our data
@@ -4802,13 +5095,16 @@ static void ixgbe_watchdog_task(struct work_struct *work)
watchdog_task);
struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
- u32 link_speed = adapter->link_speed;
- bool link_up = adapter->link_up;
+ u32 link_speed;
+ bool link_up;
int i;
struct ixgbe_ring *tx_ring;
int some_tx_pending = 0;
- adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_lock(&ixgbe_watchdog_lock);
+
+ link_up = adapter->link_up;
+ link_speed = adapter->link_speed;
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) {
hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
@@ -4879,7 +5175,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++) {
- tx_ring = &adapter->tx_ring[i];
+ tx_ring = adapter->tx_ring[i];
if (tx_ring->next_to_use != tx_ring->next_to_clean) {
some_tx_pending = 1;
break;
@@ -4897,7 +5193,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
}
ixgbe_update_stats(adapter);
- adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+ mutex_unlock(&ixgbe_watchdog_lock);
}
static int ixgbe_tso(struct ixgbe_adapter *adapter,
@@ -5343,8 +5639,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
return txq;
}
#endif
- if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
- return (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >> 13;
+ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+ if (skb->priority == TC_PRIO_CONTROL)
+ txq = adapter->ring_feature[RING_F_DCB].indices-1;
+ else
+ txq = (skb->vlan_tci & IXGBE_TX_FLAGS_VLAN_PRIO_MASK)
+ >> 13;
+ return txq;
+ }
return skb_tx_hash(dev, skb);
}
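A minimal sketch of the DCB branch above, with the priority mask written out explicitly (IXGBE_TX_FLAGS_VLAN_PRIO_MASK is defined in ixgbe.h; the value 0xe000 used here is an assumption for illustration, and control-priority frames instead go to the last DCB queue, ring_feature[RING_F_DCB].indices - 1):

	/* Illustrative sketch only: map the 802.1p priority carried in
	 * vlan_tci to a Tx queue the way ixgbe_select_queue() does when
	 * DCB is enabled.
	 */
	static u16 dcb_prio_to_txq(u16 vlan_tci)
	{
		return (vlan_tci & 0xe000) >> 13;  /* e.g. vlan_tci 0xa005 -> queue 5 */
	}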
@@ -5371,17 +5673,12 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
tx_flags |= IXGBE_TX_FLAGS_VLAN;
} else if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
- if (skb->priority != TC_PRIO_CONTROL) {
- tx_flags |= ((skb->queue_mapping & 0x7) << 13);
- tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
- tx_flags |= IXGBE_TX_FLAGS_VLAN;
- } else {
- skb->queue_mapping =
- adapter->ring_feature[RING_F_DCB].indices-1;
- }
+ tx_flags |= ((skb->queue_mapping & 0x7) << 13);
+ tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_VLAN;
}
- tx_ring = &adapter->tx_ring[skb->queue_mapping];
+ tx_ring = adapter->tx_ring[skb->queue_mapping];
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
(skb->protocol == htons(ETH_P_FCOE))) {
@@ -5487,7 +5784,8 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, adapter->num_vfs,
+ IXGBE_RAH_AV);
return 0;
}
@@ -5624,6 +5922,61 @@ static const struct net_device_ops ixgbe_netdev_ops = {
#endif /* IXGBE_FCOE */
};
+static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
+ const struct ixgbe_info *ii)
+{
+#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs)
+ return;
+
+ /* The 82599 supports up to 64 VFs per physical function
+ * but this implementation limits allocation to 63 so that
+ * basic networking resources are still available to the
+ * physical function
+ */
+ adapter->num_vfs = (max_vfs > 63) ? 63 : max_vfs;
+ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED;
+ err = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
+ if (err) {
+ DPRINTK(PROBE, ERR,
+ "Failed to enable PCI sriov: %d\n", err);
+ goto err_novfs;
+ }
+ /* If call to enable VFs succeeded then allocate memory
+ * for per VF control structures.
+ */
+ adapter->vfinfo =
+ kcalloc(adapter->num_vfs,
+ sizeof(struct vf_data_storage), GFP_KERNEL);
+ if (adapter->vfinfo) {
+ /* Now that we're sure SR-IOV is enabled
+ * and memory allocated, set up the mailbox parameters
+ */
+ ixgbe_init_mbx_params_pf(hw);
+ memcpy(&hw->mbx.ops, ii->mbx_ops,
+ sizeof(hw->mbx.ops));
+
+ /* Disable RSC when in SR-IOV mode */
+ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE |
+ IXGBE_FLAG2_RSC_ENABLED);
+ return;
+ }
+
+ /* Allocation failed, so back out of SR-IOV */
+ DPRINTK(PROBE, ERR,
+ "Unable to allocate memory for VF "
+ "Data Storage - SRIOV disabled\n");
+ pci_disable_sriov(adapter->pdev);
+
+err_novfs:
+ adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED;
+ adapter->num_vfs = 0;
+#endif /* CONFIG_PCI_IOV */
+}
+
/**
* ixgbe_probe - Device Initialization Routine
* @pdev: PCI device information struct
@@ -5644,6 +5997,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
static int cards_found;
int i, err, pci_using_dac;
+ unsigned int indices = num_possible_cpus();
#ifdef IXGBE_FCOE
u16 device_caps;
#endif
@@ -5682,7 +6036,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
pci_set_master(pdev);
pci_save_state(pdev);
- netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), MAX_TX_QUEUES);
+ if (ii->mac == ixgbe_mac_82598EB)
+ indices = min_t(unsigned int, indices, IXGBE_MAX_RSS_INDICES);
+ else
+ indices = min_t(unsigned int, indices, IXGBE_MAX_FDIR_INDICES);
+
+ indices = max_t(unsigned int, indices, IXGBE_MAX_DCB_INDICES);
+#ifdef IXGBE_FCOE
+ indices += min_t(unsigned int, num_possible_cpus(),
+ IXGBE_MAX_FCOE_INDICES);
+#endif
+ indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
@@ -5802,6 +6167,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
goto err_sw_init;
}
+ ixgbe_probe_vf(adapter, ii);
+
netdev->features = NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_TX |
@@ -5822,6 +6189,9 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
netdev->vlan_features |= NETIF_F_IPV6_CSUM;
netdev->vlan_features |= NETIF_F_SG;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ adapter->flags &= ~(IXGBE_FLAG_RSS_ENABLED |
+ IXGBE_FLAG_DCB_ENABLED);
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
@@ -5948,6 +6318,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_setup_dca(adapter);
}
#endif
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ DPRINTK(PROBE, INFO, "IOV is enabled with %d VFs\n",
+ adapter->num_vfs);
+ for (i = 0; i < adapter->num_vfs; i++)
+ ixgbe_vf_configuration(pdev, (i | 0x10000000));
+ }
+
/* add san mac addr to netdev */
ixgbe_add_sanmac_netdev(netdev);
@@ -5960,6 +6337,8 @@ err_register:
ixgbe_clear_interrupt_scheme(adapter);
err_sw_init:
err_eeprom:
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
del_timer_sync(&adapter->sfp_timer);
cancel_work_sync(&adapter->sfp_task);
@@ -6028,6 +6407,9 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
if (netdev->reg_state == NETREG_REGISTERED)
unregister_netdev(netdev);
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ ixgbe_disable_sriov(adapter);
+
ixgbe_clear_interrupt_scheme(adapter);
ixgbe_release_hw_control(adapter);
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
new file mode 100644
index 00000000000..d75f9148eb1
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -0,0 +1,479 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include "ixgbe_type.h"
+#include "ixgbe_common.h"
+#include "ixgbe_mbx.h"
+
+/**
+ * ixgbe_read_mbx - Reads a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to read
+ *
+ * returns SUCCESS if it successfully read message from buffer
+ **/
+s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* limit read to size of mailbox */
+ if (size > mbx->size)
+ size = mbx->size;
+
+ if (mbx->ops.read)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = 0;
+
+ if (size > mbx->size)
+ ret_val = IXGBE_ERR_MBX;
+
+ else if (mbx->ops.write)
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg - checks to see if someone sent us mail
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_msg)
+ ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_ack - checks to see if someone sent us ACK
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_ack)
+ ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst - checks to see if other side has reset
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to check
+ *
+ * returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbx->ops.check_for_rst)
+ ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification
+ **/
+static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_msg)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbe_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ if (!countdown || !mbx->ops.check_for_ack)
+ goto out;
+
+ while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+ countdown--;
+ if (!countdown)
+ break;
+ udelay(mbx->usec_delay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+out:
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
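For a sense of scale: with the VF init values defined in ixgbe_mbx.h (IXGBE_VF_MBX_INIT_TIMEOUT of 2000 retries and IXGBE_VF_MBX_INIT_DELAY of 500 microseconds), a posted operation is bounded at roughly 2000 * 500 us = 1 second. Note that ixgbe_init_mbx_params_pf() below sets mbx->timeout to 0, so PF-side ixgbe_write_posted_mbx() returns IXGBE_ERR_MBX immediately unless a timeout is configured elsewhere.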
+/**
+ * ixgbe_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!mbx->ops.read)
+ goto out;
+
+ ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+
+ /* if a message notification arrived, read it, otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @mbx_id: id of mailbox to write
+ *
+ * returns SUCCESS if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 mbx_id)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* exit if either we can't write or there isn't a defined timeout */
+ if (!mbx->ops.write || !mbx->timeout)
+ goto out;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbe_poll_for_ack(hw, mbx_id);
+out:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers
+ * @hw: pointer to the HW structure
+ *
+ * Setup the mailbox read and write message function pointers
+ **/
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ mbx->ops.read_posted = ixgbe_read_posted_mbx;
+ mbx->ops.write_posted = ixgbe_write_posted_mbx;
+}
+
+static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
+{
+ u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (mbvficr & mask) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
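A worked example of the MBVFICR indexing used here: for VF 21, IXGBE_MBVFICR_INDEX(21) = 21 >> 4 = 1 and vf_bit = 21 % 16 = 5, so a pending request shows up as IXGBE_MBVFICR_VFREQ_VF1 << 5 = 0x00000020 in MBVFICR(1) and the matching ack as IXGBE_MBVFICR_VFACK_VF1 << 5 = 0x00200000; ixgbe_check_for_bit_pf() then writes that same mask back to clear the bit.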
+/**
+ * ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ s32 index = IXGBE_MBVFICR_INDEX(vf_number);
+ u32 vf_bit = vf_number % 16;
+
+ if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
+ index)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_check_for_rst_pf - checks to see if the VF has reset
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ u32 reg_offset = (vf_number < 32) ? 0 : 1;
+ u32 vf_shift = vf_number % 32;
+ u32 vflre = 0;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (hw->mac.type == ixgbe_mac_82599EB)
+ vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
+
+ if (vflre & (1 << vf_shift)) {
+ ret_val = 0;
+ IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ * @vf_number: the VF index
+ *
+ * return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+ u32 p2v_mailbox;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);
+
+ /* verify the PF now owns the mailbox buffer */
+ p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
+ if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_write_mbx_pf - Places a message in the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbe_check_for_msg_pf(hw, vf_number);
+ ixgbe_check_for_ack_pf(hw, vf_number);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);
+
+ /* Interrupt VF to tell it a message has been sent and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+ return ret_val;
+
+}
+
+/**
+ * ixgbe_read_mbx_pf - Read a message from the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ * @vf_number: the VF index
+ *
+ * This function copies a message from the mailbox buffer to the caller's
+ * memory buffer. The presumption is that the caller knows that there was
+ * a message due to a VF request so no polling for message is needed.
+ **/
+static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
+ u16 vf_number)
+{
+ s32 ret_val;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);
+
+ /* Acknowledge the message and release buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for pf mailbox
+ */
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ if (hw->mac.type != ixgbe_mac_82599EB)
+ return;
+
+ mbx->timeout = 0;
+ mbx->usec_delay = 0;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+}
+
+struct ixgbe_mbx_operations mbx_ops_82599 = {
+ .read = ixgbe_read_mbx_pf,
+ .write = ixgbe_write_mbx_pf,
+ .read_posted = ixgbe_read_posted_mbx,
+ .write_posted = ixgbe_write_posted_mbx,
+ .check_for_msg = ixgbe_check_for_msg_pf,
+ .check_for_ack = ixgbe_check_for_ack_pf,
+ .check_for_rst = ixgbe_check_for_rst_pf,
+};
+
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
new file mode 100644
index 00000000000..be7ab3309ab
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -0,0 +1,96 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "ixgbe_type.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's an IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACKs are the message value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
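A minimal sketch of how these constants combine into a mailbox message (the VF-side transport lives in the separate ixgbevf driver and is not shown here; the buffer below is purely illustrative):

	/* Illustrative only: a VF asking the PF to add VLAN 100. */
	u32 msgbuf[2];

	msgbuf[0] = IXGBE_VF_SET_VLAN | (1 << IXGBE_VT_MSGINFO_SHIFT); /* add = 1 */
	msgbuf[1] = 100;                                               /* VLAN id  */

	/* The PF's reply in ixgbe_rcv_msg_from_vf() echoes the command word
	 * with a status bit or'd in:
	 *   msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;    request handled
	 *   msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;   request rejected
	 *   msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;    VF is clear to keep sending
	 */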
+s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16);
+s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
+s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
+void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw);
+void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
+
+extern struct ixgbe_mbx_operations mbx_ops_82599;
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
new file mode 100644
index 00000000000..d4cd20f3019
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -0,0 +1,362 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#include "ixgbe.h"
+
+#include "ixgbe_sriov.h"
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf)
+{
+ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
+ int i;
+
+ /* only so many hash values supported */
+ entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
+
+ /*
+ * salt away the number of multicast addresses assigned
+ * to this VF for later use to restore when the PF multicast
+ * list changes
+ */
+ vfinfo->num_vf_mc_hashes = entries;
+
+ /*
+ * VFs are limited to using the MTA hash table for their multicast
+ * addresses
+ */
+ for (i = 0; i < entries; i++) {
+ vfinfo->vf_mc_hashes[i] = hash_list[i];
+ }
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ return 0;
+}
+
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct vf_data_storage *vfinfo;
+ int i, j;
+ u32 vector_bit;
+ u32 vector_reg;
+ u32 mta_reg;
+
+ for (i = 0; i < adapter->num_vfs; i++) {
+ vfinfo = &adapter->vfinfo[i];
+ for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
+ hw->addr_ctrl.mta_in_use++;
+ vector_reg = (vfinfo->vf_mc_hashes[j] >> 5) & 0x7F;
+ vector_bit = vfinfo->vf_mc_hashes[j] & 0x1F;
+ mta_reg = IXGBE_READ_REG(hw, IXGBE_MTA(vector_reg));
+ mta_reg |= (1 << vector_bit);
+ IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
+ }
+ }
+}
+
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf)
+{
+ u32 ctrl;
+
+ /* Check if global VLAN already set, if not set it */
+ ctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_VLNCTRL);
+ if (!(ctrl & IXGBE_VLNCTRL_VFE)) {
+ /* enable VLAN tag insert/strip */
+ ctrl |= IXGBE_VLNCTRL_VFE;
+ ctrl &= ~IXGBE_VLNCTRL_CFIEN;
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_VLNCTRL, ctrl);
+ }
+
+ return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add);
+}
+
+
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf)
+{
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
+ vmolr |= (IXGBE_VMOLR_AUPE |
+ IXGBE_VMOLR_ROMPE |
+ IXGBE_VMOLR_ROPE |
+ IXGBE_VMOLR_BAM);
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
+}
+
+inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* reset offloads to defaults */
+ ixgbe_set_vmolr(hw, vf);
+
+
+ /* reset multicast table array for vf */
+ adapter->vfinfo[vf].num_vf_mc_hashes = 0;
+
+ /* Flush and reset the mta with the new values */
+ ixgbe_set_rx_mode(adapter->netdev);
+
+ if (adapter->vfinfo[vf].rar > 0) {
+ adapter->hw.mac.ops.clear_rar(&adapter->hw,
+ adapter->vfinfo[vf].rar);
+ adapter->vfinfo[vf].rar = -1;
+ }
+}
+
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->vfinfo[vf].rar = hw->mac.ops.set_rar(hw, vf + 1, mac_addr,
+ vf, IXGBE_RAH_AV);
+ if (adapter->vfinfo[vf].rar < 0) {
+ DPRINTK(DRV, ERR, "Could not set MAC Filter for VF %d\n", vf);
+ return -1;
+ }
+
+ memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, 6);
+
+ return 0;
+}
+
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
+{
+ unsigned char vf_mac_addr[6];
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ unsigned int vfn = (event_mask & 0x3f);
+
+ bool enable = ((event_mask & 0x10000000U) != 0);
+
+ if (enable) {
+ random_ether_addr(vf_mac_addr);
+ DPRINTK(PROBE, INFO, "IOV: VF %d is enabled "
+ "mac %02X:%02X:%02X:%02X:%02X:%02X\n",
+ vfn,
+ vf_mac_addr[0], vf_mac_addr[1], vf_mac_addr[2],
+ vf_mac_addr[3], vf_mac_addr[4], vf_mac_addr[5]);
+ /*
+ * Store away the VF's "permanent" MAC address; the VF will
+ * ask for it later.
+ */
+ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6);
+ }
+
+ return 0;
+}
+
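A worked example of the event_mask encoding: ixgbe_probe() calls ixgbe_vf_configuration(pdev, (i | 0x10000000)), so for VF 5 the mask is 0x10000005; the low six bits recover vfn = 5 and bit 28 signals that the VF is being enabled, which is what triggers generation of its random MAC address here.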
+inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 reg;
+ u32 reg_offset, vf_shift;
+
+ vf_shift = vf % 32;
+ reg_offset = vf / 32;
+
+ /* enable transmit and receive for vf */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFTE(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg);
+
+ reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset));
+ reg |= (1 << vf_shift);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
+
+ ixgbe_vf_reset_event(adapter, vf);
+}
+
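For the register arithmetic above: VF 40 gives reg_offset = 40 / 32 = 1 and vf_shift = 40 % 32 = 8, so bit 8 of VFTE(1) and VFRE(1) is set to allow that VF to transmit and receive.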
+static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ u32 mbx_size = IXGBE_VFMAILBOX_SIZE;
+ u32 msgbuf[mbx_size];
+ struct ixgbe_hw *hw = &adapter->hw;
+ s32 retval;
+ int entries;
+ u16 *hash_list;
+ int add, vid;
+
+ retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
+
+ if (retval)
+ printk(KERN_ERR "Error receiving message from VF\n");
+
+ /* this is a message we already processed, do nothing */
+ if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
+ return retval;
+
+ /*
+ * until the vf completes a virtual function reset it should not be
+ * allowed to start any configuration.
+ */
+
+ if (msgbuf[0] == IXGBE_VF_RESET) {
+ unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
+ u8 *addr = (u8 *)(&msgbuf[1]);
+ DPRINTK(PROBE, INFO, "VF Reset msg received from vf %d\n", vf);
+ adapter->vfinfo[vf].clear_to_send = false;
+ ixgbe_vf_reset_msg(adapter, vf);
+ adapter->vfinfo[vf].clear_to_send = true;
+
+ /* reply to reset with ack and vf mac address */
+ msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
+ memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ /*
+ * Piggyback the multicast filter type so VF can compute the
+ * correct vectors
+ */
+ msgbuf[3] = hw->mac.mc_filter_type;
+ ixgbe_write_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN, vf);
+
+ return retval;
+ }
+
+ if (!adapter->vfinfo[vf].clear_to_send) {
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+ return retval;
+ }
+
+ switch ((msgbuf[0] & 0xFFFF)) {
+ case IXGBE_VF_SET_MAC_ADDR:
+ {
+ u8 *new_mac = ((u8 *)(&msgbuf[1]));
+ if (is_valid_ether_addr(new_mac))
+ ixgbe_set_vf_mac(adapter, vf, new_mac);
+ else
+ retval = -1;
+ }
+ break;
+ case IXGBE_VF_SET_MULTICAST:
+ entries = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ hash_list = (u16 *)&msgbuf[1];
+ retval = ixgbe_set_vf_multicasts(adapter, entries,
+ hash_list, vf);
+ break;
+ case IXGBE_VF_SET_LPE:
+ WARN_ON((msgbuf[0] & 0xFFFF) == IXGBE_VF_SET_LPE);
+ break;
+ case IXGBE_VF_SET_VLAN:
+ add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
+ >> IXGBE_VT_MSGINFO_SHIFT;
+ vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
+ retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
+ break;
+ default:
+ DPRINTK(DRV, ERR, "Unhandled Msg %8.8x\n", msgbuf[0]);
+ retval = IXGBE_ERR_MBX;
+ break;
+ }
+
+ /* notify the VF of the results of what it sent us */
+ if (retval)
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
+ else
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK;
+
+ msgbuf[0] |= IXGBE_VT_MSGTYPE_CTS;
+
+ ixgbe_write_mbx(hw, msgbuf, 1, vf);
+
+ return retval;
+}
+
+static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 msg = IXGBE_VT_MSGTYPE_NACK;
+
+ /* if device isn't clear to send it shouldn't be reading either */
+ if (!adapter->vfinfo[vf].clear_to_send)
+ ixgbe_write_mbx(hw, &msg, 1, vf);
+}
+
+void ixgbe_msg_task(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 vf;
+
+ for (vf = 0; vf < adapter->num_vfs; vf++) {
+ /* process any reset requests */
+ if (!ixgbe_check_for_rst(hw, vf))
+ ixgbe_vf_reset_event(adapter, vf);
+
+ /* process any messages pending */
+ if (!ixgbe_check_for_msg(hw, vf))
+ ixgbe_rcv_msg_from_vf(adapter, vf);
+
+ /* process any acks */
+ if (!ixgbe_check_for_ack(hw, vf))
+ ixgbe_rcv_ack_from_vf(adapter, vf);
+ }
+}
+
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ /* disable transmit and receive for all vfs */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTE(1), 0);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0);
+}
+
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 ping;
+ int i;
+
+ for (i = 0 ; i < adapter->num_vfs; i++) {
+ ping = IXGBE_PF_CONTROL_MSG;
+ if (adapter->vfinfo[i].clear_to_send)
+ ping |= IXGBE_VT_MSGTYPE_CTS;
+ ixgbe_write_mbx(hw, &ping, 1, i);
+ }
+}
+
diff --git a/drivers/net/ixgbe/ixgbe_sriov.h b/drivers/net/ixgbe/ixgbe_sriov.h
new file mode 100644
index 00000000000..51d1106c45a
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_sriov.h
@@ -0,0 +1,47 @@
+/*******************************************************************************
+
+ Intel 10 Gigabit PCI Express Linux driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_SRIOV_H_
+#define _IXGBE_SRIOV_H_
+
+int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
+ int entries, u16 *hash_list, u32 vf);
+void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf);
+void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf);
+void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf);
+void ixgbe_msg_task(struct ixgbe_adapter *adapter);
+int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
+ int vf, unsigned char *mac_addr);
+int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
+void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
+void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
+void ixgbe_dump_registers(struct ixgbe_adapter *adapter);
+
+#endif /* _IXGBE_SRIOV_H_ */
+
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 9eafddfa1b9..2be90746659 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -30,7 +30,7 @@
#include <linux/types.h>
#include <linux/mdio.h>
-#include <linux/list.h>
+#include <linux/netdevice.h>
/* Vendor ID */
#define IXGBE_INTEL_VENDOR_ID 0x8086
@@ -277,6 +277,7 @@
#define IXGBE_DTXCTL 0x07E00
#define IXGBE_DMATXCTL 0x04A80
+#define IXGBE_PFDTXGSWC 0x08220
#define IXGBE_DTXMXSZRQ 0x08100
#define IXGBE_DTXTCPFLGL 0x04A88
#define IXGBE_DTXTCPFLGH 0x04A8C
@@ -287,6 +288,8 @@
#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */
#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */
#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
+
+#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
/* Tx DCA Control register : 128 of these (0-127) */
#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -497,6 +500,7 @@
/* DCB registers */
#define IXGBE_RTRPCS 0x02430
#define IXGBE_RTTDCS 0x04900
+#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTPCS 0x0CD00
#define IXGBE_RTRUP2TC 0x03020
#define IXGBE_RTTUP2TC 0x0C800
@@ -730,6 +734,13 @@
#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000
#define IXGBE_GCR_CAP_VER2 0x00040000
+#define IXGBE_GCR_EXT_MSIX_EN 0x80000000
+#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001
+#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
+#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
+#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
+ IXGBE_GCR_EXT_VT_MODE_64)
+
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */
@@ -1065,6 +1076,8 @@
/* VFRE bitmask */
#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
/* RDHMPN and TDHMPN bitmasks */
#define IXGBE_RDHMPN_RDICADDR 0x007FF800
#define IXGBE_RDHMPN_RDICRDREQ 0x00800000
@@ -1295,6 +1308,7 @@
/* VLAN pool filtering masks */
#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */
#define IXGBE_VLVF_ENTRIES 64
+#define IXGBE_VLVF_VLANID_MASK 0x00000FFF
#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */
@@ -1843,6 +1857,12 @@
#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */
#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT
+/* SR-IOV specific macros */
+#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4)
+#define IXGBE_MBVFICR(_i) (0x00710 + (_i * 4))
+#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i) (0x00700 + (_i * 4))
+
/* Little Endian defines */
#ifndef __le32
#define __le32 u32
@@ -2109,6 +2129,15 @@ struct ixgbe_atr_input {
u8 byte_stream[42];
};
+struct ixgbe_atr_input_masks {
+ u32 src_ip_mask;
+ u32 dst_ip_mask;
+ u16 src_port_mask;
+ u16 dst_port_mask;
+ u16 vlan_id_mask;
+ u16 data_mask;
+};
+
enum ixgbe_eeprom_type {
ixgbe_eeprom_uninitialized = 0,
ixgbe_eeprom_spi,
@@ -2385,7 +2414,7 @@ struct ixgbe_mac_operations {
s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
s32 (*init_rx_addrs)(struct ixgbe_hw *);
- s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct list_head *);
+ s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
ixgbe_mc_addr_itr);
s32 (*enable_mc)(struct ixgbe_hw *);
@@ -2463,6 +2492,37 @@ struct ixgbe_phy_info {
bool multispeed_fiber;
};
+#include "ixgbe_mbx.h"
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *, u16);
+ s32 (*check_for_ack)(struct ixgbe_hw *, u16);
+ s32 (*check_for_rst)(struct ixgbe_hw *, u16);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 usec_delay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
struct ixgbe_hw {
u8 __iomem *hw_addr;
void *back;
@@ -2472,6 +2532,7 @@ struct ixgbe_hw {
struct ixgbe_phy_info phy;
struct ixgbe_eeprom_info eeprom;
struct ixgbe_bus_info bus;
+ struct ixgbe_mbx_info mbx;
u16 device_id;
u16 vendor_id;
u16 subsystem_device_id;
@@ -2486,6 +2547,7 @@ struct ixgbe_info {
struct ixgbe_mac_operations *mac_ops;
struct ixgbe_eeprom_operations *eeprom_ops;
struct ixgbe_phy_operations *phy_ops;
+ struct ixgbe_mbx_operations *mbx_ops;
};
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
new file mode 100644
index 00000000000..dd4e0d27e8c
--- /dev/null
+++ b/drivers/net/ixgbevf/Makefile
@@ -0,0 +1,38 @@
+################################################################################
+#
+# Intel 82599 Virtual Function driver
+# Copyright(c) 1999 - 2009 Intel Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along with
+# this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information:
+# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+#
+################################################################################
+
+#
+# Makefile for the Intel(R) 82599 VF ethernet driver
+#
+
+obj-$(CONFIG_IXGBEVF) += ixgbevf.o
+
+ixgbevf-objs := vf.o \
+ mbx.o \
+ ethtool.o \
+ ixgbevf_main.o
+
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
new file mode 100644
index 00000000000..c44fdb05447
--- /dev/null
+++ b/drivers/net/ixgbevf/defines.h
@@ -0,0 +1,292 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_DEFINES_H_
+#define _IXGBEVF_DEFINES_H_
+
+/* Device IDs */
+#define IXGBE_DEV_ID_82599_VF 0x10ED
+
+#define IXGBE_VF_IRQ_CLEAR_MASK 7
+#define IXGBE_VF_MAX_TX_QUEUES 1
+#define IXGBE_VF_MAX_RX_QUEUES 1
+#define IXGBE_ETH_LENGTH_OF_ADDRESS 6
+
+/* Link speed */
+typedef u32 ixgbe_link_speed;
+#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
+#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
+
+#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */
+#define IXGBE_LINKS_UP 0x40000000
+#define IXGBE_LINKS_SPEED 0x20000000
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8
+#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024
+
+/* Interrupt Vector Allocation Registers */
+#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */
+
+#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */
+
+/* Receive Config masks */
+#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */
+#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */
+#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */
+#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */
+
+/* DCA Control */
+#define IXGBE_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+
+/* PSRTYPE bit definitions */
+#define IXGBE_PSRTYPE_TCPHDR 0x00000010
+#define IXGBE_PSRTYPE_UDPHDR 0x00000020
+#define IXGBE_PSRTYPE_IPV4HDR 0x00000100
+#define IXGBE_PSRTYPE_IPV6HDR 0x00000200
+#define IXGBE_PSRTYPE_L2HDR 0x00001000
+
+/* SRRCTL bit definitions */
+#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */
+#define IXGBE_SRRCTL_RDMTS_SHIFT 22
+#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000
+#define IXGBE_SRRCTL_DROP_EN 0x10000000
+#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F
+#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00
+#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000
+#define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000
+#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000
+#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000
+
+/* Receive Descriptor bit definitions */
+#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */
+#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */
+#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */
+#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */
+#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */
+#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004
+#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */
+#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */
+#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */
+#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */
+#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */
+#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */
+#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */
+#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */
+#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */
+#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */
+#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */
+#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */
+#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */
+#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */
+#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */
+#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */
+#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */
+#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */
+#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */
+#define IXGBE_RXDADV_ERR_MASK 0xFFF00000 /* RDESC.ERRORS mask */
+#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */
+#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */
+#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */
+#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */
+#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */
+#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */
+#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */
+#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */
+#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */
+#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */
+#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */
+#define IXGBE_RXD_PRI_SHIFT 13
+#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */
+#define IXGBE_RXD_CFI_SHIFT 12
+
+#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */
+#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */
+#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */
+#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */
+#define IXGBE_RXDADV_STAT_MASK 0x000FFFFF /* Stat/NEXTP: bit 0-19 */
+#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */
+#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */
+#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */
+#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */
+#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */
+
+#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F
+#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0
+#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0
+#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0
+#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000
+#define IXGBE_RXDADV_RSCCNT_SHIFT 17
+#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5
+#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000
+#define IXGBE_RXDADV_SPH 0x8000
+
+#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
+
+#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
+
+#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */
+#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */
+#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */
+#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
+#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
+#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
+#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
+#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
+#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
+
+/* Transmit Descriptor - Advanced */
+union ixgbe_adv_tx_desc {
+ struct {
+ __le64 buffer_addr; /* Address of descriptor's data buf */
+ __le32 cmd_type_len;
+ __le32 olinfo_status;
+ } read;
+ struct {
+ __le64 rsvd; /* Reserved */
+ __le32 nxtseq_seed;
+ __le32 status;
+ } wb;
+};
+
+/* Receive Descriptor - Advanced */
+union ixgbe_adv_rx_desc {
+ struct {
+ __le64 pkt_addr; /* Packet buffer address */
+ __le64 hdr_addr; /* Header buffer address */
+ } read;
+ struct {
+ struct {
+ union {
+ __le32 data;
+ struct {
+ __le16 pkt_info; /* RSS, Pkt type */
+ __le16 hdr_info; /* Splithdr, hdrlen */
+ } hs_rss;
+ } lo_dword;
+ union {
+ __le32 rss; /* RSS Hash */
+ struct {
+ __le16 ip_id; /* IP id */
+ __le16 csum; /* Packet Checksum */
+ } csum_ip;
+ } hi_dword;
+ } lower;
+ struct {
+ __le32 status_error; /* ext status/error */
+ __le16 length; /* Packet length */
+ __le16 vlan; /* VLAN tag */
+ } upper;
+ } wb; /* writeback */
+};
+
+/* Context descriptors */
+struct ixgbe_adv_tx_context_desc {
+ __le32 vlan_macip_lens;
+ __le32 seqnum_seed;
+ __le32 type_tucmd_mlhl;
+ __le32 mss_l4len_idx;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */
+#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */
+#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */
+#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */
+#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */
+#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */
+#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */
+#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */
+#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */
+#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */
+#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */
+#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */
+#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */
+#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */
+#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */
+#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */
+#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
+#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
+ IXGBE_ADVTXD_POPTS_SHIFT)
+#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */
+#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */
+#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */
+#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */
+#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */
+
+/* Interrupt register bitmasks */
+
+/* Extended Interrupt Cause Read */
+#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */
+#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */
+#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */
+
+/* Extended Interrupt Cause Set */
+#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Set */
+#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+/* Extended Interrupt Mask Clear */
+#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */
+#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */
+#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
+
+#define IXGBE_EIMS_ENABLE_MASK ( \
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_MAILBOX | \
+ IXGBE_EIMS_OTHER)
+
+#define IXGBE_EITR_CNT_WDIS 0x80000000
+
+/* Error Codes */
+#define IXGBE_ERR_INVALID_MAC_ADDR -1
+#define IXGBE_ERR_RESET_FAILED -2
+
+#endif /* _IXGBEVF_DEFINES_H_ */
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
new file mode 100644
index 00000000000..399be0c34c3
--- /dev/null
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -0,0 +1,716 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for ixgbevf */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+#include <linux/vmalloc.h>
+#include <linux/if_vlan.h>
+#include <linux/uaccess.h>
+
+#include "ixgbevf.h"
+
+#define IXGBE_ALL_RAR_ENTRIES 16
+
+#ifdef ETHTOOL_GSTATS
+struct ixgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+ int base_stat_offset;
+};
+
+#define IXGBEVF_STAT(m, b) sizeof(((struct ixgbevf_adapter *)0)->m), \
+ offsetof(struct ixgbevf_adapter, m), \
+ offsetof(struct ixgbevf_adapter, b)
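+/* each stat is reported as (current counter - base counter); counters that
+ * have no saved hardware base snapshot subtract the adapter's zero_base
+ * field instead */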
+static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+ {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc)},
+ {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc)},
+ {"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc)},
+ {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc)},
+ {"tx_busy", IXGBEVF_STAT(tx_busy, zero_base)},
+ {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc)},
+ {"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base)},
+ {"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base)},
+ {"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base)},
+ {"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base)},
+};
+
+#define IXGBE_QUEUE_STATS_LEN 0
+#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
+
+#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register test (offline)",
+ "Link test (on/offline)"
+};
+#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
+
+static int ixgbevf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *ecmd)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = 0;
+ bool link_up;
+
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->transceiver = XCVR_DUMMY1;
+ ecmd->port = -1;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+ if (link_up) {
+ ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ SPEED_10000 : SPEED_1000;
+ ecmd->duplex = DUPLEX_FULL;
+ } else {
+ ecmd->speed = -1;
+ ecmd->duplex = -1;
+ }
+
+ return 0;
+}
+
+static u32 ixgbevf_get_rx_csum(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED;
+}
+
+static int ixgbevf_set_rx_csum(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ if (data)
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ } else {
+ ixgbevf_reset(adapter);
+ }
+
+ return 0;
+}
+
+static int ixgbevf_set_tso(struct net_device *netdev, u32 data)
+{
+ if (data) {
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ } else {
+ netif_tx_stop_all_queues(netdev);
+ netdev->features &= ~NETIF_F_TSO;
+ netdev->features &= ~NETIF_F_TSO6;
+ netif_tx_start_all_queues(netdev);
+ }
+ return 0;
+}
+
+static u32 ixgbevf_get_msglevel(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ return adapter->msg_enable;
+}
+
+static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ adapter->msg_enable = data;
+}
+
+#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
+
+static char *ixgbevf_reg_names[] = {
+ "IXGBE_VFCTRL",
+ "IXGBE_VFSTATUS",
+ "IXGBE_VFLINKS",
+ "IXGBE_VFRXMEMWRAP",
+ "IXGBE_VFRTIMER",
+ "IXGBE_VTEICR",
+ "IXGBE_VTEICS",
+ "IXGBE_VTEIMS",
+ "IXGBE_VTEIMC",
+ "IXGBE_VTEIAC",
+ "IXGBE_VTEIAM",
+ "IXGBE_VTEITR",
+ "IXGBE_VTIVAR",
+ "IXGBE_VTIVAR_MISC",
+ "IXGBE_VFRDBAL0",
+ "IXGBE_VFRDBAL1",
+ "IXGBE_VFRDBAH0",
+ "IXGBE_VFRDBAH1",
+ "IXGBE_VFRDLEN0",
+ "IXGBE_VFRDLEN1",
+ "IXGBE_VFRDH0",
+ "IXGBE_VFRDH1",
+ "IXGBE_VFRDT0",
+ "IXGBE_VFRDT1",
+ "IXGBE_VFRXDCTL0",
+ "IXGBE_VFRXDCTL1",
+ "IXGBE_VFSRRCTL0",
+ "IXGBE_VFSRRCTL1",
+ "IXGBE_VFPSRTYPE",
+ "IXGBE_VFTDBAL0",
+ "IXGBE_VFTDBAL1",
+ "IXGBE_VFTDBAH0",
+ "IXGBE_VFTDBAH1",
+ "IXGBE_VFTDLEN0",
+ "IXGBE_VFTDLEN1",
+ "IXGBE_VFTDH0",
+ "IXGBE_VFTDH1",
+ "IXGBE_VFTDT0",
+ "IXGBE_VFTDT1",
+ "IXGBE_VFTXDCTL0",
+ "IXGBE_VFTXDCTL1",
+ "IXGBE_VFTDWBAL0",
+ "IXGBE_VFTDWBAL1",
+ "IXGBE_VFTDWBAH0",
+ "IXGBE_VFTDWBAH1"
+};
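+/* note: this list must stay in the same order as ixgbevf_get_regs() fills
+ * regs_buff[], since the debug dump below pairs names and values by index */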
+
+
+static int ixgbevf_get_regs_len(struct net_device *netdev)
+{
+ return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
+}
+
+static void ixgbevf_get_regs(struct net_device *netdev,
+ struct ethtool_regs *regs,
+ void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 *regs_buff = p;
+ u32 regs_len = ixgbevf_get_regs_len(netdev);
+ u8 i;
+
+ memset(p, 0, regs_len);
+
+ regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
+
+ /* General Registers */
+ regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
+ regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
+ regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
+ regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFRTIMER);
+
+ /* Interrupt */
+	/* don't read EICR because reading it clears the interrupt causes;
+	 * instead read EICS, which shadows EICR but doesn't clear it */
+ regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
+ regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
+ regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
+ regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
+ regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
+ regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
+ regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+
+ /* Receive DMA */
+ for (i = 0; i < 2; i++)
+ regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+
+ /* Receive */
+ regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);
+
+ /* Transmit */
+ for (i = 0; i < 2; i++)
+ regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
+ for (i = 0; i < 2; i++)
+ regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
+
+ for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
+ hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
+}
+
+static void ixgbevf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, ixgbevf_driver_name, 32);
+ strlcpy(drvinfo->version, ixgbevf_driver_version, 32);
+
+ strlcpy(drvinfo->fw_version, "N/A", 4);
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+}
+
+static void ixgbevf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = adapter->tx_ring;
+ struct ixgbevf_ring *rx_ring = adapter->rx_ring;
+
+ ring->rx_max_pending = IXGBEVF_MAX_RXD;
+ ring->tx_max_pending = IXGBEVF_MAX_TXD;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->rx_pending = rx_ring->count;
+ ring->tx_pending = tx_ring->count;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+}
+
+static int ixgbevf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
+ int i, err;
+ u32 new_rx_count, new_tx_count;
+ bool need_tx_update = false;
+ bool need_rx_update = false;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
+ new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
+ new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
+
+ new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
+ new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
+ new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
+
+ if ((new_tx_count == adapter->tx_ring->count) &&
+ (new_rx_count == adapter->rx_ring->count)) {
+ /* nothing to do */
+ return 0;
+ }
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ if (new_tx_count != adapter->tx_ring_count) {
+ tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!tx_ring) {
+ err = -ENOMEM;
+ goto err_setup;
+ }
+ memcpy(tx_ring, adapter->tx_ring,
+ adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring[i].count = new_tx_count;
+ err = ixgbevf_setup_tx_resources(adapter,
+ &tx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_tx_resources(adapter,
+ &tx_ring[i]);
+ }
+ kfree(tx_ring);
+ goto err_setup;
+ }
+ tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
+ }
+ need_tx_update = true;
+ }
+
+ if (new_rx_count != adapter->rx_ring_count) {
+ rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+		if (!rx_ring) {
+			err = -ENOMEM;
+			goto err_rx_setup;
+		}
+ memcpy(rx_ring, adapter->rx_ring,
+ adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rx_ring[i].count = new_rx_count;
+ err = ixgbevf_setup_rx_resources(adapter,
+ &rx_ring[i]);
+ if (err) {
+ while (i) {
+ i--;
+ ixgbevf_free_rx_resources(adapter,
+ &rx_ring[i]);
+ }
+ kfree(rx_ring);
+ goto err_rx_setup;
+ }
+ rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
+ }
+ need_rx_update = true;
+ }
+
+err_rx_setup:
+ /* if rings need to be updated, here's the place to do it in one shot */
+ if (need_tx_update || need_rx_update) {
+ if (netif_running(netdev))
+ ixgbevf_down(adapter);
+ }
+
+ /* tx */
+ if (need_tx_update) {
+ kfree(adapter->tx_ring);
+ adapter->tx_ring = tx_ring;
+ tx_ring = NULL;
+ adapter->tx_ring_count = new_tx_count;
+ }
+
+ /* rx */
+ if (need_rx_update) {
+ kfree(adapter->rx_ring);
+ adapter->rx_ring = rx_ring;
+ rx_ring = NULL;
+ adapter->rx_ring_count = new_rx_count;
+ }
+
+ /* success! */
+ err = 0;
+ if (netif_running(netdev))
+ ixgbevf_up(adapter);
+
+err_setup:
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+ return err;
+}
+
+static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
+{
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return IXGBE_TEST_LEN;
+ case ETH_SS_STATS:
+ return IXGBE_GLOBAL_STATS_LEN;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int i;
+
+ ixgbevf_update_stats(adapter);
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ char *p = (char *)adapter +
+ ixgbe_gstrings_stats[i].stat_offset;
+ char *b = (char *)adapter +
+ ixgbe_gstrings_stats[i].base_stat_offset;
+ data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
+ ((ixgbe_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)b : *(u32 *)b);
+ }
+}
+
+static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
+ u8 *data)
+{
+ char *p = (char *)data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *ixgbe_gstrings_test,
+ IXGBE_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
+ memcpy(p, ixgbe_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ bool link_up;
+ u32 link_speed = 0;
+ *data = 0;
+
+ hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
+ if (!link_up)
+ *data = 1;
+
+ return *data;
+}
+
+/* ethtool register test data */
+struct ixgbevf_reg_test {
+ u16 reg;
+ u8 array_len;
+ u8 test_type;
+ u32 mask;
+ u32 write;
+};
+
+/* In the hardware, registers are laid out either singly, in arrays
+ * spaced 0x40 bytes apart, or in contiguous tables. We assume
+ * most tests take place on arrays or single registers (handled
+ * as a single-element array) and special-case the tables.
+ * Table tests are always pattern tests.
+ *
+ * We also make provision for some required setup steps by specifying
+ * registers to be written without any read-back testing.
+ */
+
+#define PATTERN_TEST 1
+#define SET_READ_TEST 2
+#define WRITE_NO_TEST 3
+#define TABLE32_TEST 4
+#define TABLE64_TEST_LO 5
+#define TABLE64_TEST_HI 6
+
+/* default VF register test */
+static struct ixgbevf_reg_test reg_test_vf[] = {
+ { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
+ { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
+ { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
+ { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
+ { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
+ { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
+ { 0, 0, 0, 0 }
+};
+
+#define REG_PATTERN_TEST(R, M, W) \
+{ \
+ u32 pat, val, before; \
+ const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \
+ for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if (val != (_test[pat] & W & M)) { \
+ hw_dbg(&adapter->hw, \
+ "pattern test reg %04X failed: got " \
+ "0x%08X expected 0x%08X\n", \
+ R, val, (_test[pat] & W & M)); \
+ *data = R; \
+ writel(before, adapter->hw.hw_addr + R); \
+ return 1; \
+ } \
+ writel(before, adapter->hw.hw_addr + R); \
+ } \
+}
+
+#define REG_SET_AND_CHECK(R, M, W) \
+{ \
+ u32 val, before; \
+ before = readl(adapter->hw.hw_addr + R); \
+ writel((W & M), (adapter->hw.hw_addr + R)); \
+ val = readl(adapter->hw.hw_addr + R); \
+ if ((W & M) != (val & M)) { \
+ printk(KERN_ERR "set/check reg %04X test failed: got 0x%08X " \
+ "expected 0x%08X\n", R, (val & M), (W & M)); \
+ *data = R; \
+ writel(before, (adapter->hw.hw_addr + R)); \
+ return 1; \
+ } \
+ writel(before, (adapter->hw.hw_addr + R)); \
+}
+
+static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
+{
+ struct ixgbevf_reg_test *test;
+ u32 i;
+
+ test = reg_test_vf;
+
+ /*
+ * Perform the register test, looping through the test table
+ * until we either fail or reach the null entry.
+ */
+ while (test->reg) {
+ for (i = 0; i < test->array_len; i++) {
+ switch (test->test_type) {
+ case PATTERN_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case SET_READ_TEST:
+ REG_SET_AND_CHECK(test->reg + (i * 0x40),
+ test->mask,
+ test->write);
+ break;
+ case WRITE_NO_TEST:
+ writel(test->write,
+ (adapter->hw.hw_addr + test->reg)
+ + (i * 0x40));
+ break;
+ case TABLE32_TEST:
+ REG_PATTERN_TEST(test->reg + (i * 4),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_LO:
+ REG_PATTERN_TEST(test->reg + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ case TABLE64_TEST_HI:
+ REG_PATTERN_TEST((test->reg + 4) + (i * 8),
+ test->mask,
+ test->write);
+ break;
+ }
+ }
+ test++;
+ }
+
+ *data = 0;
+ return *data;
+}
+
+static void ixgbevf_diag_test(struct net_device *netdev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ bool if_running = netif_running(netdev);
+
+ set_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ /* Offline tests */
+
+ hw_dbg(&adapter->hw, "offline testing starting\n");
+
+ /* Link test performed before hardware reset so autoneg doesn't
+ * interfere with test result */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ if (if_running)
+ /* indicate we're in test mode */
+ dev_close(netdev);
+ else
+ ixgbevf_reset(adapter);
+
+ hw_dbg(&adapter->hw, "register testing starting\n");
+ if (ixgbevf_reg_test(adapter, &data[0]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ ixgbevf_reset(adapter);
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ if (if_running)
+ dev_open(netdev);
+ } else {
+ hw_dbg(&adapter->hw, "online testing starting\n");
+ /* Online tests */
+ if (ixgbevf_link_test(adapter, &data[1]))
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* Online tests aren't run; pass by default */
+ data[0] = 0;
+
+ clear_bit(__IXGBEVF_TESTING, &adapter->state);
+ }
+ msleep_interruptible(4 * 1000);
+}
+
+static int ixgbevf_nway_reset(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ if (netif_running(netdev)) {
+ if (!adapter->dev_closed)
+ ixgbevf_reinit_locked(adapter);
+ }
+
+ return 0;
+}
+
+static struct ethtool_ops ixgbevf_ethtool_ops = {
+ .get_settings = ixgbevf_get_settings,
+ .get_drvinfo = ixgbevf_get_drvinfo,
+ .get_regs_len = ixgbevf_get_regs_len,
+ .get_regs = ixgbevf_get_regs,
+ .nway_reset = ixgbevf_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ixgbevf_get_ringparam,
+ .set_ringparam = ixgbevf_set_ringparam,
+ .get_rx_csum = ixgbevf_get_rx_csum,
+ .set_rx_csum = ixgbevf_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+ .get_msglevel = ixgbevf_get_msglevel,
+ .set_msglevel = ixgbevf_set_msglevel,
+ .get_tso = ethtool_op_get_tso,
+ .set_tso = ixgbevf_set_tso,
+ .self_test = ixgbevf_diag_test,
+ .get_sset_count = ixgbevf_get_sset_count,
+ .get_strings = ixgbevf_get_strings,
+ .get_ethtool_stats = ixgbevf_get_ethtool_stats,
+};
+
+void ixgbevf_set_ethtool_ops(struct net_device *netdev)
+{
+ SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+}
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
new file mode 100644
index 00000000000..f7015efbff0
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -0,0 +1,318 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_H_
+#define _IXGBEVF_H_
+
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+
+#include "vf.h"
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct ixgbevf_tx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ unsigned long time_stamp;
+ u16 length;
+ u16 next_to_watch;
+ u16 mapped_as_page;
+};
+
+struct ixgbevf_rx_buffer {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct page *page;
+ dma_addr_t page_dma;
+ unsigned int page_offset;
+};
+
+struct ixgbevf_ring {
+ struct ixgbevf_adapter *adapter; /* backlink */
+ void *desc; /* descriptor ring memory */
+ dma_addr_t dma; /* phys. address of descriptor ring */
+ unsigned int size; /* length in bytes */
+ unsigned int count; /* amount of descriptors */
+ unsigned int next_to_use;
+ unsigned int next_to_clean;
+
+ int queue_index; /* needed for multiqueue queue management */
+ union {
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+ };
+
+ u16 head;
+ u16 tail;
+
+ unsigned int total_bytes;
+ unsigned int total_packets;
+
+ u16 reg_idx; /* holds the special value that gets the hardware register
+ * offset associated with this ring, which is different
+ * for DCB and RSS modes */
+
+#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE)
+ /* cpu for tx queue */
+ int cpu;
+#endif
+
+ u64 v_idx; /* maps directly to the index for this ring in the hardware
+ * vector array, can also be used for finding the bit in EICR
+ * and friends that represents the vector for this ring */
+
+ u16 work_limit; /* max work per interrupt */
+ u16 rx_buf_len;
+};
+
+enum ixgbevf_ring_f_enum {
+ RING_F_NONE = 0,
+ RING_F_ARRAY_SIZE /* must be last in enum set */
+};
+
+struct ixgbevf_ring_feature {
+ int indices;
+ int mask;
+};
+
+/* How many Rx buffers do we bundle into one write to the hardware? */
+#define IXGBEVF_RX_BUFFER_WRITE 16 /* Must be power of 2 */
+
+#define MAX_RX_QUEUES 1
+#define MAX_TX_QUEUES 1
+
+#define IXGBEVF_DEFAULT_TXD 1024
+#define IXGBEVF_DEFAULT_RXD 512
+#define IXGBEVF_MAX_TXD 4096
+#define IXGBEVF_MIN_TXD 64
+#define IXGBEVF_MAX_RXD 4096
+#define IXGBEVF_MIN_RXD 64
+
+/* Supported Rx Buffer Sizes */
+#define IXGBEVF_RXBUFFER_64 64 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_128 128 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_256 256 /* Used for packet split */
+#define IXGBEVF_RXBUFFER_2048 2048
+#define IXGBEVF_MAX_RXBUFFER 16384 /* largest size for single descriptor */
+
+#define IXGBEVF_RX_HDR_SIZE IXGBEVF_RXBUFFER_256
+
+#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)
+
+#define IXGBE_TX_FLAGS_CSUM (u32)(1)
+#define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
+#define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
+#define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
+#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
+#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
+#define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
+#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
+#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
+
+/* MAX_MSIX_Q_VECTORS of these are allocated,
+ * but we only use one per queue-specific vector.
+ */
+struct ixgbevf_q_vector {
+ struct ixgbevf_adapter *adapter;
+ struct napi_struct napi;
+ DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
+ DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
+ u8 rxr_count; /* Rx ring count assigned to this vector */
+ u8 txr_count; /* Tx ring count assigned to this vector */
+ u8 tx_itr;
+ u8 rx_itr;
+ u32 eitr;
+ int v_idx; /* vector index in list */
+};
+
+/* Helper macros to switch between ints/sec and what the register uses.
+ * And yes, it's the same math going both ways. The lowest value
+ * supported by all of the ixgbe hardware is 8.
+ */
+#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
+ ((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
+#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG
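+/* e.g. a target of 8000 ints/sec maps to a register value of
+ * 1000000000 / (8000 * 256) = 488, which maps back to ~8004 ints/sec */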
+
+#define IXGBE_DESC_UNUSED(R) \
+ ((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+ (R)->next_to_clean - (R)->next_to_use - 1)
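+/* number of free descriptors in ring R, always keeping one slot unused so
+ * that next_to_use == next_to_clean means "empty" rather than "full";
+ * e.g. count = 512, next_to_clean = 5, next_to_use = 10 -> 506 free */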
+
+#define IXGBE_RX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_rx_desc *)((R).desc))[i]))
+#define IXGBE_TX_DESC_ADV(R, i) \
+ (&(((union ixgbe_adv_tx_desc *)((R).desc))[i]))
+#define IXGBE_TX_CTXTDESC_ADV(R, i) \
+ (&(((struct ixgbe_adv_tx_context_desc *)((R).desc))[i]))
+
+#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
+
+#define OTHER_VECTOR 1
+#define NON_Q_VECTORS (OTHER_VECTOR)
+
+#define MAX_MSIX_Q_VECTORS 2
+#define MAX_MSIX_COUNT 2
+
+#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
+
+/* board specific private data structure */
+struct ixgbevf_adapter {
+ struct timer_list watchdog_timer;
+#ifdef NETIF_F_HW_VLAN_TX
+ struct vlan_group *vlgrp;
+#endif
+ u16 bd_number;
+ struct work_struct reset_task;
+ struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+ char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
+
+ /* Interrupt Throttle Rate */
+ u32 itr_setting;
+ u16 eitr_low;
+ u16 eitr_high;
+
+ /* TX */
+ struct ixgbevf_ring *tx_ring; /* One per active queue */
+ int num_tx_queues;
+ u64 restart_queue;
+ u64 hw_csum_tx_good;
+ u64 lsc_int;
+ u64 hw_tso_ctxt;
+ u64 hw_tso6_ctxt;
+ u32 tx_timeout_count;
+ bool detect_tx_hung;
+
+ /* RX */
+ struct ixgbevf_ring *rx_ring; /* One per active queue */
+ int num_rx_queues;
+ int num_rx_pools; /* == num_rx_queues in 82598 */
+ int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */
+ u64 hw_csum_rx_error;
+ u64 hw_rx_no_dma_resources;
+ u64 hw_csum_rx_good;
+ u64 non_eop_descs;
+ int num_msix_vectors;
+ int max_msix_q_vectors; /* true count of q_vectors for device */
+ struct ixgbevf_ring_feature ring_feature[RING_F_ARRAY_SIZE];
+ struct msix_entry *msix_entries;
+
+ u64 rx_hdr_split;
+ u32 alloc_rx_page_failed;
+ u32 alloc_rx_buff_failed;
+
+ /* Some features need tri-state capability,
+ * thus the additional *_CAPABLE flags.
+ */
+ u32 flags;
+#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
+#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
+#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
+#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
+#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
+#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
+#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 6)
+#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define IXGBE_FLAG_IN_WATCHDOG_TASK (u32)(1 << 8)
+ /* OS defined structs */
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct net_device_stats net_stats;
+
+ /* structs defined in ixgbe_vf.h */
+ struct ixgbe_hw hw;
+ u16 msg_enable;
+ struct ixgbevf_hw_stats stats;
+ u64 zero_base;
+ /* Interrupt Throttle Rate */
+ u32 eitr_param;
+
+ unsigned long state;
+ u32 *config_space;
+ u64 tx_busy;
+ unsigned int tx_ring_count;
+ unsigned int rx_ring_count;
+
+ u32 link_speed;
+ bool link_up;
+ unsigned long link_check_timeout;
+
+ struct work_struct watchdog_task;
+ bool netdev_registered;
+ bool dev_closed;
+};
+
+enum ixgbevf_state_t {
+ __IXGBEVF_TESTING,
+ __IXGBEVF_RESETTING,
+ __IXGBEVF_DOWN
+};
+
+enum ixgbevf_boards {
+ board_82599_vf,
+};
+
+extern struct ixgbevf_info ixgbevf_vf_info;
+extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
+
+/* needed by ethtool.c */
+extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_version[];
+
+extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_down(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_reset(struct ixgbevf_adapter *adapter);
+extern void ixgbevf_set_ethtool_ops(struct net_device *netdev);
+extern int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_rx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_free_tx_resources(struct ixgbevf_adapter *,
+ struct ixgbevf_ring *);
+extern void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
+
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+
+#endif
+extern void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
+extern void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);
+
+#ifdef DEBUG
+extern char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
+#define hw_dbg(hw, format, arg...) \
+ printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
+#else
+#define hw_dbg(hw, format, arg...) do {} while (0)
+#endif
+
+#endif /* _IXGBEVF_H_ */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
new file mode 100644
index 00000000000..ca653c49b76
--- /dev/null
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -0,0 +1,3578 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/******************************************************************************
+ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
+******************************************************************************/
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/string.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+
+#include "ixgbevf.h"
+
+char ixgbevf_driver_name[] = "ixgbevf";
+static const char ixgbevf_driver_string[] =
+ "Intel(R) 82599 Virtual Function";
+
+#define DRV_VERSION "1.0.0-k0"
+const char ixgbevf_driver_version[] = DRV_VERSION;
+static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation.";
+
+static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
+ [board_82599_vf] = &ixgbevf_vf_info,
+};
+
+/* ixgbevf_pci_tbl - PCI Device ID Table
+ *
+ * Wildcard entries (PCI_ANY_ID) should come last
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static struct pci_device_id ixgbevf_pci_tbl[] = {
+ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
+ board_82599_vf},
+
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_DEBUG_LEVEL_SHIFT 3
+
+/* forward decls */
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg);
+
+static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
+ struct ixgbevf_ring *rx_ring,
+ u32 val)
+{
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rx_ring->reg_idx), val);
+}
+
+/**
+ * ixgbevf_set_ivar - set the IVAR registers, mapping interrupt causes to vectors
+ * @adapter: pointer to adapter struct
+ * @direction: 0 for Rx, 1 for Tx, -1 for other causes
+ * @queue: queue to map the corresponding interrupt to
+ * @msix_vector: the vector to map to the corresponding queue
+ **/
+static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
+ u8 queue, u8 msix_vector)
+{
+ u32 ivar, index;
+ struct ixgbe_hw *hw = &adapter->hw;
+ if (direction == -1) {
+ /* other causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= msix_vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+ } else {
+ /* tx or rx causes */
+ msix_vector |= IXGBE_IVAR_ALLOC_VAL;
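+		/* each 32-bit VTIVAR register packs four 8-bit entries covering
+		 * the Rx and Tx causes of a pair of queues; work out the bit
+		 * offset of this queue/direction within that register */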
+ index = ((16 * (queue & 1)) + (8 * direction));
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (msix_vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(queue >> 1), ivar);
+ }
+}
+
+static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_tx_buffer
+ *tx_buffer_info)
+{
+ if (tx_buffer_info->dma) {
+ if (tx_buffer_info->mapped_as_page)
+ pci_unmap_page(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ else
+ pci_unmap_single(adapter->pdev,
+ tx_buffer_info->dma,
+ tx_buffer_info->length,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->dma = 0;
+ }
+ if (tx_buffer_info->skb) {
+ dev_kfree_skb_any(tx_buffer_info->skb);
+ tx_buffer_info->skb = NULL;
+ }
+ tx_buffer_info->time_stamp = 0;
+ /* tx_buffer_info must be completely set up in the transmit path */
+}
+
+static inline bool ixgbevf_check_tx_hang(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ unsigned int eop)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 head, tail;
+
+	/* Detect a transmit hang in hardware; this serializes the
+	 * check with the clearing of time_stamp and the movement of eop */
+ head = readl(hw->hw_addr + tx_ring->head);
+ tail = readl(hw->hw_addr + tx_ring->tail);
+ adapter->detect_tx_hung = false;
+ if ((head != tail) &&
+ tx_ring->tx_buffer_info[eop].time_stamp &&
+ time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ)) {
+ /* detected Tx unit hang */
+ union ixgbe_adv_tx_desc *tx_desc;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ printk(KERN_ERR "Detected Tx Unit Hang\n"
+ " Tx Queue <%d>\n"
+ " TDH, TDT <%x>, <%x>\n"
+ " next_to_use <%x>\n"
+ " next_to_clean <%x>\n"
+ "tx_buffer_info[next_to_clean]\n"
+ " time_stamp <%lx>\n"
+ " jiffies <%lx>\n",
+ tx_ring->queue_index,
+ head, tail,
+ tx_ring->next_to_use, eop,
+ tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+ return true;
+ }
+
+ return false;
+}
+
+#define IXGBE_MAX_TXD_PWR 14
+#define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
+ (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
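+/* e.g. a 20000 byte buffer needs (20000 >> 14) + 1 = 2 data descriptors,
+ * since a single data descriptor can carry at most 16384 bytes */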
+#ifdef MAX_SKB_FRAGS
+#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
+ MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+#else
+#define DESC_NEEDED TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD)
+#endif
+
+static void ixgbevf_tx_timeout(struct net_device *netdev);
+
+/**
+ * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ * @tx_ring: tx ring to clean
+ **/
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int i, eop, count = 0;
+ unsigned int total_bytes = 0, total_packets = 0;
+
+ i = tx_ring->next_to_clean;
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+
+ while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
+ (count < tx_ring->work_limit)) {
+ bool cleaned = false;
+ for ( ; !cleaned; count++) {
+ struct sk_buff *skb;
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ cleaned = (i == eop);
+ skb = tx_buffer_info->skb;
+
+ if (cleaned && skb) {
+ unsigned int segs, bytecount;
+
+ /* gso_segs is currently only valid for tcp */
+ segs = skb_shinfo(skb)->gso_segs ?: 1;
+ /* multiply data chunks by size of headers */
+ bytecount = ((segs - 1) * skb_headlen(skb)) +
+ skb->len;
+ total_packets += segs;
+ total_bytes += bytecount;
+ }
+
+ ixgbevf_unmap_and_free_tx_resource(adapter,
+ tx_buffer_info);
+
+ tx_desc->wb.status = 0;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ eop = tx_ring->tx_buffer_info[i].next_to_watch;
+ eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);
+ }
+
+ tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+ if (unlikely(count && netif_carrier_ok(netdev) &&
+ (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+#ifdef HAVE_TX_MQ
+ if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ }
+#else
+ if (netif_queue_stopped(netdev) &&
+ !test_bit(__IXGBEVF_DOWN, &adapter->state)) {
+ netif_wake_queue(netdev);
+ ++adapter->restart_queue;
+ }
+#endif
+ }
+
+ if (adapter->detect_tx_hung) {
+ if (ixgbevf_check_tx_hang(adapter, tx_ring, i)) {
+ /* schedule immediate reset if we believe we hung */
+ printk(KERN_INFO
+ "tx hang %d detected, resetting adapter\n",
+ adapter->tx_timeout_count + 1);
+ ixgbevf_tx_timeout(adapter->netdev);
+ }
+ }
+
+ /* re-arm the interrupt */
+ if ((count >= tx_ring->work_limit) &&
+ (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
+ }
+
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
+
+ adapter->net_stats.tx_bytes += total_bytes;
+ adapter->net_stats.tx_packets += total_packets;
+
+ return (count < tx_ring->work_limit);
+}
+
+/**
+ * ixgbevf_receive_skb - Send a completed packet up the stack
+ * @q_vector: structure containing interrupt and ring information
+ * @skb: packet to send up
+ * @status: hardware indication of status of receive
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ * @rx_desc: rx descriptor
+ **/
+static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
+ struct sk_buff *skb, u8 status,
+ struct ixgbevf_ring *ring,
+ union ixgbe_adv_rx_desc *rx_desc)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ bool is_vlan = (status & IXGBE_RXD_STAT_VP);
+ u16 tag = le16_to_cpu(rx_desc->wb.upper.vlan);
+ int ret;
+
+ if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) {
+ if (adapter->vlgrp && is_vlan)
+ vlan_gro_receive(&q_vector->napi,
+ adapter->vlgrp,
+ tag, skb);
+ else
+ napi_gro_receive(&q_vector->napi, skb);
+ } else {
+ if (adapter->vlgrp && is_vlan)
+ ret = vlan_hwaccel_rx(skb, adapter->vlgrp, tag);
+ else
+ ret = netif_rx(skb);
+ }
+}
+
+/**
+ * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
+ * @adapter: address of board private structure
+ * @status_err: hardware indication of status of receive
+ * @skb: skb currently being received and modified
+ **/
+static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
+ u32 status_err, struct sk_buff *skb)
+{
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* Rx csum disabled */
+ if (!(adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED))
+ return;
+
+ /* if IP and error */
+ if ((status_err & IXGBE_RXD_STAT_IPCS) &&
+ (status_err & IXGBE_RXDADV_ERR_IPE)) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ if (!(status_err & IXGBE_RXD_STAT_L4CS))
+ return;
+
+ if (status_err & IXGBE_RXDADV_ERR_TCPE) {
+ adapter->hw_csum_rx_error++;
+ return;
+ }
+
+ /* It must be a TCP or UDP packet with a valid checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ adapter->hw_csum_rx_good++;
+}
+
+/**
+ * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ **/
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring,
+ int cleaned_count)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc;
+ struct ixgbevf_rx_buffer *bi;
+ struct sk_buff *skb;
+ unsigned int i;
+ unsigned int bufsz = rx_ring->rx_buf_len + NET_IP_ALIGN;
+
+ i = rx_ring->next_to_use;
+ bi = &rx_ring->rx_buffer_info[i];
+
+ while (cleaned_count--) {
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+
+ if (!bi->page_dma &&
+ (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(adapter->netdev);
+ if (!bi->page) {
+ adapter->alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ bi->page_offset = 0;
+ } else {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= (PAGE_SIZE / 2);
+ }
+
+ bi->page_dma = pci_map_page(pdev, bi->page,
+ bi->page_offset,
+ (PAGE_SIZE / 2),
+ PCI_DMA_FROMDEVICE);
+ }
+
+ skb = bi->skb;
+ if (!skb) {
+ skb = netdev_alloc_skb(adapter->netdev,
+ bufsz);
+
+ if (!skb) {
+ adapter->alloc_rx_buff_failed++;
+ goto no_buffers;
+ }
+
+ /*
+ * Make buffer alignment 2 beyond a 16 byte boundary
+ * this will result in a 16 byte aligned IP header after
+ * the 14 byte MAC header is removed
+ */
+ skb_reserve(skb, NET_IP_ALIGN);
+
+ bi->skb = skb;
+ }
+ if (!bi->dma) {
+ bi->dma = pci_map_single(pdev, skb->data,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ }
+ /* Refresh the desc even if buffer_addrs didn't change because
+ * each write-back erases this info. */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+ rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
+ } else {
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+ bi = &rx_ring->rx_buffer_info[i];
+ }
+
+no_buffers:
+ if (rx_ring->next_to_use != i) {
+ rx_ring->next_to_use = i;
+ if (i-- == 0)
+ i = (rx_ring->count - 1);
+
+ ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
+ }
+}
+
+static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
+ u64 qmask)
+{
+ u32 mask;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ mask = (qmask & 0xFFFFFFFF);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+}
+
+static inline u16 ixgbevf_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
+}
+
+static inline u16 ixgbevf_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+{
+ return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+}
+
+static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
+ struct ixgbevf_ring *rx_ring,
+ int *work_done, int work_to_do)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct pci_dev *pdev = adapter->pdev;
+ union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
+ struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
+ struct sk_buff *skb;
+ unsigned int i;
+ u32 len, staterr;
+ u16 hdr_info;
+ bool cleaned = false;
+ int cleaned_count = 0;
+ unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+ i = rx_ring->next_to_clean;
+ rx_desc = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ while (staterr & IXGBE_RXD_STAT_DD) {
+ u32 upper_len = 0;
+ if (*work_done >= work_to_do)
+ break;
+ (*work_done)++;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ hdr_info = le16_to_cpu(ixgbevf_get_hdr_info(rx_desc));
+ len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hdr_info & IXGBE_RXDADV_SPH)
+ adapter->rx_hdr_split++;
+ if (len > IXGBEVF_RX_HDR_SIZE)
+ len = IXGBEVF_RX_HDR_SIZE;
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ len = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+ cleaned = true;
+ skb = rx_buffer_info->skb;
+ prefetch(skb->data - NET_IP_ALIGN);
+ rx_buffer_info->skb = NULL;
+
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ skb_put(skb, len);
+ }
+
+ if (upper_len) {
+ pci_unmap_page(pdev, rx_buffer_info->page_dma,
+ PAGE_SIZE / 2, PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
+ rx_buffer_info->page,
+ rx_buffer_info->page_offset,
+ upper_len);
+
+ if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
+ (page_count(rx_buffer_info->page) != 1))
+ rx_buffer_info->page = NULL;
+ else
+ get_page(rx_buffer_info->page);
+
+ skb->len += upper_len;
+ skb->data_len += upper_len;
+ skb->truesize += upper_len;
+ }
+
+ i++;
+ if (i == rx_ring->count)
+ i = 0;
+
+ next_rxd = IXGBE_RX_DESC_ADV(*rx_ring, i);
+ prefetch(next_rxd);
+ cleaned_count++;
+
+ next_buffer = &rx_ring->rx_buffer_info[i];
+
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ rx_buffer_info->skb = next_buffer->skb;
+ rx_buffer_info->dma = next_buffer->dma;
+ next_buffer->skb = skb;
+ next_buffer->dma = 0;
+ } else {
+ skb->next = next_buffer->skb;
+ skb->next->prev = skb;
+ }
+ adapter->non_eop_descs++;
+ goto next_desc;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP set */
+ if (unlikely(staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK)) {
+ dev_kfree_skb_irq(skb);
+ goto next_desc;
+ }
+
+ ixgbevf_rx_checksum(adapter, staterr, skb);
+
+ /* probably a little skewed due to removing CRC */
+ total_rx_bytes += skb->len;
+ total_rx_packets++;
+
+ /*
+ * Work around issue of some types of VM to VM loop back
+ * packets not getting split correctly
+ */
+ if (staterr & IXGBE_RXD_STAT_LB) {
+ u32 header_fixup_len = skb->len - skb->data_len;
+ if (header_fixup_len < 14)
+ skb_push(skb, header_fixup_len);
+ }
+ skb->protocol = eth_type_trans(skb, adapter->netdev);
+
+ ixgbevf_receive_skb(q_vector, skb, staterr, rx_ring, rx_desc);
+ adapter->netdev->last_rx = jiffies;
+
+next_desc:
+ rx_desc->wb.upper.status_error = 0;
+
+ /* return some buffers to hardware, one at a time is too slow */
+ if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring,
+ cleaned_count);
+ cleaned_count = 0;
+ }
+
+ /* use prefetched values */
+ rx_desc = next_rxd;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
+ staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ }
+
+ rx_ring->next_to_clean = i;
+ cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
+
+ if (cleaned_count)
+ ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+
+ rx_ring->total_packets += total_rx_packets;
+ rx_ring->total_bytes += total_rx_bytes;
+ adapter->net_stats.rx_bytes += total_rx_bytes;
+ adapter->net_stats.rx_packets += total_rx_packets;
+
+ return cleaned;
+}
+
+/**
+ * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function is optimized for cleaning one queue only on a single
+ * q_vector!!!
+ **/
+static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0;
+ long r_idx;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, rx_ring->v_idx);
+ }
+
+ return work_done;
+}
+
+/**
+ * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * @napi: napi struct with our device's info in it
+ * @budget: amount of work driver is allowed to do this pass, in packets
+ *
+ * This function will clean more than one rx queue associated with a
+ * q_vector.
+ **/
+static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+{
+ struct ixgbevf_q_vector *q_vector =
+ container_of(napi, struct ixgbevf_q_vector, napi);
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *rx_ring = NULL;
+ int work_done = 0, i;
+ long r_idx;
+ u64 enable_mask = 0;
+
+ /* attempt to distribute budget to each queue fairly, but don't allow
+ * the budget to go below 1 because we'll exit polling */
+ budget /= (q_vector->rxr_count ?: 1);
+ budget = max(budget, 1);
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
+ enable_mask |= rx_ring->v_idx;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+#ifndef HAVE_NETDEV_NAPI_LIST
+ if (!netif_running(adapter->netdev))
+ work_done = 0;
+
+#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+
+ /* If all Rx work done, exit the polling mode */
+ if (work_done < budget) {
+ napi_complete(napi);
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable_queues(adapter, enable_mask);
+ }
+
+ return work_done;
+}
+
+
+/**
+ * ixgbevf_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * ixgbevf_configure_msix sets up the hardware to properly generate MSI-X
+ * interrupts.
+ **/
+static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbevf_q_vector *q_vector;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j, q_vectors, v_idx, r_idx;
+ u32 mask;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * Populate the IVAR table and set the ITR values to the
+ * corresponding register.
+ */
+ for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+ q_vector = adapter->q_vector[v_idx];
+ /* XXX for_each_set_bit(...) */
+ r_idx = find_first_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues);
+
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ j = adapter->rx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 0, j, v_idx);
+ r_idx = find_next_bit(q_vector->rxr_idx,
+ adapter->num_rx_queues,
+ r_idx + 1);
+ }
+ r_idx = find_first_bit(q_vector->txr_idx,
+ adapter->num_tx_queues);
+
+ for (i = 0; i < q_vector->txr_count; i++) {
+ j = adapter->tx_ring[r_idx].reg_idx;
+ ixgbevf_set_ivar(adapter, 1, j, v_idx);
+ r_idx = find_next_bit(q_vector->txr_idx,
+ adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ /* if this is a tx only vector halve the interrupt rate */
+ if (q_vector->txr_count && !q_vector->rxr_count)
+ q_vector->eitr = (adapter->eitr_param >> 1);
+ else if (q_vector->rxr_count)
+ /* rx only */
+ q_vector->eitr = adapter->eitr_param;
+
+ ixgbevf_write_eitr(adapter, v_idx, q_vector->eitr);
+ }
+
+ ixgbevf_set_ivar(adapter, -1, 1, v_idx);
+
+ /* set up to autoclear timer, and the vectors */
+ mask = IXGBE_EIMS_ENABLE_MASK;
+ mask &= ~IXGBE_EIMS_OTHER;
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+}
+
+enum latency_range {
+ lowest_latency = 0,
+ low_latency = 1,
+ bulk_latency = 2,
+ latency_invalid = 255
+};
+
+/**
+ * ixgbevf_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @eitr: eitr setting (ints per sec) to give last timeslice
+ * @itr_setting: current latency class (lowest/low/bulk) for this ring set
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ * Returns a new ITR latency class based on the packet and byte
+ * counts seen during the last interrupt. The advantage of per interrupt
+ * computation is faster updates and more accurate ITR for the current
+ * traffic pattern. Constants in this function were computed
+ * based on theoretical maximum wire speed and thresholds were set based
+ * on testing data as well as attempting to minimize response time
+ * while increasing bulk throughput.
+ **/
+static u8 ixgbevf_update_itr(struct ixgbevf_adapter *adapter,
+ u32 eitr, u8 itr_setting,
+ int packets, int bytes)
+{
+ unsigned int retval = itr_setting;
+ u32 timepassed_us;
+ u64 bytes_perint;
+
+ if (packets == 0)
+ goto update_itr_done;
+
+
+ /* simple throttlerate management
+ * 0-20MB/s lowest (100000 ints/s)
+ * 20-100MB/s low (20000 ints/s)
+ * 100-1249MB/s bulk (8000 ints/s)
+ */
+ /* what was last interrupt timeslice? */
+ timepassed_us = 1000000/eitr;
+ bytes_perint = bytes / timepassed_us; /* bytes/usec */
+
+ switch (itr_setting) {
+ case lowest_latency:
+ if (bytes_perint > adapter->eitr_low)
+ retval = low_latency;
+ break;
+ case low_latency:
+ if (bytes_perint > adapter->eitr_high)
+ retval = bulk_latency;
+ else if (bytes_perint <= adapter->eitr_low)
+ retval = lowest_latency;
+ break;
+ case bulk_latency:
+ if (bytes_perint <= adapter->eitr_high)
+ retval = low_latency;
+ break;
+ }
+
+update_itr_done:
+ return retval;
+}
+
+/**
+ * ixgbevf_write_eitr - write VTEITR register in hardware specific way
+ * @adapter: pointer to adapter struct
+ * @v_idx: vector index into q_vector array
+ * @itr_reg: new throttle rate in ints/s (converted to register format here)
+ *
+ * This function is made to be called by ethtool and by the driver
+ * when it needs to update VTEITR registers at runtime. Hardware
+ * specific quirks/differences are taken care of here.
+ */
+static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
+ u32 itr_reg)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ itr_reg = EITR_INTS_PER_SEC_TO_REG(itr_reg);
+
+ /*
+ * set the WDIS bit to not clear the timer bits and cause an
+ * immediate assertion of the interrupt
+ */
+ itr_reg |= IXGBE_EITR_CNT_WDIS;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
+}
+
+static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+{
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ u32 new_itr;
+ u8 current_itr, ret_itr;
+ int i, r_idx, v_idx = q_vector->v_idx;
+ struct ixgbevf_ring *rx_ring, *tx_ring;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->tx_itr,
+ tx_ring->total_packets,
+ tx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->tx_itr = ((q_vector->tx_itr > ret_itr) ?
+ q_vector->tx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ ret_itr = ixgbevf_update_itr(adapter, q_vector->eitr,
+ q_vector->rx_itr,
+ rx_ring->total_packets,
+ rx_ring->total_bytes);
+ /* if the result for this queue would decrease interrupt
+ * rate for this vector then use that result */
+ q_vector->rx_itr = ((q_vector->rx_itr > ret_itr) ?
+ q_vector->rx_itr - 1 : ret_itr);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ current_itr = max(q_vector->rx_itr, q_vector->tx_itr);
+
+ switch (current_itr) {
+ /* counts and packets in update_itr are dependent on these numbers */
+ case lowest_latency:
+ new_itr = 100000;
+ break;
+ case low_latency:
+ new_itr = 20000; /* aka hwitr = ~200 */
+ break;
+ case bulk_latency:
+ default:
+ new_itr = 8000;
+ break;
+ }
+
+ if (new_itr != q_vector->eitr) {
+ u32 itr_reg;
+
+ /* do an exponential smoothing against the previous value */
+ itr_reg = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+ /* save the algorithm value here, not the smoothed one */
+ q_vector->eitr = new_itr;
+ /* ixgbevf_write_eitr() converts ints/s to register units, so
+ * pass the smoothed rate directly */
+ ixgbevf_write_eitr(adapter, v_idx, itr_reg);
+ }
+}
+
+static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
+{
+ struct net_device *netdev = data;
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 eicr;
+ u32 msg;
+
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
+
+ hw->mbx.ops.read(hw, &msg, 1);
+
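+ /* a control message from the PF brings the watchdog forward so
+ * the VF re-checks link state without waiting for the next tick */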
+ if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + 10));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbevf_ring *tx_ring;
+ int i, r_idx;
+
+ if (!q_vector->txr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ tx_ring = &(adapter->tx_ring[r_idx]);
+ tx_ring->total_bytes = 0;
+ tx_ring->total_packets = 0;
+ ixgbevf_clean_tx_irq(adapter, tx_ring);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
+ }
+
+ if (adapter->itr_setting & 1)
+ ixgbevf_set_itr_msix(q_vector);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * @irq: unused
+ * @data: pointer to our q_vector struct for this interrupt vector
+ **/
+static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+{
+ struct ixgbevf_q_vector *q_vector = data;
+ struct ixgbevf_adapter *adapter = q_vector->adapter;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct ixgbevf_ring *rx_ring;
+ int r_idx;
+ int i;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ rx_ring->total_bytes = 0;
+ rx_ring->total_packets = 0;
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ if (!q_vector->rxr_count)
+ return IRQ_HANDLED;
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = &(adapter->rx_ring[r_idx]);
+ /* disable interrupts on this vector only */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, rx_ring->v_idx);
+ napi_schedule(&q_vector->napi);
+
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
+{
+ ixgbevf_msix_clean_rx(irq, data);
+ ixgbevf_msix_clean_tx(irq, data);
+
+ return IRQ_HANDLED;
+}
+
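+/*
+ * Note that ring->v_idx is kept as a bit mask (1 << vector index) so
+ * the per-ring values can be OR'd together and written directly to the
+ * VTEIMS/VTEIMC queue mask registers.
+ */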
+static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
+ int r_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(r_idx, q_vector->rxr_idx);
+ q_vector->rxr_count++;
+ a->rx_ring[r_idx].v_idx = 1 << v_idx;
+}
+
+static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
+ int t_idx)
+{
+ struct ixgbevf_q_vector *q_vector = a->q_vector[v_idx];
+
+ set_bit(t_idx, q_vector->txr_idx);
+ q_vector->txr_count++;
+ a->tx_ring[t_idx].v_idx = 1 << v_idx;
+}
+
+/**
+ * ixgbevf_map_rings_to_vectors - Maps descriptor rings to vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function maps descriptor rings to the queue-specific vectors
+ * we were allotted through the MSI-X enabling code. Ideally, we'd have
+ * one vector per ring/queue, but on a constrained vector budget, we
+ * group the rings as "efficiently" as possible. You would add new
+ * mapping configurations in here.
+ **/
+static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_vectors;
+ int v_start = 0;
+ int rxr_idx = 0, txr_idx = 0;
+ int rxr_remaining = adapter->num_rx_queues;
+ int txr_remaining = adapter->num_tx_queues;
+ int i, j;
+ int rqpv, tqpv;
+ int err = 0;
+
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ /*
+ * The ideal configuration...
+ * We have enough vectors to map one per queue.
+ */
+ if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+ for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
+ map_vector_to_rxq(adapter, v_start, rxr_idx);
+
+ for (; txr_idx < txr_remaining; v_start++, txr_idx++)
+ map_vector_to_txq(adapter, v_start, txr_idx);
+ goto out;
+ }
+
+ /*
+ * If we don't have enough vectors for a 1-to-1
+ * mapping, we'll have to group them so there are
+ * multiple queues per vector.
+ */
+ /* Re-adjusting *qpv takes care of the remainder. */
+ for (i = v_start; i < q_vectors; i++) {
+ rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
+ for (j = 0; j < rqpv; j++) {
+ map_vector_to_rxq(adapter, i, rxr_idx);
+ rxr_idx++;
+ rxr_remaining--;
+ }
+ }
+ for (i = v_start; i < q_vectors; i++) {
+ tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
+ for (j = 0; j < tqpv; j++) {
+ map_vector_to_txq(adapter, i, txr_idx);
+ txr_idx++;
+ txr_remaining--;
+ }
+ }
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_request_msix_irqs - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * ixgbevf_request_msix_irqs allocates MSI-X vectors and requests
+ * interrupts from the kernel.
+ **/
+static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ irqreturn_t (*handler)(int, void *);
+ int i, vector, q_vectors, err;
+ int ri = 0, ti = 0;
+
+ /* Decrement for the Other (mailbox) vector */
+ q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
+ ? &ixgbevf_msix_clean_many : \
+ (_v)->rxr_count ? &ixgbevf_msix_clean_rx : \
+ (_v)->txr_count ? &ixgbevf_msix_clean_tx : \
+ NULL)
+ for (vector = 0; vector < q_vectors; vector++) {
+ handler = SET_HANDLER(adapter->q_vector[vector]);
+
+ if (handler == &ixgbevf_msix_clean_rx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "rx", ri++);
+ } else if (handler == &ixgbevf_msix_clean_tx) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "tx", ti++);
+ } else if (handler == &ixgbevf_msix_clean_many) {
+ sprintf(adapter->name[vector], "%s-%s-%d",
+ netdev->name, "TxRx", vector);
+ } else {
+ /* skip this unused q_vector */
+ continue;
+ }
+ err = request_irq(adapter->msix_entries[vector].vector,
+ handler, 0, adapter->name[vector],
+ adapter->q_vector[vector]);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq failed for MSIX interrupt "
+ "Error: %d\n", err);
+ goto free_queue_irqs;
+ }
+ }
+
+ sprintf(adapter->name[vector], "%s:mbx", netdev->name);
+ err = request_irq(adapter->msix_entries[vector].vector,
+ &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "request_irq for msix_mbx failed: %d\n", err);
+ goto free_queue_irqs;
+ }
+
+ return 0;
+
+free_queue_irqs:
+ for (i = vector - 1; i >= 0; i--)
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ return err;
+}
+
+static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int i, q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (i = 0; i < q_vectors; i++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
+ bitmap_zero(q_vector->rxr_idx, MAX_RX_QUEUES);
+ bitmap_zero(q_vector->txr_idx, MAX_TX_QUEUES);
+ q_vector->rxr_count = 0;
+ q_vector->txr_count = 0;
+ q_vector->eitr = adapter->eitr_param;
+ }
+}
+
+/**
+ * ixgbevf_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+
+ err = ixgbevf_request_msix_irqs(adapter);
+
+ if (err)
+ hw_dbg(&adapter->hw,
+ "request_irq failed, Error %d\n", err);
+
+ return err;
+}
+
+static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i, q_vectors;
+
+ q_vectors = adapter->num_msix_vectors;
+
+ i = q_vectors - 1;
+
+ free_irq(adapter->msix_entries[i].vector, netdev);
+ i--;
+
+ for (; i >= 0; i--) {
+ free_irq(adapter->msix_entries[i].vector,
+ adapter->q_vector[i]);
+ }
+
+ ixgbevf_reset_q_vectors(adapter);
+}
+
+/**
+ * ixgbevf_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_disable(struct ixgbevf_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, ~0);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ for (i = 0; i < adapter->num_msix_vectors; i++)
+ synchronize_irq(adapter->msix_entries[i].vector);
+}
+
+/**
+ * ixgbevf_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static inline void ixgbevf_irq_enable(struct ixgbevf_adapter *adapter,
+ bool queues, bool flush)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 mask;
+ u64 qmask;
+
+ mask = (IXGBE_EIMS_ENABLE_MASK & ~IXGBE_EIMS_RTX_QUEUE);
+ qmask = ~0;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+
+ if (queues)
+ ixgbevf_irq_enable_queues(adapter, qmask);
+
+ if (flush)
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbevf_configure_tx - Configure 82599 VF Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_tx(struct ixgbevf_adapter *adapter)
+{
+ u64 tdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 i, j, tdlen, txctrl;
+
+ /* Setup the HW Tx Head and Tail descriptor pointers */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->tx_ring[i];
+ j = ring->reg_idx;
+ tdba = ring->dma;
+ tdlen = ring->count * sizeof(union ixgbe_adv_tx_desc);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(j),
+ (tdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(j), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(j), tdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(j), 0);
+ adapter->tx_ring[i].head = IXGBE_VFTDH(j);
+ adapter->tx_ring[i].tail = IXGBE_VFTDT(j);
+ /* Disable Tx Head Writeback RO bit, since this hoses
+ * bookkeeping if things aren't delivered in order.
+ */
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(j));
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(j), txctrl);
+ }
+}
+
+#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2
+
+static void ixgbevf_configure_srrctl(struct ixgbevf_adapter *adapter, int index)
+{
+ struct ixgbevf_ring *rx_ring;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 srrctl;
+
+ rx_ring = &adapter->rx_ring[index];
+
+ srrctl = IXGBE_SRRCTL_DROP_EN;
+
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ u16 bufsz = IXGBEVF_RXBUFFER_2048;
+ /* grow the amount we can receive on large page machines */
+ if (bufsz < (PAGE_SIZE / 2))
+ bufsz = (PAGE_SIZE / 2);
+ /* cap the bufsz at our largest descriptor size */
+ bufsz = min((u16)IXGBEVF_MAX_RXBUFFER, bufsz);
+
+ srrctl |= bufsz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
+ srrctl |= ((IXGBEVF_RX_HDR_SIZE <<
+ IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
+ IXGBE_SRRCTL_BSIZEHDR_MASK);
+ } else {
+ srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+
+ if (rx_ring->rx_buf_len == MAXIMUM_ETHERNET_VLAN_SIZE)
+ srrctl |= IXGBEVF_RXBUFFER_2048 >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ else
+ srrctl |= rx_ring->rx_buf_len >>
+ IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+ }
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(index), srrctl);
+}
+
+/**
+ * ixgbevf_configure_rx - Configure 82599 VF Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
+{
+ u64 rdba;
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+ int i, j;
+ u32 rdlen;
+ int rx_buf_len;
+
+ /* Decide whether to use packet split mode or not */
+ if (netdev->mtu > ETH_DATA_LEN) {
+ if (adapter->flags & IXGBE_FLAG_RX_PS_CAPABLE)
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ } else {
+ if (adapter->flags & IXGBE_FLAG_RX_1BUF_CAPABLE)
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ else
+ adapter->flags |= IXGBE_FLAG_RX_PS_ENABLED;
+ }
+
+ /* Set the RX buffer length according to the mode */
+ if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) {
+ /* PSRTYPE must be initialized in 82599 */
+ u32 psrtype = IXGBE_PSRTYPE_TCPHDR |
+ IXGBE_PSRTYPE_UDPHDR |
+ IXGBE_PSRTYPE_IPV4HDR |
+ IXGBE_PSRTYPE_IPV6HDR |
+ IXGBE_PSRTYPE_L2HDR;
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+ rx_buf_len = IXGBEVF_RX_HDR_SIZE;
+ } else {
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+ if (netdev->mtu <= ETH_DATA_LEN)
+ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+ else
+ rx_buf_len = ALIGN(max_frame, 1024);
+ }
+
+ rdlen = adapter->rx_ring[0].count * sizeof(union ixgbe_adv_rx_desc);
+ /* Setup the HW Rx Head and Tail Descriptor Pointers and
+ * the Base and Length of the Rx Descriptor Ring */
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ rdba = adapter->rx_ring[i].dma;
+ j = adapter->rx_ring[i].reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(j),
+ (rdba & DMA_BIT_MASK(32)));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(j), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(j), rdlen);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(j), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(j), 0);
+ adapter->rx_ring[i].head = IXGBE_VFRDH(j);
+ adapter->rx_ring[i].tail = IXGBE_VFRDT(j);
+ adapter->rx_ring[i].rx_buf_len = rx_buf_len;
+
+ ixgbevf_configure_srrctl(adapter, j);
+ }
+}
+
+static void ixgbevf_vlan_rx_register(struct net_device *netdev,
+ struct vlan_group *grp)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j;
+ u32 ctrl;
+
+ adapter->vlgrp = grp;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), ctrl);
+ }
+}
+
+static void ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *v_netdev;
+
+ /* add VID to filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, true);
+ /*
+ * Copy feature flags from netdev to the vlan netdev for this vid.
+ * This allows things like TSO to bubble down to our vlan device.
+ */
+ v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+ v_netdev->features |= adapter->netdev->features;
+ vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
+
+static void ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_disable(adapter);
+
+ vlan_group_set_device(adapter->vlgrp, vid, NULL);
+
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ ixgbevf_irq_enable(adapter, true, true);
+
+ /* remove VID from filter table */
+ if (hw->mac.ops.set_vfta)
+ hw->mac.ops.set_vfta(hw, vid, 0, false);
+}
+
+static void ixgbevf_restore_vlan(struct ixgbevf_adapter *adapter)
+{
+ ixgbevf_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+ if (adapter->vlgrp) {
+ u16 vid;
+ for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
+ if (!vlan_group_get_device(adapter->vlgrp, vid))
+ continue;
+ ixgbevf_vlan_rx_add_vid(adapter->netdev, vid);
+ }
+ }
+}
+
+static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq)
+{
+ struct dev_mc_list *mc_ptr;
+ u8 *addr = *mc_addr_ptr;
+ *vmdq = 0;
+
+ mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
+ if (mc_ptr->next)
+ *mc_addr_ptr = mc_ptr->next->dmi_addr;
+ else
+ *mc_addr_ptr = NULL;
+
+ return addr;
+}
+
+/**
+ * ixgbevf_set_rx_mode - Multicast set
+ * @netdev: network interface device structure
+ *
+ * The set_rx_method entry point is called whenever the multicast address
+ * list or the network interface flags are updated. This routine is
+ * responsible for configuring the hardware for proper multicast mode.
+ **/
+static void ixgbevf_set_rx_mode(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ u8 *addr_list = NULL;
+ int addr_count = 0;
+
+ /* reprogram multicast list */
+ addr_count = netdev_mc_count(netdev);
+ if (addr_count)
+ addr_list = netdev->mc_list->dmi_addr;
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
+ ixgbevf_addr_list_itr);
+}
+
+static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ struct napi_struct *napi;
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi = &q_vector->napi;
+ if (q_vector->rxr_count > 1)
+ napi->poll = &ixgbevf_clean_rxonly_many;
+
+ napi_enable(napi);
+ }
+}
+
+static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
+{
+ int q_idx;
+ struct ixgbevf_q_vector *q_vector;
+ int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
+ for (q_idx = 0; q_idx < q_vectors; q_idx++) {
+ q_vector = adapter->q_vector[q_idx];
+ if (!q_vector->rxr_count)
+ continue;
+ napi_disable(&q_vector->napi);
+ }
+}
+
+static void ixgbevf_configure(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ int i;
+
+ ixgbevf_set_rx_mode(netdev);
+
+ ixgbevf_restore_vlan(adapter);
+
+ ixgbevf_configure_tx(adapter);
+ ixgbevf_configure_rx(adapter);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct ixgbevf_ring *ring = &adapter->rx_ring[i];
+ ixgbevf_alloc_rx_buffers(adapter, ring, ring->count);
+ ring->next_to_use = ring->count - 1;
+ writel(ring->next_to_use, adapter->hw.hw_addr + ring->tail);
+ }
+}
+
+#define IXGBE_MAX_RX_DESC_POLL 10
+static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
+ int rxr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int j = adapter->rx_ring[rxr].reg_idx;
+ int k;
+
+ for (k = 0; k < IXGBE_MAX_RX_DESC_POLL; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j)) & IXGBE_RXDCTL_ENABLE)
+ break;
+ msleep(1);
+ }
+ if (k >= IXGBE_MAX_RX_DESC_POLL) {
+ hw_dbg(hw, "RXDCTL.ENABLE on Rx queue %d "
+ "not set within the polling period\n", rxr);
+ }
+
+ ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
+ (adapter->rx_ring[rxr].count - 1));
+}
+
+static int ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, j = 0;
+ int num_rx_rings = adapter->num_rx_queues;
+ u32 txdctl, rxdctl;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ /* enable WTHRESH=8 descriptors, to encourage burst writeback */
+ txdctl |= (8 << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j), txdctl);
+ }
+
+ for (i = 0; i < num_rx_rings; i++) {
+ j = adapter->rx_ring[i].reg_idx;
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(j));
+ rxdctl |= IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(j), rxdctl);
+ ixgbevf_rx_desc_queue_enable(adapter, i);
+ }
+
+ ixgbevf_configure_msix(adapter);
+
+ if (hw->mac.ops.set_rar) {
+ if (is_valid_ether_addr(hw->mac.addr))
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+ else
+ hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
+ }
+
+ clear_bit(__IXGBEVF_DOWN, &adapter->state);
+ ixgbevf_napi_enable_all(adapter);
+
+ /* enable transmits */
+ netif_tx_start_all_queues(netdev);
+
+ /* bring the link up in the watchdog, this could race with our first
+ * link up interrupt but shouldn't be a problem */
+ adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE;
+ adapter->link_check_timeout = jiffies;
+ mod_timer(&adapter->watchdog_timer, jiffies);
+ return 0;
+}
+
+int ixgbevf_up(struct ixgbevf_adapter *adapter)
+{
+ int err;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ ixgbevf_configure(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return err;
+}
+
+/**
+ * ixgbevf_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void ixgbevf_clean_rx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ unsigned long size;
+ unsigned int i;
+
+ if (!rx_ring->rx_buffer_info)
+ return;
+
+ /* Free all the Rx ring sk_buffs */
+ for (i = 0; i < rx_ring->count; i++) {
+ struct ixgbevf_rx_buffer *rx_buffer_info;
+
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+ if (rx_buffer_info->dma) {
+ pci_unmap_single(pdev, rx_buffer_info->dma,
+ rx_ring->rx_buf_len,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->dma = 0;
+ }
+ if (rx_buffer_info->skb) {
+ struct sk_buff *skb = rx_buffer_info->skb;
+ rx_buffer_info->skb = NULL;
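+ /* frames that spanned several descriptors were chained
+ * together via skb->prev; free every skb in the chain */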
+ do {
+ struct sk_buff *this = skb;
+ skb = skb->prev;
+ dev_kfree_skb(this);
+ } while (skb);
+ }
+ if (!rx_buffer_info->page)
+ continue;
+ pci_unmap_page(pdev, rx_buffer_info->page_dma, PAGE_SIZE / 2,
+ PCI_DMA_FROMDEVICE);
+ rx_buffer_info->page_dma = 0;
+ put_page(rx_buffer_info->page);
+ rx_buffer_info->page = NULL;
+ rx_buffer_info->page_offset = 0;
+ }
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Zero out the descriptor ring */
+ memset(rx_ring->desc, 0, rx_ring->size);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ if (rx_ring->head)
+ writel(0, adapter->hw.hw_addr + rx_ring->head);
+ if (rx_ring->tail)
+ writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned long size;
+ unsigned int i;
+
+ if (!tx_ring->tx_buffer_info)
+ return;
+
+ /* Free all the Tx ring sk_buffs */
+
+ for (i = 0; i < tx_ring->count; i++) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ memset(tx_ring->desc, 0, tx_ring->size);
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+
+ if (tx_ring->head)
+ writel(0, adapter->hw.hw_addr + tx_ring->head);
+ if (tx_ring->tail)
+ writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * ixgbevf_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_rx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ ixgbevf_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void ixgbevf_clean_all_tx_rings(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ ixgbevf_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+void ixgbevf_down(struct ixgbevf_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 txdctl;
+ int i, j;
+
+ /* signal that we are down to the interrupt handler */
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+ /* disable receives */
+
+ netif_tx_disable(netdev);
+
+ msleep(10);
+
+ netif_tx_stop_all_queues(netdev);
+
+ ixgbevf_irq_disable(adapter);
+
+ ixgbevf_napi_disable_all(adapter);
+
+ del_timer_sync(&adapter->watchdog_timer);
+ /* can't call flush scheduled work here because it can deadlock
+ * if linkwatch_event tries to acquire the rtnl_lock which we are
+ * holding */
+ while (adapter->flags & IXGBE_FLAG_IN_WATCHDOG_TASK)
+ msleep(1);
+
+ /* disable transmits in the hardware now that interrupts are off */
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ j = adapter->tx_ring[i].reg_idx;
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(j));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(j),
+ (txdctl & ~IXGBE_TXDCTL_ENABLE));
+ }
+
+ netif_carrier_off(netdev);
+
+ if (!pci_channel_offline(adapter->pdev))
+ ixgbevf_reset(adapter);
+
+ ixgbevf_clean_all_tx_rings(adapter);
+ ixgbevf_clean_all_rx_rings(adapter);
+}
+
+void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ WARN_ON(in_interrupt());
+
+ while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
+ msleep(1);
+
+ /*
+ * Check if PF is up before re-init. If not then skip until
+ * later when the PF is up and ready to service requests from
+ * the VF via mailbox. If the VF is up and running then the
+ * watchdog task will continue to schedule reset tasks until
+ * the PF is up and running.
+ */
+ if (!hw->mac.ops.reset_hw(hw)) {
+ ixgbevf_down(adapter);
+ ixgbevf_up(adapter);
+ }
+
+ clear_bit(__IXGBEVF_RESETTING, &adapter->state);
+}
+
+void ixgbevf_reset(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
+
+ if (hw->mac.ops.reset_hw(hw))
+ hw_dbg(hw, "PF still resetting\n");
+ else
+ hw->mac.ops.init_hw(hw);
+
+ if (is_valid_ether_addr(adapter->hw.mac.addr)) {
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr,
+ netdev->addr_len);
+ }
+}
+
+static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
+ int vectors)
+{
+ int err, vector_threshold;
+
+ /* We'll want at least 3 (vector_threshold):
+ * 1) TxQ[0] Cleanup
+ * 2) RxQ[0] Cleanup
+ * 3) Other (Link Status Change, etc.)
+ */
+ vector_threshold = MIN_MSIX_COUNT;
+
+ /* The more we get, the more we will assign to Tx/Rx Cleanup
+ * for the separate queues...where Rx Cleanup >= Tx Cleanup.
+ * Right now, we simply care about how many we'll get; we'll
+ * set them up later while requesting irq's.
+ */
+ while (vectors >= vector_threshold) {
+ err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
+ vectors);
+ if (!err) /* Success in acquiring all requested vectors. */
+ break;
+ else if (err < 0)
+ vectors = 0; /* Nasty failure, quit now */
+ else /* err == number of vectors we should try again with */
+ vectors = err;
+ }
+
+ if (vectors < vector_threshold) {
+ /* Can't allocate enough MSI-X interrupts? Oh well.
+ * This just means we'll go with either a single MSI
+ * vector or fall back to legacy interrupts.
+ */
+ hw_dbg(&adapter->hw,
+ "Unable to allocate MSI-X interrupts\n");
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+ } else {
+ /*
+ * Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+ }
+}
+
+/**
+ * ixgbevf_set_num_queues - Allocate queues for device, feature dependent
+ * @adapter: board private structure to initialize
+ *
+ * This is the top level queue allocation routine. The order here is very
+ * important, starting with the "most" number of features turned on at once,
+ * and ending with the smallest set of features. This way large combinations
+ * can be allocated if they're turned on, and smaller combinations are the
+ * fallthrough conditions.
+ *
+ **/
+static void ixgbevf_set_num_queues(struct ixgbevf_adapter *adapter)
+{
+ /* Start with base case */
+ adapter->num_rx_queues = 1;
+ adapter->num_tx_queues = 1;
+ adapter->num_rx_pools = adapter->num_rx_queues;
+ adapter->num_rx_queues_per_pool = 1;
+}
+
+/**
+ * ixgbevf_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.
+ **/
+static int ixgbevf_alloc_queues(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->tx_ring)
+ goto err_tx_ring_allocation;
+
+ adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+ sizeof(struct ixgbevf_ring), GFP_KERNEL);
+ if (!adapter->rx_ring)
+ goto err_rx_ring_allocation;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ adapter->tx_ring[i].count = adapter->tx_ring_count;
+ adapter->tx_ring[i].queue_index = i;
+ adapter->tx_ring[i].reg_idx = i;
+ }
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ adapter->rx_ring[i].count = adapter->rx_ring_count;
+ adapter->rx_ring[i].queue_index = i;
+ adapter->rx_ring[i].reg_idx = i;
+ }
+
+ return 0;
+
+err_rx_ring_allocation:
+ kfree(adapter->tx_ring);
+err_tx_ring_allocation:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_set_interrupt_capability - set MSI-X or FAIL if not supported
+ * @adapter: board private structure to initialize
+ *
+ * Attempt to configure the interrupts using the best available
+ * capabilities of the hardware and the kernel.
+ **/
+static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ int err = 0;
+ int vector, v_budget;
+
+ /*
+ * It's easy to be greedy for MSI-X vectors, but it really
+ * doesn't do us much good if we have a lot more vectors
+ * than CPUs. So let's be conservative and only ask for
+ * (roughly) twice the number of vectors as there are CPUs.
+ */
+ v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
+ (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+
+ /* A failure in MSI-X entry allocation isn't fatal, but it does
+ * mean we disable MSI-X capabilities of the adapter. */
+ adapter->msix_entries = kcalloc(v_budget,
+ sizeof(struct msix_entry), GFP_KERNEL);
+ if (!adapter->msix_entries) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ for (vector = 0; vector < v_budget; vector++)
+ adapter->msix_entries[vector].entry = vector;
+
+ ixgbevf_acquire_msix_vectors(adapter, v_budget);
+
+out:
+ return err;
+}
+
+/**
+ * ixgbevf_alloc_q_vectors - Allocate memory for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one q_vector per queue interrupt. If allocation fails we
+ * return -ENOMEM.
+ **/
+static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ struct ixgbevf_q_vector *q_vector;
+ int napi_vectors;
+ int (*poll)(struct napi_struct *, int);
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+ poll = &ixgbevf_clean_rxonly;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
+ if (!q_vector)
+ goto err_out;
+ q_vector->adapter = adapter;
+ q_vector->v_idx = q_idx;
+ q_vector->eitr = adapter->eitr_param;
+ if (q_idx < napi_vectors)
+ netif_napi_add(adapter->netdev, &q_vector->napi,
+ (*poll), 64);
+ adapter->q_vector[q_idx] = q_vector;
+ }
+
+ return 0;
+
+err_out:
+ while (q_idx) {
+ q_idx--;
+ q_vector = adapter->q_vector[q_idx];
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ adapter->q_vector[q_idx] = NULL;
+ }
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_free_q_vectors - Free memory allocated for interrupt vectors
+ * @adapter: board private structure to initialize
+ *
+ * This function frees the memory allocated to the q_vectors. In addition if
+ * NAPI is enabled it will delete any references to the NAPI struct prior
+ * to freeing the q_vector.
+ **/
+static void ixgbevf_free_q_vectors(struct ixgbevf_adapter *adapter)
+{
+ int q_idx, num_q_vectors;
+ int napi_vectors;
+
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ napi_vectors = adapter->num_rx_queues;
+
+ for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
+ struct ixgbevf_q_vector *q_vector = adapter->q_vector[q_idx];
+
+ adapter->q_vector[q_idx] = NULL;
+ if (q_idx < napi_vectors)
+ netif_napi_del(&q_vector->napi);
+ kfree(q_vector);
+ }
+}
+
+/**
+ * ixgbevf_reset_interrupt_capability - Reset MSIX setup
+ * @adapter: board private structure
+ *
+ **/
+static void ixgbevf_reset_interrupt_capability(struct ixgbevf_adapter *adapter)
+{
+ pci_disable_msix(adapter->pdev);
+ kfree(adapter->msix_entries);
+ adapter->msix_entries = NULL;
+}
+
+/**
+ * ixgbevf_init_interrupt_scheme - Determine if MSIX is supported and init
+ * @adapter: board private structure to initialize
+ *
+ **/
+static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
+{
+ int err;
+
+ /* Number of supported queues */
+ ixgbevf_set_num_queues(adapter);
+
+ err = ixgbevf_set_interrupt_capability(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw,
+ "Unable to setup interrupt capabilities\n");
+ goto err_set_interrupt;
+ }
+
+ err = ixgbevf_alloc_q_vectors(adapter);
+ if (err) {
+ hw_dbg(&adapter->hw, "Unable to allocate memory for queue "
+ "vectors\n");
+ goto err_alloc_q_vectors;
+ }
+
+ err = ixgbevf_alloc_queues(adapter);
+ if (err) {
+ printk(KERN_ERR "Unable to allocate memory for queues\n");
+ goto err_alloc_queues;
+ }
+
+ hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, "
+ "Tx Queue count = %u\n",
+ (adapter->num_rx_queues > 1) ? "Enabled" :
+ "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ return 0;
+err_alloc_queues:
+ ixgbevf_free_q_vectors(adapter);
+err_alloc_q_vectors:
+ ixgbevf_reset_interrupt_capability(adapter);
+err_set_interrupt:
+ return err;
+}
+
+/**
+ * ixgbevf_sw_init - Initialize general software structures
+ * (struct ixgbevf_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbevf_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct pci_dev *pdev = adapter->pdev;
+ int err;
+
+ /* PCI config space info */
+
+ hw->vendor_id = pdev->vendor;
+ hw->device_id = pdev->device;
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_device_id = pdev->subsystem_device;
+
+ hw->mbx.ops.init_params(hw);
+ hw->mac.max_tx_queues = MAX_TX_QUEUES;
+ hw->mac.max_rx_queues = MAX_RX_QUEUES;
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ dev_info(&pdev->dev,
+ "PF still in reset state, assigning new address\n");
+ random_ether_addr(hw->mac.addr);
+ } else {
+ err = hw->mac.ops.init_hw(hw);
+ if (err) {
+ printk(KERN_ERR "init_shared_code failed: %d\n", err);
+ goto out;
+ }
+ }
+
+ /* Enable dynamic interrupt throttling rates */
+ adapter->eitr_param = 20000;
+ adapter->itr_setting = 1;
+
+ /* set default thresholds for the ITR algorithm, in MB/s */
+ adapter->eitr_low = 10;
+ adapter->eitr_high = 20;
+
+ /* set default ring sizes */
+ adapter->tx_ring_count = IXGBEVF_DEFAULT_TXD;
+ adapter->rx_ring_count = IXGBEVF_DEFAULT_RXD;
+
+ /* enable rx csum by default */
+ adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+out:
+ return err;
+}
+
+static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ adapter->stats.last_vfgprc = IXGBE_READ_REG(hw, IXGBE_VFGPRC);
+ adapter->stats.last_vfgorc = IXGBE_READ_REG(hw, IXGBE_VFGORC_LSB);
+ adapter->stats.last_vfgorc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGORC_MSB))) << 32);
+ adapter->stats.last_vfgptc = IXGBE_READ_REG(hw, IXGBE_VFGPTC);
+ adapter->stats.last_vfgotc = IXGBE_READ_REG(hw, IXGBE_VFGOTC_LSB);
+ adapter->stats.last_vfgotc |=
+ (((u64)(IXGBE_READ_REG(hw, IXGBE_VFGOTC_MSB))) << 32);
+ adapter->stats.last_vfmprc = IXGBE_READ_REG(hw, IXGBE_VFMPRC);
+
+ adapter->stats.base_vfgprc = adapter->stats.last_vfgprc;
+ adapter->stats.base_vfgorc = adapter->stats.last_vfgorc;
+ adapter->stats.base_vfgptc = adapter->stats.last_vfgptc;
+ adapter->stats.base_vfgotc = adapter->stats.last_vfgotc;
+ adapter->stats.base_vfmprc = adapter->stats.last_vfmprc;
+}
+
+#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \
+ { \
+ u32 current_counter = IXGBE_READ_REG(hw, reg); \
+ if (current_counter < last_counter) \
+ counter += 0x100000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFFF00000000LL; \
+ counter |= current_counter; \
+ }
+
+#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \
+ { \
+ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \
+ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \
+ u64 current_counter = (current_counter_msb << 32) | \
+ current_counter_lsb; \
+ if (current_counter < last_counter) \
+ counter += 0x1000000000LL; \
+ last_counter = current_counter; \
+ counter &= 0xFFFFFFF000000000LL; \
+ counter |= current_counter; \
+ }
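+
+/*
+ * The VF statistics registers are only 32 (or 36) bits wide and roll
+ * over; the macros above extend each reading into a 64-bit software
+ * counter by detecting the wrap (current < last) and carrying into
+ * the counter's upper bits.
+ */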
+/**
+ * ixgbevf_update_stats - Update the board statistics counters.
+ * @adapter: board private structure
+ **/
+void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
+ adapter->stats.vfgprc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFGPTC, adapter->stats.last_vfgptc,
+ adapter->stats.vfgptc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGORC_LSB, IXGBE_VFGORC_MSB,
+ adapter->stats.last_vfgorc,
+ adapter->stats.vfgorc);
+ UPDATE_VF_COUNTER_36bit(IXGBE_VFGOTC_LSB, IXGBE_VFGOTC_MSB,
+ adapter->stats.last_vfgotc,
+ adapter->stats.vfgotc);
+ UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
+ adapter->stats.vfmprc);
+
+ /* Fill out the OS statistics structure */
+ adapter->net_stats.multicast = adapter->stats.vfmprc -
+ adapter->stats.base_vfmprc;
+}
+
+/**
+ * ixgbevf_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long
+ **/
+static void ixgbevf_watchdog(unsigned long data)
+{
+ struct ixgbevf_adapter *adapter = (struct ixgbevf_adapter *)data;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u64 eics = 0;
+ int i;
+
+ /*
+ * Do the watchdog outside of interrupt context due to the lovely
+ * delays that some of the newer hardware requires
+ */
+
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state))
+ goto watchdog_short_circuit;
+
+ /* get one bit for every active tx/rx interrupt vector */
+ for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+ struct ixgbevf_q_vector *qv = adapter->q_vector[i];
+ if (qv->rxr_count || qv->txr_count)
+ eics |= (1 << i);
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEICS, (u32)eics);
+
+watchdog_short_circuit:
+ schedule_work(&adapter->watchdog_task);
+}
+
+/**
+ * ixgbevf_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+static void ixgbevf_tx_timeout(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* Do the reset outside of interrupt context */
+ schedule_work(&adapter->reset_task);
+}
+
+static void ixgbevf_reset_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = container_of(work, struct ixgbevf_adapter, reset_task);
+
+ /* If we're already down or resetting, just bail */
+ if (test_bit(__IXGBEVF_DOWN, &adapter->state) ||
+ test_bit(__IXGBEVF_RESETTING, &adapter->state))
+ return;
+
+ adapter->tx_timeout_count++;
+
+ ixgbevf_reinit_locked(adapter);
+}
+
+/**
+ * ixgbevf_watchdog_task - worker thread to bring link up
+ * @work: pointer to work_struct containing our data
+ **/
+static void ixgbevf_watchdog_task(struct work_struct *work)
+{
+ struct ixgbevf_adapter *adapter = container_of(work,
+ struct ixgbevf_adapter,
+ watchdog_task);
+ struct net_device *netdev = adapter->netdev;
+ struct ixgbe_hw *hw = &adapter->hw;
+ u32 link_speed = adapter->link_speed;
+ bool link_up = adapter->link_up;
+
+ adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
+
+ /*
+ * Always check the link on the watchdog because we have
+ * no LSC interrupt
+ */
+ if (hw->mac.ops.check_link) {
+ if ((hw->mac.ops.check_link(hw, &link_speed,
+ &link_up, false)) != 0) {
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ schedule_work(&adapter->reset_task);
+ goto pf_has_reset;
+ }
+ } else {
+ /* always assume link is up if there is no
+ * check_link function */
+ link_speed = IXGBE_LINK_SPEED_10GB_FULL;
+ link_up = true;
+ }
+ adapter->link_up = link_up;
+ adapter->link_speed = link_speed;
+
+ if (link_up) {
+ if (!netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
+ ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
+ "10 Gbps" : "1 Gbps"));
+ netif_carrier_on(netdev);
+ netif_tx_wake_all_queues(netdev);
+ } else {
+ /* Force detection of hung controller */
+ adapter->detect_tx_hung = true;
+ }
+ } else {
+ adapter->link_up = false;
+ adapter->link_speed = 0;
+ if (netif_carrier_ok(netdev)) {
+ hw_dbg(&adapter->hw, "NIC Link is Down\n");
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ }
+ }
+
+pf_has_reset:
+ ixgbevf_update_stats(adapter);
+
+ /* Force detection of hung controller every watchdog period */
+ adapter->detect_tx_hung = true;
+
+ /* Reset the timer */
+ if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+ mod_timer(&adapter->watchdog_timer,
+ round_jiffies(jiffies + (2 * HZ)));
+
+ adapter->flags &= ~IXGBE_FLAG_IN_WATCHDOG_TASK;
+}
+
+/**
+ * ixgbevf_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+void ixgbevf_free_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_tx_ring(adapter, tx_ring);
+
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma);
+
+ tx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_tx_queues; i++)
+ if (adapter->tx_ring[i].desc)
+ ixgbevf_free_tx_resources(adapter,
+ &adapter->tx_ring[i]);
+
+}
+
+/**
+ * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
+ tx_ring->tx_buffer_info = vmalloc(size);
+ if (!tx_ring->tx_buffer_info)
+ goto err;
+ memset(tx_ring->tx_buffer_info, 0, size);
+
+ /* round up to nearest 4K */
+ tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
+ tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+ tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
+ &tx_ring->dma);
+ if (!tx_ring->desc)
+ goto err;
+
+ tx_ring->next_to_use = 0;
+ tx_ring->next_to_clean = 0;
+ tx_ring->work_limit = tx_ring->count;
+ return 0;
+
+err:
+ vfree(tx_ring->tx_buffer_info);
+ tx_ring->tx_buffer_info = NULL;
+ hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit "
+ "descriptor ring\n");
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_tx_resources - allocate all queues Tx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings are populated (while the rest are not). It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ err = ixgbevf_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Tx Queue %u failed\n", i);
+ break;
+ }
+
+ return err;
+}
+
+/**
+ * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int size;
+
+ size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
+ rx_ring->rx_buffer_info = vmalloc(size);
+ if (!rx_ring->rx_buffer_info) {
+ hw_dbg(&adapter->hw,
+ "Unable to vmalloc buffer memory for "
+ "the receive descriptor ring\n");
+ goto alloc_failed;
+ }
+ memset(rx_ring->rx_buffer_info, 0, size);
+
+ /* Round up to nearest 4K */
+ rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
+ rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+ rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size,
+ &rx_ring->dma);
+
+ if (!rx_ring->desc) {
+ hw_dbg(&adapter->hw,
+ "Unable to allocate memory for "
+ "the receive descriptor ring\n");
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+ goto alloc_failed;
+ }
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+
+ return 0;
+alloc_failed:
+ return -ENOMEM;
+}
+
+/**
+ * ixgbevf_setup_all_rx_resources - allocate all queues Rx resources
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, then it's possible one or
+ * more of the rings is populated (while the rest are not). It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i, err = 0;
+
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ err = ixgbevf_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+ if (!err)
+ continue;
+ hw_dbg(&adapter->hw,
+ "Allocation for Rx Queue %u failed\n", i);
+ break;
+ }
+ return err;
+}
+
+/**
+ * ixgbevf_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+void ixgbevf_free_rx_resources(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *rx_ring)
+{
+ struct pci_dev *pdev = adapter->pdev;
+
+ ixgbevf_clean_rx_ring(adapter, rx_ring);
+
+ vfree(rx_ring->rx_buffer_info);
+ rx_ring->rx_buffer_info = NULL;
+
+ pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma);
+
+ rx_ring->desc = NULL;
+}
+
+/**
+ * ixgbevf_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+static void ixgbevf_free_all_rx_resources(struct ixgbevf_adapter *adapter)
+{
+ int i;
+
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ if (adapter->rx_ring[i].desc)
+ ixgbevf_free_rx_resources(adapter,
+ &adapter->rx_ring[i]);
+}
+
+/**
+ * ixgbevf_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP). At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int ixgbevf_open(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ int err;
+
+ /* disallow open during test */
+ if (test_bit(__IXGBEVF_TESTING, &adapter->state))
+ return -EBUSY;
+
+ if (hw->adapter_stopped) {
+ ixgbevf_reset(adapter);
+ /* if adapter is still stopped then PF isn't up and
+ * the vf can't start. */
+ if (hw->adapter_stopped) {
+ err = IXGBE_ERR_MBX;
+ printk(KERN_ERR "Unable to start - perhaps the PF "
+ "driver isn't up yet\n");
+ goto err_setup_reset;
+ }
+ }
+
+ /* allocate transmit descriptors */
+ err = ixgbevf_setup_all_tx_resources(adapter);
+ if (err)
+ goto err_setup_tx;
+
+ /* allocate receive descriptors */
+ err = ixgbevf_setup_all_rx_resources(adapter);
+ if (err)
+ goto err_setup_rx;
+
+ ixgbevf_configure(adapter);
+
+ /*
+ * Map the Tx/Rx rings to the vectors we were allotted.
+ * Since request_irq is called later in this function, the
+ * rings must be mapped to vectors *before* up_complete.
+ */
+ ixgbevf_map_rings_to_vectors(adapter);
+
+ err = ixgbevf_up_complete(adapter);
+ if (err)
+ goto err_up;
+
+ /* clear any pending interrupts, may auto mask */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ err = ixgbevf_request_irq(adapter);
+ if (err)
+ goto err_req_irq;
+
+ ixgbevf_irq_enable(adapter, true, true);
+
+ return 0;
+
+err_req_irq:
+ ixgbevf_down(adapter);
+err_up:
+ ixgbevf_free_irq(adapter);
+err_setup_rx:
+ ixgbevf_free_all_rx_resources(adapter);
+err_setup_tx:
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_reset(adapter);
+
+err_setup_reset:
+
+ return err;
+}
+
+/**
+ * ixgbevf_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS. The hardware is still under the driver's control, but
+ * needs to be disabled. A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int ixgbevf_close(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+
+ return 0;
+}
+
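+/**
+ * ixgbevf_tso - construct a TSO context descriptor for a given skb
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring the context descriptor is written to
+ * @skb: buffer being transmitted
+ * @tx_flags: accumulated transmit flags (VLAN etc.)
+ * @hdr_len: on return, incremented by the combined header length
+ *
+ * Returns true if a TSO context descriptor was queued, false if the
+ * skb does not need TSO, or a negative errno if the cloned header
+ * could not be expanded.
+ **/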
+static int ixgbevf_tso(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ int err;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl;
+ u32 mss_l4len_idx, l4len;
+
+ if (skb_is_gso(skb)) {
+ if (skb_header_cloned(skb)) {
+ err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (err)
+ return err;
+ }
+ l4len = tcp_hdrlen(skb);
+ *hdr_len += l4len;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *iph = ip_hdr(skb);
+ iph->tot_len = 0;
+ iph->check = 0;
+ tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+ iph->daddr, 0,
+ IPPROTO_TCP,
+ 0);
+ adapter->hw_tso_ctxt++;
+ } else if (skb_is_gso_v6(skb)) {
+ ipv6_hdr(skb)->payload_len = 0;
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ 0, IPPROTO_TCP, 0);
+ adapter->hw_tso6_ctxt++;
+ }
+
+ i = tx_ring->next_to_use;
+
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ /* VLAN MACLEN IPLEN */
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |=
+ (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= ((skb_network_offset(skb)) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ *hdr_len += skb_network_offset(skb);
+ vlan_macip_lens |=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ *hdr_len +=
+ (skb_transport_header(skb) - skb_network_header(skb));
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
+ type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->protocol == htons(ETH_P_IP))
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+
+ /* MSS L4LEN IDX */
+ mss_l4len_idx =
+ (skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT);
+ mss_l4len_idx |= (l4len << IXGBE_ADVTXD_L4LEN_SHIFT);
+ /* use index 1 for TSO */
+ mss_l4len_idx |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
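+/**
+ * ixgbevf_tx_csum - construct a checksum offload context descriptor
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring the context descriptor is written to
+ * @skb: buffer being transmitted
+ * @tx_flags: accumulated transmit flags (VLAN etc.)
+ *
+ * Returns true if a context descriptor was queued, i.e. checksum
+ * offload was requested or a VLAN tag is present, false otherwise.
+ **/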
+static bool ixgbevf_tx_csum(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags)
+{
+ struct ixgbe_adv_tx_context_desc *context_desc;
+ unsigned int i;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN)) {
+ i = tx_ring->next_to_use;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ context_desc = IXGBE_TX_CTXTDESC_ADV(*tx_ring, i);
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ vlan_macip_lens |= (tx_flags &
+ IXGBE_TX_FLAGS_VLAN_MASK);
+ vlan_macip_lens |= (skb_network_offset(skb) <<
+ IXGBE_ADVTXD_MACLEN_SHIFT);
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ vlan_macip_lens |= (skb_transport_header(skb) -
+ skb_network_header(skb));
+
+ context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
+ context_desc->seqnum_seed = 0;
+
+ type_tucmd_mlhl |= (IXGBE_TXD_CMD_DEXT |
+ IXGBE_ADVTXD_DTYP_CTXT);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ case __constant_htons(ETH_P_IPV6):
+ /* XXX what about other V6 headers?? */
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ type_tucmd_mlhl |=
+ IXGBE_ADVTXD_TUCMD_L4T_TCP;
+ break;
+ default:
+ if (unlikely(net_ratelimit())) {
+ printk(KERN_WARNING
+ "partial checksum but "
+ "proto=%x!\n",
+ skb->protocol);
+ }
+ break;
+ }
+ }
+
+ context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
+ /* use index zero for tx checksum offload */
+ context_desc->mss_l4len_idx = 0;
+
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ adapter->hw_csum_tx_good++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ tx_ring->next_to_use = i;
+
+ return true;
+ }
+
+ return false;
+}
+
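+/**
+ * ixgbevf_tx_map - map skb data for DMA and fill the tx_buffer_info array
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring the buffers belong to
+ * @skb: buffer being transmitted
+ * @tx_flags: accumulated transmit flags (currently unused here)
+ * @first: index of the first descriptor used for this skb
+ *
+ * Returns the number of buffers mapped; on a DMA mapping failure the
+ * buffers already mapped for this skb are unmapped again.
+ **/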
+static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring,
+ struct sk_buff *skb, u32 tx_flags,
+ unsigned int first)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ unsigned int len;
+ unsigned int total = skb->len;
+ unsigned int offset = 0, size, count = 0, i;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int f;
+
+ i = tx_ring->next_to_use;
+
+ len = min(skb_headlen(skb), total);
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->mapped_as_page = false;
+ tx_buffer_info->dma = pci_map_single(adapter->pdev,
+ skb->data + offset,
+ size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ for (f = 0; f < nr_frags; f++) {
+ struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[f];
+ len = min((unsigned int)frag->size, total);
+ offset = frag->page_offset;
+
+ while (len) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ size = min(len, (unsigned int)IXGBE_MAX_DATA_PER_TXD);
+
+ tx_buffer_info->length = size;
+ tx_buffer_info->dma = pci_map_page(adapter->pdev,
+ frag->page,
+ offset,
+ size,
+ PCI_DMA_TODEVICE);
+ tx_buffer_info->mapped_as_page = true;
+ if (pci_dma_mapping_error(pdev, tx_buffer_info->dma))
+ goto dma_error;
+ tx_buffer_info->time_stamp = jiffies;
+ tx_buffer_info->next_to_watch = i;
+
+ len -= size;
+ total -= size;
+ offset += size;
+ count++;
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+ if (total == 0)
+ break;
+ }
+
+ if (i == 0)
+ i = tx_ring->count - 1;
+ else
+ i = i - 1;
+ tx_ring->tx_buffer_info[i].skb = skb;
+ tx_ring->tx_buffer_info[first].next_to_watch = i;
+
+ return count;
+
+dma_error:
+ dev_err(&pdev->dev, "TX DMA map failed\n");
+
+ /* clear timestamp and dma mappings for failed tx_buffer_info map */
+ tx_buffer_info->dma = 0;
+ tx_buffer_info->time_stamp = 0;
+ tx_buffer_info->next_to_watch = 0;
+ count--;
+
+ /* clear timestamp and dma mappings for remaining portion of packet */
+ while (count >= 0) {
+ count--;
+ i--;
+ if (i < 0)
+ i += tx_ring->count;
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ ixgbevf_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+ }
+
+ return count;
+}
+
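+/**
+ * ixgbevf_tx_queue - write the data descriptors and kick the hardware
+ * @adapter: board private structure
+ * @tx_ring: tx descriptor ring to place the descriptors on
+ * @tx_flags: accumulated transmit flags (TSO, checksum, VLAN)
+ * @count: number of buffers mapped by ixgbevf_tx_map
+ * @paylen: total length of the skb
+ * @hdr_len: header length computed by ixgbevf_tso
+ *
+ * Builds one advanced data descriptor per mapped buffer, marks the
+ * last one EOP/RS and then advances the ring tail register so the
+ * hardware starts fetching the new descriptors.
+ **/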
+static void ixgbevf_tx_queue(struct ixgbevf_adapter *adapter,
+ struct ixgbevf_ring *tx_ring, int tx_flags,
+ int count, u32 paylen, u8 hdr_len)
+{
+ union ixgbe_adv_tx_desc *tx_desc = NULL;
+ struct ixgbevf_tx_buffer *tx_buffer_info;
+ u32 olinfo_status = 0, cmd_type_len = 0;
+ unsigned int i;
+
+ u32 txd_cmd = IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS | IXGBE_TXD_CMD_IFCS;
+
+ cmd_type_len |= IXGBE_ADVTXD_DTYP_DATA;
+
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_DEXT;
+
+ if (tx_flags & IXGBE_TX_FLAGS_VLAN)
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_VLE;
+
+ if (tx_flags & IXGBE_TX_FLAGS_TSO) {
+ cmd_type_len |= IXGBE_ADVTXD_DCMD_TSE;
+
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ /* use index 1 context for tso */
+ olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
+ if (tx_flags & IXGBE_TX_FLAGS_IPV4)
+ olinfo_status |= IXGBE_TXD_POPTS_IXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ } else if (tx_flags & IXGBE_TX_FLAGS_CSUM)
+ olinfo_status |= IXGBE_TXD_POPTS_TXSM <<
+ IXGBE_ADVTXD_POPTS_SHIFT;
+
+ olinfo_status |= ((paylen - hdr_len) << IXGBE_ADVTXD_PAYLEN_SHIFT);
+
+ i = tx_ring->next_to_use;
+ while (count--) {
+ tx_buffer_info = &tx_ring->tx_buffer_info[i];
+ tx_desc = IXGBE_TX_DESC_ADV(*tx_ring, i);
+ tx_desc->read.buffer_addr = cpu_to_le64(tx_buffer_info->dma);
+ tx_desc->read.cmd_type_len =
+ cpu_to_le32(cmd_type_len | tx_buffer_info->length);
+ tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
+ }
+
+ tx_desc->read.cmd_type_len |= cpu_to_le32(txd_cmd);
+
+ /*
+ * Force memory writes to complete before letting h/w
+ * know there are new descriptors to fetch. (Only
+ * applicable for weak-ordered memory model archs,
+ * such as IA-64).
+ */
+ wmb();
+
+ tx_ring->next_to_use = i;
+ writel(i, adapter->hw.hw_addr + tx_ring->tail);
+}
+
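+/*
+ * Stop the subqueue and then re-check for free descriptors: the memory
+ * barrier closes the race with the clean-up path, which may have made
+ * room on another CPU just as we stopped the queue.
+ */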
+static int __ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_stop_subqueue(netdev, tx_ring->queue_index);
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it. */
+ smp_mb();
+
+ /* We need to check again in a case another CPU has just
+ * made room available. */
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(netdev, tx_ring->queue_index);
+ ++adapter->restart_queue;
+ return 0;
+}
+
+static int ixgbevf_maybe_stop_tx(struct net_device *netdev,
+ struct ixgbevf_ring *tx_ring, int size)
+{
+ if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __ixgbevf_maybe_stop_tx(netdev, tx_ring, size);
+}
+
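+/**
+ * ixgbevf_xmit_frame - transmit entry point called by the stack
+ * @skb: buffer to transmit
+ * @netdev: network interface device structure
+ *
+ * Reserves enough descriptors for the skb (stopping the queue if the
+ * ring is full), sets up any TSO/checksum context descriptor, maps the
+ * buffers and hands the descriptors to the hardware.
+ *
+ * Returns NETDEV_TX_OK, or NETDEV_TX_BUSY if the ring is full.
+ **/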
+static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbevf_ring *tx_ring;
+ unsigned int first;
+ unsigned int tx_flags = 0;
+ u8 hdr_len = 0;
+ int r_idx = 0, tso;
+ int count = 0;
+
+ unsigned int f;
+
+ tx_ring = &adapter->tx_ring[r_idx];
+
+ if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
+ tx_flags |= vlan_tx_tag_get(skb);
+ tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
+ tx_flags |= IXGBE_TX_FLAGS_VLAN;
+ }
+
+ /* three things can cause us to need a context descriptor */
+ if (skb_is_gso(skb) ||
+ (skb->ip_summed == CHECKSUM_PARTIAL) ||
+ (tx_flags & IXGBE_TX_FLAGS_VLAN))
+ count++;
+
+ count += TXD_USE_COUNT(skb_headlen(skb));
+ for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+ count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+
+ if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
+ adapter->tx_busy++;
+ return NETDEV_TX_BUSY;
+ }
+
+ first = tx_ring->next_to_use;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ tx_flags |= IXGBE_TX_FLAGS_IPV4;
+ tso = ixgbevf_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+ if (tso < 0) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (tso)
+ tx_flags |= IXGBE_TX_FLAGS_TSO;
+ else if (ixgbevf_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+ (skb->ip_summed == CHECKSUM_PARTIAL))
+ tx_flags |= IXGBE_TX_FLAGS_CSUM;
+
+ ixgbevf_tx_queue(adapter, tx_ring, tx_flags,
+ ixgbevf_tx_map(adapter, tx_ring, skb, tx_flags, first),
+ skb->len, hdr_len);
+
+ netdev->trans_start = jiffies;
+
+ ixgbevf_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+
+ return NETDEV_TX_OK;
+}
+
+/**
+ * ixgbevf_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *ixgbevf_get_stats(struct net_device *netdev)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ /* only return the current stats */
+ return &adapter->net_stats;
+}
+
+/**
+ * ixgbevf_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_set_mac(struct net_device *netdev, void *p)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
+
+ if (hw->mac.ops.set_rar)
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int ixgbevf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if ((new_mtu < 68) || (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
+ return -EINVAL;
+
+ hw_dbg(&adapter->hw, "changing MTU from %d to %d\n",
+ netdev->mtu, new_mtu);
+ /* must set new MTU before calling down or up */
+ netdev->mtu = new_mtu;
+
+ if (netif_running(netdev))
+ ixgbevf_reinit_locked(adapter);
+
+ return 0;
+}
+
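+/**
+ * ixgbevf_shutdown - stop the device on system shutdown
+ * @pdev: PCI device information struct
+ *
+ * Detaches the netdev, brings the interface down if it was running,
+ * frees its Tx/Rx resources and disables the PCI device.
+ **/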
+static void ixgbevf_shutdown(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev)) {
+ ixgbevf_down(adapter);
+ ixgbevf_free_irq(adapter);
+ ixgbevf_free_all_tx_resources(adapter);
+ ixgbevf_free_all_rx_resources(adapter);
+ }
+
+#ifdef CONFIG_PM
+ pci_save_state(pdev);
+#endif
+
+ pci_disable_device(pdev);
+}
+
+static const struct net_device_ops ixgbe_netdev_ops = {
+ .ndo_open = &ixgbevf_open,
+ .ndo_stop = &ixgbevf_close,
+ .ndo_start_xmit = &ixgbevf_xmit_frame,
+ .ndo_get_stats = &ixgbevf_get_stats,
+ .ndo_set_rx_mode = &ixgbevf_set_rx_mode,
+ .ndo_set_multicast_list = &ixgbevf_set_rx_mode,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = &ixgbevf_set_mac,
+ .ndo_change_mtu = &ixgbevf_change_mtu,
+ .ndo_tx_timeout = &ixgbevf_tx_timeout,
+ .ndo_vlan_rx_register = &ixgbevf_vlan_rx_register,
+ .ndo_vlan_rx_add_vid = &ixgbevf_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = &ixgbevf_vlan_rx_kill_vid,
+};
+
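+/* hook up the netdev and ethtool operations and set the watchdog timeout */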
+static void ixgbevf_assign_netdev_ops(struct net_device *dev)
+{
+ struct ixgbevf_adapter *adapter;
+ adapter = netdev_priv(dev);
+ dev->netdev_ops = &ixgbe_netdev_ops;
+ ixgbevf_set_ethtool_ops(dev);
+ dev->watchdog_timeo = 5 * HZ;
+}
+
+/**
+ * ixgbevf_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in ixgbevf_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * ixgbevf_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int __devinit ixgbevf_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct net_device *netdev;
+ struct ixgbevf_adapter *adapter = NULL;
+ struct ixgbe_hw *hw = NULL;
+ const struct ixgbevf_info *ii = ixgbevf_info_tbl[ent->driver_data];
+ static int cards_found;
+ int err, pci_using_dac;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_using_dac = 1;
+ } else {
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (err) {
+ err = pci_set_consistent_dma_mask(pdev,
+ DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "No usable DMA "
+ "configuration, aborting\n");
+ goto err_dma;
+ }
+ }
+ pci_using_dac = 0;
+ }
+
+ err = pci_request_regions(pdev, ixgbevf_driver_name);
+ if (err) {
+ dev_err(&pdev->dev, "pci_request_regions failed 0x%x\n", err);
+ goto err_pci_reg;
+ }
+
+ pci_set_master(pdev);
+
+#ifdef HAVE_TX_MQ
+ netdev = alloc_etherdev_mq(sizeof(struct ixgbevf_adapter),
+ MAX_TX_QUEUES);
+#else
+ netdev = alloc_etherdev(sizeof(struct ixgbevf_adapter));
+#endif
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_alloc_etherdev;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ pci_set_drvdata(pdev, netdev);
+ adapter = netdev_priv(netdev);
+
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ hw = &adapter->hw;
+ hw->back = adapter;
+ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
+
+ /*
+ * call save state here in standalone driver because it relies on
+ * adapter struct to exist, and needs to call netdev_priv
+ */
+ pci_save_state(pdev);
+
+ hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->hw_addr) {
+ err = -EIO;
+ goto err_ioremap;
+ }
+
+ ixgbevf_assign_netdev_ops(netdev);
+
+ adapter->bd_number = cards_found;
+
+ /* Setup hw api */
+ memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
+ hw->mac.type = ii->mac;
+
+ memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops, sizeof(hw->mbx.ops));
+
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_CAPABLE;
+ adapter->flags &= ~IXGBE_FLAG_RX_PS_ENABLED;
+ adapter->flags |= IXGBE_FLAG_RX_1BUF_CAPABLE;
+
+ /* setup the private structure */
+ err = ixgbevf_sw_init(adapter);
+ if (err)
+ goto err_sw_init;
+
+ ixgbevf_init_last_counter_stats(adapter);
+
+#ifdef MAX_SKB_FRAGS
+ netdev->features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_HW_VLAN_TX |
+ NETIF_F_HW_VLAN_RX |
+ NETIF_F_HW_VLAN_FILTER;
+
+ netdev->features |= NETIF_F_IPV6_CSUM;
+ netdev->features |= NETIF_F_TSO;
+ netdev->features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_SG;
+
+ if (pci_using_dac)
+ netdev->features |= NETIF_F_HIGHDMA;
+
+#endif /* MAX_SKB_FRAGS */
+
+ /* The HW MAC address was set and/or determined in sw_init */
+ memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+ memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+ if (!is_valid_ether_addr(netdev->dev_addr)) {
+ printk(KERN_ERR "invalid MAC address\n");
+ err = -EIO;
+ goto err_sw_init;
+ }
+
+ init_timer(&adapter->watchdog_timer);
+ adapter->watchdog_timer.function = &ixgbevf_watchdog;
+ adapter->watchdog_timer.data = (unsigned long)adapter;
+
+ INIT_WORK(&adapter->reset_task, ixgbevf_reset_task);
+ INIT_WORK(&adapter->watchdog_task, ixgbevf_watchdog_task);
+
+ err = ixgbevf_init_interrupt_scheme(adapter);
+ if (err)
+ goto err_sw_init;
+
+ /* pick up the PCI bus settings for reporting later */
+ if (hw->mac.ops.get_bus_info)
+ hw->mac.ops.get_bus_info(hw);
+
+
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ strcpy(netdev->name, "eth%d");
+
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+
+ adapter->netdev_registered = true;
+
+ /* print the MAC address */
+ hw_dbg(hw, "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n",
+ netdev->dev_addr[0],
+ netdev->dev_addr[1],
+ netdev->dev_addr[2],
+ netdev->dev_addr[3],
+ netdev->dev_addr[4],
+ netdev->dev_addr[5]);
+
+ hw_dbg(hw, "MAC: %d\n", hw->mac.type);
+
+ hw_dbg(hw, "LRO is disabled\n");
+
+ hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
+ cards_found++;
+ return 0;
+
+err_register:
+err_sw_init:
+ ixgbevf_reset_interrupt_capability(adapter);
+ iounmap(hw->hw_addr);
+err_ioremap:
+ free_netdev(netdev);
+err_alloc_etherdev:
+ pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+ pci_disable_device(pdev);
+ return err;
+}
+
+/**
+ * ixgbevf_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * ixgbevf_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device. This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void __devexit ixgbevf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+
+ set_bit(__IXGBEVF_DOWN, &adapter->state);
+
+ del_timer_sync(&adapter->watchdog_timer);
+
+ cancel_work_sync(&adapter->watchdog_task);
+
+ flush_scheduled_work();
+
+ if (adapter->netdev_registered) {
+ unregister_netdev(netdev);
+ adapter->netdev_registered = false;
+ }
+
+ ixgbevf_reset_interrupt_capability(adapter);
+
+ iounmap(adapter->hw.hw_addr);
+ pci_release_regions(pdev);
+
+ hw_dbg(&adapter->hw, "Remove complete\n");
+
+ kfree(adapter->tx_ring);
+ kfree(adapter->rx_ring);
+
+ free_netdev(netdev);
+
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver ixgbevf_driver = {
+ .name = ixgbevf_driver_name,
+ .id_table = ixgbevf_pci_tbl,
+ .probe = ixgbevf_probe,
+ .remove = __devexit_p(ixgbevf_remove),
+ .shutdown = ixgbevf_shutdown,
+};
+
+/**
+ * ixgbevf_init_module - Driver Registration Routine
+ *
+ * ixgbevf_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init ixgbevf_init_module(void)
+{
+ int ret;
+ printk(KERN_INFO "ixgbevf: %s - version %s\n", ixgbevf_driver_string,
+ ixgbevf_driver_version);
+
+ printk(KERN_INFO "%s\n", ixgbevf_copyright);
+
+ ret = pci_register_driver(&ixgbevf_driver);
+ return ret;
+}
+
+module_init(ixgbevf_init_module);
+
+/**
+ * ixgbevf_exit_module - Driver Exit Cleanup Routine
+ *
+ * ixgbevf_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit ixgbevf_exit_module(void)
+{
+ pci_unregister_driver(&ixgbevf_driver);
+}
+
+#ifdef DEBUG
+/**
+ * ixgbevf_get_hw_dev_name - return device name string
+ * used by hardware layer to print debugging information
+ **/
+char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
+{
+ struct ixgbevf_adapter *adapter = hw->back;
+ return adapter->netdev->name;
+}
+
+#endif
+module_exit(ixgbevf_exit_module);
+
+/* ixgbevf_main.c */
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
new file mode 100644
index 00000000000..b8143501e6f
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.c
@@ -0,0 +1,341 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "mbx.h"
+
+/**
+ * ixgbevf_poll_for_msg - Wait for message notification
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message notification
+ **/
+static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_msg(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_poll_for_ack - Wait for message acknowledgement
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if it successfully received a message acknowledgement
+ **/
+static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ int countdown = mbx->timeout;
+
+ while (countdown && mbx->ops.check_for_ack(hw)) {
+ countdown--;
+ udelay(mbx->udelay);
+ }
+
+ /* if we failed, all future posted messages fail until reset */
+ if (!countdown)
+ mbx->timeout = 0;
+
+ return countdown ? 0 : IXGBE_ERR_MBX;
+}
+
+/**
+ * ixgbevf_read_posted_mbx - Wait for message notification and receive message
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully received a message notification and
+ * copied it into the receive buffer.
+ **/
+static s32 ixgbevf_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ ret_val = ixgbevf_poll_for_msg(hw);
+
+ /* if a message notification arrived, read it; otherwise we timed out */
+ if (!ret_val)
+ ret_val = mbx->ops.read(hw, msg, size);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_posted_mbx - Write a message to the mailbox, wait for ack
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer and
+ * received an ack to that message within delay * timeout period
+ **/
+static s32 ixgbevf_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ s32 ret_val;
+
+ /* send msg */
+ ret_val = mbx->ops.write(hw, msg, size);
+
+ /* if msg sent wait until we receive an ack */
+ if (!ret_val)
+ ret_val = ixgbevf_poll_for_ack(hw);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_v2p_mailbox - read v2p mailbox
+ * @hw: pointer to the HW structure
+ *
+ * This function is used to read the v2p mailbox without losing the read to
+ * clear status bits.
+ **/
+static u32 ixgbevf_read_v2p_mailbox(struct ixgbe_hw *hw)
+{
+ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX);
+
+ v2p_mailbox |= hw->mbx.v2p_mailbox;
+ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS;
+
+ return v2p_mailbox;
+}
+
+/**
+ * ixgbevf_check_for_bit_vf - Determine if a status bit was set
+ * @hw: pointer to the HW structure
+ * @mask: bitmask for bits to be tested and cleared
+ *
+ * This function is used to check for the read to clear bits within
+ * the V2P mailbox.
+ **/
+static s32 ixgbevf_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask)
+{
+ u32 v2p_mailbox = ixgbevf_read_v2p_mailbox(hw);
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (v2p_mailbox & mask)
+ ret_val = 0;
+
+ hw->mbx.v2p_mailbox &= ~mask;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_msg_vf - checks to see if the PF has sent mail
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the Status bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_msg_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) {
+ ret_val = 0;
+ hw->mbx.stats.reqs++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_ack_vf - checks to see if the PF has ACK'd
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set the ACK bit or else ERR_MBX
+ **/
+static s32 ixgbevf_check_for_ack_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) {
+ ret_val = 0;
+ hw->mbx.stats.acks++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_check_for_rst_vf - checks to see if the PF has reset
+ * @hw: pointer to the HW structure
+ *
+ * returns 0 if the PF has set one of the reset bits, else IXGBE_ERR_MBX
+ **/
+static s32 ixgbevf_check_for_rst_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ if (!ixgbevf_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD |
+ IXGBE_VFMAILBOX_RSTI))) {
+ ret_val = 0;
+ hw->mbx.stats.rsts++;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_obtain_mbx_lock_vf - obtain mailbox lock
+ * @hw: pointer to the HW structure
+ *
+ * return 0 if we obtained the mailbox lock
+ **/
+static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
+{
+ s32 ret_val = IXGBE_ERR_MBX;
+
+ /* Take ownership of the buffer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
+
+ /* reserve mailbox for vf use */
+ if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
+ ret_val = 0;
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_write_mbx_vf - Write a message to the mailbox
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully copied message into the buffer
+ **/
+static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val;
+ u16 i;
+
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_write;
+
+ /* flush msg and acks as we are overwriting the message buffer */
+ ixgbevf_check_for_msg_vf(hw);
+ ixgbevf_check_for_ack_vf(hw);
+
+ /* copy the caller specified message to the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]);
+
+ /* update stats */
+ hw->mbx.stats.msgs_tx++;
+
+ /* Drop VFU and interrupt the PF to tell it a message has been sent */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ);
+
+out_no_write:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf
+ * @hw: pointer to the HW structure
+ * @msg: The message buffer
+ * @size: Length of buffer
+ *
+ * returns 0 if it successfully read the message from the buffer
+ **/
+static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
+{
+ s32 ret_val = 0;
+ u16 i;
+
+ /* lock the mailbox to prevent pf/vf race condition */
+ ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
+ if (ret_val)
+ goto out_no_read;
+
+ /* copy the message from the mailbox memory buffer */
+ for (i = 0; i < size; i++)
+ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i);
+
+ /* Acknowledge receipt and release mailbox, then we're done */
+ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK);
+
+ /* update stats */
+ hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+ return ret_val;
+}
+
+/**
+ * ixgbevf_init_mbx_params_vf - set initial values for vf mailbox
+ * @hw: pointer to the HW structure
+ *
+ * Initializes the hw->mbx struct to correct values for vf mailbox
+ */
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+
+ /* start mailbox as timed out and let the reset_hw call set the timeout
+ * value to begin communications */
+ mbx->timeout = 0;
+ mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
+
+ mbx->size = IXGBE_VFMAILBOX_SIZE;
+
+ mbx->stats.msgs_tx = 0;
+ mbx->stats.msgs_rx = 0;
+ mbx->stats.reqs = 0;
+ mbx->stats.acks = 0;
+ mbx->stats.rsts = 0;
+
+ return 0;
+}
+
+struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+ .init_params = ixgbevf_init_mbx_params_vf,
+ .read = ixgbevf_read_mbx_vf,
+ .write = ixgbevf_write_mbx_vf,
+ .read_posted = ixgbevf_read_posted_mbx,
+ .write_posted = ixgbevf_write_posted_mbx,
+ .check_for_msg = ixgbevf_check_for_msg_vf,
+ .check_for_ack = ixgbevf_check_for_ack_vf,
+ .check_for_rst = ixgbevf_check_for_rst_vf,
+};
+
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
new file mode 100644
index 00000000000..1b0e0bf4c0f
--- /dev/null
+++ b/drivers/net/ixgbevf/mbx.h
@@ -0,0 +1,100 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBE_MBX_H_
+#define _IXGBE_MBX_H_
+
+#include "vf.h"
+
+#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */
+#define IXGBE_ERR_MBX -100
+
+#define IXGBE_VFMAILBOX 0x002FC
+#define IXGBE_VFMBMEM 0x00200
+
+/* Define mailbox register bits */
+#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */
+#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */
+#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */
+#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */
+#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */
+#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */
+#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */
+
+#define IXGBE_PFMAILBOX(x) (0x04B00 + (4 * x))
+#define IXGBE_PFMBMEM(vfn) (0x13000 + (64 * vfn))
+
+#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */
+#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */
+#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */
+#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */
+
+#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */
+#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */
+#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
+#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
+
+
+/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
+ * PF. The reverse is true if it is IXGBE_PF_*.
+ * Message ACK's are the value or'd with 0xF0000000
+ */
+#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
+ * this are the ACK */
+#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
+ * this are the NACK */
+#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
+ * clear to send requests */
+#define IXGBE_VT_MSGINFO_SHIFT 16
+/* bits 23:16 are used for extra info for certain messages */
+#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
+
+#define IXGBE_VF_RESET 0x01 /* VF requests reset */
+#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */
+#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
+#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */
+#define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */
+
+/* length of permanent address message returned from PF */
+#define IXGBE_VF_PERMADDR_MSG_LEN 4
+/* word in permanent address message with the current multicast type */
+#define IXGBE_VF_MC_TYPE_WORD 3
+
+#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */
+
+#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
+#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */
+
+/* forward declaration of the HW struct */
+struct ixgbe_hw;
+
+s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *);
+
+#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
new file mode 100644
index 00000000000..12f75960aec
--- /dev/null
+++ b/drivers/net/ixgbevf/regs.h
@@ -0,0 +1,85 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _IXGBEVF_REGS_H_
+#define _IXGBEVF_REGS_H_
+
+#define IXGBE_VFCTRL 0x00000
+#define IXGBE_VFSTATUS 0x00008
+#define IXGBE_VFLINKS 0x00010
+#define IXGBE_VFRTIMER 0x00048
+#define IXGBE_VFRXMEMWRAP 0x03190
+#define IXGBE_VTEICR 0x00100
+#define IXGBE_VTEICS 0x00104
+#define IXGBE_VTEIMS 0x00108
+#define IXGBE_VTEIMC 0x0010C
+#define IXGBE_VTEIAC 0x00110
+#define IXGBE_VTEIAM 0x00114
+#define IXGBE_VTEITR(x) (0x00820 + (4 * x))
+#define IXGBE_VTIVAR(x) (0x00120 + (4 * x))
+#define IXGBE_VTIVAR_MISC 0x00140
+#define IXGBE_VTRSCINT(x) (0x00180 + (4 * x))
+#define IXGBE_VFRDBAL(x) (0x01000 + (0x40 * x))
+#define IXGBE_VFRDBAH(x) (0x01004 + (0x40 * x))
+#define IXGBE_VFRDLEN(x) (0x01008 + (0x40 * x))
+#define IXGBE_VFRDH(x) (0x01010 + (0x40 * x))
+#define IXGBE_VFRDT(x) (0x01018 + (0x40 * x))
+#define IXGBE_VFRXDCTL(x) (0x01028 + (0x40 * x))
+#define IXGBE_VFSRRCTL(x) (0x01014 + (0x40 * x))
+#define IXGBE_VFRSCCTL(x) (0x0102C + (0x40 * x))
+#define IXGBE_VFPSRTYPE 0x00300
+#define IXGBE_VFTDBAL(x) (0x02000 + (0x40 * x))
+#define IXGBE_VFTDBAH(x) (0x02004 + (0x40 * x))
+#define IXGBE_VFTDLEN(x) (0x02008 + (0x40 * x))
+#define IXGBE_VFTDH(x) (0x02010 + (0x40 * x))
+#define IXGBE_VFTDT(x) (0x02018 + (0x40 * x))
+#define IXGBE_VFTXDCTL(x) (0x02028 + (0x40 * x))
+#define IXGBE_VFTDWBAL(x) (0x02038 + (0x40 * x))
+#define IXGBE_VFTDWBAH(x) (0x0203C + (0x40 * x))
+#define IXGBE_VFDCA_RXCTRL(x) (0x0100C + (0x40 * x))
+#define IXGBE_VFDCA_TXCTRL(x) (0x0200c + (0x40 * x))
+#define IXGBE_VFGPRC 0x0101C
+#define IXGBE_VFGPTC 0x0201C
+#define IXGBE_VFGORC_LSB 0x01020
+#define IXGBE_VFGORC_MSB 0x01024
+#define IXGBE_VFGOTC_LSB 0x02020
+#define IXGBE_VFGOTC_MSB 0x02024
+#define IXGBE_VFMPRC 0x01034
+
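+/* register access helpers: VF registers are memory mapped via hw->hw_addr */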
+#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
+
+#define IXGBE_READ_REG(a, reg) readl((a)->hw_addr + (reg))
+
+#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+ writel((value), ((a)->hw_addr + (reg) + ((offset) << 2))))
+
+#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \
+ readl((a)->hw_addr + (reg) + ((offset) << 2)))
+
+#define IXGBE_WRITE_FLUSH(a) (IXGBE_READ_REG(a, IXGBE_VFSTATUS))
+
+#endif /* _IXGBEVF_REGS_H_ */
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
new file mode 100644
index 00000000000..4b5dec0ec14
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.c
@@ -0,0 +1,387 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "vf.h"
+
+/**
+ * ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Marks the hardware as started by clearing the adapter_stopped flag.
+ * The VF has no bus info, counters or filter tables to initialize here,
+ * so this is all that is required before Tx/Rx can be enabled.
+ **/
+static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = false;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
+{
+ s32 status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbevf_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 timeout = IXGBE_VF_INIT_TIMEOUT;
+ s32 ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ u8 *addr = (u8 *)(&msgbuf[1]);
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw) && timeout) {
+ timeout--;
+ udelay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ mbx->ops.write_posted(hw, msgbuf, 1);
+
+ msleep(10);
+
+ /* set our "perm_addr" based on info provided by PF */
+ /* also set up the mc_filter_type which is piggy backed
+ * on the mac address in word 3 */
+ ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return 0;
+}
+
+/**
+ * ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
+{
+ u32 number_of_queues;
+ u32 reg_val;
+ u16 i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = true;
+
+ /* Disable the receive unit by stopping each queue */
+ number_of_queues = hw->mac.max_rx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ if (reg_val & IXGBE_RXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ /* Clear interrupt mask to stop from interrupts being generated */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts */
+ IXGBE_READ_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ number_of_queues = hw->mac.max_tx_queues;
+ for (i = 0; i < number_of_queues; i++) {
+ reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ if (reg_val & IXGBE_TXDCTL_ENABLE) {
+ reg_val &= ~IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * ixgbevf_mta_vector - Determines bit-vector in multicast table to set
+ * @hw: pointer to hardware structure
+ * @mc_addr: the multicast address
+ *
+ * Extracts 12 bits from a multicast address to determine which
+ * bit-vector to set in the multicast table. The hardware uses 12 bits of
+ * incoming rx multicast addresses to determine the bit-vector to check in
+ * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
+ * by the MO field of the MCSTCTRL. The MO field is set during initialization
+ * to mc_filter_type.
+ **/
+static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
+{
+ u32 vector = 0;
+
+ switch (hw->mac.mc_filter_type) {
+ case 0: /* use bits [47:36] of the address */
+ vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
+ break;
+ case 1: /* use bits [46:35] of the address */
+ vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
+ break;
+ case 2: /* use bits [45:34] of the address */
+ vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
+ break;
+ case 3: /* use bits [43:32] of the address */
+ vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
+ break;
+ default: /* Invalid mc_filter_type */
+ break;
+ }
+
+ /* vector can only be 12-bits or boundary will be exceeded */
+ vector &= 0xFFF;
+ return vector;
+}
+
+/**
+ * ixgbevf_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to storage for retrieved MAC address
+ **/
+static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
+{
+ memcpy(mac_addr, hw->mac.perm_addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: Unused in this implementation
+ **/
+static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
+ u32 vmdq)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[3];
+ u8 *msg_addr = (u8 *)(&msgbuf[1]);
+ s32 ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, 6);
+ ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
+
+ if (!ret_val)
+ ret_val = mbx->ops.read_posted(hw, msgbuf, 3);
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if nacked the address was rejected, use "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK)))
+ ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
+
+ return ret_val;
+}
+
+/**
+ * ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ *
+ * Updates the Multicast Table Array.
+ **/
+static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list,
+ u32 mc_addr_count,
+ ixgbe_mc_addr_itr next)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
+ u16 *vector_list = (u16 *)&msgbuf[1];
+ u32 vector;
+ u32 cnt, i;
+ u32 vmdq;
+
+ /* Each entry in the list uses 1 16 bit word. We have 30
+ * 16 bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and then add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multi-cast
+ * addresses except for in large enterprise network environments.
+ */
+
+ cnt = (mc_addr_count > 30) ? 30 : mc_addr_count;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = ixgbevf_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ vector_list[i] = vector;
+ }
+
+ mbx->ops.write_posted(hw, msgbuf, IXGBE_VFMAILBOX_SIZE);
+
+ return 0;
+}
+
+/**
+ * ixgbevf_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if true then set bit, else clear bit
+ **/
+static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
+ bool vlan_on)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ u32 msgbuf[2];
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ return mbx->ops.write_posted(hw, msgbuf, 2);
+}
+
+/**
+ * ixgbevf_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: Unused in this implementation
+ * @autoneg: Unused in this implementation
+ * @autoneg_wait_to_complete: Unused in this implementation
+ *
+ * Do nothing and return success. VF drivers are not allowed to change
+ * global settings. Maintained for driver compatibility.
+ **/
+static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed speed, bool autoneg,
+ bool autoneg_wait_to_complete)
+{
+ return 0;
+}
+
+/**
+ * ixgbevf_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: true if link is up, false otherwise
+ * @autoneg_wait_to_complete: true when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
+ ixgbe_link_speed *speed,
+ bool *link_up,
+ bool autoneg_wait_to_complete)
+{
+ u32 links_reg;
+
+ if (!(hw->mbx.ops.check_for_rst(hw))) {
+ *link_up = false;
+ *speed = 0;
+ return -1;
+ }
+
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (links_reg & IXGBE_LINKS_UP)
+ *link_up = true;
+ else
+ *link_up = false;
+
+ if (links_reg & IXGBE_LINKS_SPEED)
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ else
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+
+ return 0;
+}
+
+struct ixgbe_mac_operations ixgbevf_mac_ops = {
+ .init_hw = ixgbevf_init_hw_vf,
+ .reset_hw = ixgbevf_reset_hw_vf,
+ .start_hw = ixgbevf_start_hw_vf,
+ .get_mac_addr = ixgbevf_get_mac_addr_vf,
+ .stop_adapter = ixgbevf_stop_hw_vf,
+ .setup_link = ixgbevf_setup_mac_link_vf,
+ .check_link = ixgbevf_check_mac_link_vf,
+ .set_rar = ixgbevf_set_rar_vf,
+ .update_mc_addr_list = ixgbevf_update_mc_addr_list_vf,
+ .set_vfta = ixgbevf_set_vfta_vf,
+};
+
+struct ixgbevf_info ixgbevf_vf_info = {
+ .mac = ixgbe_mac_82599_vf,
+ .mac_ops = &ixgbevf_mac_ops,
+};
+
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
new file mode 100644
index 00000000000..799600e9270
--- /dev/null
+++ b/drivers/net/ixgbevf/vf.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+ Intel 82599 Virtual Function driver
+ Copyright(c) 1999 - 2009 Intel Corporation.
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Contact Information:
+ e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef __IXGBE_VF_H__
+#define __IXGBE_VF_H__
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "defines.h"
+#include "regs.h"
+#include "mbx.h"
+
+struct ixgbe_hw;
+
+/* iterator type for walking multicast address lists */
+typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
+ u32 *vmdq);
+struct ixgbe_mac_operations {
+ s32 (*init_hw)(struct ixgbe_hw *);
+ s32 (*reset_hw)(struct ixgbe_hw *);
+ s32 (*start_hw)(struct ixgbe_hw *);
+ s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
+ enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
+ u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
+ s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
+ s32 (*stop_adapter)(struct ixgbe_hw *);
+ s32 (*get_bus_info)(struct ixgbe_hw *);
+
+ /* Link */
+ s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool, bool);
+ s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
+ s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
+ bool *);
+
+ /* RAR, Multicast, VLAN */
+ s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32);
+ s32 (*init_rx_addrs)(struct ixgbe_hw *);
+ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
+ ixgbe_mc_addr_itr);
+ s32 (*enable_mc)(struct ixgbe_hw *);
+ s32 (*disable_mc)(struct ixgbe_hw *);
+ s32 (*clear_vfta)(struct ixgbe_hw *);
+ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
+};
+
+enum ixgbe_mac_type {
+ ixgbe_mac_unknown = 0,
+ ixgbe_mac_82599_vf,
+ ixgbe_num_macs
+};
+
+struct ixgbe_mac_info {
+ struct ixgbe_mac_operations ops;
+ u8 addr[6];
+ u8 perm_addr[6];
+
+ enum ixgbe_mac_type type;
+
+ s32 mc_filter_type;
+
+ bool get_link_status;
+ u32 max_tx_queues;
+ u32 max_rx_queues;
+ u32 max_msix_vectors;
+};
+
+struct ixgbe_mbx_operations {
+ s32 (*init_params)(struct ixgbe_hw *hw);
+ s32 (*read)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16);
+ s32 (*check_for_msg)(struct ixgbe_hw *);
+ s32 (*check_for_ack)(struct ixgbe_hw *);
+ s32 (*check_for_rst)(struct ixgbe_hw *);
+};
+
+struct ixgbe_mbx_stats {
+ u32 msgs_tx;
+ u32 msgs_rx;
+
+ u32 acks;
+ u32 reqs;
+ u32 rsts;
+};
+
+struct ixgbe_mbx_info {
+ struct ixgbe_mbx_operations ops;
+ struct ixgbe_mbx_stats stats;
+ u32 timeout;
+ u32 udelay;
+ u32 v2p_mailbox;
+ u16 size;
+};
+
+struct ixgbe_hw {
+ void *back;
+
+ u8 __iomem *hw_addr;
+ u8 *flash_address;
+ unsigned long io_base;
+
+ struct ixgbe_mac_info mac;
+ struct ixgbe_mbx_info mbx;
+
+ u16 device_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_device_id;
+ u16 vendor_id;
+
+ u8 revision_id;
+ bool adapter_stopped;
+};
+
+struct ixgbevf_hw_stats {
+ u64 base_vfgprc;
+ u64 base_vfgptc;
+ u64 base_vfgorc;
+ u64 base_vfgotc;
+ u64 base_vfmprc;
+
+ u64 last_vfgprc;
+ u64 last_vfgptc;
+ u64 last_vfgorc;
+ u64 last_vfgotc;
+ u64 last_vfmprc;
+
+ u64 vfgprc;
+ u64 vfgptc;
+ u64 vfgorc;
+ u64 vfgotc;
+ u64 vfmprc;
+};
+
+struct ixgbevf_info {
+ enum ixgbe_mac_type mac;
+ struct ixgbe_mac_operations *mac_ops;
+};
+
+#endif /* __IXGBE_VF_H__ */
+
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index 792b88fc357..0f31497833d 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -288,7 +288,7 @@ jme_set_rx_pcc(struct jme_adapter *jme, int p)
wmb();
if (!(test_bit(JME_FLAG_POLL, &jme->flags)))
- msg_rx_status(jme, "Switched to PCC_P%d\n", p);
+ netif_info(jme, rx_status, jme->dev, "Switched to PCC_P%d\n", p);
}
static void
@@ -483,13 +483,13 @@ jme_check_link(struct net_device *netdev, int testonly)
strcat(linkmsg, (phylink & PHY_LINK_MDI_STAT) ?
"MDI-X" :
"MDI");
- msg_link(jme, "Link is up at %s.\n", linkmsg);
+ netif_info(jme, link, jme->dev, "Link is up at %s.\n", linkmsg);
netif_carrier_on(netdev);
} else {
if (testonly)
goto out;
- msg_link(jme, "Link is down.\n");
+ netif_info(jme, link, jme->dev, "Link is down.\n");
jme->phylink = 0;
netif_carrier_off(netdev);
}
@@ -883,20 +883,20 @@ jme_rxsum_ok(struct jme_adapter *jme, u16 flags)
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_TCPON | RXWBFLAG_TCPCS))
== RXWBFLAG_TCPON)) {
if (flags & RXWBFLAG_IPV4)
- msg_rx_err(jme, "TCP Checksum error\n");
+ netif_err(jme, rx_err, jme->dev, "TCP Checksum error\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_MF | RXWBFLAG_UDPON | RXWBFLAG_UDPCS))
== RXWBFLAG_UDPON)) {
if (flags & RXWBFLAG_IPV4)
- msg_rx_err(jme, "UDP Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "UDP Checksum error.\n");
return false;
}
if (unlikely((flags & (RXWBFLAG_IPV4 | RXWBFLAG_IPCS))
== RXWBFLAG_IPV4)) {
- msg_rx_err(jme, "IPv4 Checksum error.\n");
+ netif_err(jme, rx_err, jme->dev, "IPv4 Checksum error.\n");
return false;
}
@@ -1186,9 +1186,9 @@ jme_link_change_tasklet(unsigned long arg)
while (!atomic_dec_and_test(&jme->link_changing)) {
atomic_inc(&jme->link_changing);
- msg_intr(jme, "Get link change lock failed.\n");
+ netif_info(jme, intr, jme->dev, "Get link change lock failed.\n");
while (atomic_read(&jme->link_changing) != 1)
- msg_intr(jme, "Waiting link change lock.\n");
+ netif_info(jme, intr, jme->dev, "Waiting link change lock.\n");
}
if (jme_check_link(netdev, 1) && jme->old_mtu == netdev->mtu)
@@ -1305,7 +1305,7 @@ jme_rx_empty_tasklet(unsigned long arg)
if (unlikely(!netif_carrier_ok(jme->dev)))
return;
- msg_rx_status(jme, "RX Queue Full!\n");
+ netif_info(jme, rx_status, jme->dev, "RX Queue Full!\n");
jme_rx_clean_tasklet(arg);
@@ -1325,7 +1325,7 @@ jme_wake_queue_if_stopped(struct jme_adapter *jme)
smp_wmb();
if (unlikely(netif_queue_stopped(jme->dev) &&
atomic_read(&txring->nr_free) >= (jme->tx_wake_threshold))) {
- msg_tx_done(jme, "TX Queue Waked.\n");
+ netif_info(jme, tx_done, jme->dev, "TX Queue Waked.\n");
netif_wake_queue(jme->dev);
}
@@ -1835,7 +1835,7 @@ jme_tx_csum(struct jme_adapter *jme, struct sk_buff *skb, u8 *flags)
*flags |= TXFLAG_UDPCS;
break;
default:
- msg_tx_err(jme, "Error upper layer protocol.\n");
+ netif_err(jme, tx_err, jme->dev, "Error upper layer protocol.\n");
break;
}
}
@@ -1910,12 +1910,12 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
smp_wmb();
if (unlikely(atomic_read(&txring->nr_free) < (MAX_SKB_FRAGS+2))) {
netif_stop_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Paused.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Paused.\n");
smp_wmb();
if (atomic_read(&txring->nr_free)
>= (jme->tx_wake_threshold)) {
netif_wake_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Fast Waked.\n");
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Fast Waked.\n");
}
}
@@ -1923,7 +1923,7 @@ jme_stop_queue_if_full(struct jme_adapter *jme)
(jiffies - txbi->start_xmit) >= TX_TIMEOUT &&
txbi->skb)) {
netif_stop_queue(jme->dev);
- msg_tx_queued(jme, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
+ netif_info(jme, tx_queued, jme->dev, "TX Queue Stopped %d@%lu.\n", idx, jiffies);
}
}
@@ -1946,7 +1946,7 @@ jme_start_xmit(struct sk_buff *skb, struct net_device *netdev)
if (unlikely(idx < 0)) {
netif_stop_queue(netdev);
- msg_tx_err(jme, "BUG! Tx ring full when queue awake!\n");
+ netif_err(jme, tx_err, jme->dev, "BUG! Tx ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
@@ -1997,7 +1997,6 @@ jme_set_multi(struct net_device *netdev)
{
struct jme_adapter *jme = netdev_priv(netdev);
u32 mc_hash[2] = {};
- int i;
spin_lock_bh(&jme->rxmcs_lock);
@@ -2012,10 +2011,7 @@ jme_set_multi(struct net_device *netdev)
int bit_nr;
jme->reg_rxmcs |= RXMCS_MULFRAME | RXMCS_MULFILTERED;
- for (i = 0, mclist = netdev->mc_list;
- mclist && i < netdev->mc_count;
- ++i, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, netdev) {
bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3F;
mc_hash[bit_nr >> 5] |= 1 << (bit_nr & 0x1F);
}
@@ -2473,7 +2469,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return 0xFF;
}
@@ -2489,7 +2485,7 @@ jme_smb_read(struct jme_adapter *jme, unsigned int addr)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return 0xFF;
}
@@ -2509,7 +2505,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBCSR);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return;
}
@@ -2526,7 +2522,7 @@ jme_smb_write(struct jme_adapter *jme, unsigned int addr, u8 data)
val = jread32(jme, JME_SMBINTF);
}
if (!to) {
- msg_hw(jme, "SMB Bus Busy.\n");
+ netif_err(jme, hw, jme->dev, "SMB Bus Busy.\n");
return;
}
@@ -2876,14 +2872,14 @@ jme_init_one(struct pci_dev *pdev,
goto err_out_unmap;
}
- msg_probe(jme, "%s%s ver:%x rev:%x macaddr:%pM\n",
- (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
- "JMC250 Gigabit Ethernet" :
- (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
- "JMC260 Fast Ethernet" : "Unknown",
- (jme->fpgaver != 0) ? " (FPGA)" : "",
- (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
- jme->rev, netdev->dev_addr);
+ netif_info(jme, probe, jme->dev, "%s%s ver:%x rev:%x macaddr:%pM\n",
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC250) ?
+ "JMC250 Gigabit Ethernet" :
+ (jme->pdev->device == PCI_DEVICE_ID_JMICRON_JMC260) ?
+ "JMC260 Fast Ethernet" : "Unknown",
+ (jme->fpgaver != 0) ? " (FPGA)" : "",
+ (jme->fpgaver != 0) ? jme->fpgaver : jme->chiprev,
+ jme->rev, netdev->dev_addr);
return 0;
@@ -2994,7 +2990,7 @@ jme_resume(struct pci_dev *pdev)
}
#endif
-static struct pci_device_id jme_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(jme_pci_tbl) = {
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC250) },
{ PCI_VDEVICE(JMICRON, PCI_DEVICE_ID_JMICRON_JMC260) },
{ }
diff --git a/drivers/net/jme.h b/drivers/net/jme.h
index 251abed3817..c19db9146a2 100644
--- a/drivers/net/jme.h
+++ b/drivers/net/jme.h
@@ -45,43 +45,16 @@
printk(KERN_ERR PFX fmt, ## args)
#ifdef TX_DEBUG
-#define tx_dbg(priv, fmt, args...) \
- printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ## args)
+#define tx_dbg(priv, fmt, args...) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args)
#else
-#define tx_dbg(priv, fmt, args...)
+#define tx_dbg(priv, fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG "%s: " fmt, (priv)->dev->name, ##args); \
+} while (0)
#endif
-#define jme_msg(msglvl, type, priv, fmt, args...) \
- if (netif_msg_##type(priv)) \
- printk(msglvl "%s: " fmt, (priv)->dev->name, ## args)
-
-#define msg_probe(priv, fmt, args...) \
- jme_msg(KERN_INFO, probe, priv, fmt, ## args)
-
-#define msg_link(priv, fmt, args...) \
- jme_msg(KERN_INFO, link, priv, fmt, ## args)
-
-#define msg_intr(priv, fmt, args...) \
- jme_msg(KERN_INFO, intr, priv, fmt, ## args)
-
-#define msg_rx_err(priv, fmt, args...) \
- jme_msg(KERN_ERR, rx_err, priv, fmt, ## args)
-
-#define msg_rx_status(priv, fmt, args...) \
- jme_msg(KERN_INFO, rx_status, priv, fmt, ## args)
-
-#define msg_tx_err(priv, fmt, args...) \
- jme_msg(KERN_ERR, tx_err, priv, fmt, ## args)
-
-#define msg_tx_done(priv, fmt, args...) \
- jme_msg(KERN_INFO, tx_done, priv, fmt, ## args)
-
-#define msg_tx_queued(priv, fmt, args...) \
- jme_msg(KERN_INFO, tx_queued, priv, fmt, ## args)
-
-#define msg_hw(priv, fmt, args...) \
- jme_msg(KERN_ERR, hw, priv, fmt, ## args)
-
/*
* Extra PCI Configuration space interface
*/
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 25e2af6997e..300c2249812 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -482,7 +482,7 @@ static void korina_multicast_list(struct net_device *dev)
{
struct korina_private *lp = netdev_priv(dev);
unsigned long flags;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
u32 recognise = ETH_ARC_AB; /* always accept broadcasts */
int i;
@@ -490,23 +490,21 @@ static void korina_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
recognise |= ETH_ARC_PRO;
- else if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 4))
+ else if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 4))
/* All multicast and broadcast */
recognise |= ETH_ARC_AM;
/* Build the hash table */
- if (dev->mc_count > 4) {
+ if (netdev_mc_count(dev) > 4) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
char *addrs = dmi->dmi_addr;
- dmi = dmi->next;
-
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 6d3ac65bc35..b5219cce12e 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -965,14 +965,13 @@ static void ks8851_set_rx_mode(struct net_device *dev)
rxctrl.rxcr1 = (RXCR1_RXME | RXCR1_RXAE |
RXCR1_RXPAFMA | RXCR1_RXMAFMA);
- } else if (dev->flags & IFF_MULTICAST && dev->mc_count > 0) {
- struct dev_mc_list *mcptr = dev->mc_list;
+ } else if (dev->flags & IFF_MULTICAST && !netdev_mc_empty(dev)) {
+ struct dev_mc_list *mcptr;
u32 crc;
- int i;
/* accept some multicast */
- for (i = dev->mc_count; i > 0; i--) {
+ netdev_for_each_mc_addr(mcptr, dev) {
crc = ether_crc(ETH_ALEN, mcptr->dmi_addr);
crc >>= (32 - 6); /* get top six bits */
diff --git a/drivers/net/ks8851_mll.c b/drivers/net/ks8851_mll.c
index c0ceebccaa4..84b0e15831f 100644
--- a/drivers/net/ks8851_mll.c
+++ b/drivers/net/ks8851_mll.c
@@ -1193,10 +1193,11 @@ static void ks_set_rx_mode(struct net_device *netdev)
else
ks_set_promis(ks, false);
- if ((netdev->flags & IFF_MULTICAST) && netdev->mc_count) {
- if (netdev->mc_count <= MAX_MCAST_LST) {
+ if ((netdev->flags & IFF_MULTICAST) && netdev_mc_count(netdev)) {
+ if (netdev_mc_count(netdev) <= MAX_MCAST_LST) {
int i = 0;
- for (ptr = netdev->mc_list; ptr; ptr = ptr->next) {
+
+ netdev_for_each_mc_addr(ptr, netdev) {
if (!(*ptr->dmi_addr & 1))
continue;
if (i >= MAX_MCAST_LST)
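The jme, korina, ks8851 and ks8851_mll hunks above all make the same conversion: the open-coded walk over dev->mc_list with a manual counter becomes the netdev_for_each_mc_addr() iterator plus the netdev_mc_count()/netdev_mc_empty() helpers. A condensed before/after sketch (illustrative, not lifted verbatim from any one driver; add_to_hash() is hypothetical):

/* Old style: manual list walk bounded by dev->mc_count. */
struct dev_mc_list *dmi;
int i;

for (i = 0, dmi = dev->mc_list; dmi && i < dev->mc_count; i++, dmi = dmi->next)
	add_to_hash(dmi->dmi_addr);

/* New style: the core iterates the list; the count comes from a helper. */
if (!netdev_mc_empty(dev))
	netdev_for_each_mc_addr(dmi, dev)
		add_to_hash(dmi->dmi_addr);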
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
new file mode 100644
index 00000000000..7264a3e5c2c
--- /dev/null
+++ b/drivers/net/ksz884x.c
@@ -0,0 +1,7335 @@
+/**
+ * drivers/net/ksz884x.c - Micrel KSZ8841/2 PCI Ethernet driver
+ *
+ * Copyright (c) 2009-2010 Micrel, Inc.
+ * Tristram Ha <Tristram.Ha@micrel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/proc_fs.h>
+#include <linux/mii.h>
+#include <linux/platform_device.h>
+#include <linux/ethtool.h>
+#include <linux/etherdevice.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/if_vlan.h>
+#include <linux/crc32.h>
+#include <linux/sched.h>
+
+
+/* DMA Registers */
+
+#define KS_DMA_TX_CTRL 0x0000
+#define DMA_TX_ENABLE 0x00000001
+#define DMA_TX_CRC_ENABLE 0x00000002
+#define DMA_TX_PAD_ENABLE 0x00000004
+#define DMA_TX_LOOPBACK 0x00000100
+#define DMA_TX_FLOW_ENABLE 0x00000200
+#define DMA_TX_CSUM_IP 0x00010000
+#define DMA_TX_CSUM_TCP 0x00020000
+#define DMA_TX_CSUM_UDP 0x00040000
+#define DMA_TX_BURST_SIZE 0x3F000000
+
+#define KS_DMA_RX_CTRL 0x0004
+#define DMA_RX_ENABLE 0x00000001
+#define KS884X_DMA_RX_MULTICAST 0x00000002
+#define DMA_RX_PROMISCUOUS 0x00000004
+#define DMA_RX_ERROR 0x00000008
+#define DMA_RX_UNICAST 0x00000010
+#define DMA_RX_ALL_MULTICAST 0x00000020
+#define DMA_RX_BROADCAST 0x00000040
+#define DMA_RX_FLOW_ENABLE 0x00000200
+#define DMA_RX_CSUM_IP 0x00010000
+#define DMA_RX_CSUM_TCP 0x00020000
+#define DMA_RX_CSUM_UDP 0x00040000
+#define DMA_RX_BURST_SIZE 0x3F000000
+
+#define DMA_BURST_SHIFT 24
+#define DMA_BURST_DEFAULT 8
+
+#define KS_DMA_TX_START 0x0008
+#define KS_DMA_RX_START 0x000C
+#define DMA_START 0x00000001
+
+#define KS_DMA_TX_ADDR 0x0010
+#define KS_DMA_RX_ADDR 0x0014
+
+#define DMA_ADDR_LIST_MASK 0xFFFFFFFC
+#define DMA_ADDR_LIST_SHIFT 2
+
+/* MTR0 */
+#define KS884X_MULTICAST_0_OFFSET 0x0020
+#define KS884X_MULTICAST_1_OFFSET 0x0021
+#define KS884X_MULTICAST_2_OFFSET 0x0022
+#define KS884x_MULTICAST_3_OFFSET 0x0023
+/* MTR1 */
+#define KS884X_MULTICAST_4_OFFSET 0x0024
+#define KS884X_MULTICAST_5_OFFSET 0x0025
+#define KS884X_MULTICAST_6_OFFSET 0x0026
+#define KS884X_MULTICAST_7_OFFSET 0x0027
+
+/* Interrupt Registers */
+
+/* INTEN */
+#define KS884X_INTERRUPTS_ENABLE 0x0028
+/* INTST */
+#define KS884X_INTERRUPTS_STATUS 0x002C
+
+#define KS884X_INT_RX_STOPPED 0x02000000
+#define KS884X_INT_TX_STOPPED 0x04000000
+#define KS884X_INT_RX_OVERRUN 0x08000000
+#define KS884X_INT_TX_EMPTY 0x10000000
+#define KS884X_INT_RX 0x20000000
+#define KS884X_INT_TX 0x40000000
+#define KS884X_INT_PHY 0x80000000
+
+#define KS884X_INT_RX_MASK \
+ (KS884X_INT_RX | KS884X_INT_RX_OVERRUN)
+#define KS884X_INT_TX_MASK \
+ (KS884X_INT_TX | KS884X_INT_TX_EMPTY)
+#define KS884X_INT_MASK (KS884X_INT_RX | KS884X_INT_TX | KS884X_INT_PHY)
+
+/* MAC Additional Station Address */
+
+/* MAAL0 */
+#define KS_ADD_ADDR_0_LO 0x0080
+/* MAAH0 */
+#define KS_ADD_ADDR_0_HI 0x0084
+/* MAAL1 */
+#define KS_ADD_ADDR_1_LO 0x0088
+/* MAAH1 */
+#define KS_ADD_ADDR_1_HI 0x008C
+/* MAAL2 */
+#define KS_ADD_ADDR_2_LO 0x0090
+/* MAAH2 */
+#define KS_ADD_ADDR_2_HI 0x0094
+/* MAAL3 */
+#define KS_ADD_ADDR_3_LO 0x0098
+/* MAAH3 */
+#define KS_ADD_ADDR_3_HI 0x009C
+/* MAAL4 */
+#define KS_ADD_ADDR_4_LO 0x00A0
+/* MAAH4 */
+#define KS_ADD_ADDR_4_HI 0x00A4
+/* MAAL5 */
+#define KS_ADD_ADDR_5_LO 0x00A8
+/* MAAH5 */
+#define KS_ADD_ADDR_5_HI 0x00AC
+/* MAAL6 */
+#define KS_ADD_ADDR_6_LO 0x00B0
+/* MAAH6 */
+#define KS_ADD_ADDR_6_HI 0x00B4
+/* MAAL7 */
+#define KS_ADD_ADDR_7_LO 0x00B8
+/* MAAH7 */
+#define KS_ADD_ADDR_7_HI 0x00BC
+/* MAAL8 */
+#define KS_ADD_ADDR_8_LO 0x00C0
+/* MAAH8 */
+#define KS_ADD_ADDR_8_HI 0x00C4
+/* MAAL9 */
+#define KS_ADD_ADDR_9_LO 0x00C8
+/* MAAH9 */
+#define KS_ADD_ADDR_9_HI 0x00CC
+/* MAAL10 */
+#define KS_ADD_ADDR_A_LO 0x00D0
+/* MAAH10 */
+#define KS_ADD_ADDR_A_HI 0x00D4
+/* MAAL11 */
+#define KS_ADD_ADDR_B_LO 0x00D8
+/* MAAH11 */
+#define KS_ADD_ADDR_B_HI 0x00DC
+/* MAAL12 */
+#define KS_ADD_ADDR_C_LO 0x00E0
+/* MAAH12 */
+#define KS_ADD_ADDR_C_HI 0x00E4
+/* MAAL13 */
+#define KS_ADD_ADDR_D_LO 0x00E8
+/* MAAH13 */
+#define KS_ADD_ADDR_D_HI 0x00EC
+/* MAAL14 */
+#define KS_ADD_ADDR_E_LO 0x00F0
+/* MAAH14 */
+#define KS_ADD_ADDR_E_HI 0x00F4
+/* MAAL15 */
+#define KS_ADD_ADDR_F_LO 0x00F8
+/* MAAH15 */
+#define KS_ADD_ADDR_F_HI 0x00FC
+
+#define ADD_ADDR_HI_MASK 0x0000FFFF
+#define ADD_ADDR_ENABLE 0x80000000
+#define ADD_ADDR_INCR 8
+
+/* Miscellaneous Registers */
+
+/* MARL */
+#define KS884X_ADDR_0_OFFSET 0x0200
+#define KS884X_ADDR_1_OFFSET 0x0201
+/* MARM */
+#define KS884X_ADDR_2_OFFSET 0x0202
+#define KS884X_ADDR_3_OFFSET 0x0203
+/* MARH */
+#define KS884X_ADDR_4_OFFSET 0x0204
+#define KS884X_ADDR_5_OFFSET 0x0205
+
+/* OBCR */
+#define KS884X_BUS_CTRL_OFFSET 0x0210
+
+#define BUS_SPEED_125_MHZ 0x0000
+#define BUS_SPEED_62_5_MHZ 0x0001
+#define BUS_SPEED_41_66_MHZ 0x0002
+#define BUS_SPEED_25_MHZ 0x0003
+
+/* EEPCR */
+#define KS884X_EEPROM_CTRL_OFFSET 0x0212
+
+#define EEPROM_CHIP_SELECT 0x0001
+#define EEPROM_SERIAL_CLOCK 0x0002
+#define EEPROM_DATA_OUT 0x0004
+#define EEPROM_DATA_IN 0x0008
+#define EEPROM_ACCESS_ENABLE 0x0010
+
+/* MBIR */
+#define KS884X_MEM_INFO_OFFSET 0x0214
+
+#define RX_MEM_TEST_FAILED 0x0008
+#define RX_MEM_TEST_FINISHED 0x0010
+#define TX_MEM_TEST_FAILED 0x0800
+#define TX_MEM_TEST_FINISHED 0x1000
+
+/* GCR */
+#define KS884X_GLOBAL_CTRL_OFFSET 0x0216
+#define GLOBAL_SOFTWARE_RESET 0x0001
+
+#define KS8841_POWER_MANAGE_OFFSET 0x0218
+
+/* WFCR */
+#define KS8841_WOL_CTRL_OFFSET 0x021A
+#define KS8841_WOL_MAGIC_ENABLE 0x0080
+#define KS8841_WOL_FRAME3_ENABLE 0x0008
+#define KS8841_WOL_FRAME2_ENABLE 0x0004
+#define KS8841_WOL_FRAME1_ENABLE 0x0002
+#define KS8841_WOL_FRAME0_ENABLE 0x0001
+
+/* WF0 */
+#define KS8841_WOL_FRAME_CRC_OFFSET 0x0220
+#define KS8841_WOL_FRAME_BYTE0_OFFSET 0x0224
+#define KS8841_WOL_FRAME_BYTE2_OFFSET 0x0228
+
+/* IACR */
+#define KS884X_IACR_P 0x04A0
+#define KS884X_IACR_OFFSET KS884X_IACR_P
+
+/* IADR1 */
+#define KS884X_IADR1_P 0x04A2
+#define KS884X_IADR2_P 0x04A4
+#define KS884X_IADR3_P 0x04A6
+#define KS884X_IADR4_P 0x04A8
+#define KS884X_IADR5_P 0x04AA
+
+#define KS884X_ACC_CTRL_SEL_OFFSET KS884X_IACR_P
+#define KS884X_ACC_CTRL_INDEX_OFFSET (KS884X_ACC_CTRL_SEL_OFFSET + 1)
+
+#define KS884X_ACC_DATA_0_OFFSET KS884X_IADR4_P
+#define KS884X_ACC_DATA_1_OFFSET (KS884X_ACC_DATA_0_OFFSET + 1)
+#define KS884X_ACC_DATA_2_OFFSET KS884X_IADR5_P
+#define KS884X_ACC_DATA_3_OFFSET (KS884X_ACC_DATA_2_OFFSET + 1)
+#define KS884X_ACC_DATA_4_OFFSET KS884X_IADR2_P
+#define KS884X_ACC_DATA_5_OFFSET (KS884X_ACC_DATA_4_OFFSET + 1)
+#define KS884X_ACC_DATA_6_OFFSET KS884X_IADR3_P
+#define KS884X_ACC_DATA_7_OFFSET (KS884X_ACC_DATA_6_OFFSET + 1)
+#define KS884X_ACC_DATA_8_OFFSET KS884X_IADR1_P
+
+/* P1MBCR */
+#define KS884X_P1MBCR_P 0x04D0
+#define KS884X_P1MBSR_P 0x04D2
+#define KS884X_PHY1ILR_P 0x04D4
+#define KS884X_PHY1IHR_P 0x04D6
+#define KS884X_P1ANAR_P 0x04D8
+#define KS884X_P1ANLPR_P 0x04DA
+
+/* P2MBCR */
+#define KS884X_P2MBCR_P 0x04E0
+#define KS884X_P2MBSR_P 0x04E2
+#define KS884X_PHY2ILR_P 0x04E4
+#define KS884X_PHY2IHR_P 0x04E6
+#define KS884X_P2ANAR_P 0x04E8
+#define KS884X_P2ANLPR_P 0x04EA
+
+#define KS884X_PHY_1_CTRL_OFFSET KS884X_P1MBCR_P
+#define PHY_CTRL_INTERVAL (KS884X_P2MBCR_P - KS884X_P1MBCR_P)
+
+#define KS884X_PHY_CTRL_OFFSET 0x00
+
+/* Mode Control Register */
+#define PHY_REG_CTRL 0
+
+#define PHY_RESET 0x8000
+#define PHY_LOOPBACK 0x4000
+#define PHY_SPEED_100MBIT 0x2000
+#define PHY_AUTO_NEG_ENABLE 0x1000
+#define PHY_POWER_DOWN 0x0800
+#define PHY_MII_DISABLE 0x0400
+#define PHY_AUTO_NEG_RESTART 0x0200
+#define PHY_FULL_DUPLEX 0x0100
+#define PHY_COLLISION_TEST 0x0080
+#define PHY_HP_MDIX 0x0020
+#define PHY_FORCE_MDIX 0x0010
+#define PHY_AUTO_MDIX_DISABLE 0x0008
+#define PHY_REMOTE_FAULT_DISABLE 0x0004
+#define PHY_TRANSMIT_DISABLE 0x0002
+#define PHY_LED_DISABLE 0x0001
+
+#define KS884X_PHY_STATUS_OFFSET 0x02
+
+/* Mode Status Register */
+#define PHY_REG_STATUS 1
+
+#define PHY_100BT4_CAPABLE 0x8000
+#define PHY_100BTX_FD_CAPABLE 0x4000
+#define PHY_100BTX_CAPABLE 0x2000
+#define PHY_10BT_FD_CAPABLE 0x1000
+#define PHY_10BT_CAPABLE 0x0800
+#define PHY_MII_SUPPRESS_CAPABLE 0x0040
+#define PHY_AUTO_NEG_ACKNOWLEDGE 0x0020
+#define PHY_REMOTE_FAULT 0x0010
+#define PHY_AUTO_NEG_CAPABLE 0x0008
+#define PHY_LINK_STATUS 0x0004
+#define PHY_JABBER_DETECT 0x0002
+#define PHY_EXTENDED_CAPABILITY 0x0001
+
+#define KS884X_PHY_ID_1_OFFSET 0x04
+#define KS884X_PHY_ID_2_OFFSET 0x06
+
+/* PHY Identifier Registers */
+#define PHY_REG_ID_1 2
+#define PHY_REG_ID_2 3
+
+#define KS884X_PHY_AUTO_NEG_OFFSET 0x08
+
+/* Auto-Negotiation Advertisement Register */
+#define PHY_REG_AUTO_NEGOTIATION 4
+
+#define PHY_AUTO_NEG_NEXT_PAGE 0x8000
+#define PHY_AUTO_NEG_REMOTE_FAULT 0x2000
+/* Not supported. */
+#define PHY_AUTO_NEG_ASYM_PAUSE 0x0800
+#define PHY_AUTO_NEG_SYM_PAUSE 0x0400
+#define PHY_AUTO_NEG_100BT4 0x0200
+#define PHY_AUTO_NEG_100BTX_FD 0x0100
+#define PHY_AUTO_NEG_100BTX 0x0080
+#define PHY_AUTO_NEG_10BT_FD 0x0040
+#define PHY_AUTO_NEG_10BT 0x0020
+#define PHY_AUTO_NEG_SELECTOR 0x001F
+#define PHY_AUTO_NEG_802_3 0x0001
+
+#define PHY_AUTO_NEG_PAUSE (PHY_AUTO_NEG_SYM_PAUSE | PHY_AUTO_NEG_ASYM_PAUSE)
+
+#define KS884X_PHY_REMOTE_CAP_OFFSET 0x0A
+
+/* Auto-Negotiation Link Partner Ability Register */
+#define PHY_REG_REMOTE_CAPABILITY 5
+
+#define PHY_REMOTE_NEXT_PAGE 0x8000
+#define PHY_REMOTE_ACKNOWLEDGE 0x4000
+#define PHY_REMOTE_REMOTE_FAULT 0x2000
+#define PHY_REMOTE_SYM_PAUSE 0x0400
+#define PHY_REMOTE_100BTX_FD 0x0100
+#define PHY_REMOTE_100BTX 0x0080
+#define PHY_REMOTE_10BT_FD 0x0040
+#define PHY_REMOTE_10BT 0x0020
+
+/* P1VCT */
+#define KS884X_P1VCT_P 0x04F0
+#define KS884X_P1PHYCTRL_P 0x04F2
+
+/* P2VCT */
+#define KS884X_P2VCT_P 0x04F4
+#define KS884X_P2PHYCTRL_P 0x04F6
+
+#define KS884X_PHY_SPECIAL_OFFSET KS884X_P1VCT_P
+#define PHY_SPECIAL_INTERVAL (KS884X_P2VCT_P - KS884X_P1VCT_P)
+
+#define KS884X_PHY_LINK_MD_OFFSET 0x00
+
+#define PHY_START_CABLE_DIAG 0x8000
+#define PHY_CABLE_DIAG_RESULT 0x6000
+#define PHY_CABLE_STAT_NORMAL 0x0000
+#define PHY_CABLE_STAT_OPEN 0x2000
+#define PHY_CABLE_STAT_SHORT 0x4000
+#define PHY_CABLE_STAT_FAILED 0x6000
+#define PHY_CABLE_10M_SHORT 0x1000
+#define PHY_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PHY_PHY_CTRL_OFFSET 0x02
+
+#define PHY_STAT_REVERSED_POLARITY 0x0020
+#define PHY_STAT_MDIX 0x0010
+#define PHY_FORCE_LINK 0x0008
+#define PHY_POWER_SAVING_DISABLE 0x0004
+#define PHY_REMOTE_LOOPBACK 0x0002
+
+/* SIDER */
+#define KS884X_SIDER_P 0x0400
+#define KS884X_CHIP_ID_OFFSET KS884X_SIDER_P
+#define KS884X_FAMILY_ID_OFFSET (KS884X_CHIP_ID_OFFSET + 1)
+
+#define REG_FAMILY_ID 0x88
+
+#define REG_CHIP_ID_41 0x8810
+#define REG_CHIP_ID_42 0x8800
+
+#define KS884X_CHIP_ID_MASK_41 0xFF10
+#define KS884X_CHIP_ID_MASK 0xFFF0
+#define KS884X_CHIP_ID_SHIFT 4
+#define KS884X_REVISION_MASK 0x000E
+#define KS884X_REVISION_SHIFT 1
+#define KS8842_START 0x0001
+
+#define CHIP_IP_41_M 0x8810
+#define CHIP_IP_42_M 0x8800
+#define CHIP_IP_61_M 0x8890
+#define CHIP_IP_62_M 0x8880
+
+#define CHIP_IP_41_P 0x8850
+#define CHIP_IP_42_P 0x8840
+#define CHIP_IP_61_P 0x88D0
+#define CHIP_IP_62_P 0x88C0
+
+/* SGCR1 */
+#define KS8842_SGCR1_P 0x0402
+#define KS8842_SWITCH_CTRL_1_OFFSET KS8842_SGCR1_P
+
+#define SWITCH_PASS_ALL 0x8000
+#define SWITCH_TX_FLOW_CTRL 0x2000
+#define SWITCH_RX_FLOW_CTRL 0x1000
+#define SWITCH_CHECK_LENGTH 0x0800
+#define SWITCH_AGING_ENABLE 0x0400
+#define SWITCH_FAST_AGING 0x0200
+#define SWITCH_AGGR_BACKOFF 0x0100
+#define SWITCH_PASS_PAUSE 0x0008
+#define SWITCH_LINK_AUTO_AGING 0x0001
+
+/* SGCR2 */
+#define KS8842_SGCR2_P 0x0404
+#define KS8842_SWITCH_CTRL_2_OFFSET KS8842_SGCR2_P
+
+#define SWITCH_VLAN_ENABLE 0x8000
+#define SWITCH_IGMP_SNOOP 0x4000
+#define IPV6_MLD_SNOOP_ENABLE 0x2000
+#define IPV6_MLD_SNOOP_OPTION 0x1000
+#define PRIORITY_SCHEME_SELECT 0x0800
+#define SWITCH_MIRROR_RX_TX 0x0100
+#define UNICAST_VLAN_BOUNDARY 0x0080
+#define MULTICAST_STORM_DISABLE 0x0040
+#define SWITCH_BACK_PRESSURE 0x0020
+#define FAIR_FLOW_CTRL 0x0010
+#define NO_EXC_COLLISION_DROP 0x0008
+#define SWITCH_HUGE_PACKET 0x0004
+#define SWITCH_LEGAL_PACKET 0x0002
+#define SWITCH_BUF_RESERVE 0x0001
+
+/* SGCR3 */
+#define KS8842_SGCR3_P 0x0406
+#define KS8842_SWITCH_CTRL_3_OFFSET KS8842_SGCR3_P
+
+#define BROADCAST_STORM_RATE_LO 0xFF00
+#define SWITCH_REPEATER 0x0080
+#define SWITCH_HALF_DUPLEX 0x0040
+#define SWITCH_FLOW_CTRL 0x0020
+#define SWITCH_10_MBIT 0x0010
+#define SWITCH_REPLACE_NULL_VID 0x0008
+#define BROADCAST_STORM_RATE_HI 0x0007
+
+#define BROADCAST_STORM_RATE 0x07FF
+
+/* SGCR4 */
+#define KS8842_SGCR4_P 0x0408
+
+/* SGCR5 */
+#define KS8842_SGCR5_P 0x040A
+#define KS8842_SWITCH_CTRL_5_OFFSET KS8842_SGCR5_P
+
+#define LED_MODE 0x8200
+#define LED_SPEED_DUPLEX_ACT 0x0000
+#define LED_SPEED_DUPLEX_LINK_ACT 0x8000
+#define LED_DUPLEX_10_100 0x0200
+
+/* SGCR6 */
+#define KS8842_SGCR6_P 0x0410
+#define KS8842_SWITCH_CTRL_6_OFFSET KS8842_SGCR6_P
+
+#define KS8842_PRIORITY_MASK 3
+#define KS8842_PRIORITY_SHIFT 2
+
+/* SGCR7 */
+#define KS8842_SGCR7_P 0x0412
+#define KS8842_SWITCH_CTRL_7_OFFSET KS8842_SGCR7_P
+
+#define SWITCH_UNK_DEF_PORT_ENABLE 0x0008
+#define SWITCH_UNK_DEF_PORT_3 0x0004
+#define SWITCH_UNK_DEF_PORT_2 0x0002
+#define SWITCH_UNK_DEF_PORT_1 0x0001
+
+/* MACAR1 */
+#define KS8842_MACAR1_P 0x0470
+#define KS8842_MACAR2_P 0x0472
+#define KS8842_MACAR3_P 0x0474
+#define KS8842_MAC_ADDR_1_OFFSET KS8842_MACAR1_P
+#define KS8842_MAC_ADDR_0_OFFSET (KS8842_MAC_ADDR_1_OFFSET + 1)
+#define KS8842_MAC_ADDR_3_OFFSET KS8842_MACAR2_P
+#define KS8842_MAC_ADDR_2_OFFSET (KS8842_MAC_ADDR_3_OFFSET + 1)
+#define KS8842_MAC_ADDR_5_OFFSET KS8842_MACAR3_P
+#define KS8842_MAC_ADDR_4_OFFSET (KS8842_MAC_ADDR_5_OFFSET + 1)
+
+/* TOSR1 */
+#define KS8842_TOSR1_P 0x0480
+#define KS8842_TOSR2_P 0x0482
+#define KS8842_TOSR3_P 0x0484
+#define KS8842_TOSR4_P 0x0486
+#define KS8842_TOSR5_P 0x0488
+#define KS8842_TOSR6_P 0x048A
+#define KS8842_TOSR7_P 0x0490
+#define KS8842_TOSR8_P 0x0492
+#define KS8842_TOS_1_OFFSET KS8842_TOSR1_P
+#define KS8842_TOS_2_OFFSET KS8842_TOSR2_P
+#define KS8842_TOS_3_OFFSET KS8842_TOSR3_P
+#define KS8842_TOS_4_OFFSET KS8842_TOSR4_P
+#define KS8842_TOS_5_OFFSET KS8842_TOSR5_P
+#define KS8842_TOS_6_OFFSET KS8842_TOSR6_P
+
+#define KS8842_TOS_7_OFFSET KS8842_TOSR7_P
+#define KS8842_TOS_8_OFFSET KS8842_TOSR8_P
+
+/* P1CR1 */
+#define KS8842_P1CR1_P 0x0500
+#define KS8842_P1CR2_P 0x0502
+#define KS8842_P1VIDR_P 0x0504
+#define KS8842_P1CR3_P 0x0506
+#define KS8842_P1IRCR_P 0x0508
+#define KS8842_P1ERCR_P 0x050A
+#define KS884X_P1SCSLMD_P 0x0510
+#define KS884X_P1CR4_P 0x0512
+#define KS884X_P1SR_P 0x0514
+
+/* P2CR1 */
+#define KS8842_P2CR1_P 0x0520
+#define KS8842_P2CR2_P 0x0522
+#define KS8842_P2VIDR_P 0x0524
+#define KS8842_P2CR3_P 0x0526
+#define KS8842_P2IRCR_P 0x0528
+#define KS8842_P2ERCR_P 0x052A
+#define KS884X_P2SCSLMD_P 0x0530
+#define KS884X_P2CR4_P 0x0532
+#define KS884X_P2SR_P 0x0534
+
+/* P3CR1 */
+#define KS8842_P3CR1_P 0x0540
+#define KS8842_P3CR2_P 0x0542
+#define KS8842_P3VIDR_P 0x0544
+#define KS8842_P3CR3_P 0x0546
+#define KS8842_P3IRCR_P 0x0548
+#define KS8842_P3ERCR_P 0x054A
+
+#define KS8842_PORT_1_CTRL_1 KS8842_P1CR1_P
+#define KS8842_PORT_2_CTRL_1 KS8842_P2CR1_P
+#define KS8842_PORT_3_CTRL_1 KS8842_P3CR1_P
+
+#define PORT_CTRL_ADDR(port, addr) \
+ (addr = KS8842_PORT_1_CTRL_1 + (port) * \
+ (KS8842_PORT_2_CTRL_1 - KS8842_PORT_1_CTRL_1))
+
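PORT_CTRL_ADDR() turns a zero-based port index into the base of that port's register block; per-register offsets such as KS8842_PORT_CTRL_2_OFFSET (defined just below) are then added to reach individual registers. A hedged usage sketch, assuming a struct ksz_hw *hw is in scope:

u32 addr;
u16 ctrl2;

PORT_CTRL_ADDR(1, addr);	/* addr = KS8842_PORT_2_CTRL_1 for port index 1 */
ctrl2 = readw(hw->io + addr + KS8842_PORT_CTRL_2_OFFSET);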
+#define KS8842_PORT_CTRL_1_OFFSET 0x00
+
+#define PORT_BROADCAST_STORM 0x0080
+#define PORT_DIFFSERV_ENABLE 0x0040
+#define PORT_802_1P_ENABLE 0x0020
+#define PORT_BASED_PRIORITY_MASK 0x0018
+#define PORT_BASED_PRIORITY_BASE 0x0003
+#define PORT_BASED_PRIORITY_SHIFT 3
+#define PORT_BASED_PRIORITY_0 0x0000
+#define PORT_BASED_PRIORITY_1 0x0008
+#define PORT_BASED_PRIORITY_2 0x0010
+#define PORT_BASED_PRIORITY_3 0x0018
+#define PORT_INSERT_TAG 0x0004
+#define PORT_REMOVE_TAG 0x0002
+#define PORT_PRIO_QUEUE_ENABLE 0x0001
+
+#define KS8842_PORT_CTRL_2_OFFSET 0x02
+
+#define PORT_INGRESS_VLAN_FILTER 0x4000
+#define PORT_DISCARD_NON_VID 0x2000
+#define PORT_FORCE_FLOW_CTRL 0x1000
+#define PORT_BACK_PRESSURE 0x0800
+#define PORT_TX_ENABLE 0x0400
+#define PORT_RX_ENABLE 0x0200
+#define PORT_LEARN_DISABLE 0x0100
+#define PORT_MIRROR_SNIFFER 0x0080
+#define PORT_MIRROR_RX 0x0040
+#define PORT_MIRROR_TX 0x0020
+#define PORT_USER_PRIORITY_CEILING 0x0008
+#define PORT_VLAN_MEMBERSHIP 0x0007
+
+#define KS8842_PORT_CTRL_VID_OFFSET 0x04
+
+#define PORT_DEFAULT_VID 0x0001
+
+#define KS8842_PORT_CTRL_3_OFFSET 0x06
+
+#define PORT_INGRESS_LIMIT_MODE 0x000C
+#define PORT_INGRESS_ALL 0x0000
+#define PORT_INGRESS_UNICAST 0x0004
+#define PORT_INGRESS_MULTICAST 0x0008
+#define PORT_INGRESS_BROADCAST 0x000C
+#define PORT_COUNT_IFG 0x0002
+#define PORT_COUNT_PREAMBLE 0x0001
+
+#define KS8842_PORT_IN_RATE_OFFSET 0x08
+#define KS8842_PORT_OUT_RATE_OFFSET 0x0A
+
+#define PORT_PRIORITY_RATE 0x0F
+#define PORT_PRIORITY_RATE_SHIFT 4
+
+#define KS884X_PORT_LINK_MD 0x10
+
+#define PORT_CABLE_10M_SHORT 0x8000
+#define PORT_CABLE_DIAG_RESULT 0x6000
+#define PORT_CABLE_STAT_NORMAL 0x0000
+#define PORT_CABLE_STAT_OPEN 0x2000
+#define PORT_CABLE_STAT_SHORT 0x4000
+#define PORT_CABLE_STAT_FAILED 0x6000
+#define PORT_START_CABLE_DIAG 0x1000
+#define PORT_FORCE_LINK 0x0800
+#define PORT_POWER_SAVING_DISABLE 0x0400
+#define PORT_PHY_REMOTE_LOOPBACK 0x0200
+#define PORT_CABLE_FAULT_COUNTER 0x01FF
+
+#define KS884X_PORT_CTRL_4_OFFSET 0x12
+
+#define PORT_LED_OFF 0x8000
+#define PORT_TX_DISABLE 0x4000
+#define PORT_AUTO_NEG_RESTART 0x2000
+#define PORT_REMOTE_FAULT_DISABLE 0x1000
+#define PORT_POWER_DOWN 0x0800
+#define PORT_AUTO_MDIX_DISABLE 0x0400
+#define PORT_FORCE_MDIX 0x0200
+#define PORT_LOOPBACK 0x0100
+#define PORT_AUTO_NEG_ENABLE 0x0080
+#define PORT_FORCE_100_MBIT 0x0040
+#define PORT_FORCE_FULL_DUPLEX 0x0020
+#define PORT_AUTO_NEG_SYM_PAUSE 0x0010
+#define PORT_AUTO_NEG_100BTX_FD 0x0008
+#define PORT_AUTO_NEG_100BTX 0x0004
+#define PORT_AUTO_NEG_10BT_FD 0x0002
+#define PORT_AUTO_NEG_10BT 0x0001
+
+#define KS884X_PORT_STATUS_OFFSET 0x14
+
+#define PORT_HP_MDIX 0x8000
+#define PORT_REVERSED_POLARITY 0x2000
+#define PORT_RX_FLOW_CTRL 0x0800
+#define PORT_TX_FLOW_CTRL 0x1000
+#define PORT_STATUS_SPEED_100MBIT 0x0400
+#define PORT_STATUS_FULL_DUPLEX 0x0200
+#define PORT_REMOTE_FAULT 0x0100
+#define PORT_MDIX_STATUS 0x0080
+#define PORT_AUTO_NEG_COMPLETE 0x0040
+#define PORT_STATUS_LINK_GOOD 0x0020
+#define PORT_REMOTE_SYM_PAUSE 0x0010
+#define PORT_REMOTE_100BTX_FD 0x0008
+#define PORT_REMOTE_100BTX 0x0004
+#define PORT_REMOTE_10BT_FD 0x0002
+#define PORT_REMOTE_10BT 0x0001
+
+/*
+#define STATIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 00-00070000-00000000
+#define STATIC_MAC_TABLE_VALID 00-00080000-00000000
+#define STATIC_MAC_TABLE_OVERRIDE 00-00100000-00000000
+#define STATIC_MAC_TABLE_USE_FID 00-00200000-00000000
+#define STATIC_MAC_TABLE_FID 00-03C00000-00000000
+*/
+
+#define STATIC_MAC_TABLE_ADDR 0x0000FFFF
+#define STATIC_MAC_TABLE_FWD_PORTS 0x00070000
+#define STATIC_MAC_TABLE_VALID 0x00080000
+#define STATIC_MAC_TABLE_OVERRIDE 0x00100000
+#define STATIC_MAC_TABLE_USE_FID 0x00200000
+#define STATIC_MAC_TABLE_FID 0x03C00000
+
+#define STATIC_MAC_FWD_PORTS_SHIFT 16
+#define STATIC_MAC_FID_SHIFT 22
+
+/*
+#define VLAN_TABLE_VID 00-00000000-00000FFF
+#define VLAN_TABLE_FID 00-00000000-0000F000
+#define VLAN_TABLE_MEMBERSHIP 00-00000000-00070000
+#define VLAN_TABLE_VALID 00-00000000-00080000
+*/
+
+#define VLAN_TABLE_VID 0x00000FFF
+#define VLAN_TABLE_FID 0x0000F000
+#define VLAN_TABLE_MEMBERSHIP 0x00070000
+#define VLAN_TABLE_VALID 0x00080000
+
+#define VLAN_TABLE_FID_SHIFT 12
+#define VLAN_TABLE_MEMBERSHIP_SHIFT 16
+
+/*
+#define DYNAMIC_MAC_TABLE_ADDR 00-0000FFFF-FFFFFFFF
+#define DYNAMIC_MAC_TABLE_FID 00-000F0000-00000000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 00-00300000-00000000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 00-00C00000-00000000
+#define DYNAMIC_MAC_TABLE_ENTRIES 03-FF000000-00000000
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 04-00000000-00000000
+#define DYNAMIC_MAC_TABLE_RESERVED 78-00000000-00000000
+#define DYNAMIC_MAC_TABLE_NOT_READY 80-00000000-00000000
+*/
+
+#define DYNAMIC_MAC_TABLE_ADDR 0x0000FFFF
+#define DYNAMIC_MAC_TABLE_FID 0x000F0000
+#define DYNAMIC_MAC_TABLE_SRC_PORT 0x00300000
+#define DYNAMIC_MAC_TABLE_TIMESTAMP 0x00C00000
+#define DYNAMIC_MAC_TABLE_ENTRIES 0xFF000000
+
+#define DYNAMIC_MAC_TABLE_ENTRIES_H 0x03
+#define DYNAMIC_MAC_TABLE_MAC_EMPTY 0x04
+#define DYNAMIC_MAC_TABLE_RESERVED 0x78
+#define DYNAMIC_MAC_TABLE_NOT_READY 0x80
+
+#define DYNAMIC_MAC_FID_SHIFT 16
+#define DYNAMIC_MAC_SRC_PORT_SHIFT 20
+#define DYNAMIC_MAC_TIMESTAMP_SHIFT 22
+#define DYNAMIC_MAC_ENTRIES_SHIFT 24
+#define DYNAMIC_MAC_ENTRIES_H_SHIFT 8
+
+/*
+#define MIB_COUNTER_VALUE 00-00000000-3FFFFFFF
+#define MIB_COUNTER_VALID 00-00000000-40000000
+#define MIB_COUNTER_OVERFLOW 00-00000000-80000000
+*/
+
+#define MIB_COUNTER_VALUE 0x3FFFFFFF
+#define MIB_COUNTER_VALID 0x40000000
+#define MIB_COUNTER_OVERFLOW 0x80000000
+
+#define MIB_PACKET_DROPPED 0x0000FFFF
+
+#define KS_MIB_PACKET_DROPPED_TX_0 0x100
+#define KS_MIB_PACKET_DROPPED_TX_1 0x101
+#define KS_MIB_PACKET_DROPPED_TX 0x102
+#define KS_MIB_PACKET_DROPPED_RX_0 0x103
+#define KS_MIB_PACKET_DROPPED_RX_1 0x104
+#define KS_MIB_PACKET_DROPPED_RX 0x105
+
+/* Change default LED mode. */
+#define SET_DEFAULT_LED LED_SPEED_DUPLEX_ACT
+
+#define MAC_ADDR_LEN 6
+#define MAC_ADDR_ORDER(i) (MAC_ADDR_LEN - 1 - (i))
+
+#define MAX_ETHERNET_BODY_SIZE 1500
+#define ETHERNET_HEADER_SIZE 14
+
+#define MAX_ETHERNET_PACKET_SIZE \
+ (MAX_ETHERNET_BODY_SIZE + ETHERNET_HEADER_SIZE)
+
+#define REGULAR_RX_BUF_SIZE (MAX_ETHERNET_PACKET_SIZE + 4)
+#define MAX_RX_BUF_SIZE (1912 + 4)
+
+#define ADDITIONAL_ENTRIES 16
+#define MAX_MULTICAST_LIST 32
+
+#define HW_MULTICAST_SIZE 8
+
+#define HW_TO_DEV_PORT(port) (port - 1)
+
+enum {
+ media_connected,
+ media_disconnected
+};
+
+enum {
+ OID_COUNTER_UNKOWN,
+
+ OID_COUNTER_FIRST,
+
+ /* total transmit errors */
+ OID_COUNTER_XMIT_ERROR,
+
+ /* total receive errors */
+ OID_COUNTER_RCV_ERROR,
+
+ OID_COUNTER_LAST
+};
+
+/*
+ * Hardware descriptor definitions
+ */
+
+#define DESC_ALIGNMENT 16
+#define BUFFER_ALIGNMENT 8
+
+#define NUM_OF_RX_DESC 64
+#define NUM_OF_TX_DESC 64
+
+#define KS_DESC_RX_FRAME_LEN 0x000007FF
+#define KS_DESC_RX_FRAME_TYPE 0x00008000
+#define KS_DESC_RX_ERROR_CRC 0x00010000
+#define KS_DESC_RX_ERROR_RUNT 0x00020000
+#define KS_DESC_RX_ERROR_TOO_LONG 0x00040000
+#define KS_DESC_RX_ERROR_PHY 0x00080000
+#define KS884X_DESC_RX_PORT_MASK 0x00300000
+#define KS_DESC_RX_MULTICAST 0x01000000
+#define KS_DESC_RX_ERROR 0x02000000
+#define KS_DESC_RX_ERROR_CSUM_UDP 0x04000000
+#define KS_DESC_RX_ERROR_CSUM_TCP 0x08000000
+#define KS_DESC_RX_ERROR_CSUM_IP 0x10000000
+#define KS_DESC_RX_LAST 0x20000000
+#define KS_DESC_RX_FIRST 0x40000000
+#define KS_DESC_RX_ERROR_COND \
+ (KS_DESC_RX_ERROR_CRC | \
+ KS_DESC_RX_ERROR_RUNT | \
+ KS_DESC_RX_ERROR_PHY | \
+ KS_DESC_RX_ERROR_TOO_LONG)
+
+#define KS_DESC_HW_OWNED 0x80000000
+
+#define KS_DESC_BUF_SIZE 0x000007FF
+#define KS884X_DESC_TX_PORT_MASK 0x00300000
+#define KS_DESC_END_OF_RING 0x02000000
+#define KS_DESC_TX_CSUM_GEN_UDP 0x04000000
+#define KS_DESC_TX_CSUM_GEN_TCP 0x08000000
+#define KS_DESC_TX_CSUM_GEN_IP 0x10000000
+#define KS_DESC_TX_LAST 0x20000000
+#define KS_DESC_TX_FIRST 0x40000000
+#define KS_DESC_TX_INTERRUPT 0x80000000
+
+#define KS_DESC_PORT_SHIFT 20
+
+#define KS_DESC_RX_MASK (KS_DESC_BUF_SIZE)
+
+#define KS_DESC_TX_MASK \
+ (KS_DESC_TX_INTERRUPT | \
+ KS_DESC_TX_FIRST | \
+ KS_DESC_TX_LAST | \
+ KS_DESC_TX_CSUM_GEN_IP | \
+ KS_DESC_TX_CSUM_GEN_TCP | \
+ KS_DESC_TX_CSUM_GEN_UDP | \
+ KS_DESC_BUF_SIZE)
+
+struct ksz_desc_rx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 first_desc:1;
+ u32 last_desc:1;
+ u32 csum_err_ip:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_udp:1;
+ u32 error:1;
+ u32 multicast:1;
+ u32 src_port:4;
+ u32 err_phy:1;
+ u32 err_too_long:1;
+ u32 err_runt:1;
+ u32 err_crc:1;
+ u32 frame_type:1;
+ u32 reserved1:4;
+ u32 frame_len:11;
+#else
+ u32 frame_len:11;
+ u32 reserved1:4;
+ u32 frame_type:1;
+ u32 err_crc:1;
+ u32 err_runt:1;
+ u32 err_too_long:1;
+ u32 err_phy:1;
+ u32 src_port:4;
+ u32 multicast:1;
+ u32 error:1;
+ u32 csum_err_udp:1;
+ u32 csum_err_tcp:1;
+ u32 csum_err_ip:1;
+ u32 last_desc:1;
+ u32 first_desc:1;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_tx_stat {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 hw_owned:1;
+ u32 reserved1:31;
+#else
+ u32 reserved1:31;
+ u32 hw_owned:1;
+#endif
+};
+
+struct ksz_desc_rx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 reserved4:6;
+ u32 end_of_ring:1;
+ u32 reserved3:14;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:14;
+ u32 end_of_ring:1;
+ u32 reserved4:6;
+#endif
+};
+
+struct ksz_desc_tx_buf {
+#ifdef __BIG_ENDIAN_BITFIELD
+ u32 intr:1;
+ u32 first_seg:1;
+ u32 last_seg:1;
+ u32 csum_gen_ip:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_udp:1;
+ u32 end_of_ring:1;
+ u32 reserved4:1;
+ u32 dest_port:4;
+ u32 reserved3:9;
+ u32 buf_size:11;
+#else
+ u32 buf_size:11;
+ u32 reserved3:9;
+ u32 dest_port:4;
+ u32 reserved4:1;
+ u32 end_of_ring:1;
+ u32 csum_gen_udp:1;
+ u32 csum_gen_tcp:1;
+ u32 csum_gen_ip:1;
+ u32 last_seg:1;
+ u32 first_seg:1;
+ u32 intr:1;
+#endif
+};
+
+union desc_stat {
+ struct ksz_desc_rx_stat rx;
+ struct ksz_desc_tx_stat tx;
+ u32 data;
+};
+
+union desc_buf {
+ struct ksz_desc_rx_buf rx;
+ struct ksz_desc_tx_buf tx;
+ u32 data;
+};
+
+/**
+ * struct ksz_hw_desc - Hardware descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @addr: Physical address of memory buffer.
+ * @next: Pointer to next hardware descriptor.
+ */
+struct ksz_hw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 addr;
+ u32 next;
+};
+
+/**
+ * struct ksz_sw_desc - Software descriptor data structure
+ * @ctrl: Descriptor control value.
+ * @buf: Descriptor buffer value.
+ * @buf_size: Current buffers size value in hardware descriptor.
+ */
+struct ksz_sw_desc {
+ union desc_stat ctrl;
+ union desc_buf buf;
+ u32 buf_size;
+};
+
+/**
+ * struct ksz_dma_buf - OS dependent DMA buffer data structure
+ * @skb: Associated socket buffer.
+ * @dma: Associated physical DMA address.
+ * @len: Actual length used.
+ */
+struct ksz_dma_buf {
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ int len;
+};
+
+/**
+ * struct ksz_desc - Descriptor structure
+ * @phw: Hardware descriptor pointer to uncached physical memory.
+ * @sw: Cached memory to hold hardware descriptor values for
+ * manipulation.
+ * @dma_buf: Operating system dependent data structure to hold physical
+ * memory buffer allocation information.
+ */
+struct ksz_desc {
+ struct ksz_hw_desc *phw;
+ struct ksz_sw_desc sw;
+ struct ksz_dma_buf dma_buf;
+};
+
+#define DMA_BUFFER(desc) ((struct ksz_dma_buf *)(&(desc)->dma_buf))
+
+/**
+ * struct ksz_desc_info - Descriptor information data structure
+ * @ring: First descriptor in the ring.
+ * @cur: Current descriptor being manipulated.
+ * @ring_virt: First hardware descriptor in the ring.
+ * @ring_phys: The physical address of the first descriptor of the ring.
+ * @size: Size of hardware descriptor.
+ * @alloc: Number of descriptors allocated.
+ * @avail: Number of descriptors available for use.
+ * @last: Index for last descriptor released to hardware.
+ * @next: Index for next descriptor available for use.
+ * @mask: Mask for index wrapping.
+ */
+struct ksz_desc_info {
+ struct ksz_desc *ring;
+ struct ksz_desc *cur;
+ struct ksz_hw_desc *ring_virt;
+ u32 ring_phys;
+ int size;
+ int alloc;
+ int avail;
+ int last;
+ int next;
+ int mask;
+};
+
+/*
+ * KSZ8842 switch definitions
+ */
+
+enum {
+ TABLE_STATIC_MAC = 0,
+ TABLE_VLAN,
+ TABLE_DYNAMIC_MAC,
+ TABLE_MIB
+};
+
+#define LEARNED_MAC_TABLE_ENTRIES 1024
+#define STATIC_MAC_TABLE_ENTRIES 8
+
+/**
+ * struct ksz_mac_table - Static MAC table data structure
+ * @mac_addr: MAC address to filter.
+ * @vid: VID value.
+ * @fid: FID value.
+ * @ports: Port membership.
+ * @override: Override setting.
+ * @use_fid: FID use setting.
+ * @valid: Valid setting indicating the entry is being used.
+ */
+struct ksz_mac_table {
+ u8 mac_addr[MAC_ADDR_LEN];
+ u16 vid;
+ u8 fid;
+ u8 ports;
+ u8 override:1;
+ u8 use_fid:1;
+ u8 valid:1;
+};
+
+#define VLAN_TABLE_ENTRIES 16
+
+/**
+ * struct ksz_vlan_table - VLAN table data structure
+ * @vid: VID value.
+ * @fid: FID value.
+ * @member: Port membership.
+ */
+struct ksz_vlan_table {
+ u16 vid;
+ u8 fid;
+ u8 member;
+};
+
+#define DIFFSERV_ENTRIES 64
+#define PRIO_802_1P_ENTRIES 8
+#define PRIO_QUEUES 4
+
+#define SWITCH_PORT_NUM 2
+#define TOTAL_PORT_NUM (SWITCH_PORT_NUM + 1)
+#define HOST_MASK (1 << SWITCH_PORT_NUM)
+#define PORT_MASK 7
+
+#define MAIN_PORT 0
+#define OTHER_PORT 1
+#define HOST_PORT SWITCH_PORT_NUM
+
+#define PORT_COUNTER_NUM 0x20
+#define TOTAL_PORT_COUNTER_NUM (PORT_COUNTER_NUM + 2)
+
+#define MIB_COUNTER_RX_LO_PRIORITY 0x00
+#define MIB_COUNTER_RX_HI_PRIORITY 0x01
+#define MIB_COUNTER_RX_UNDERSIZE 0x02
+#define MIB_COUNTER_RX_FRAGMENT 0x03
+#define MIB_COUNTER_RX_OVERSIZE 0x04
+#define MIB_COUNTER_RX_JABBER 0x05
+#define MIB_COUNTER_RX_SYMBOL_ERR 0x06
+#define MIB_COUNTER_RX_CRC_ERR 0x07
+#define MIB_COUNTER_RX_ALIGNMENT_ERR 0x08
+#define MIB_COUNTER_RX_CTRL_8808 0x09
+#define MIB_COUNTER_RX_PAUSE 0x0A
+#define MIB_COUNTER_RX_BROADCAST 0x0B
+#define MIB_COUNTER_RX_MULTICAST 0x0C
+#define MIB_COUNTER_RX_UNICAST 0x0D
+#define MIB_COUNTER_RX_OCTET_64 0x0E
+#define MIB_COUNTER_RX_OCTET_65_127 0x0F
+#define MIB_COUNTER_RX_OCTET_128_255 0x10
+#define MIB_COUNTER_RX_OCTET_256_511 0x11
+#define MIB_COUNTER_RX_OCTET_512_1023 0x12
+#define MIB_COUNTER_RX_OCTET_1024_1522 0x13
+#define MIB_COUNTER_TX_LO_PRIORITY 0x14
+#define MIB_COUNTER_TX_HI_PRIORITY 0x15
+#define MIB_COUNTER_TX_LATE_COLLISION 0x16
+#define MIB_COUNTER_TX_PAUSE 0x17
+#define MIB_COUNTER_TX_BROADCAST 0x18
+#define MIB_COUNTER_TX_MULTICAST 0x19
+#define MIB_COUNTER_TX_UNICAST 0x1A
+#define MIB_COUNTER_TX_DEFERRED 0x1B
+#define MIB_COUNTER_TX_TOTAL_COLLISION 0x1C
+#define MIB_COUNTER_TX_EXCESS_COLLISION 0x1D
+#define MIB_COUNTER_TX_SINGLE_COLLISION 0x1E
+#define MIB_COUNTER_TX_MULTI_COLLISION 0x1F
+
+#define MIB_COUNTER_RX_DROPPED_PACKET 0x20
+#define MIB_COUNTER_TX_DROPPED_PACKET 0x21
+
+/**
+ * struct ksz_port_mib - Port MIB data structure
+ * @cnt_ptr: Current pointer to MIB counter index.
+ * @link_down: Indication the link has just gone down.
+ * @state: Connection status of the port.
+ * @mib_start: The starting counter index. Some ports do not start at 0.
+ * @counter: 64-bit MIB counter value.
+ * @dropped: Temporary buffer to remember last read packet dropped values.
+ *
+ * MIB counters need to be read periodically so that they do not overflow and
+ * report incorrect values. A balance is needed between reading them often
+ * enough to avoid overflow and not wasting too much CPU time.
+ *
+ * It is pointless to read MIB counters when the port is disconnected. The
+ * @state field provides the connection status so that MIB counters are read
+ * only when the port is connected. The @link_down field indicates the port
+ * has just been disconnected so that all MIB counters are read one last time
+ * to update the information.
+ */
+struct ksz_port_mib {
+ u8 cnt_ptr;
+ u8 link_down;
+ u8 state;
+ u8 mib_start;
+
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+ u32 dropped[2];
+};
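As a concrete illustration of the periodic-read requirement described above, a hypothetical polling step for one port might look like the following; hw_r_mib_cnt() is an assumed accessor, only the ksz_port_mib fields and the constants come from this file:

static void example_service_mib(struct ksz_port_mib *mib, int port)
{
	/* Skip disconnected ports, but flush counters once on link loss. */
	if (media_disconnected == mib->state && !mib->link_down)
		return;

	while (mib->cnt_ptr < PORT_COUNTER_NUM) {
		/* Assumed helper: read one hardware counter into counter[]. */
		hw_r_mib_cnt(port, mib->cnt_ptr, &mib->counter[mib->cnt_ptr]);
		mib->cnt_ptr++;
	}
	mib->cnt_ptr = mib->mib_start;
	mib->link_down = 0;
}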
+
+/**
+ * struct ksz_port_cfg - Port configuration data structure
+ * @vid: VID value.
+ * @member: Port membership.
+ * @port_prio: Port priority.
+ * @rx_rate: Receive priority rate.
+ * @tx_rate: Transmit priority rate.
+ * @stp_state: Current Spanning Tree Protocol state.
+ */
+struct ksz_port_cfg {
+ u16 vid;
+ u8 member;
+ u8 port_prio;
+ u32 rx_rate[PRIO_QUEUES];
+ u32 tx_rate[PRIO_QUEUES];
+ int stp_state;
+};
+
+/**
+ * struct ksz_switch - KSZ8842 switch data structure
+ * @mac_table: MAC table entries information.
+ * @vlan_table: VLAN table entries information.
+ * @port_cfg: Port configuration information.
+ * @diffserv: DiffServ priority settings. Indexed by the 6-bit ToS
+ * (bit7 ~ bit2) field.
+ * @p_802_1p: 802.1P priority settings. Indexed by the 3-bit 802.1p
+ * tag priority field.
+ * @br_addr: Bridge address. Used for STP.
+ * @other_addr: Other MAC address. Used for multiple network device mode.
+ * @broad_per: Broadcast storm percentage.
+ * @member: Current port membership. Used for STP.
+ */
+struct ksz_switch {
+ struct ksz_mac_table mac_table[STATIC_MAC_TABLE_ENTRIES];
+ struct ksz_vlan_table vlan_table[VLAN_TABLE_ENTRIES];
+ struct ksz_port_cfg port_cfg[TOTAL_PORT_NUM];
+
+ u8 diffserv[DIFFSERV_ENTRIES];
+ u8 p_802_1p[PRIO_802_1P_ENTRIES];
+
+ u8 br_addr[MAC_ADDR_LEN];
+ u8 other_addr[MAC_ADDR_LEN];
+
+ u8 broad_per;
+ u8 member;
+};
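The two priority tables above are indexed directly from packet headers; a brief sketch of the lookups, assuming tos and tci have already been parsed from the frame:

u8 prio_tos = sw->diffserv[tos >> 2];		/* ToS bits 7..2, 64 entries */
u8 prio_tag = sw->p_802_1p[(tci >> 13) & 7];	/* 802.1p PCP, 8 entries */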
+
+#define TX_RATE_UNIT 10000
+
+/**
+ * struct ksz_port_info - Port information data structure
+ * @state: Connection status of the port.
+ * @tx_rate: Transmit rate; divide by 10000 to get Mbit/s.
+ * @duplex: Duplex mode.
+ * @advertised: Advertised auto-negotiation setting. Used to determine link.
+ * @partner: Auto-negotiation partner setting. Used to determine link.
+ * @port_id: Port index to access actual hardware register.
+ * @pdev: Pointer to OS dependent network device.
+ */
+struct ksz_port_info {
+ uint state;
+ uint tx_rate;
+ u8 duplex;
+ u8 advertised;
+ u8 partner;
+ u8 port_id;
+ void *pdev;
+};
+
+#define MAX_TX_HELD_SIZE 52000
+
+/* Hardware features and bug fixes. */
+#define LINK_INT_WORKING (1 << 0)
+#define SMALL_PACKET_TX_BUG (1 << 1)
+#define HALF_DUPLEX_SIGNAL_BUG (1 << 2)
+#define IPV6_CSUM_GEN_HACK (1 << 3)
+#define RX_HUGE_FRAME (1 << 4)
+#define STP_SUPPORT (1 << 8)
+
+/* Software overrides. */
+#define PAUSE_FLOW_CTRL (1 << 0)
+#define FAST_AGING (1 << 1)
+
+/**
+ * struct ksz_hw - KSZ884X hardware data structure
+ * @io: Virtual address assigned.
+ * @ksz_switch: Pointer to KSZ8842 switch.
+ * @port_info: Port information.
+ * @port_mib: Port MIB information.
+ * @dev_count: Number of network devices this hardware supports.
+ * @dst_ports: Destination ports in switch for transmission.
+ * @id: Hardware ID. Used for display only.
+ * @mib_cnt: Number of MIB counters this hardware has.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @tx_cfg: Cached transmit control settings.
+ * @rx_cfg: Cached receive control settings.
+ * @intr_mask: Current interrupt mask.
+ * @intr_set: Current interrupt set.
+ * @intr_blocked: Interrupt blocked.
+ * @rx_desc_info: Receive descriptor information.
+ * @tx_desc_info: Transmit descriptor information.
+ * @tx_int_cnt: Transmit interrupt count. Used for TX optimization.
+ * @tx_int_mask: Transmit interrupt mask. Used for TX optimization.
+ * @tx_size: Transmit data size. Used for TX optimization.
+ * The maximum is defined by MAX_TX_HELD_SIZE.
+ * @perm_addr: Permanent MAC address.
+ * @override_addr: Overridden MAC address.
+ * @address: Additional MAC address entries.
+ * @addr_list_size: Additional MAC address list size.
+ * @mac_override: Indication that the MAC address has been overridden.
+ * @promiscuous: Counter to keep track of promiscuous mode set.
+ * @all_multi: Counter to keep track of all multicast mode set.
+ * @multi_list: Multicast address entries.
+ * @multi_bits: Cached multicast hash table settings.
+ * @multi_list_size: Multicast address list size.
+ * @enabled: Indication of hardware enabled.
+ * @rx_stop: Indication of receive process stop.
+ * @features: Hardware features to enable.
+ * @overrides: Hardware features to override.
+ * @parent: Pointer to parent, network device private structure.
+ */
+struct ksz_hw {
+ void __iomem *io;
+
+ struct ksz_switch *ksz_switch;
+ struct ksz_port_info port_info[SWITCH_PORT_NUM];
+ struct ksz_port_mib port_mib[TOTAL_PORT_NUM];
+ int dev_count;
+ int dst_ports;
+ int id;
+ int mib_cnt;
+ int mib_port_cnt;
+
+ u32 tx_cfg;
+ u32 rx_cfg;
+ u32 intr_mask;
+ u32 intr_set;
+ uint intr_blocked;
+
+ struct ksz_desc_info rx_desc_info;
+ struct ksz_desc_info tx_desc_info;
+
+ int tx_int_cnt;
+ int tx_int_mask;
+ int tx_size;
+
+ u8 perm_addr[MAC_ADDR_LEN];
+ u8 override_addr[MAC_ADDR_LEN];
+ u8 address[ADDITIONAL_ENTRIES][MAC_ADDR_LEN];
+ u8 addr_list_size;
+ u8 mac_override;
+ u8 promiscuous;
+ u8 all_multi;
+ u8 multi_list[MAX_MULTICAST_LIST][MAC_ADDR_LEN];
+ u8 multi_bits[HW_MULTICAST_SIZE];
+ u8 multi_list_size;
+
+ u8 enabled;
+ u8 rx_stop;
+ u8 reserved2[1];
+
+ uint features;
+ uint overrides;
+
+ void *parent;
+};
+
+enum {
+ PHY_NO_FLOW_CTRL,
+ PHY_FLOW_CTRL,
+ PHY_TX_ONLY,
+ PHY_RX_ONLY
+};
+
+/**
+ * struct ksz_port - Virtual port data structure
+ * @duplex: Duplex mode setting. 1 for half duplex, 2 for full
+ * duplex, and 0 for auto, which normally results in full
+ * duplex.
+ * @speed: Speed setting. 10 for 10 Mbit, 100 for 100 Mbit, and
+ * 0 for auto, which normally results in 100 Mbit.
+ * @force_link: Force link setting. 0 for auto-negotiation, and 1 for
+ * force.
+ * @flow_ctrl: Flow control setting. PHY_NO_FLOW_CTRL for no flow
+ * control, and PHY_FLOW_CTRL for flow control.
+ * PHY_TX_ONLY and PHY_RX_ONLY are not supported for 100
+ * Mbit PHY.
+ * @first_port: Index of first port this port supports.
+ * @mib_port_cnt: Number of ports with MIB counters.
+ * @port_cnt: Number of ports this port supports.
+ * @counter: Port statistics counter.
+ * @hw: Pointer to hardware structure.
+ * @linked: Pointer to port information linked to this port.
+ */
+struct ksz_port {
+ u8 duplex;
+ u8 speed;
+ u8 force_link;
+ u8 flow_ctrl;
+
+ int first_port;
+ int mib_port_cnt;
+ int port_cnt;
+ u64 counter[OID_COUNTER_LAST];
+
+ struct ksz_hw *hw;
+ struct ksz_port_info *linked;
+};
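Tying the field descriptions above together, forcing a port to 100 Mbit full duplex with flow control would be expressed through these fields; a sketch only, since the routine that actually programs the PHY is outside this excerpt:

static void example_force_100fd(struct ksz_port *port)
{
	port->speed = 100;		/* 10, 100, or 0 for auto */
	port->duplex = 2;		/* 1 = half, 2 = full, 0 = auto */
	port->force_link = 1;		/* skip auto-negotiation */
	port->flow_ctrl = PHY_FLOW_CTRL;
}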
+
+/**
+ * struct ksz_timer_info - Timer information data structure
+ * @timer: Kernel timer.
+ * @cnt: Running timer counter.
+ * @max: Number of times to run timer; -1 for infinity.
+ * @period: Timer period in jiffies.
+ */
+struct ksz_timer_info {
+ struct timer_list timer;
+ int cnt;
+ int max;
+ int period;
+};
+
+/**
+ * struct ksz_shared_mem - OS dependent shared memory data structure
+ * @dma_addr: Physical DMA address allocated.
+ * @alloc_size: Allocation size.
+ * @phys: Actual physical address used.
+ * @alloc_virt: Virtual address allocated.
+ * @virt: Actual virtual address used.
+ */
+struct ksz_shared_mem {
+ dma_addr_t dma_addr;
+ uint alloc_size;
+ uint phys;
+ u8 *alloc_virt;
+ u8 *virt;
+};
+
+/**
+ * struct ksz_counter_info - OS dependent counter information data structure
+ * @counter: Wait queue to wakeup after counters are read.
+ * @time: Next time in jiffies to read counter.
+ * @read: Indication of counters read in full or not.
+ */
+struct ksz_counter_info {
+ wait_queue_head_t counter;
+ unsigned long time;
+ int read;
+};
+
+/**
+ * struct dev_info - Network device information data structure
+ * @dev: Pointer to network device.
+ * @pdev: Pointer to PCI device.
+ * @hw: Hardware structure.
+ * @desc_pool: Physical memory used for descriptor pool.
+ * @hwlock: Spinlock serializing access to the hardware.
+ * @lock: Mutex serializing access to the device.
+ * @dev_rcv: Receive process function used.
+ * @last_skb: Socket buffer allocated for descriptor rx fragments.
+ * @skb_index: Buffer index for receiving fragments.
+ * @skb_len: Buffer length for receiving fragments.
+ * @mib_read: Workqueue to read MIB counters.
+ * @mib_timer_info: Timer to read MIB counters.
+ * @counter: Used for MIB reading.
+ * @mtu: Current MTU used. The default is REGULAR_RX_BUF_SIZE;
+ * the maximum is MAX_RX_BUF_SIZE.
+ * @opened: Counter to keep track of device open.
+ * @rx_tasklet: Receive processing tasklet.
+ * @tx_tasklet: Transmit processing tasklet.
+ * @wol_enable: Wake-on-LAN enable set by ethtool.
+ * @wol_support: Wake-on-LAN support used by ethtool.
+ * @pme_wait: Used for KSZ8841 power management.
+ */
+struct dev_info {
+ struct net_device *dev;
+ struct pci_dev *pdev;
+
+ struct ksz_hw hw;
+ struct ksz_shared_mem desc_pool;
+
+ spinlock_t hwlock;
+ struct mutex lock;
+
+ int (*dev_rcv)(struct dev_info *);
+
+ struct sk_buff *last_skb;
+ int skb_index;
+ int skb_len;
+
+ struct work_struct mib_read;
+ struct ksz_timer_info mib_timer_info;
+ struct ksz_counter_info counter[TOTAL_PORT_NUM];
+
+ int mtu;
+ int opened;
+
+ struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tx_tasklet;
+
+ int wol_enable;
+ int wol_support;
+ unsigned long pme_wait;
+};
+
+/**
+ * struct dev_priv - Network device private data structure
+ * @adapter: Adapter device information.
+ * @port: Port information.
+ * @monitor_timer_info: Timer to monitor ports.
+ * @stats: Network statistics.
+ * @proc_sem: Semaphore for proc accessing.
+ * @id: Device ID.
+ * @mii_if: MII interface information.
+ * @advertising: Temporary variable to store advertised settings.
+ * @msg_enable: The message flags controlling driver output.
+ * @media_state: The connection status of the device.
+ * @multicast: The all multicast state of the device.
+ * @promiscuous: The promiscuous state of the device.
+ */
+struct dev_priv {
+ struct dev_info *adapter;
+ struct ksz_port port;
+ struct ksz_timer_info monitor_timer_info;
+ struct net_device_stats stats;
+
+ struct semaphore proc_sem;
+ int id;
+
+ struct mii_if_info mii_if;
+ u32 advertising;
+
+ u32 msg_enable;
+ int media_state;
+ int multicast;
+ int promiscuous;
+};
+
+#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
+#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
+#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
+#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
+
+#define DRV_NAME "KSZ884X PCI"
+#define DEVICE_NAME "KSZ884x PCI"
+#define DRV_VERSION "1.0.0"
+#define DRV_RELDATE "Feb 8, 2010"
+
+static char version[] __devinitdata =
+ "Micrel " DEVICE_NAME " " DRV_VERSION " (" DRV_RELDATE ")";
+
+static u8 DEFAULT_MAC_ADDRESS[] = { 0x00, 0x10, 0xA1, 0x88, 0x42, 0x01 };
+
+/*
+ * Interrupt processing primary routines
+ */
+
+static inline void hw_ack_intr(struct ksz_hw *hw, uint interrupt)
+{
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_STATUS);
+}
+
+static inline void hw_dis_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = hw->intr_mask;
+ writel(0, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_set_intr(struct ksz_hw *hw, uint interrupt)
+{
+ hw->intr_set = interrupt;
+ writel(interrupt, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_ena_intr(struct ksz_hw *hw)
+{
+ hw->intr_blocked = 0;
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_dis_intr_bit(struct ksz_hw *hw, uint bit)
+{
+ hw->intr_mask &= ~(bit);
+}
+
+static inline void hw_turn_off_intr(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr & ~interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw_dis_intr_bit(hw, interrupt);
+}
+
+/**
+ * hw_turn_on_intr - turn on specified interrupts
+ * @hw: The hardware instance.
+ * @bit: The interrupt bits to be on.
+ *
+ * This routine turns on the specified interrupts in the interrupt mask so that
+ * those interrupts will be enabled.
+ */
+static void hw_turn_on_intr(struct ksz_hw *hw, u32 bit)
+{
+ hw->intr_mask |= bit;
+
+ if (!hw->intr_blocked)
+ hw_set_intr(hw, hw->intr_mask);
+}
+
+static inline void hw_ena_intr_bit(struct ksz_hw *hw, uint interrupt)
+{
+ u32 read_intr;
+
+ read_intr = readl(hw->io + KS884X_INTERRUPTS_ENABLE);
+ hw->intr_set = read_intr | interrupt;
+ writel(hw->intr_set, hw->io + KS884X_INTERRUPTS_ENABLE);
+}
+
+static inline void hw_read_intr(struct ksz_hw *hw, uint *status)
+{
+ *status = readl(hw->io + KS884X_INTERRUPTS_STATUS);
+ *status = *status & hw->intr_set;
+}
+
+static inline void hw_restore_intr(struct ksz_hw *hw, uint interrupt)
+{
+ if (interrupt)
+ hw_ena_intr(hw);
+}
+
+/**
+ * hw_block_intr - block hardware interrupts
+ * @hw: The hardware instance.
+ *
+ * This function blocks all interrupts of the hardware and returns the current
+ * interrupt enable mask so that interrupts can be restored later.
+ *
+ * Return the current interrupt enable mask.
+ */
+static uint hw_block_intr(struct ksz_hw *hw)
+{
+ uint interrupt = 0;
+
+ if (!hw->intr_blocked) {
+ hw_dis_intr(hw);
+ interrupt = hw->intr_blocked;
+ }
+ return interrupt;
+}
+
+/*
+ * Hardware descriptor routines
+ */
+
+static inline void reset_desc(struct ksz_desc *desc, union desc_stat status)
+{
+ status.rx.hw_owned = 0;
+ desc->phw->ctrl.data = cpu_to_le32(status.data);
+}
+
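+/*
+ * release_desc() hands a descriptor back to the hardware: the buffer word is
+ * rewritten only when it changed, and the control word carrying the hw_owned
+ * bit is written last.
+ */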
+static inline void release_desc(struct ksz_desc *desc)
+{
+ desc->sw.ctrl.tx.hw_owned = 1;
+ if (desc->sw.buf_size != desc->sw.buf.data) {
+ desc->sw.buf_size = desc->sw.buf.data;
+ desc->phw->buf.data = cpu_to_le32(desc->sw.buf.data);
+ }
+ desc->phw->ctrl.data = cpu_to_le32(desc->sw.ctrl.data);
+}
+
+static void get_rx_pkt(struct ksz_desc_info *info, struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->last];
+ info->last++;
+ info->last &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_RX_MASK;
+}
+
+static inline void set_rx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_rx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.rx.buf_size = len;
+}
+
+static inline void get_tx_pkt(struct ksz_desc_info *info,
+ struct ksz_desc **desc)
+{
+ *desc = &info->ring[info->next];
+ info->next++;
+ info->next &= info->mask;
+ info->avail--;
+ (*desc)->sw.buf.data &= ~KS_DESC_TX_MASK;
+}
+
+static inline void set_tx_buf(struct ksz_desc *desc, u32 addr)
+{
+ desc->phw->addr = cpu_to_le32(addr);
+}
+
+static inline void set_tx_len(struct ksz_desc *desc, u32 len)
+{
+ desc->sw.buf.tx.buf_size = len;
+}
+
+/* Switch functions */
+
+#define TABLE_READ 0x10
+#define TABLE_SEL_SHIFT 2
+
+#define HW_DELAY(hw, reg) \
+ do { \
+ u16 dummy; \
+ dummy = readw(hw->io + reg); \
+ } while (0)
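+
+/*
+ * The indirect table and MIB accessors below share a common pattern: block
+ * hardware interrupts with hw_block_intr(), write the control word to
+ * KS884X_IACR_OFFSET, issue a dummy read through HW_DELAY(), access the
+ * KS884X_ACC_DATA_* registers, and finally restore interrupts with
+ * hw_restore_intr().  hw_restore_intr() only re-enables interrupts when
+ * hw_block_intr() actually disabled them, so the pairing is safe even when
+ * interrupts were already blocked.
+ */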
+
+/**
+ * sw_r_table - read 4 bytes of data from switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data: Buffer to store the read data.
+ *
+ * This routine reads 4 bytes of data from the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void sw_r_table(struct ksz_hw *hw, int table, u16 addr, u32 *data)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = (((table << TABLE_SEL_SHIFT) | TABLE_READ) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ *data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_table_64 - write 8 bytes of data to the switch table
+ * @hw: The hardware instance.
+ * @table: The table selector.
+ * @addr: The address of the table entry.
+ * @data_hi: The high part of data to be written (bit63 ~ bit32).
+ * @data_lo: The low part of data to be written (bit31 ~ bit0).
+ *
+ * This routine writes 8 bytes of data to the table of the switch.
+ * Hardware interrupts are disabled to minimize corruption of written data.
+ */
+static void sw_w_table_64(struct ksz_hw *hw, int table, u16 addr, u32 data_hi,
+ u32 data_lo)
+{
+ u16 ctrl_addr;
+ uint interrupt;
+
+ ctrl_addr = ((table << TABLE_SEL_SHIFT) << 8) | addr;
+
+ interrupt = hw_block_intr(hw);
+
+ writel(data_hi, hw->io + KS884X_ACC_DATA_4_OFFSET);
+ writel(data_lo, hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * sw_w_sta_mac_table - write to the static MAC table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @mac_addr: The MAC address.
+ * @ports: The port members.
+ * @override: The flag to override the port receive/transmit settings.
+ * @valid: The flag to indicate entry is valid.
+ * @use_fid: The flag to indicate the FID is valid.
+ * @fid: The FID value.
+ *
+ * This routine writes an entry of the static MAC table of the switch. It
+ * calls sw_w_table_64() to write the data.
+ */
+static void sw_w_sta_mac_table(struct ksz_hw *hw, u16 addr, u8 *mac_addr,
+ u8 ports, int override, int valid, int use_fid, u8 fid)
+{
+ u32 data_hi;
+ u32 data_lo;
+
+ data_lo = ((u32) mac_addr[2] << 24) |
+ ((u32) mac_addr[3] << 16) |
+ ((u32) mac_addr[4] << 8) | mac_addr[5];
+ data_hi = ((u32) mac_addr[0] << 8) | mac_addr[1];
+ data_hi |= (u32) ports << STATIC_MAC_FWD_PORTS_SHIFT;
+
+ if (override)
+ data_hi |= STATIC_MAC_TABLE_OVERRIDE;
+ if (use_fid) {
+ data_hi |= STATIC_MAC_TABLE_USE_FID;
+ data_hi |= (u32) fid << STATIC_MAC_FID_SHIFT;
+ }
+ if (valid)
+ data_hi |= STATIC_MAC_TABLE_VALID;
+
+ sw_w_table_64(hw, TABLE_STATIC_MAC, addr, data_hi, data_lo);
+}
+
+/**
+ * sw_r_vlan_table - read from the VLAN table
+ * @hw: The hardware instance.
+ * @addr: The address of the table entry.
+ * @vid: Buffer to store the VID.
+ * @fid: Buffer to store the FID.
+ * @member: Buffer to store the port membership.
+ *
+ * This function reads an entry of the VLAN table of the switch. It calls
+ * sw_r_table() to get the data.
+ *
+ * Return 0 if the entry is valid; otherwise -1.
+ */
+static int sw_r_vlan_table(struct ksz_hw *hw, u16 addr, u16 *vid, u8 *fid,
+ u8 *member)
+{
+ u32 data;
+
+ sw_r_table(hw, TABLE_VLAN, addr, &data);
+ if (data & VLAN_TABLE_VALID) {
+ *vid = (u16)(data & VLAN_TABLE_VID);
+ *fid = (u8)((data & VLAN_TABLE_FID) >> VLAN_TABLE_FID_SHIFT);
+ *member = (u8)((data & VLAN_TABLE_MEMBERSHIP) >>
+ VLAN_TABLE_MEMBERSHIP_SHIFT);
+ return 0;
+ }
+ return -1;
+}
+
+/**
+ * port_r_mib_cnt - read MIB counter
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the counter.
+ * @cnt: Buffer to store the counter.
+ *
+ * This routine reads a MIB counter of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_cnt(struct ksz_hw *hw, int port, u16 addr, u64 *cnt)
+{
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int timeout;
+
+ ctrl_addr = addr + PORT_COUNTER_NUM * port;
+
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ) << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+
+ for (timeout = 100; timeout > 0; timeout--) {
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ if (data & MIB_COUNTER_VALID) {
+ if (data & MIB_COUNTER_OVERFLOW)
+ *cnt += MIB_COUNTER_VALUE + 1;
+ *cnt += data & MIB_COUNTER_VALUE;
+ break;
+ }
+ }
+
+ hw_restore_intr(hw, interrupt);
+}
+
+/**
+ * port_r_mib_pkt - read dropped packet counts
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @last: Buffer holding the last raw counter values read; used to compute deltas.
+ * @cnt: Buffer to store the receive and transmit dropped packet counts.
+ *
+ * This routine reads the dropped packet counts of the port.
+ * Hardware interrupts are disabled to minimize corruption of read data.
+ */
+static void port_r_mib_pkt(struct ksz_hw *hw, int port, u32 *last, u64 *cnt)
+{
+ u32 cur;
+ u32 data;
+ u16 ctrl_addr;
+ uint interrupt;
+ int index;
+
+ index = KS_MIB_PACKET_DROPPED_RX_0 + port;
+ do {
+ interrupt = hw_block_intr(hw);
+
+ ctrl_addr = (u16) index;
+ ctrl_addr |= (((TABLE_MIB << TABLE_SEL_SHIFT) | TABLE_READ)
+ << 8);
+ writew(ctrl_addr, hw->io + KS884X_IACR_OFFSET);
+ HW_DELAY(hw, KS884X_IACR_OFFSET);
+ data = readl(hw->io + KS884X_ACC_DATA_0_OFFSET);
+
+ hw_restore_intr(hw, interrupt);
+
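+ /*
+ * The hardware register is narrower than the accumulated counter, so
+ * add only the delta since the last read and allow for a single
+ * wraparound of the raw value.
+ */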
+ data &= MIB_PACKET_DROPPED;
+ cur = *last;
+ if (data != cur) {
+ *last = data;
+ if (data < cur)
+ data += MIB_PACKET_DROPPED + 1;
+ data -= cur;
+ *cnt += data;
+ }
+ ++last;
+ ++cnt;
+ index -= KS_MIB_PACKET_DROPPED_TX -
+ KS_MIB_PACKET_DROPPED_TX_0 + 1;
+ } while (index >= KS_MIB_PACKET_DROPPED_TX_0 + port);
+}
+
+/**
+ * port_r_cnt - read MIB counters periodically
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to read the counters of the port periodically to avoid
+ * counter overflow. The hardware should be acquired first before calling this
+ * routine.
+ *
+ * Return non-zero when not all of the counters have been read.
+ */
+static int port_r_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ while (mib->cnt_ptr < PORT_COUNTER_NUM) {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ }
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
+ mib->cnt_ptr = 0;
+ return 0;
+}
+
+/**
+ * port_init_cnt - initialize MIB counter values
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine is used to initialize all counters to zero if the hardware
+ * cannot do it after reset.
+ */
+static void port_init_cnt(struct ksz_hw *hw, int port)
+{
+ struct ksz_port_mib *mib = &hw->port_mib[port];
+
+ mib->cnt_ptr = 0;
+ if (mib->mib_start < PORT_COUNTER_NUM)
+ do {
+ port_r_mib_cnt(hw, port, mib->cnt_ptr,
+ &mib->counter[mib->cnt_ptr]);
+ ++mib->cnt_ptr;
+ } while (mib->cnt_ptr < PORT_COUNTER_NUM);
+ if (hw->mib_cnt > PORT_COUNTER_NUM)
+ port_r_mib_pkt(hw, port, mib->dropped,
+ &mib->counter[PORT_COUNTER_NUM]);
+ memset((void *) mib->counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ mib->cnt_ptr = 0;
+}
+
+/*
+ * Port functions
+ */
+
+/**
+ * port_chk - check port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the port register are set
+ * or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int port_chk(struct ksz_hw *hw, int port, int offset, u16 bits)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * port_cfg - set port register bits
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This routine sets or resets the specified bits of the port register.
+ */
+static void port_cfg(struct ksz_hw *hw, int port, int offset, u16 bits,
+ int set)
+{
+ u32 addr;
+ u16 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_chk_shift - check port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ *
+ * This function checks whether the specified port is set in the register or
+ * not.
+ *
+ * Return 0 if the port is not set.
+ */
+static int port_chk_shift(struct ksz_hw *hw, int port, u32 addr, int shift)
+{
+ u16 data;
+ u16 bit = 1 << port;
+
+ data = readw(hw->io + addr);
+ data >>= shift;
+ return (data & bit) == bit;
+}
+
+/**
+ * port_cfg_shift - set port bit
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @addr: The address of the register.
+ * @shift: Number of bits to shift.
+ * @set: The flag indicating whether the port is to be set or not.
+ *
+ * This routine sets or resets the specified port in the register.
+ */
+static void port_cfg_shift(struct ksz_hw *hw, int port, u32 addr, int shift,
+ int set)
+{
+ u16 data;
+ u16 bits = 1 << port;
+
+ data = readw(hw->io + addr);
+ bits <<= shift;
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * port_r8 - read byte from port register
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a byte from the port register.
+ */
+static void port_r8(struct ksz_hw *hw, int port, int offset, u8 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readb(hw->io + addr);
+}
+
+/**
+ * port_r16 - read word from port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Buffer to store the data.
+ *
+ * This routine reads a word from the port register.
+ */
+static void port_r16(struct ksz_hw *hw, int port, int offset, u16 *data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ *data = readw(hw->io + addr);
+}
+
+/**
+ * port_w16 - write word to port register.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @offset: The offset of the port register.
+ * @data: Data to write.
+ *
+ * This routine writes a word to the port register.
+ */
+static void port_w16(struct ksz_hw *hw, int port, int offset, u16 data)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += offset;
+ writew(data, hw->io + addr);
+}
+
+/**
+ * sw_chk - check switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to check.
+ *
+ * This function checks whether the specified bits of the switch register are
+ * set or not.
+ *
+ * Return 0 if the bits are not set.
+ */
+static int sw_chk(struct ksz_hw *hw, u32 addr, u16 bits)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ return (data & bits) == bits;
+}
+
+/**
+ * sw_cfg - set switch register bits
+ * @hw: The hardware instance.
+ * @addr: The address of the switch register.
+ * @bits: The data bits to set.
+ * @set: The flag indicating whether the bits are to be set or not.
+ *
+ * This function sets or resets the specified bits of the switch register.
+ */
+static void sw_cfg(struct ksz_hw *hw, u32 addr, u16 bits, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + addr);
+ if (set)
+ data |= bits;
+ else
+ data &= ~bits;
+ writew(data, hw->io + addr);
+}
+
+/* Bandwidth */
+
+static inline void port_cfg_broad_storm(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM, set);
+}
+
+static inline int port_chk_broad_storm(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_BROADCAST_STORM);
+}
+
+/* The driver sets the switch broadcast storm protection at a 10% rate. */
+#define BROADCAST_STORM_PROTECTION_RATE 10
+
+/* 148,800 frames/sec * 67 ms ~= 9969 frames, the reference value at 100%. */
+#define BROADCAST_STORM_VALUE 9969
+
+/**
+ * sw_cfg_broad_storm - configure broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ */
+static void sw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ u16 data;
+ u32 value = ((u32) BROADCAST_STORM_VALUE * (u32) percent / 100);
+
+ if (value > BROADCAST_STORM_RATE)
+ value = BROADCAST_STORM_RATE;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data &= ~(BROADCAST_STORM_RATE_LO | BROADCAST_STORM_RATE_HI);
+ data |= ((value & 0x00FF) << 8) | ((value & 0xFF00) >> 8);
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+}
+
+/**
+ * sw_get_broad_storm - get broadcast storm threshold
+ * @hw: The hardware instance.
+ * @percent: Buffer to store the broadcast storm threshold percentage.
+ *
+ * This routine retrieves the broadcast storm threshold of the switch.
+ */
+static void sw_get_broad_storm(struct ksz_hw *hw, u8 *percent)
+{
+ int num;
+ u16 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ num = (data & BROADCAST_STORM_RATE_HI);
+ num <<= 8;
+ num |= (data & BROADCAST_STORM_RATE_LO) >> 8;
+ num = (num * 100 + BROADCAST_STORM_VALUE / 2) / BROADCAST_STORM_VALUE;
+ *percent = (u8) num;
+}
+
+/**
+ * sw_dis_broad_storm - disable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the broadcast storm limit function of the switch.
+ */
+static void sw_dis_broad_storm(struct ksz_hw *hw, int port)
+{
+ port_cfg_broad_storm(hw, port, 0);
+}
+
+/**
+ * sw_ena_broad_storm - enable broadcast storm
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine enables the broadcast storm limit function of the switch.
+ */
+static void sw_ena_broad_storm(struct ksz_hw *hw, int port)
+{
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ port_cfg_broad_storm(hw, port, 1);
+}
+
+/**
+ * sw_init_broad_storm - initialize broadcast storm
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the broadcast storm limit function of the switch.
+ */
+static void sw_init_broad_storm(struct ksz_hw *hw)
+{
+ int port;
+
+ hw->ksz_switch->broad_per = 1;
+ sw_cfg_broad_storm(hw, hw->ksz_switch->broad_per);
+ for (port = 0; port < TOTAL_PORT_NUM; port++)
+ sw_dis_broad_storm(hw, port);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, MULTICAST_STORM_DISABLE, 1);
+}
+
+/**
+ * hw_cfg_broad_storm - configure broadcast storm
+ * @hw: The hardware instance.
+ * @percent: Broadcast storm threshold in percent of transmit rate.
+ *
+ * This routine configures the broadcast storm threshold of the switch.
+ * It is called by user functions. The hardware should be acquired first.
+ */
+static void hw_cfg_broad_storm(struct ksz_hw *hw, u8 percent)
+{
+ if (percent > 100)
+ percent = 100;
+
+ sw_cfg_broad_storm(hw, percent);
+ sw_get_broad_storm(hw, &percent);
+ hw->ksz_switch->broad_per = percent;
+}
+
+/**
+ * sw_dis_prio_rate - disable switch priority rate
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the priority rate function of the switch.
+ */
+static void sw_dis_prio_rate(struct ksz_hw *hw, int port)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_IN_RATE_OFFSET;
+ writel(0, hw->io + addr);
+}
+
+/**
+ * sw_init_prio_rate - initialize switch priority rate
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the priority rate function of the switch.
+ */
+static void sw_init_prio_rate(struct ksz_hw *hw)
+{
+ int port;
+ int prio;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ for (prio = 0; prio < PRIO_QUEUES; prio++) {
+ sw->port_cfg[port].rx_rate[prio] =
+ sw->port_cfg[port].tx_rate[prio] = 0;
+ }
+ sw_dis_prio_rate(hw, port);
+ }
+}
+
+/* Communication */
+
+static inline void port_cfg_back_pressure(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE, set);
+}
+
+static inline void port_cfg_force_flow_ctrl(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL, set);
+}
+
+static inline int port_chk_back_pressure(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_BACK_PRESSURE);
+}
+
+static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_FORCE_FLOW_CTRL);
+}
+
+/* Spanning Tree */
+
+static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
+}
+
+static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_RX_ENABLE, set);
+}
+
+static inline void port_cfg_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_TX_ENABLE, set);
+}
+
+static inline void sw_cfg_fast_aging(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET, SWITCH_FAST_AGING, set);
+}
+
+static inline void sw_flush_dyn_mac_table(struct ksz_hw *hw)
+{
+ if (!(hw->overrides & FAST_AGING)) {
+ sw_cfg_fast_aging(hw, 1);
+ mdelay(1);
+ sw_cfg_fast_aging(hw, 0);
+ }
+}
+
+/* VLAN */
+
+static inline void port_cfg_ins_tag(struct ksz_hw *hw, int p, int insert)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG, insert);
+}
+
+static inline void port_cfg_rmv_tag(struct ksz_hw *hw, int p, int remove)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG, remove);
+}
+
+static inline int port_chk_ins_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_INSERT_TAG);
+}
+
+static inline int port_chk_rmv_tag(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_REMOVE_TAG);
+}
+
+static inline void port_cfg_dis_non_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID, set);
+}
+
+static inline void port_cfg_in_filter(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER, set);
+}
+
+static inline int port_chk_dis_non_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_DISCARD_NON_VID);
+}
+
+static inline int port_chk_in_filter(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_INGRESS_VLAN_FILTER);
+}
+
+/* Mirroring */
+
+static inline void port_cfg_mirror_sniffer(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_SNIFFER, set);
+}
+
+static inline void port_cfg_mirror_rx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_RX, set);
+}
+
+static inline void port_cfg_mirror_tx(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_MIRROR_TX, set);
+}
+
+static inline void sw_cfg_mirror_rx_tx(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_2_OFFSET, SWITCH_MIRROR_RX_TX, set);
+}
+
+static void sw_init_mirror(struct ksz_hw *hw)
+{
+ int port;
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_cfg_mirror_sniffer(hw, port, 0);
+ port_cfg_mirror_rx(hw, port, 0);
+ port_cfg_mirror_tx(hw, port, 0);
+ }
+ sw_cfg_mirror_rx_tx(hw, 0);
+}
+
+static inline void sw_cfg_unk_def_deliver(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE, set);
+}
+
+static inline int sw_cfg_chk_unk_def_deliver(struct ksz_hw *hw)
+{
+ return sw_chk(hw, KS8842_SWITCH_CTRL_7_OFFSET,
+ SWITCH_UNK_DEF_PORT_ENABLE);
+}
+
+static inline void sw_cfg_unk_def_port(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0, set);
+}
+
+static inline int sw_chk_unk_def_port(struct ksz_hw *hw, int port)
+{
+ return port_chk_shift(hw, port, KS8842_SWITCH_CTRL_7_OFFSET, 0);
+}
+
+/* Priority */
+
+static inline void port_cfg_diffserv(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE, set);
+}
+
+static inline void port_cfg_802_1p(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE, set);
+}
+
+static inline void port_cfg_replace_vid(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING, set);
+}
+
+static inline void port_cfg_prio(struct ksz_hw *hw, int p, int set)
+{
+ port_cfg(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE, set);
+}
+
+static inline int port_chk_diffserv(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_DIFFSERV_ENABLE);
+}
+
+static inline int port_chk_802_1p(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_802_1P_ENABLE);
+}
+
+static inline int port_chk_replace_vid(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_2_OFFSET, PORT_USER_PRIORITY_CEILING);
+}
+
+static inline int port_chk_prio(struct ksz_hw *hw, int p)
+{
+ return port_chk(hw, p,
+ KS8842_PORT_CTRL_1_OFFSET, PORT_PRIO_QUEUE_ENABLE);
+}
+
+/**
+ * sw_dis_diffserv - disable switch DiffServ priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the DiffServ priority function of the switch.
+ */
+static void sw_dis_diffserv(struct ksz_hw *hw, int port)
+{
+ port_cfg_diffserv(hw, port, 0);
+}
+
+/**
+ * sw_dis_802_1p - disable switch 802.1p priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the 802.1p priority function of the switch.
+ */
+static void sw_dis_802_1p(struct ksz_hw *hw, int port)
+{
+ port_cfg_802_1p(hw, port, 0);
+}
+
+/**
+ * sw_cfg_replace_null_vid - enable or disable null VID replacement
+ * @hw: The hardware instance.
+ * @set: The flag to disable or enable.
+ *
+ * This routine enables or disables the null VID replacement function of the
+ * switch.
+ */
+static void sw_cfg_replace_null_vid(struct ksz_hw *hw, int set)
+{
+ sw_cfg(hw, KS8842_SWITCH_CTRL_3_OFFSET, SWITCH_REPLACE_NULL_VID, set);
+}
+
+/**
+ * sw_cfg_replace_vid - enable switch 802.1p priority re-mapping
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @set: The flag to disable or enable.
+ *
+ * This routine enables the 802.1p priority re-mapping function of the switch.
+ * It allows the 802.1p priority field to be replaced with the port's default
+ * tag priority value when the ingress packet's 802.1p priority is higher than
+ * the port's default tag priority.
+ */
+static void sw_cfg_replace_vid(struct ksz_hw *hw, int port, int set)
+{
+ port_cfg_replace_vid(hw, port, set);
+}
+
+/**
+ * sw_cfg_port_based - configure switch port based priority
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @prio: The priority to set.
+ *
+ * This routine configures the port based priority of the switch.
+ */
+static void sw_cfg_port_based(struct ksz_hw *hw, int port, u8 prio)
+{
+ u16 data;
+
+ if (prio > PORT_BASED_PRIORITY_BASE)
+ prio = PORT_BASED_PRIORITY_BASE;
+
+ hw->ksz_switch->port_cfg[port].port_prio = prio;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_1_OFFSET, &data);
+ data &= ~PORT_BASED_PRIORITY_MASK;
+ data |= prio << PORT_BASED_PRIORITY_SHIFT;
+ port_w16(hw, port, KS8842_PORT_CTRL_1_OFFSET, data);
+}
+
+/**
+ * sw_dis_multi_queue - disable transmit multiple queues
+ * @hw: The hardware instance.
+ * @port: The port index.
+ *
+ * This routine disables the transmit multiple queues selection of the switch
+ * port. Only a single transmit queue is used on the port.
+ */
+static void sw_dis_multi_queue(struct ksz_hw *hw, int port)
+{
+ port_cfg_prio(hw, port, 0);
+}
+
+/**
+ * sw_init_prio - initialize switch priority
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the switch QoS priority functions.
+ */
+static void sw_init_prio(struct ksz_hw *hw)
+{
+ int port;
+ int tos;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /*
+ * Initialize all the 802.1p tag priority values to be assigned to the
+ * different priority queues.
+ */
+ sw->p_802_1p[0] = 0;
+ sw->p_802_1p[1] = 0;
+ sw->p_802_1p[2] = 1;
+ sw->p_802_1p[3] = 1;
+ sw->p_802_1p[4] = 2;
+ sw->p_802_1p[5] = 2;
+ sw->p_802_1p[6] = 3;
+ sw->p_802_1p[7] = 3;
+
+ /*
+ * Initialize all the DiffServ priority values to be assigned to priority
+ * queue 0.
+ */
+ for (tos = 0; tos < DIFFSERV_ENTRIES; tos++)
+ sw->diffserv[tos] = 0;
+
+ /* All QoS functions disabled. */
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ sw_dis_multi_queue(hw, port);
+ sw_dis_diffserv(hw, port);
+ sw_dis_802_1p(hw, port);
+ sw_cfg_replace_vid(hw, port, 0);
+
+ sw->port_cfg[port].port_prio = 0;
+ sw_cfg_port_based(hw, port, sw->port_cfg[port].port_prio);
+ }
+ sw_cfg_replace_null_vid(hw, 0);
+}
+
+/**
+ * port_get_def_vid - get port default VID.
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @vid: Buffer to store the VID.
+ *
+ * This routine retrieves the default VID of the port.
+ */
+static void port_get_def_vid(struct ksz_hw *hw, int port, u16 *vid)
+{
+ u32 addr;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_VID_OFFSET;
+ *vid = readw(hw->io + addr);
+}
+
+/**
+ * sw_init_vlan - initialize switch VLAN
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the VLAN function of the switch.
+ */
+static void sw_init_vlan(struct ksz_hw *hw)
+{
+ int port;
+ int entry;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* Read 16 VLAN entries from device's VLAN table. */
+ for (entry = 0; entry < VLAN_TABLE_ENTRIES; entry++) {
+ sw_r_vlan_table(hw, entry,
+ &sw->vlan_table[entry].vid,
+ &sw->vlan_table[entry].fid,
+ &sw->vlan_table[entry].member);
+ }
+
+ for (port = 0; port < TOTAL_PORT_NUM; port++) {
+ port_get_def_vid(hw, port, &sw->port_cfg[port].vid);
+ sw->port_cfg[port].member = PORT_MASK;
+ }
+}
+
+/**
+ * sw_cfg_port_base_vlan - configure port-based VLAN membership
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @member: The port-based VLAN membership.
+ *
+ * This routine configures the port-based VLAN membership of the port.
+ */
+static void sw_cfg_port_base_vlan(struct ksz_hw *hw, int port, u8 member)
+{
+ u32 addr;
+ u8 data;
+
+ PORT_CTRL_ADDR(port, addr);
+ addr += KS8842_PORT_CTRL_2_OFFSET;
+
+ data = readb(hw->io + addr);
+ data &= ~PORT_VLAN_MEMBERSHIP;
+ data |= (member & PORT_MASK);
+ writeb(data, hw->io + addr);
+
+ hw->ksz_switch->port_cfg[port].member = member;
+}
+
+/**
+ * sw_get_addr - get the switch MAC address.
+ * @hw: The hardware instance.
+ * @mac_addr: Buffer to store the MAC address.
+ *
+ * This function retrieves the MAC address of the switch.
+ */
+static inline void sw_get_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ mac_addr[i] = readb(hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ mac_addr[1 + i] = readb(hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_addr - configure switch MAC address
+ * @hw: The hardware instance.
+ * @mac_addr: The MAC address.
+ *
+ * This function configures the MAC address of the switch.
+ */
+static void sw_set_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i += 2) {
+ writeb(mac_addr[i], hw->io + KS8842_MAC_ADDR_0_OFFSET + i);
+ writeb(mac_addr[1 + i], hw->io + KS8842_MAC_ADDR_1_OFFSET + i);
+ }
+}
+
+/**
+ * sw_set_global_ctrl - set switch global control
+ * @hw: The hardware instance.
+ *
+ * This routine sets the global control of the switch function.
+ */
+static void sw_set_global_ctrl(struct ksz_hw *hw)
+{
+ u16 data;
+
+ /* Enable switch MII flow control. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+ data |= SWITCH_FLOW_CTRL;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_3_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ /* Enable aggressive back off algorithm in half duplex mode. */
+ data |= SWITCH_AGGR_BACKOFF;
+
+ /* Enable automatic fast aging when a link change is detected. */
+ data |= SWITCH_AGING_ENABLE;
+ data |= SWITCH_LINK_AUTO_AGING;
+
+ if (hw->overrides & FAST_AGING)
+ data |= SWITCH_FAST_AGING;
+ else
+ data &= ~SWITCH_FAST_AGING;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_1_OFFSET);
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+
+ /* Enable no excessive collision drop. */
+ data |= NO_EXC_COLLISION_DROP;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+}
+
+enum {
+ STP_STATE_DISABLED = 0,
+ STP_STATE_LISTENING,
+ STP_STATE_LEARNING,
+ STP_STATE_FORWARDING,
+ STP_STATE_BLOCKED,
+ STP_STATE_SIMPLE
+};
+
+/**
+ * port_set_stp_state - configure port spanning tree state
+ * @hw: The hardware instance.
+ * @port: The port index.
+ * @state: The spanning tree state.
+ *
+ * This routine configures the spanning tree state of the port.
+ */
+static void port_set_stp_state(struct ksz_hw *hw, int port, int state)
+{
+ u16 data;
+
+ port_r16(hw, port, KS8842_PORT_CTRL_2_OFFSET, &data);
+ switch (state) {
+ case STP_STATE_DISABLED:
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LISTENING:
+/*
+ * No need to turn on transmit because of port direct mode.
+ * Turning on receive is required if the static MAC table is not set up.
+ */
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_LEARNING:
+ data &= ~PORT_TX_ENABLE;
+ data |= PORT_RX_ENABLE;
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_FORWARDING:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data &= ~PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_BLOCKED:
+/*
+ * Need to set up the static MAC table with override to keep receiving BPDU
+ * messages. See sw_init_stp routine.
+ */
+ data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ case STP_STATE_SIMPLE:
+ data |= (PORT_TX_ENABLE | PORT_RX_ENABLE);
+ data |= PORT_LEARN_DISABLE;
+ break;
+ }
+ port_w16(hw, port, KS8842_PORT_CTRL_2_OFFSET, data);
+ hw->ksz_switch->port_cfg[port].stp_state = state;
+}
+
+#define STP_ENTRY 0
+#define BROADCAST_ENTRY 1
+#define BRIDGE_ADDR_ENTRY 2
+#define IPV6_ADDR_ENTRY 3
+
+/**
+ * sw_clr_sta_mac_table - clear static MAC table
+ * @hw: The hardware instance.
+ *
+ * This routine clears the static MAC table.
+ */
+static void sw_clr_sta_mac_table(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = 0; i < STATIC_MAC_TABLE_ENTRIES; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, 0,
+ entry->use_fid, entry->fid);
+ }
+}
+
+/**
+ * sw_init_stp - initialize switch spanning tree support
+ * @hw: The hardware instance.
+ *
+ * This routine initializes the spanning tree support of the switch.
+ */
+static void sw_init_stp(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+
+ entry = &hw->ksz_switch->mac_table[STP_ENTRY];
+ entry->mac_addr[0] = 0x01;
+ entry->mac_addr[1] = 0x80;
+ entry->mac_addr[2] = 0xC2;
+ entry->mac_addr[3] = 0x00;
+ entry->mac_addr[4] = 0x00;
+ entry->mac_addr[5] = 0x00;
+ entry->ports = HOST_MASK;
+ entry->override = 1;
+ entry->valid = 1;
+ sw_w_sta_mac_table(hw, STP_ENTRY,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+}
+
+/**
+ * sw_block_addr - block certain packets from the host port
+ * @hw: The hardware instance.
+ *
+ * This routine blocks certain packets from reaching the host port.
+ */
+static void sw_block_addr(struct ksz_hw *hw)
+{
+ struct ksz_mac_table *entry;
+ int i;
+
+ for (i = BROADCAST_ENTRY; i <= IPV6_ADDR_ENTRY; i++) {
+ entry = &hw->ksz_switch->mac_table[i];
+ entry->valid = 0;
+ sw_w_sta_mac_table(hw, i,
+ entry->mac_addr, entry->ports,
+ entry->override, entry->valid,
+ entry->use_fid, entry->fid);
+ }
+}
+
+#define PHY_LINK_SUPPORT \
+ (PHY_AUTO_NEG_ASYM_PAUSE | \
+ PHY_AUTO_NEG_SYM_PAUSE | \
+ PHY_AUTO_NEG_100BT4 | \
+ PHY_AUTO_NEG_100BTX_FD | \
+ PHY_AUTO_NEG_100BTX | \
+ PHY_AUTO_NEG_10BT_FD | \
+ PHY_AUTO_NEG_10BT)
+
+static inline void hw_r_phy_ctrl(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_ctrl(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_stat(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_STATUS_OFFSET);
+}
+
+static inline void hw_r_phy_auto_neg(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_w_phy_auto_neg(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_AUTO_NEG_OFFSET);
+}
+
+static inline void hw_r_phy_rem_cap(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_REMOTE_CAP_OFFSET);
+}
+
+static inline void hw_r_phy_crossover(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_crossover(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_polarity(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_w_phy_polarity(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_PHY_CTRL_OFFSET);
+}
+
+static inline void hw_r_phy_link_md(struct ksz_hw *hw, int phy, u16 *data)
+{
+ *data = readw(hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+static inline void hw_w_phy_link_md(struct ksz_hw *hw, int phy, u16 data)
+{
+ writew(data, hw->io + phy + KS884X_PHY_LINK_MD_OFFSET);
+}
+
+/**
+ * hw_r_phy - read data from PHY register
+ * @hw: The hardware instance.
+ * @port: Port to read.
+ * @reg: PHY register to read.
+ * @val: Buffer to store the read data.
+ *
+ * This routine reads data from the PHY register.
+ */
+static void hw_r_phy(struct ksz_hw *hw, int port, u16 reg, u16 *val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ *val = readw(hw->io + phy);
+}
+
+/**
+ * port_w_phy - write data to PHY register
+ * @hw: The hardware instance.
+ * @port: Port to write.
+ * @reg: PHY register to write.
+ * @val: Word data to write.
+ *
+ * This routine writes data to the PHY register.
+ */
+static void hw_w_phy(struct ksz_hw *hw, int port, u16 reg, u16 val)
+{
+ int phy;
+
+ phy = KS884X_PHY_1_CTRL_OFFSET + port * PHY_CTRL_INTERVAL + reg;
+ writew(val, hw->io + phy);
+}
+
+/*
+ * EEPROM access functions
+ */
+
+#define AT93C_CODE 0
+#define AT93C_WR_OFF 0x00
+#define AT93C_WR_ALL 0x10
+#define AT93C_ER_ALL 0x20
+#define AT93C_WR_ON 0x30
+
+#define AT93C_WRITE 1
+#define AT93C_READ 2
+#define AT93C_ERASE 3
+
+#define EEPROM_DELAY 4
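+
+/*
+ * The AT93C46 EEPROM is accessed by bit-banging GPIO lines in the EEPROM
+ * control register: spi_reg() clocks out a start bit, a 2-bit opcode and a
+ * 6-bit address, while spi_r() and spi_w() transfer 16 bits of data.
+ */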
+
+static inline void drop_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data &= ~gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline void raise_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ data |= gpio;
+ writew(data, hw->io + KS884X_EEPROM_CTRL_OFFSET);
+}
+
+static inline u8 state_gpio(struct ksz_hw *hw, u8 gpio)
+{
+ u16 data;
+
+ data = readw(hw->io + KS884X_EEPROM_CTRL_OFFSET);
+ return (u8)(data & gpio);
+}
+
+static void eeprom_clk(struct ksz_hw *hw)
+{
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+}
+
+static u16 spi_r(struct ksz_hw *hw)
+{
+ int i;
+ u16 temp = 0;
+
+ for (i = 15; i >= 0; i--) {
+ raise_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+
+ temp |= (state_gpio(hw, EEPROM_DATA_IN)) ? 1 << i : 0;
+
+ drop_gpio(hw, EEPROM_SERIAL_CLOCK);
+ udelay(EEPROM_DELAY);
+ }
+ return temp;
+}
+
+static void spi_w(struct ksz_hw *hw, u16 data)
+{
+ int i;
+
+ for (i = 15; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
+static void spi_reg(struct ksz_hw *hw, u8 data, u8 reg)
+{
+ int i;
+
+ /* Initial start bit */
+ raise_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+
+ /* AT93C operation */
+ for (i = 1; i >= 0; i--) {
+ (data & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+
+ /* Address location */
+ for (i = 5; i >= 0; i--) {
+ (reg & (0x01 << i)) ? raise_gpio(hw, EEPROM_DATA_OUT) :
+ drop_gpio(hw, EEPROM_DATA_OUT);
+ eeprom_clk(hw);
+ }
+}
+
+#define EEPROM_DATA_RESERVED 0
+#define EEPROM_DATA_MAC_ADDR_0 1
+#define EEPROM_DATA_MAC_ADDR_1 2
+#define EEPROM_DATA_MAC_ADDR_2 3
+#define EEPROM_DATA_SUBSYS_ID 4
+#define EEPROM_DATA_SUBSYS_VEN_ID 5
+#define EEPROM_DATA_PM_CAP 6
+
+/* User defined EEPROM data */
+#define EEPROM_DATA_OTHER_MAC_ADDR 9
+
+/**
+ * eeprom_read - read from AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ *
+ * This function reads a word from the AT93C46 EEPROM.
+ *
+ * Return the data value.
+ */
+static u16 eeprom_read(struct ksz_hw *hw, u8 reg)
+{
+ u16 data;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ spi_reg(hw, AT93C_READ, reg);
+ data = spi_r(hw);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ return data;
+}
+
+/**
+ * eeprom_write - write to AT93C46 EEPROM
+ * @hw: The hardware instance.
+ * @reg: The register offset.
+ * @data: The data value.
+ *
+ * This procedure writes a word to the AT93C46 EEPROM.
+ */
+static void eeprom_write(struct ksz_hw *hw, u8 reg, u16 data)
+{
+ int timeout;
+
+ raise_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+
+ /* Enable write. */
+ spi_reg(hw, AT93C_CODE, AT93C_WR_ON);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Erase the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_ERASE, reg);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Check operation complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Write the register. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_WRITE, reg);
+ spi_w(hw, data);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Check operation complete. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ timeout = 8;
+ mdelay(2);
+ do {
+ mdelay(1);
+ } while (!state_gpio(hw, EEPROM_DATA_IN) && --timeout);
+ drop_gpio(hw, EEPROM_CHIP_SELECT);
+ udelay(1);
+
+ /* Disable write. */
+ raise_gpio(hw, EEPROM_CHIP_SELECT);
+ spi_reg(hw, AT93C_CODE, AT93C_WR_OFF);
+
+ drop_gpio(hw, EEPROM_ACCESS_ENABLE | EEPROM_CHIP_SELECT);
+}
+
+/*
+ * Link detection routines
+ */
+
+static u16 advertised_flow_ctrl(struct ksz_port *port, u16 ctrl)
+{
+ ctrl &= ~PORT_AUTO_NEG_SYM_PAUSE;
+ switch (port->flow_ctrl) {
+ case PHY_FLOW_CTRL:
+ ctrl |= PORT_AUTO_NEG_SYM_PAUSE;
+ break;
+ /* Not supported. */
+ case PHY_TX_ONLY:
+ case PHY_RX_ONLY:
+ default:
+ break;
+ }
+ return ctrl;
+}
+
+static void set_flow_ctrl(struct ksz_hw *hw, int rx, int tx)
+{
+ u32 rx_cfg;
+ u32 tx_cfg;
+
+ rx_cfg = hw->rx_cfg;
+ tx_cfg = hw->tx_cfg;
+ if (rx)
+ hw->rx_cfg |= DMA_RX_FLOW_ENABLE;
+ else
+ hw->rx_cfg &= ~DMA_RX_FLOW_ENABLE;
+ if (tx)
+ hw->tx_cfg |= DMA_TX_FLOW_ENABLE;
+ else
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled) {
+ if (rx_cfg != hw->rx_cfg)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+ if (tx_cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
+static void determine_flow_ctrl(struct ksz_hw *hw, struct ksz_port *port,
+ u16 local, u16 remote)
+{
+ int rx;
+ int tx;
+
+ if (hw->overrides & PAUSE_FLOW_CTRL)
+ return;
+
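+ /*
+ * Resolve pause usage from the local and remote auto-negotiation
+ * advertisements: symmetric pause on both sides enables flow control
+ * in both directions, while asymmetric combinations enable it in one
+ * direction only.
+ */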
+ rx = tx = 0;
+ if (port->force_link)
+ rx = tx = 1;
+ if (remote & PHY_AUTO_NEG_SYM_PAUSE) {
+ if (local & PHY_AUTO_NEG_SYM_PAUSE) {
+ rx = tx = 1;
+ } else if ((remote & PHY_AUTO_NEG_ASYM_PAUSE) &&
+ (local & PHY_AUTO_NEG_PAUSE) ==
+ PHY_AUTO_NEG_ASYM_PAUSE) {
+ tx = 1;
+ }
+ } else if (remote & PHY_AUTO_NEG_ASYM_PAUSE) {
+ if ((local & PHY_AUTO_NEG_PAUSE) == PHY_AUTO_NEG_PAUSE)
+ rx = 1;
+ }
+ if (!hw->ksz_switch)
+ set_flow_ctrl(hw, rx, tx);
+}
+
+static inline void port_cfg_change(struct ksz_hw *hw, struct ksz_port *port,
+ struct ksz_port_info *info, u16 link_status)
+{
+ if ((hw->features & HALF_DUPLEX_SIGNAL_BUG) &&
+ !(hw->overrides & PAUSE_FLOW_CTRL)) {
+ u32 cfg = hw->tx_cfg;
+
+ /* Disable flow control in the half duplex mode. */
+ if (1 == info->duplex)
+ hw->tx_cfg &= ~DMA_TX_FLOW_ENABLE;
+ if (hw->enabled && cfg != hw->tx_cfg)
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+ }
+}
+
+/**
+ * port_get_link_speed - get current link status
+ * @port: The port instance.
+ *
+ * This routine reads PHY registers to determine the current link status of the
+ * switch ports.
+ */
+static void port_get_link_speed(struct ksz_port *port)
+{
+ uint interrupt;
+ struct ksz_port_info *info;
+ struct ksz_port_info *linked = NULL;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 status;
+ u8 local;
+ u8 remote;
+ int i;
+ int p;
+ int change = 0;
+
+ interrupt = hw_block_intr(hw);
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r16(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ /*
+ * Link status is changing all the time even when there is no
+ * cable connection!
+ */
+ remote = status & (PORT_AUTO_NEG_COMPLETE |
+ PORT_STATUS_LINK_GOOD);
+ local = (u8) data;
+
+ /* No change to status. */
+ if (local == info->advertised && remote == info->partner)
+ continue;
+
+ info->advertised = local;
+ info->partner = remote;
+ if (status & PORT_STATUS_LINK_GOOD) {
+
+ /* Remember the first linked port. */
+ if (!linked)
+ linked = info;
+
+ info->tx_rate = 10 * TX_RATE_UNIT;
+ if (status & PORT_STATUS_SPEED_100MBIT)
+ info->tx_rate = 100 * TX_RATE_UNIT;
+
+ info->duplex = 1;
+ if (status & PORT_STATUS_FULL_DUPLEX)
+ info->duplex = 2;
+
+ if (media_connected != info->state) {
+ hw_r_phy(hw, p, KS884X_PHY_AUTO_NEG_OFFSET,
+ &data);
+ hw_r_phy(hw, p, KS884X_PHY_REMOTE_CAP_OFFSET,
+ &status);
+ determine_flow_ctrl(hw, port, data, status);
+ if (hw->ksz_switch) {
+ port_cfg_back_pressure(hw, p,
+ (1 == info->duplex));
+ }
+ change |= 1 << i;
+ port_cfg_change(hw, port, info, status);
+ }
+ info->state = media_connected;
+ } else {
+ if (media_disconnected != info->state) {
+ change |= 1 << i;
+
+ /* Indicate the link just went down. */
+ hw->port_mib[p].link_down = 1;
+ }
+ info->state = media_disconnected;
+ }
+ hw->port_mib[p].state = (u8) info->state;
+ }
+
+ if (linked && media_disconnected == port->linked->state)
+ port->linked = linked;
+
+ hw_restore_intr(hw, interrupt);
+}
+
+#define PHY_RESET_TIMEOUT 10
+
+/**
+ * port_set_link_speed - set port speed
+ * @port: The port instance.
+ *
+ * This routine sets the link speed of the switch ports.
+ */
+static void port_set_link_speed(struct ksz_port *port)
+{
+ struct ksz_port_info *info;
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ u16 cfg;
+ u8 status;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ info = &hw->port_info[p];
+
+ port_r16(hw, p, KS884X_PORT_CTRL_4_OFFSET, &data);
+ port_r8(hw, p, KS884X_PORT_STATUS_OFFSET, &status);
+
+ cfg = 0;
+ if (status & PORT_STATUS_LINK_GOOD)
+ cfg = data;
+
+ data |= PORT_AUTO_NEG_ENABLE;
+ data = advertised_flow_ctrl(port, data);
+
+ data |= PORT_AUTO_NEG_100BTX_FD | PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT_FD | PORT_AUTO_NEG_10BT;
+
+ /* Check if manual configuration is specified by the user. */
+ if (port->speed || port->duplex) {
+ if (10 == port->speed)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_100BTX);
+ else if (100 == port->speed)
+ data &= ~(PORT_AUTO_NEG_10BT_FD |
+ PORT_AUTO_NEG_10BT);
+ if (1 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX_FD |
+ PORT_AUTO_NEG_10BT_FD);
+ else if (2 == port->duplex)
+ data &= ~(PORT_AUTO_NEG_100BTX |
+ PORT_AUTO_NEG_10BT);
+ }
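+ /*
+ * Restart auto-negotiation only when the advertisement has changed or
+ * the link is currently down (cfg is then zero).
+ */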
+ if (data != cfg) {
+ data |= PORT_AUTO_NEG_RESTART;
+ port_w16(hw, p, KS884X_PORT_CTRL_4_OFFSET, data);
+ }
+ }
+}
+
+/**
+ * port_force_link_speed - force port speed
+ * @port: The port instance.
+ *
+ * This routine forces the link speed of the switch ports.
+ */
+static void port_force_link_speed(struct ksz_port *port)
+{
+ struct ksz_hw *hw = port->hw;
+ u16 data;
+ int i;
+ int phy;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ phy = KS884X_PHY_1_CTRL_OFFSET + p * PHY_CTRL_INTERVAL;
+ hw_r_phy_ctrl(hw, phy, &data);
+
+ data &= ~PHY_AUTO_NEG_ENABLE;
+
+ if (10 == port->speed)
+ data &= ~PHY_SPEED_100MBIT;
+ else if (100 == port->speed)
+ data |= PHY_SPEED_100MBIT;
+ if (1 == port->duplex)
+ data &= ~PHY_FULL_DUPLEX;
+ else if (2 == port->duplex)
+ data |= PHY_FULL_DUPLEX;
+ hw_w_phy_ctrl(hw, phy, data);
+ }
+}
+
+static void port_set_power_saving(struct ksz_port *port, int enable)
+{
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int p;
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++)
+ port_cfg(hw, p,
+ KS884X_PORT_CTRL_4_OFFSET, PORT_POWER_DOWN, enable);
+}
+
+/*
+ * KSZ8841 power management functions
+ */
+
+/**
+ * hw_chk_wol_pme_status - check PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This function is used to check whether the PMEN pin is asserted.
+ *
+ * Return 1 if PMEN pin is asserted; otherwise, 0.
+ */
+static int hw_chk_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return 0;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ return (data & PCI_PM_CTRL_PME_STATUS) == PCI_PM_CTRL_PME_STATUS;
+}
+
+/**
+ * hw_clr_wol_pme_status - clear PMEN pin
+ * @hw: The hardware instance.
+ *
+ * This routine is used to clear PME_Status to deassert the PMEN pin.
+ */
+static void hw_clr_wol_pme_status(struct ksz_hw *hw)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+
+ /* Clear PME_Status to deassert the PMEN pin. */
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data |= PCI_PM_CTRL_PME_STATUS;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol_pme - enable or disable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable Wake-on-LAN.
+ */
+static void hw_cfg_wol_pme(struct ksz_hw *hw, int set)
+{
+ struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
+ struct pci_dev *pdev = hw_priv->pdev;
+ u16 data;
+
+ if (!pdev->pm_cap)
+ return;
+ pci_read_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, &data);
+ data &= ~PCI_PM_CTRL_STATE_MASK;
+ if (set)
+ data |= PCI_PM_CTRL_PME_ENABLE | PCI_D3hot;
+ else
+ data &= ~PCI_PM_CTRL_PME_ENABLE;
+ pci_write_config_word(pdev, pdev->pm_cap + PCI_PM_CTRL, data);
+}
+
+/**
+ * hw_cfg_wol - configure Wake-on-LAN features
+ * @hw: The hardware instance.
+ * @frame: The pattern frame bit.
+ * @set: The flag indicating whether to enable or disable.
+ *
+ * This routine is used to enable or disable certain Wake-on-LAN features.
+ */
+static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
+{
+ u16 data;
+
+ data = readw(hw->io + KS8841_WOL_CTRL_OFFSET);
+ if (set)
+ data |= frame;
+ else
+ data &= ~frame;
+ writew(data, hw->io + KS8841_WOL_CTRL_OFFSET);
+}
+
+/**
+ * hw_set_wol_frame - program Wake-on-LAN pattern
+ * @hw: The hardware instance.
+ * @i: The frame index.
+ * @mask_size: The size of the mask.
+ * @mask: Mask to ignore certain bytes in the pattern.
+ * @frame_size: The size of the frame.
+ * @pattern: The frame data.
+ *
+ * This routine is used to program a Wake-on-LAN pattern.
+ */
+static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
+ u8 *mask, uint frame_size, u8 *pattern)
+{
+ int bits;
+ int from;
+ int len;
+ int to;
+ u32 crc;
+ u8 data[64];
+ u8 val = 0;
+
+ if (frame_size > mask_size * 8)
+ frame_size = mask_size * 8;
+ if (frame_size > 64)
+ frame_size = 64;
+
+ i *= 0x10;
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i);
+ writel(0, hw->io + KS8841_WOL_FRAME_BYTE2_OFFSET + i);
+
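+ /*
+ * Each mask bit selects whether the corresponding pattern byte takes
+ * part in the match; the selected bytes are collected into data[] and
+ * folded into the CRC written to the frame CRC register.
+ */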
+ bits = len = from = to = 0;
+ do {
+ if (bits) {
+ if ((val & 1))
+ data[to++] = pattern[from];
+ val >>= 1;
+ ++from;
+ --bits;
+ } else {
+ val = mask[len];
+ writeb(val, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i
+ + len);
+ ++len;
+ if (val)
+ bits = 8;
+ else
+ from += 8;
+ }
+ } while (from < (int) frame_size);
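+ /* Clear any mask bits that extend past the end of the pattern. */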
+ if (val) {
+ bits = mask[len - 1];
+ val <<= (from % 8);
+ bits &= ~val;
+ writeb(bits, hw->io + KS8841_WOL_FRAME_BYTE0_OFFSET + i + len -
+ 1);
+ }
+ crc = ether_crc(to, data);
+ writel(crc, hw->io + KS8841_WOL_FRAME_CRC_OFFSET + i);
+}
+
+/**
+ * hw_add_wol_arp - add ARP pattern
+ * @hw: The hardware instance.
+ * @ip_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to add an ARP pattern for waking up the host.
+ */
+static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
+{
+ u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
+ u8 pattern[42] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x08, 0x06,
+ 0x00, 0x01, 0x08, 0x00, 0x06, 0x04, 0x00, 0x01,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00 };
+
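+ /*
+ * The mask selects the broadcast destination address, the ARP EtherType
+ * and header constants, and the target IP address bytes (38-41) filled
+ * in below, so only ARP requests for this device's address match.
+ */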
+ memcpy(&pattern[38], ip_addr, 4);
+ hw_set_wol_frame(hw, 3, 6, mask, 42, pattern);
+}
+
+/**
+ * hw_add_wol_bcast - add broadcast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a broadcast pattern for waking up the host.
+ */
+static void hw_add_wol_bcast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+ u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
+
+ hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
+}
+
+/**
+ * hw_add_wol_mcast - add multicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a multicast pattern for waking up the host.
+ *
+ * It is assumed the multicast packet is the ICMPv6 neighbor solicitation used
+ * by the IPv6 ping command. Note that multicast packets are filtered through the
+ * multicast hash table, so not all multicast packets can wake up the host.
+ */
+static void hw_add_wol_mcast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+ u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
+
+ memcpy(&pattern[3], &hw->override_addr[3], 3);
+ hw_set_wol_frame(hw, 1, 1, mask, 6, pattern);
+}
+
+/**
+ * hw_add_wol_ucast - add unicast pattern
+ * @hw: The hardware instance.
+ *
+ * This routine is used to add a unicast pattern to wake up the host.
+ *
+ * It is assumed the unicast packet is directed to the device, as the hardware
+ * can only receive them in the normal case.
+ */
+static void hw_add_wol_ucast(struct ksz_hw *hw)
+{
+ u8 mask[] = { 0x3F };
+
+ hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
+}
+
+/**
+ * hw_enable_wol - enable Wake-on-LAN
+ * @hw: The hardware instance.
+ * @wol_enable: The Wake-on-LAN settings.
+ * @net_addr: The IPv4 address assigned to the device.
+ *
+ * This routine is used to enable Wake-on-LAN depending on driver settings.
+ */
+static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, u8 *net_addr)
+{
+ hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
+ hw_add_wol_ucast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME1_ENABLE, (wol_enable & WAKE_MCAST));
+ hw_add_wol_mcast(hw);
+ hw_cfg_wol(hw, KS8841_WOL_FRAME2_ENABLE, (wol_enable & WAKE_BCAST));
+ hw_cfg_wol(hw, KS8841_WOL_FRAME3_ENABLE, (wol_enable & WAKE_ARP));
+ hw_add_wol_arp(hw, net_addr);
+}
+
+/**
+ * hw_init - check driver is correct for the hardware
+ * @hw: The hardware instance.
+ *
+ * This function checks that the hardware is correct for this driver and sets
+ * the hardware up for proper initialization.
+ *
+ * Return the number of ports, or 0 if the hardware is not supported.
+ */
+static int hw_init(struct ksz_hw *hw)
+{
+ int rc = 0;
+ u16 data;
+ u16 revision;
+
+ /* Set bus speed to 125MHz. */
+ writew(BUS_SPEED_125_MHZ, hw->io + KS884X_BUS_CTRL_OFFSET);
+
+ /* Check KSZ884x chip ID. */
+ data = readw(hw->io + KS884X_CHIP_ID_OFFSET);
+
+ revision = (data & KS884X_REVISION_MASK) >> KS884X_REVISION_SHIFT;
+ data &= KS884X_CHIP_ID_MASK_41;
+ if (REG_CHIP_ID_41 == data)
+ rc = 1;
+ else if (REG_CHIP_ID_42 == data)
+ rc = 2;
+ else
+ return 0;
+
+ /* Setup hardware features or bug workarounds. */
+ if (revision <= 1) {
+ hw->features |= SMALL_PACKET_TX_BUG;
+ if (1 == rc)
+ hw->features |= HALF_DUPLEX_SIGNAL_BUG;
+ }
+ hw->features |= IPV6_CSUM_GEN_HACK;
+ return rc;
+}
+
+/**
+ * hw_reset - reset the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine resets the hardware.
+ */
+static void hw_reset(struct ksz_hw *hw)
+{
+ writew(GLOBAL_SOFTWARE_RESET, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+
+ /* Wait for device to reset. */
+ mdelay(10);
+
+ /* Write 0 to clear device reset. */
+ writew(0, hw->io + KS884X_GLOBAL_CTRL_OFFSET);
+}
+
+/**
+ * hw_setup - setup the hardware
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware for proper operation.
+ */
+static void hw_setup(struct ksz_hw *hw)
+{
+#if SET_DEFAULT_LED
+ u16 data;
+
+ /* Change default LED mode. */
+ data = readw(hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+ data &= ~LED_MODE;
+ data |= SET_DEFAULT_LED;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_5_OFFSET);
+#endif
+
+ /* Setup transmit control. */
+ hw->tx_cfg = (DMA_TX_PAD_ENABLE | DMA_TX_CRC_ENABLE |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_TX_ENABLE);
+
+ /* Setup receive control. */
+ hw->rx_cfg = (DMA_RX_BROADCAST | DMA_RX_UNICAST |
+ (DMA_BURST_DEFAULT << DMA_BURST_SHIFT) | DMA_RX_ENABLE);
+ hw->rx_cfg |= KS884X_DMA_RX_MULTICAST;
+
+ /* Hardware cannot handle UDP packets in IP fragments. */
+ hw->rx_cfg |= (DMA_RX_CSUM_TCP | DMA_RX_CSUM_IP);
+
+ if (hw->all_multi)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ if (hw->promiscuous)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+}
+
+/**
+ * hw_setup_intr - setup interrupt mask
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the interrupt mask for proper operation.
+ */
+static void hw_setup_intr(struct ksz_hw *hw)
+{
+ hw->intr_mask = KS884X_INT_MASK | KS884X_INT_RX_OVERRUN;
+}
+
+static void ksz_check_desc_num(struct ksz_desc_info *info)
+{
+#define MIN_DESC_SHIFT 2
+
+ int alloc = info->alloc;
+ int shift;
+
+ shift = 0;
+ while (!(alloc & 1)) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (alloc != 1 || shift < MIN_DESC_SHIFT) {
+ printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
+ while (alloc) {
+ shift++;
+ alloc >>= 1;
+ }
+ if (shift < MIN_DESC_SHIFT)
+ shift = MIN_DESC_SHIFT;
+ alloc = 1 << shift;
+ info->alloc = alloc;
+ }
+ info->mask = info->alloc - 1;
+}
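+
+/*
+ * Worked example for the check above (descriptor count assumed for
+ * illustration): with info->alloc = 24 the first loop stops at shift = 3
+ * with alloc = 3, so the count is not a power of two; the fixup loop runs
+ * shift up to 5 and the ring is resized to 1 << 5 = 32 descriptors, giving
+ * info->mask = 31 for cheap ring-index wrapping.
+ */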
+
+static void hw_init_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ int i;
+ u32 phys = desc_info->ring_phys;
+ struct ksz_hw_desc *desc = desc_info->ring_virt;
+ struct ksz_desc *cur = desc_info->ring;
+ struct ksz_desc *previous = NULL;
+
+ for (i = 0; i < desc_info->alloc; i++) {
+ cur->phw = desc++;
+ phys += desc_info->size;
+ previous = cur++;
+ previous->phw->next = cpu_to_le32(phys);
+ }
+ previous->phw->next = cpu_to_le32(desc_info->ring_phys);
+ previous->sw.buf.rx.end_of_ring = 1;
+ previous->phw->buf.data = cpu_to_le32(previous->sw.buf.data);
+
+ desc_info->avail = desc_info->alloc;
+ desc_info->last = desc_info->next = 0;
+
+ desc_info->cur = desc_info->ring;
+}
+
+/**
+ * hw_set_desc_base - set descriptor base addresses
+ * @hw: The hardware instance.
+ * @tx_addr: The transmit descriptor base.
+ * @rx_addr: The receive descriptor base.
+ *
+ * This routine programs the descriptor base addresses after reset.
+ */
+static void hw_set_desc_base(struct ksz_hw *hw, u32 tx_addr, u32 rx_addr)
+{
+ /* Set base address of Tx/Rx descriptors. */
+ writel(tx_addr, hw->io + KS_DMA_TX_ADDR);
+ writel(rx_addr, hw->io + KS_DMA_RX_ADDR);
+}
+
+static void hw_reset_pkts(struct ksz_desc_info *info)
+{
+ info->cur = info->ring;
+ info->avail = info->alloc;
+ info->last = info->next = 0;
+}
+
+static inline void hw_resume_rx(struct ksz_hw *hw)
+{
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+}
+
+/**
+ * hw_start_rx - start receiving
+ * @hw: The hardware instance.
+ *
+ * This routine starts the receive function of the hardware.
+ */
+static void hw_start_rx(struct ksz_hw *hw)
+{
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+
+ /* Notify when the receive stops. */
+ hw->intr_mask |= KS884X_INT_RX_STOPPED;
+
+ writel(DMA_START, hw->io + KS_DMA_RX_START);
+ hw_ack_intr(hw, KS884X_INT_RX_STOPPED);
+ hw->rx_stop++;
+
+ /* Variable overflows. */
+ if (0 == hw->rx_stop)
+ hw->rx_stop = 2;
+}
+
+/**
+ * hw_stop_rx - stop receiving
+ * @hw: The hardware instance.
+ *
+ * This routine stops the receive function of the hardware.
+ */
+static void hw_stop_rx(struct ksz_hw *hw)
+{
+ hw->rx_stop = 0;
+ hw_turn_off_intr(hw, KS884X_INT_RX_STOPPED);
+ writel((hw->rx_cfg & ~DMA_RX_ENABLE), hw->io + KS_DMA_RX_CTRL);
+}
+
+/**
+ * hw_start_tx - start transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine starts the transmit function of the hardware.
+ */
+static void hw_start_tx(struct ksz_hw *hw)
+{
+ writel(hw->tx_cfg, hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_stop_tx - stop transmitting
+ * @hw: The hardware instance.
+ *
+ * This routine stops the transmit function of the hardware.
+ */
+static void hw_stop_tx(struct ksz_hw *hw)
+{
+ writel((hw->tx_cfg & ~DMA_TX_ENABLE), hw->io + KS_DMA_TX_CTRL);
+}
+
+/**
+ * hw_disable - disable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine disables the hardware.
+ */
+static void hw_disable(struct ksz_hw *hw)
+{
+ hw_stop_rx(hw);
+ hw_stop_tx(hw);
+ hw->enabled = 0;
+}
+
+/**
+ * hw_enable - enable hardware
+ * @hw: The hardware instance.
+ *
+ * This routine enables the hardware.
+ */
+static void hw_enable(struct ksz_hw *hw)
+{
+ hw_start_tx(hw);
+ hw_start_rx(hw);
+ hw->enabled = 1;
+}
+
+/**
+ * hw_alloc_pkt - allocate enough descriptors for transmission
+ * @hw: The hardware instance.
+ * @length: The length of the packet.
+ * @physical: Number of descriptors required.
+ *
+ * This function allocates descriptors for transmission.
+ *
+ * Return 0 if no descriptor is available; 1 if the caller should copy the
+ * packet into a single buffer; otherwise the number of available descriptors.
+ */
+static int hw_alloc_pkt(struct ksz_hw *hw, int length, int physical)
+{
+ /* Always leave one descriptor free. */
+ if (hw->tx_desc_info.avail <= 1)
+ return 0;
+
+ /* Allocate a descriptor for transmission and mark it current. */
+ get_tx_pkt(&hw->tx_desc_info, &hw->tx_desc_info.cur);
+ hw->tx_desc_info.cur->sw.buf.tx.first_seg = 1;
+
+ /* Keep track of number of transmit descriptors used so far. */
+ ++hw->tx_int_cnt;
+ hw->tx_size += length;
+
+ /* Cannot hold on to too much data. */
+ if (hw->tx_size >= MAX_TX_HELD_SIZE)
+ hw->tx_int_cnt = hw->tx_int_mask + 1;
+
+ if (physical > hw->tx_desc_info.avail)
+ return 1;
+
+ return hw->tx_desc_info.avail;
+}
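+
+/*
+ * Sketch of how a caller interprets the return value (netdev_tx() below
+ * follows this pattern):
+ *
+ * num = skb_shinfo(skb)->nr_frags + 1;
+ * left = hw_alloc_pkt(hw, skb->len, num);
+ * if (!left)           stop the queue and retry later;
+ * else if (left < num) linearize the skb into one buffer first;
+ * else                 map each fragment to its own descriptor;
+ */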
+
+/**
+ * hw_send_pkt - mark packet for transmission
+ * @hw: The hardware instance.
+ *
+ * This routine marks the packet for transmission in PCI version.
+ */
+static void hw_send_pkt(struct ksz_hw *hw)
+{
+ struct ksz_desc *cur = hw->tx_desc_info.cur;
+
+ cur->sw.buf.tx.last_seg = 1;
+
+ /* Interrupt only after specified number of descriptors used. */
+ if (hw->tx_int_cnt > hw->tx_int_mask) {
+ cur->sw.buf.tx.intr = 1;
+ hw->tx_int_cnt = 0;
+ hw->tx_size = 0;
+ }
+
+ /* KSZ8842 supports port directed transmission. */
+ cur->sw.buf.tx.dest_port = hw->dst_ports;
+
+ release_desc(cur);
+
+ writel(0, hw->io + KS_DMA_TX_START);
+}
+
+static int empty_addr(u8 *addr)
+{
+ u32 *addr1 = (u32 *) addr;
+ u16 *addr2 = (u16 *) &addr[4];
+
+ return 0 == *addr1 && 0 == *addr2;
+}
+
+/**
+ * hw_set_addr - set MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine programs the MAC address of the hardware when the address is
+ * overridden.
+ */
+static void hw_set_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ writeb(hw->override_addr[MAC_ADDR_ORDER(i)],
+ hw->io + KS884X_ADDR_0_OFFSET + i);
+
+ sw_set_addr(hw, hw->override_addr);
+}
+
+/**
+ * hw_read_addr - read MAC address
+ * @hw: The hardware instance.
+ *
+ * This routine retrieves the MAC address of the hardware.
+ */
+static void hw_read_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < MAC_ADDR_LEN; i++)
+ hw->perm_addr[MAC_ADDR_ORDER(i)] = readb(hw->io +
+ KS884X_ADDR_0_OFFSET + i);
+
+ if (!hw->mac_override) {
+ memcpy(hw->override_addr, hw->perm_addr, MAC_ADDR_LEN);
+ if (empty_addr(hw->override_addr)) {
+ memcpy(hw->perm_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ memcpy(hw->override_addr, DEFAULT_MAC_ADDRESS,
+ MAC_ADDR_LEN);
+ hw->override_addr[5] += hw->id;
+ hw_set_addr(hw);
+ }
+ }
+}
+
+static void hw_ena_add_addr(struct ksz_hw *hw, int index, u8 *mac_addr)
+{
+ int i;
+ u32 mac_addr_lo;
+ u32 mac_addr_hi;
+
+ mac_addr_hi = 0;
+ for (i = 0; i < 2; i++) {
+ mac_addr_hi <<= 8;
+ mac_addr_hi |= mac_addr[i];
+ }
+ mac_addr_hi |= ADD_ADDR_ENABLE;
+ mac_addr_lo = 0;
+ for (i = 2; i < 6; i++) {
+ mac_addr_lo <<= 8;
+ mac_addr_lo |= mac_addr[i];
+ }
+ index *= ADD_ADDR_INCR;
+
+ writel(mac_addr_lo, hw->io + index + KS_ADD_ADDR_0_LO);
+ writel(mac_addr_hi, hw->io + index + KS_ADD_ADDR_0_HI);
+}
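+
+/*
+ * Register packing example (address chosen for illustration): for the
+ * additional address 00:10:a1:96:00:01 the loops above produce
+ *
+ * mac_addr_hi = 0x00000010 | ADD_ADDR_ENABLE;
+ * mac_addr_lo = 0xa1960001;
+ *
+ * The LO word is written before the HI word so the entry only takes effect
+ * once ADD_ADDR_ENABLE lands together with the high address bytes.
+ */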
+
+static void hw_set_add_addr(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < ADDITIONAL_ENTRIES; i++) {
+ if (empty_addr(hw->address[i]))
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ else
+ hw_ena_add_addr(hw, i, hw->address[i]);
+ }
+}
+
+static int hw_add_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+ int j = ADDITIONAL_ENTRIES;
+
+ if (!memcmp(hw->override_addr, mac_addr, MAC_ADDR_LEN))
+ return 0;
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN))
+ return 0;
+ if (ADDITIONAL_ENTRIES == j && empty_addr(hw->address[i]))
+ j = i;
+ }
+ if (j < ADDITIONAL_ENTRIES) {
+ memcpy(hw->address[j], mac_addr, MAC_ADDR_LEN);
+ hw_ena_add_addr(hw, j, hw->address[j]);
+ return 0;
+ }
+ return -1;
+}
+
+static int hw_del_addr(struct ksz_hw *hw, u8 *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < hw->addr_list_size; i++) {
+ if (!memcmp(hw->address[i], mac_addr, MAC_ADDR_LEN)) {
+ memset(hw->address[i], 0, MAC_ADDR_LEN);
+ writel(0, hw->io + ADD_ADDR_INCR * i +
+ KS_ADD_ADDR_0_HI);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/**
+ * hw_clr_multicast - clear multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine removes all multicast addresses set in the hardware.
+ */
+static void hw_clr_multicast(struct ksz_hw *hw)
+{
+ int i;
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++) {
+ hw->multi_bits[i] = 0;
+
+ writeb(0, hw->io + KS884X_MULTICAST_0_OFFSET + i);
+ }
+}
+
+/**
+ * hw_set_grp_addr - set multicast addresses
+ * @hw: The hardware instance.
+ *
+ * This routine programs the multicast hash table so that the hardware accepts
+ * the configured multicast addresses.
+ */
+static void hw_set_grp_addr(struct ksz_hw *hw)
+{
+ int i;
+ int index;
+ int position;
+ int value;
+
+ memset(hw->multi_bits, 0, sizeof(u8) * HW_MULTICAST_SIZE);
+
+ for (i = 0; i < hw->multi_list_size; i++) {
+ position = (ether_crc(6, hw->multi_list[i]) >> 26) & 0x3f;
+ index = position >> 3;
+ value = 1 << (position & 7);
+ hw->multi_bits[index] |= (u8) value;
+ }
+
+ for (i = 0; i < HW_MULTICAST_SIZE; i++)
+ writeb(hw->multi_bits[i], hw->io + KS884X_MULTICAST_0_OFFSET +
+ i);
+}
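+
+/*
+ * Hash example (CRC value assumed for illustration): if the top six bits of
+ * ether_crc() give position 43, then index = 43 >> 3 = 5 and
+ * value = 1 << (43 & 7) = 0x08, so bit 3 of multicast register 5 is set.
+ * Any other address hashing to the same bit is accepted as well, which is
+ * why the hash filter is not exact (see hw_add_wol_mcast() above).
+ */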
+
+/**
+ * hw_set_multicast - enable or disable all multicast receiving
+ * @hw: The hardware instance.
+ * @multicast: To turn on or off the all multicast feature.
+ *
+ * This routine enables/disables the hardware to accept all multicast packets.
+ */
+static void hw_set_multicast(struct ksz_hw *hw, u8 multicast)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (multicast)
+ hw->rx_cfg |= DMA_RX_ALL_MULTICAST;
+ else
+ hw->rx_cfg &= ~DMA_RX_ALL_MULTICAST;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * hw_set_promiscuous - enable or disable promiscuous receiving
+ * @hw: The hardware instance.
+ * @prom: To turn on or off the promiscuous feature.
+ *
+ * This routine enables/disables the hardware to accept all packets.
+ */
+static void hw_set_promiscuous(struct ksz_hw *hw, u8 prom)
+{
+ /* Stop receiving for reconfiguration. */
+ hw_stop_rx(hw);
+
+ if (prom)
+ hw->rx_cfg |= DMA_RX_PROMISCUOUS;
+ else
+ hw->rx_cfg &= ~DMA_RX_PROMISCUOUS;
+
+ if (hw->enabled)
+ hw_start_rx(hw);
+}
+
+/**
+ * sw_enable - enable the switch
+ * @hw: The hardware instance.
+ * @enable: The flag to enable or disable the switch
+ *
+ * This routine is used to enable/disable the switch in KSZ8842.
+ */
+static void sw_enable(struct ksz_hw *hw, int enable)
+{
+ int port;
+
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (hw->dev_count > 1) {
+ /* Set port-base vlan membership with host port. */
+ sw_cfg_port_base_vlan(hw, port,
+ HOST_MASK | (1 << port));
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ } else {
+ sw_cfg_port_base_vlan(hw, port, PORT_MASK);
+ port_set_stp_state(hw, port, STP_STATE_FORWARDING);
+ }
+ }
+ if (hw->dev_count > 1)
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ else
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_FORWARDING);
+
+ if (enable)
+ enable = KS8842_START;
+ writew(enable, hw->io + KS884X_CHIP_ID_OFFSET);
+}
+
+/**
+ * sw_setup - setup the switch
+ * @hw: The hardware instance.
+ *
+ * This routine sets up the hardware switch engine for default operation.
+ */
+static void sw_setup(struct ksz_hw *hw)
+{
+ int port;
+
+ sw_set_global_ctrl(hw);
+
+ /* Enable switch broadcast storm protection at a 10 percent rate. */
+ sw_init_broad_storm(hw);
+ hw_cfg_broad_storm(hw, BROADCAST_STORM_PROTECTION_RATE);
+ for (port = 0; port < SWITCH_PORT_NUM; port++)
+ sw_ena_broad_storm(hw, port);
+
+ sw_init_prio(hw);
+
+ sw_init_mirror(hw);
+
+ sw_init_prio_rate(hw);
+
+ sw_init_vlan(hw);
+
+ if (hw->features & STP_SUPPORT)
+ sw_init_stp(hw);
+ if (!sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL | SWITCH_RX_FLOW_CTRL))
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ sw_enable(hw, 1);
+}
+
+/**
+ * ksz_start_timer - start kernel timer
+ * @info: Kernel timer information.
+ * @time: The time tick.
+ *
+ * This routine starts the kernel timer after the specified time tick.
+ */
+static void ksz_start_timer(struct ksz_timer_info *info, int time)
+{
+ info->cnt = 0;
+ info->timer.expires = jiffies + time;
+ add_timer(&info->timer);
+
+ /* infinity */
+ info->max = -1;
+}
+
+/**
+ * ksz_stop_timer - stop kernel timer
+ * @info: Kernel timer information.
+ *
+ * This routine stops the kernel timer.
+ */
+static void ksz_stop_timer(struct ksz_timer_info *info)
+{
+ if (info->max) {
+ info->max = 0;
+ del_timer_sync(&info->timer);
+ }
+}
+
+static void ksz_init_timer(struct ksz_timer_info *info, int period,
+ void (*function)(unsigned long), void *data)
+{
+ info->max = 0;
+ info->period = period;
+ init_timer(&info->timer);
+ info->timer.function = function;
+ info->timer.data = (unsigned long) data;
+}
+
+static void ksz_update_timer(struct ksz_timer_info *info)
+{
+ ++info->cnt;
+ if (info->max > 0) {
+ if (info->cnt < info->max) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ } else
+ info->max = 0;
+ } else if (info->max < 0) {
+ info->timer.expires = jiffies + info->period;
+ add_timer(&info->timer);
+ }
+}
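+
+/*
+ * The timer helpers above use info->max as a small state variable: -1 (set
+ * by ksz_start_timer()) means rearm indefinitely, a positive value limits
+ * the number of rearms to that count, and 0 means the timer is stopped so
+ * ksz_update_timer() leaves it alone.
+ */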
+
+/**
+ * ksz_alloc_soft_desc - allocate software descriptors
+ * @desc_info: Descriptor information structure.
+ * @transmit: Indication that descriptors are for transmit.
+ *
+ * This local function allocates the software descriptors used to track the
+ * hardware descriptor ring in memory.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_soft_desc(struct ksz_desc_info *desc_info, int transmit)
+{
+ desc_info->ring = kmalloc(sizeof(struct ksz_desc) * desc_info->alloc,
+ GFP_KERNEL);
+ if (!desc_info->ring)
+ return 1;
+ memset((void *) desc_info->ring, 0,
+ sizeof(struct ksz_desc) * desc_info->alloc);
+ hw_init_desc(desc_info, transmit);
+ return 0;
+}
+
+/**
+ * ksz_alloc_desc - allocate hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local function allocates hardware descriptors for receiving and
+ * transmitting.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+ int offset;
+
+ /* Allocate memory for RX & TX descriptors. */
+ adapter->desc_pool.alloc_size =
+ hw->rx_desc_info.size * hw->rx_desc_info.alloc +
+ hw->tx_desc_info.size * hw->tx_desc_info.alloc +
+ DESC_ALIGNMENT;
+
+ adapter->desc_pool.alloc_virt =
+ pci_alloc_consistent(
+ adapter->pdev, adapter->desc_pool.alloc_size,
+ &adapter->desc_pool.dma_addr);
+ if (adapter->desc_pool.alloc_virt == NULL) {
+ adapter->desc_pool.alloc_size = 0;
+ return 1;
+ }
+ memset(adapter->desc_pool.alloc_virt, 0, adapter->desc_pool.alloc_size);
+
+ /* Align to the next cache line boundary. */
+ offset = (((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT) ?
+ (DESC_ALIGNMENT -
+ ((ulong) adapter->desc_pool.alloc_virt % DESC_ALIGNMENT)) : 0);
+ adapter->desc_pool.virt = adapter->desc_pool.alloc_virt + offset;
+ adapter->desc_pool.phys = adapter->desc_pool.dma_addr + offset;
+
+ /* Allocate receive/transmit descriptors. */
+ hw->rx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ adapter->desc_pool.virt;
+ hw->rx_desc_info.ring_phys = adapter->desc_pool.phys;
+ offset = hw->rx_desc_info.alloc * hw->rx_desc_info.size;
+ hw->tx_desc_info.ring_virt = (struct ksz_hw_desc *)
+ (adapter->desc_pool.virt + offset);
+ hw->tx_desc_info.ring_phys = adapter->desc_pool.phys + offset;
+
+ if (ksz_alloc_soft_desc(&hw->rx_desc_info, 0))
+ return 1;
+ if (ksz_alloc_soft_desc(&hw->tx_desc_info, 1))
+ return 1;
+
+ return 0;
+}
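+
+/*
+ * Alignment example (DESC_ALIGNMENT of 16 and an address ending in 0x...38
+ * assumed for illustration): the remainder is 8, so offset = 16 - 8 = 8 and
+ * both the virtual and DMA pointers advance to the next 16-byte boundary.
+ * The extra DESC_ALIGNMENT bytes added to alloc_size guarantee the shifted
+ * rings still fit inside the pool.
+ */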
+
+/**
+ * free_dma_buf - release DMA buffer resources
+ * @adapter: Adapter information structure.
+ * @dma_buf: The DMA buffer to release.
+ * @direction: The DMA direction used when the buffer was mapped.
+ *
+ * This routine is a helper function to unmap and free a single DMA buffer.
+ */
+static void free_dma_buf(struct dev_info *adapter, struct ksz_dma_buf *dma_buf,
+ int direction)
+{
+ pci_unmap_single(adapter->pdev, dma_buf->dma, dma_buf->len, direction);
+ dev_kfree_skb(dma_buf->skb);
+ dma_buf->skb = NULL;
+ dma_buf->dma = 0;
+}
+
+/**
+ * ksz_init_rx_buffers - initialize receive descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This routine initializes DMA buffers for receiving.
+ */
+static void ksz_init_rx_buffers(struct dev_info *adapter)
+{
+ int i;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_hw *hw = &adapter->hw;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+
+ for (i = 0; i < hw->rx_desc_info.alloc; i++) {
+ get_rx_pkt(info, &desc);
+
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb && dma_buf->len != adapter->mtu)
+ free_dma_buf(adapter, dma_buf, PCI_DMA_FROMDEVICE);
+ dma_buf->len = adapter->mtu;
+ if (!dma_buf->skb)
+ dma_buf->skb = alloc_skb(dma_buf->len, GFP_ATOMIC);
+ if (dma_buf->skb && !dma_buf->dma) {
+ dma_buf->skb->dev = adapter->dev;
+ dma_buf->dma = pci_map_single(
+ adapter->pdev,
+ skb_tail_pointer(dma_buf->skb),
+ dma_buf->len,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ /* Set descriptor. */
+ set_rx_buf(desc, dma_buf->dma);
+ set_rx_len(desc, dma_buf->len);
+ release_desc(desc);
+ }
+}
+
+/**
+ * ksz_alloc_mem - allocate memory for hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This function allocates memory for use by hardware descriptors for receiving
+ * and transmitting.
+ *
+ * Return 0 if successful.
+ */
+static int ksz_alloc_mem(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Determine the number of receive and transmit descriptors. */
+ hw->rx_desc_info.alloc = NUM_OF_RX_DESC;
+ hw->tx_desc_info.alloc = NUM_OF_TX_DESC;
+
+ /* Determine how many descriptors to use before requesting a transmit interrupt. */
+ hw->tx_int_cnt = 0;
+ hw->tx_int_mask = NUM_OF_TX_DESC / 4;
+ if (hw->tx_int_mask > 8)
+ hw->tx_int_mask = 8;
+ while (hw->tx_int_mask) {
+ hw->tx_int_cnt++;
+ hw->tx_int_mask >>= 1;
+ }
+ if (hw->tx_int_cnt) {
+ hw->tx_int_mask = (1 << (hw->tx_int_cnt - 1)) - 1;
+ hw->tx_int_cnt = 0;
+ }
+
+ /* Determine the descriptor size. */
+ hw->rx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ hw->tx_desc_info.size =
+ (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
+ DESC_ALIGNMENT) * DESC_ALIGNMENT);
+ if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
+ printk(KERN_ALERT
+ "Hardware descriptor size not right!\n");
+ ksz_check_desc_num(&hw->rx_desc_info);
+ ksz_check_desc_num(&hw->tx_desc_info);
+
+ /* Allocate descriptors. */
+ if (ksz_alloc_desc(adapter))
+ return 1;
+
+ return 0;
+}
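+
+/*
+ * Interrupt-moderation example (NUM_OF_TX_DESC = 64 assumed for
+ * illustration): 64 / 4 = 16 is capped to 8, the loop counts four bits, and
+ * tx_int_mask becomes (1 << 3) - 1 = 7, so hw_send_pkt() only requests a
+ * transmit-done interrupt after more than 7 descriptors have been queued
+ * since the last one (or earlier when MAX_TX_HELD_SIZE is exceeded).
+ */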
+
+/**
+ * ksz_free_desc - free software and hardware descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees the software and hardware descriptors allocated by
+ * ksz_alloc_desc().
+ */
+static void ksz_free_desc(struct dev_info *adapter)
+{
+ struct ksz_hw *hw = &adapter->hw;
+
+ /* Reset descriptor. */
+ hw->rx_desc_info.ring_virt = NULL;
+ hw->tx_desc_info.ring_virt = NULL;
+ hw->rx_desc_info.ring_phys = 0;
+ hw->tx_desc_info.ring_phys = 0;
+
+ /* Free memory. */
+ if (adapter->desc_pool.alloc_virt)
+ pci_free_consistent(
+ adapter->pdev,
+ adapter->desc_pool.alloc_size,
+ adapter->desc_pool.alloc_virt,
+ adapter->desc_pool.dma_addr);
+
+ /* Reset resource pool. */
+ adapter->desc_pool.alloc_size = 0;
+ adapter->desc_pool.alloc_virt = NULL;
+
+ kfree(hw->rx_desc_info.ring);
+ hw->rx_desc_info.ring = NULL;
+ kfree(hw->tx_desc_info.ring);
+ hw->tx_desc_info.ring = NULL;
+}
+
+/**
+ * ksz_free_buffers - free buffers used in the descriptors
+ * @adapter: Adapter information structure.
+ * @desc_info: Descriptor information structure.
+ * @direction: The DMA direction used when the buffers were mapped.
+ *
+ * This local routine frees the DMA buffers attached to the descriptors.
+ */
+static void ksz_free_buffers(struct dev_info *adapter,
+ struct ksz_desc_info *desc_info, int direction)
+{
+ int i;
+ struct ksz_dma_buf *dma_buf;
+ struct ksz_desc *desc = desc_info->ring;
+
+ for (i = 0; i < desc_info->alloc; i++) {
+ dma_buf = DMA_BUFFER(desc);
+ if (dma_buf->skb)
+ free_dma_buf(adapter, dma_buf, direction);
+ desc++;
+ }
+}
+
+/**
+ * ksz_free_mem - free all resources used by descriptors
+ * @adapter: Adapter information structure.
+ *
+ * This local routine frees all the resources allocated by ksz_alloc_mem().
+ */
+static void ksz_free_mem(struct dev_info *adapter)
+{
+ /* Free transmit buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.tx_desc_info,
+ PCI_DMA_TODEVICE);
+
+ /* Free receive buffers. */
+ ksz_free_buffers(adapter, &adapter->hw.rx_desc_info,
+ PCI_DMA_FROMDEVICE);
+
+ /* Free descriptors. */
+ ksz_free_desc(adapter);
+}
+
+static void get_mib_counters(struct ksz_hw *hw, int first, int cnt,
+ u64 *counter)
+{
+ int i;
+ int mib;
+ int port;
+ struct ksz_port_mib *port_mib;
+
+ memset(counter, 0, sizeof(u64) * TOTAL_PORT_COUNTER_NUM);
+ for (i = 0, port = first; i < cnt; i++, port++) {
+ port_mib = &hw->port_mib[port];
+ for (mib = port_mib->mib_start; mib < hw->mib_cnt; mib++)
+ counter[mib] += port_mib->counter[mib];
+ }
+}
+
+/**
+ * send_packet - send packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This routine is used to send a packet out to the network.
+ */
+static void send_packet(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ksz_desc *desc;
+ struct ksz_desc *first;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_dma_buf *dma_buf;
+ int len;
+ int last_frag = skb_shinfo(skb)->nr_frags;
+
+ /*
+ * A KSZ8842 with multiple device interfaces needs to be told which port
+ * to send the packet on.
+ */
+ if (hw->dev_count > 1)
+ hw->dst_ports = 1 << priv->port.first_port;
+
+ /* Hardware will pad the length to 60. */
+ len = skb->len;
+
+ /* Remember the very first descriptor. */
+ first = info->cur;
+ desc = first;
+
+ dma_buf = DMA_BUFFER(desc);
+ if (last_frag) {
+ int frag;
+ skb_frag_t *this_frag;
+
+ dma_buf->len = skb->len - skb->data_len;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag = 0;
+ do {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+
+ /* Get a new descriptor. */
+ get_tx_pkt(info, &desc);
+
+ /* Keep track of descriptors used so far. */
+ ++hw->tx_int_cnt;
+
+ dma_buf = DMA_BUFFER(desc);
+ dma_buf->len = this_frag->size;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev,
+ page_address(this_frag->page) +
+ this_frag->page_offset,
+ dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+
+ frag++;
+ if (frag == last_frag)
+ break;
+
+ /* Do not release the last descriptor here. */
+ release_desc(desc);
+ } while (1);
+
+ /* current points to the last descriptor. */
+ info->cur = desc;
+
+ /* Release the first descriptor. */
+ release_desc(first);
+ } else {
+ dma_buf->len = len;
+
+ dma_buf->dma = pci_map_single(
+ hw_priv->pdev, skb->data, dma_buf->len,
+ PCI_DMA_TODEVICE);
+ set_tx_buf(desc, dma_buf->dma);
+ set_tx_len(desc, dma_buf->len);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ (desc)->sw.buf.tx.csum_gen_tcp = 1;
+ (desc)->sw.buf.tx.csum_gen_udp = 1;
+ }
+
+ /*
+ * The last descriptor holds the packet so that it can be returned to
+ * the network subsystem after all descriptors are transmitted.
+ */
+ dma_buf->skb = skb;
+
+ hw_send_pkt(hw);
+
+ /* Update transmit statistics. */
+ priv->stats.tx_packets++;
+ priv->stats.tx_bytes += len;
+}
+
+/**
+ * transmit_cleanup - clean up transmit descriptors
+ * @hw_priv: Network device information.
+ * @normal: Stop at the first descriptor still owned by the hardware; when
+ * zero, forcibly reclaim every descriptor.
+ *
+ * This routine is called to clean up the transmitted buffers.
+ */
+static void transmit_cleanup(struct dev_info *hw_priv, int normal)
+{
+ int last;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_desc_info *info = &hw->tx_desc_info;
+ struct ksz_desc *desc;
+ struct ksz_dma_buf *dma_buf;
+ struct net_device *dev = NULL;
+
+ spin_lock(&hw_priv->hwlock);
+ last = info->last;
+
+ while (info->avail < info->alloc) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[last];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.tx.hw_owned) {
+ if (normal)
+ break;
+ else
+ reset_desc(desc, status);
+ }
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_unmap_single(
+ hw_priv->pdev, dma_buf->dma, dma_buf->len,
+ PCI_DMA_TODEVICE);
+
+ /* This descriptor contains the last buffer in the packet. */
+ if (dma_buf->skb) {
+ dev = dma_buf->skb->dev;
+
+ /* Release the packet back to network subsystem. */
+ dev_kfree_skb_irq(dma_buf->skb);
+ dma_buf->skb = NULL;
+ }
+
+ /* Free the transmitted descriptor. */
+ last++;
+ last &= info->mask;
+ info->avail++;
+ }
+ info->last = last;
+ spin_unlock(&hw_priv->hwlock);
+
+ /* Update the last transmit time so the watchdog does not trigger. */
+ if (dev)
+ dev->trans_start = jiffies;
+}
+
+/**
+ * tx_done - transmit done processing
+ * @hw_priv: Network device information.
+ *
+ * This routine is called when the transmit interrupt is triggered, indicating
+ * either a packet is sent successfully or there are transmit errors.
+ */
+static void tx_done(struct dev_info *hw_priv)
+{
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ transmit_cleanup(hw_priv, 1);
+
+ for (port = 0; port < hw->dev_count; port++) {
+ struct net_device *dev = hw->port_info[port].pdev;
+
+ if (netif_running(dev) && netif_queue_stopped(dev))
+ netif_wake_queue(dev);
+ }
+}
+
+static inline void copy_old_skb(struct sk_buff *old, struct sk_buff *skb)
+{
+ skb->dev = old->dev;
+ skb->protocol = old->protocol;
+ skb->ip_summed = old->ip_summed;
+ skb->csum = old->csum;
+ skb_set_network_header(skb, ETH_HLEN);
+
+ dev_kfree_skb(old);
+}
+
+/**
+ * netdev_tx - send out packet
+ * @skb: Socket buffer.
+ * @dev: Network device.
+ *
+ * This function is used by the upper network layer to send out a packet.
+ *
+ * Return NETDEV_TX_OK if successful; NETDEV_TX_BUSY if the packet cannot be
+ * queued at this time.
+ */
+static int netdev_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int left;
+ int num = 1;
+ int rc = 0;
+
+ if (hw->features & SMALL_PACKET_TX_BUG) {
+ struct sk_buff *org_skb = skb;
+
+ if (skb->len <= 48) {
+ if (skb_end_pointer(skb) - skb->data >= 50) {
+ memset(&skb->data[skb->len], 0, 50 - skb->len);
+ skb->len = 50;
+ } else {
+ skb = dev_alloc_skb(50);
+ if (!skb)
+ return NETDEV_TX_BUSY;
+ memcpy(skb->data, org_skb->data, org_skb->len);
+ memset(&skb->data[org_skb->len], 0,
+ 50 - org_skb->len);
+ skb->len = 50;
+ copy_old_skb(org_skb, skb);
+ }
+ }
+ }
+
+ spin_lock_irq(&hw_priv->hwlock);
+
+ num = skb_shinfo(skb)->nr_frags + 1;
+ left = hw_alloc_pkt(hw, skb->len, num);
+ if (left) {
+ if (left < num ||
+ ((hw->features & IPV6_CSUM_GEN_HACK) &&
+ (CHECKSUM_PARTIAL == skb->ip_summed) &&
+ (ETH_P_IPV6 == htons(skb->protocol)))) {
+ struct sk_buff *org_skb = skb;
+
+ skb = dev_alloc_skb(org_skb->len);
+ if (!skb)
+ return NETDEV_TX_BUSY;
+ skb_copy_and_csum_dev(org_skb, skb->data);
+ org_skb->ip_summed = 0;
+ skb->len = org_skb->len;
+ copy_old_skb(org_skb, skb);
+ }
+ send_packet(skb, dev);
+ if (left <= num)
+ netif_stop_queue(dev);
+ } else {
+ /* Stop the transmit queue until packet is allocated. */
+ netif_stop_queue(dev);
+ rc = NETDEV_TX_BUSY;
+ }
+
+ spin_unlock_irq(&hw_priv->hwlock);
+
+ return rc;
+}
+
+/**
+ * netdev_tx_timeout - transmit timeout processing
+ * @dev: Network device.
+ *
+ * This routine is called when the transmit timer expires, which indicates the
+ * hardware is not running correctly: transmit interrupts are no longer being
+ * triggered to free up resources, so the transmit routine cannot continue
+ * sending out packets. The hardware is reset to correct the problem.
+ */
+static void netdev_tx_timeout(struct net_device *dev)
+{
+ static unsigned long last_reset;
+
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int port;
+
+ if (hw->dev_count > 1) {
+ /*
+ * Only reset the hardware if time between calls is long
+ * enough.
+ */
+ if (jiffies - last_reset <= dev->watchdog_timeo)
+ hw_priv = NULL;
+ }
+
+ last_reset = jiffies;
+ if (hw_priv) {
+ hw_dis_intr(hw);
+ hw_disable(hw);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+ ksz_init_rx_buffers(hw_priv);
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys,
+ hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ if (hw->all_multi)
+ hw_set_multicast(hw, hw->all_multi);
+ else if (hw->multi_list_size)
+ hw_set_grp_addr(hw);
+
+ if (hw->dev_count > 1) {
+ hw_set_add_addr(hw);
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ struct net_device *port_dev;
+
+ port_set_stp_state(hw, port,
+ STP_STATE_DISABLED);
+
+ port_dev = hw->port_info[port].pdev;
+ if (netif_running(port_dev))
+ port_set_stp_state(hw, port,
+ STP_STATE_SIMPLE);
+ }
+ }
+
+ hw_enable(hw);
+ hw_ena_intr(hw);
+ }
+
+ dev->trans_start = jiffies;
+ netif_wake_queue(dev);
+}
+
+static inline void csum_verified(struct sk_buff *skb)
+{
+ unsigned short protocol;
+ struct iphdr *iph;
+
+ protocol = skb->protocol;
+ skb_reset_network_header(skb);
+ iph = (struct iphdr *) skb_network_header(skb);
+ if (protocol == htons(ETH_P_8021Q)) {
+ protocol = iph->tot_len;
+ skb_set_network_header(skb, VLAN_HLEN);
+ iph = (struct iphdr *) skb_network_header(skb);
+ }
+ if (protocol == htons(ETH_P_IP)) {
+ if (iph->protocol == IPPROTO_TCP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
+static inline int rx_proc(struct net_device *dev, struct ksz_hw* hw,
+ struct ksz_desc *desc, union desc_stat status)
+{
+ int packet_len;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_dma_buf *dma_buf;
+ struct sk_buff *skb;
+ int rx_status;
+
+ /* Received length includes 4-byte CRC. */
+ packet_len = status.rx.frame_len - 4;
+
+ dma_buf = DMA_BUFFER(desc);
+ pci_dma_sync_single_for_cpu(
+ hw_priv->pdev, dma_buf->dma, packet_len + 4,
+ PCI_DMA_FROMDEVICE);
+
+ do {
+ /* skb->data != skb->head */
+ skb = dev_alloc_skb(packet_len + 2);
+ if (!skb) {
+ priv->stats.rx_dropped++;
+ return -ENOMEM;
+ }
+
+ /*
+ * Align the socket buffer on a 4-byte boundary for better
+ * performance.
+ */
+ skb_reserve(skb, 2);
+
+ memcpy(skb_put(skb, packet_len),
+ dma_buf->skb->data, packet_len);
+ } while (0);
+
+ skb->dev = dev;
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
+ csum_verified(skb);
+
+ /* Update receive statistics. */
+ priv->stats.rx_packets++;
+ priv->stats.rx_bytes += packet_len;
+
+ /* Notify upper layer for received packet. */
+ dev->last_rx = jiffies;
+
+ rx_status = netif_rx(skb);
+
+ return 0;
+}
+
+static int dev_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static int port_rcv_packets(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static int dev_rcv_special(struct dev_info *hw_priv)
+{
+ int next;
+ union desc_stat status;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct net_device *dev = hw->port_info[0].pdev;
+ struct ksz_desc_info *info = &hw->rx_desc_info;
+ int left = info->alloc;
+ struct ksz_desc *desc;
+ int received = 0;
+
+ next = info->next;
+ while (left--) {
+ /* Get next descriptor which is not hardware owned. */
+ desc = &info->ring[next];
+ status.data = le32_to_cpu(desc->phw->ctrl.data);
+ if (status.rx.hw_owned)
+ break;
+
+ if (hw->dev_count > 1) {
+ /* Get received port number. */
+ int p = HW_TO_DEV_PORT(status.rx.src_port);
+
+ dev = hw->port_info[p].pdev;
+ if (!netif_running(dev))
+ goto release_packet;
+ }
+
+ /* Status valid only when last descriptor bit is set. */
+ if (status.rx.last_desc && status.rx.first_desc) {
+ /*
+ * Receive without error. With receive errors
+ * disabled, packets with receive errors will be
+ * dropped, so no need to check the error bit.
+ */
+ if (!status.rx.error || (status.data &
+ KS_DESC_RX_ERROR_COND) ==
+ KS_DESC_RX_ERROR_TOO_LONG) {
+ if (rx_proc(dev, hw, desc, status))
+ goto release_packet;
+ received++;
+ } else {
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* Update receive error statistics. */
+ priv->port.counter[OID_COUNTER_RCV_ERROR]++;
+ }
+ }
+
+release_packet:
+ release_desc(desc);
+ next++;
+ next &= info->mask;
+ }
+ info->next = next;
+
+ return received;
+}
+
+static void rx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (!hw->enabled)
+ return;
+ if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
+
+ /* In case the receive process was suspended because of an overrun. */
+ hw_resume_rx(hw);
+
+ /* tasklets are interruptible. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_RX_MASK);
+ spin_unlock_irq(&hw_priv->hwlock);
+ } else {
+ hw_ack_intr(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+}
+
+static void tx_proc_task(unsigned long data)
+{
+ struct dev_info *hw_priv = (struct dev_info *) data;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_ack_intr(hw, KS884X_INT_TX_MASK);
+
+ tx_done(hw_priv);
+
+ /* tasklets are interruptible. */
+ spin_lock_irq(&hw_priv->hwlock);
+ hw_turn_on_intr(hw, KS884X_INT_TX);
+ spin_unlock_irq(&hw_priv->hwlock);
+}
+
+static inline void handle_rx_stop(struct ksz_hw *hw)
+{
+ /* Receive has just been stopped. */
+ if (0 == hw->rx_stop)
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ else if (hw->rx_stop > 1) {
+ if (hw->enabled && (hw->rx_cfg & DMA_RX_ENABLE)) {
+ hw_start_rx(hw);
+ } else {
+ hw->intr_mask &= ~KS884X_INT_RX_STOPPED;
+ hw->rx_stop = 0;
+ }
+ } else
+ /* Receive has just been started. */
+ hw->rx_stop++;
+}
+
+/**
+ * netdev_intr - interrupt handling
+ * @irq: Interrupt number.
+ * @dev_id: Network device.
+ *
+ * This function is the interrupt handler invoked when the adapter signals an
+ * interrupt.
+ *
+ * Return IRQ_HANDLED if the interrupt is handled; IRQ_NONE if it is not ours.
+ */
+static irqreturn_t netdev_intr(int irq, void *dev_id)
+{
+ uint int_enable = 0;
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ hw_read_intr(hw, &int_enable);
+
+ /* Not our interrupt! */
+ if (!int_enable)
+ return IRQ_NONE;
+
+ do {
+ hw_ack_intr(hw, int_enable);
+ int_enable &= hw->intr_mask;
+
+ if (unlikely(int_enable & KS884X_INT_TX_MASK)) {
+ hw_dis_intr_bit(hw, KS884X_INT_TX_MASK);
+ tasklet_schedule(&hw_priv->tx_tasklet);
+ }
+
+ if (likely(int_enable & KS884X_INT_RX)) {
+ hw_dis_intr_bit(hw, KS884X_INT_RX);
+ tasklet_schedule(&hw_priv->rx_tasklet);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_OVERRUN)) {
+ priv->stats.rx_fifo_errors++;
+ hw_resume_rx(hw);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_PHY)) {
+ struct ksz_port *port = &priv->port;
+
+ hw->features |= LINK_INT_WORKING;
+ port_get_link_speed(port);
+ }
+
+ if (unlikely(int_enable & KS884X_INT_RX_STOPPED)) {
+ handle_rx_stop(hw);
+ break;
+ }
+
+ if (unlikely(int_enable & KS884X_INT_TX_STOPPED)) {
+ u32 data;
+
+ hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
+ printk(KERN_INFO "Tx stopped\n");
+ data = readl(hw->io + KS_DMA_TX_CTRL);
+ if (!(data & DMA_TX_ENABLE))
+ printk(KERN_INFO "Tx disabled\n");
+ break;
+ }
+ } while (0);
+
+ hw_ena_intr(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Linux network device functions
+ */
+
+static unsigned long next_jiffies;
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netdev_netpoll(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ hw_dis_intr(&hw_priv->hw);
+ netdev_intr(dev->irq, dev);
+}
+#endif
+
+static void bridge_change(struct ksz_hw *hw)
+{
+ int port;
+ u8 member;
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ /* No ports in forwarding state. */
+ if (!sw->member) {
+ port_set_stp_state(hw, SWITCH_PORT_NUM, STP_STATE_SIMPLE);
+ sw_block_addr(hw);
+ }
+ for (port = 0; port < SWITCH_PORT_NUM; port++) {
+ if (STP_STATE_FORWARDING == sw->port_cfg[port].stp_state)
+ member = HOST_MASK | sw->member;
+ else
+ member = HOST_MASK | (1 << port);
+ if (member != sw->port_cfg[port].member)
+ sw_cfg_port_base_vlan(hw, port, member);
+ }
+}
+
+/**
+ * netdev_close - close network device
+ * @dev: Network device.
+ *
+ * This function processes the close operation of the network device, which is
+ * caused by the user command "ifconfig ethX down."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_close(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int pi;
+
+ netif_stop_queue(dev);
+
+ ksz_stop_timer(&priv->monitor_timer_info);
+
+ /* Need to shut down the port manually in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_DISABLED);
+
+ /* Port is closed. Need to change bridge setting. */
+ if (hw->features & STP_SUPPORT) {
+ pi = 1 << port->first_port;
+ if (hw->ksz_switch->member & pi) {
+ hw->ksz_switch->member &= ~pi;
+ bridge_change(hw);
+ }
+ }
+ }
+ if (port->first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ if (!hw_priv->wol_enable)
+ port_set_power_saving(port, true);
+
+ if (priv->multicast)
+ --hw->all_multi;
+ if (priv->promiscuous)
+ --hw->promiscuous;
+
+ hw_priv->opened--;
+ if (!(hw_priv->opened)) {
+ ksz_stop_timer(&hw_priv->mib_timer_info);
+ flush_work(&hw_priv->mib_read);
+
+ hw_dis_intr(hw);
+ hw_disable(hw);
+ hw_clr_multicast(hw);
+
+ /* Delay for receive task to stop scheduling itself. */
+ msleep(2000 / HZ);
+
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+ free_irq(dev->irq, hw_priv->dev);
+
+ transmit_cleanup(hw_priv, 0);
+ hw_reset_pkts(&hw->rx_desc_info);
+ hw_reset_pkts(&hw->tx_desc_info);
+
+ /* Clean out static MAC table when the switch is shutdown. */
+ if (hw->features & STP_SUPPORT)
+ sw_clr_sta_mac_table(hw);
+ }
+
+ return 0;
+}
+
+static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
+{
+ if (hw->ksz_switch) {
+ u32 data;
+
+ data = readw(hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ if (hw->features & RX_HUGE_FRAME)
+ data |= SWITCH_HUGE_PACKET;
+ else
+ data &= ~SWITCH_HUGE_PACKET;
+ writew(data, hw->io + KS8842_SWITCH_CTRL_2_OFFSET);
+ }
+ if (hw->features & RX_HUGE_FRAME) {
+ hw->rx_cfg |= DMA_RX_ERROR;
+ hw_priv->dev_rcv = dev_rcv_special;
+ } else {
+ hw->rx_cfg &= ~DMA_RX_ERROR;
+ if (hw->dev_count > 1)
+ hw_priv->dev_rcv = port_rcv_packets;
+ else
+ hw_priv->dev_rcv = dev_rcv_packets;
+ }
+}
+
+static int prepare_hardware(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int rc = 0;
+
+ /* Remember the network device that requests interrupts. */
+ hw_priv->dev = dev;
+ rc = request_irq(dev->irq, netdev_intr, IRQF_SHARED, dev->name, dev);
+ if (rc)
+ return rc;
+ tasklet_enable(&hw_priv->rx_tasklet);
+ tasklet_enable(&hw_priv->tx_tasklet);
+
+ hw->promiscuous = 0;
+ hw->all_multi = 0;
+ hw->multi_list_size = 0;
+
+ hw_reset(hw);
+
+ hw_set_desc_base(hw,
+ hw->tx_desc_info.ring_phys, hw->rx_desc_info.ring_phys);
+ hw_set_addr(hw);
+ hw_cfg_huge_frame(hw_priv, hw);
+ ksz_init_rx_buffers(hw_priv);
+ return 0;
+}
+
+/**
+ * netdev_open - open network device
+ * @dev: Network device.
+ *
+ * This function processes the open operation of the network device, which is
+ * caused by the user command "ifconfig ethX up."
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int netdev_open(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int i;
+ int p;
+ int rc = 0;
+
+ priv->multicast = 0;
+ priv->promiscuous = 0;
+
+ /* Reset device statistics. */
+ memset(&priv->stats, 0, sizeof(struct net_device_stats));
+ memset((void *) port->counter, 0,
+ (sizeof(u64) * OID_COUNTER_LAST));
+
+ if (!(hw_priv->opened)) {
+ rc = prepare_hardware(dev);
+ if (rc)
+ return rc;
+ for (i = 0; i < hw->mib_port_cnt; i++) {
+ if (next_jiffies < jiffies)
+ next_jiffies = jiffies + HZ * 2;
+ else
+ next_jiffies += HZ * 1;
+ hw_priv->counter[i].time = next_jiffies;
+ hw->port_mib[i].state = media_disconnected;
+ port_init_cnt(hw, i);
+ }
+ if (hw->ksz_switch)
+ hw->port_mib[HOST_PORT].state = media_connected;
+ else {
+ hw_add_wol_bcast(hw);
+ hw_cfg_wol_pme(hw, 0);
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ }
+ }
+ port_set_power_saving(port, false);
+
+ for (i = 0, p = port->first_port; i < port->port_cnt; i++, p++) {
+ /*
+ * Initialize to invalid value so that link detection
+ * is done.
+ */
+ hw->port_info[p].partner = 0xFF;
+ hw->port_info[p].state = media_disconnected;
+ }
+
+ /* Need to open the port in multiple device interfaces mode. */
+ if (hw->dev_count > 1) {
+ port_set_stp_state(hw, port->first_port, STP_STATE_SIMPLE);
+ if (port->first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ }
+
+ port_get_link_speed(port);
+ if (port->force_link)
+ port_force_link_speed(port);
+ else
+ port_set_link_speed(port);
+
+ if (!(hw_priv->opened)) {
+ hw_setup_intr(hw);
+ hw_enable(hw);
+ hw_ena_intr(hw);
+
+ if (hw->mib_port_cnt)
+ ksz_start_timer(&hw_priv->mib_timer_info,
+ hw_priv->mib_timer_info.period);
+ }
+
+ hw_priv->opened++;
+
+ ksz_start_timer(&priv->monitor_timer_info,
+ priv->monitor_timer_info.period);
+
+ priv->media_state = port->linked->state;
+
+ if (media_connected == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ if (netif_msg_link(priv))
+ printk(KERN_INFO "%s link %s\n", dev->name,
+ (media_connected == priv->media_state ?
+ "on" : "off"));
+
+ netif_start_queue(dev);
+
+ return 0;
+}
+
+/* RX errors = rx_errors */
+/* RX dropped = rx_dropped */
+/* RX overruns = rx_fifo_errors */
+/* RX frame = rx_crc_errors + rx_frame_errors + rx_length_errors */
+/* TX errors = tx_errors */
+/* TX dropped = tx_dropped */
+/* TX overruns = tx_fifo_errors */
+/* TX carrier = tx_aborted_errors + tx_carrier_errors + tx_window_errors */
+/* collisions = collisions */
+
+/**
+ * netdev_query_statistics - query network device statistics
+ * @dev: Network device.
+ *
+ * This function returns the statistics of the network device. The device
+ * need not be opened.
+ *
+ * Return network device statistics.
+ */
+static struct net_device_stats *netdev_query_statistics(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = &priv->adapter->hw;
+ struct ksz_port_mib *mib;
+ int i;
+ int p;
+
+ priv->stats.rx_errors = port->counter[OID_COUNTER_RCV_ERROR];
+ priv->stats.tx_errors = port->counter[OID_COUNTER_XMIT_ERROR];
+
+ /* Reset to zero to add count later. */
+ priv->stats.multicast = 0;
+ priv->stats.collisions = 0;
+ priv->stats.rx_length_errors = 0;
+ priv->stats.rx_crc_errors = 0;
+ priv->stats.rx_frame_errors = 0;
+ priv->stats.tx_window_errors = 0;
+
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ mib = &hw->port_mib[p];
+
+ priv->stats.multicast += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_MULTICAST];
+
+ priv->stats.collisions += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_TOTAL_COLLISION];
+
+ priv->stats.rx_length_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_UNDERSIZE] +
+ mib->counter[MIB_COUNTER_RX_FRAGMENT] +
+ mib->counter[MIB_COUNTER_RX_OVERSIZE] +
+ mib->counter[MIB_COUNTER_RX_JABBER]);
+ priv->stats.rx_crc_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_RX_CRC_ERR];
+ priv->stats.rx_frame_errors += (unsigned long)(
+ mib->counter[MIB_COUNTER_RX_ALIGNMENT_ERR] +
+ mib->counter[MIB_COUNTER_RX_SYMBOL_ERR]);
+
+ priv->stats.tx_window_errors += (unsigned long)
+ mib->counter[MIB_COUNTER_TX_LATE_COLLISION];
+ }
+
+ return &priv->stats;
+}
+
+/**
+ * netdev_set_mac_address - set network device MAC address
+ * @dev: Network device.
+ * @addr: Buffer of MAC address.
+ *
+ * This function is used to set the MAC address of the network device.
+ *
+ * Return 0 to indicate success.
+ */
+static int netdev_set_mac_address(struct net_device *dev, void *addr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct sockaddr *mac = addr;
+ uint interrupt;
+
+ if (priv->port.first_port > 0)
+ hw_del_addr(hw, dev->dev_addr);
+ else {
+ hw->mac_override = 1;
+ memcpy(hw->override_addr, mac->sa_data, MAC_ADDR_LEN);
+ }
+
+ memcpy(dev->dev_addr, mac->sa_data, MAC_ADDR_LEN);
+
+ interrupt = hw_block_intr(hw);
+
+ if (priv->port.first_port > 0)
+ hw_add_addr(hw, dev->dev_addr);
+ else
+ hw_set_addr(hw);
+ hw_restore_intr(hw, interrupt);
+
+ return 0;
+}
+
+static void dev_set_promiscuous(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_hw *hw, int promiscuous)
+{
+ if (promiscuous != priv->promiscuous) {
+ u8 prev_state = hw->promiscuous;
+
+ if (promiscuous)
+ ++hw->promiscuous;
+ else
+ --hw->promiscuous;
+ priv->promiscuous = promiscuous;
+
+ /* Turn on/off promiscuous mode. */
+ if (hw->promiscuous <= 1 && prev_state <= 1)
+ hw_set_promiscuous(hw, hw->promiscuous);
+
+ /*
+ * The port leaving promiscuous mode means it has been released
+ * from the bridge.
+ */
+ if ((hw->features & STP_SUPPORT) && !promiscuous &&
+ dev->br_port) {
+ struct ksz_switch *sw = hw->ksz_switch;
+ int port = priv->port.first_port;
+
+ port_set_stp_state(hw, port, STP_STATE_DISABLED);
+ port = 1 << port;
+ if (sw->member & port) {
+ sw->member &= ~port;
+ bridge_change(hw);
+ }
+ }
+ }
+}
+
+static void dev_set_multicast(struct dev_priv *priv, struct ksz_hw *hw,
+ int multicast)
+{
+ if (multicast != priv->multicast) {
+ u8 all_multi = hw->all_multi;
+
+ if (multicast)
+ ++hw->all_multi;
+ else
+ --hw->all_multi;
+ priv->multicast = multicast;
+
+ /* Turn on/off all multicast mode. */
+ if (hw->all_multi <= 1 && all_multi <= 1)
+ hw_set_multicast(hw, hw->all_multi);
+ }
+}
+
+/**
+ * netdev_set_rx_mode - set receive mode
+ * @dev: Network device.
+ *
+ * This routine is used to set multicast addresses or put the network device
+ * into promiscuous mode.
+ */
+static void netdev_set_rx_mode(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct dev_mc_list *mc_ptr;
+ int multicast = (dev->flags & IFF_ALLMULTI);
+
+ dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
+
+ if (hw_priv->hw.dev_count > 1)
+ multicast |= (dev->flags & IFF_MULTICAST);
+ dev_set_multicast(priv, hw, multicast);
+
+ /* Cannot use different hashes in multiple device interfaces mode. */
+ if (hw_priv->hw.dev_count > 1)
+ return;
+
+ if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
+ int i = 0;
+
+ /* List too big to support so turn on all multicast mode. */
+ if (dev->mc_count > MAX_MULTICAST_LIST) {
+ if (MAX_MULTICAST_LIST != hw->multi_list_size) {
+ hw->multi_list_size = MAX_MULTICAST_LIST;
+ ++hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ return;
+ }
+
+ netdev_for_each_mc_addr(mc_ptr, dev) {
+ if (!(*mc_ptr->dmi_addr & 1))
+ continue;
+ if (i >= MAX_MULTICAST_LIST)
+ break;
+ memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
+ MAC_ADDR_LEN);
+ }
+ hw->multi_list_size = (u8) i;
+ hw_set_grp_addr(hw);
+ } else {
+ if (MAX_MULTICAST_LIST == hw->multi_list_size) {
+ --hw->all_multi;
+ hw_set_multicast(hw, hw->all_multi);
+ }
+ hw->multi_list_size = 0;
+ hw_clr_multicast(hw);
+ }
+}
+
+static int netdev_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int hw_mtu;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ /* Cannot use different MTU in multiple device interfaces mode. */
+ if (hw->dev_count > 1)
+ if (dev != hw_priv->dev)
+ return 0;
+ if (new_mtu < 60)
+ return -EINVAL;
+
+ if (dev->mtu != new_mtu) {
+ hw_mtu = new_mtu + ETHERNET_HEADER_SIZE + 4;
+ if (hw_mtu > MAX_RX_BUF_SIZE)
+ return -EINVAL;
+ if (hw_mtu > REGULAR_RX_BUF_SIZE) {
+ hw->features |= RX_HUGE_FRAME;
+ hw_mtu = MAX_RX_BUF_SIZE;
+ } else {
+ hw->features &= ~RX_HUGE_FRAME;
+ hw_mtu = REGULAR_RX_BUF_SIZE;
+ }
+ hw_mtu = (hw_mtu + 3) & ~3;
+ hw_priv->mtu = hw_mtu;
+ dev->mtu = new_mtu;
+ }
+ return 0;
+}
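+
+/*
+ * Example (assuming the usual 14-byte ETHERNET_HEADER_SIZE): an MTU of 1500
+ * gives hw_mtu = 1500 + 14 + 4 = 1518, which fits a regular receive buffer,
+ * so RX_HUGE_FRAME stays clear and hw_priv->mtu becomes REGULAR_RX_BUF_SIZE.
+ * A jumbo MTU such as 4000 yields 4018, sets RX_HUGE_FRAME, and selects
+ * MAX_RX_BUF_SIZE; either way the buffer length is rounded up to a multiple
+ * of 4 before ksz_init_rx_buffers() uses it.
+ */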
+
+/**
+ * netdev_ioctl - I/O control processing
+ * @dev: Network device.
+ * @ifr: Interface request structure.
+ * @cmd: I/O control code.
+ *
+ * This function is used to process I/O control calls.
+ *
+ * Return 0 to indicate success.
+ */
+static int netdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int rc;
+ int result = 0;
+ struct mii_ioctl_data *data = if_mii(ifr);
+
+ if (down_interruptible(&priv->proc_sem))
+ return -ERESTARTSYS;
+
+ /* assume success */
+ rc = 0;
+ switch (cmd) {
+ /* Get address of MII PHY in use. */
+ case SIOCGMIIPHY:
+ data->phy_id = priv->id;
+
+ /* Fallthrough... */
+
+ /* Read MII PHY register. */
+ case SIOCGMIIREG:
+ if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_r_phy(hw, port->linked->port_id, data->reg_num,
+ &data->val_out);
+ break;
+
+ /* Write MII PHY register. */
+ case SIOCSMIIREG:
+ if (!capable(CAP_NET_ADMIN))
+ result = -EPERM;
+ else if (data->phy_id != priv->id || data->reg_num >= 6)
+ result = -EIO;
+ else
+ hw_w_phy(hw, port->linked->port_id, data->reg_num,
+ data->val_in);
+ break;
+
+ default:
+ result = -EOPNOTSUPP;
+ }
+
+ up(&priv->proc_sem);
+
+ return result;
+}
+
+/*
+ * MII support
+ */
+
+/**
+ * mdio_read - read PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ *
+ * This function returns the PHY register value.
+ *
+ * Return the register value.
+ */
+static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ u16 val_out;
+
+ hw_r_phy(hw, port->linked->port_id, reg_num << 1, &val_out);
+ return val_out;
+}
+
+/**
+ * mdio_write - set PHY register
+ * @dev: Network device.
+ * @phy_id: The PHY id.
+ * @reg_num: The register number.
+ * @val: The register value.
+ *
+ * This procedure sets the PHY register value.
+ */
+static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct ksz_port *port = &priv->port;
+ struct ksz_hw *hw = port->hw;
+ int i;
+ int pi;
+
+ for (i = 0, pi = port->first_port; i < port->port_cnt; i++, pi++)
+ hw_w_phy(hw, pi, reg_num << 1, val);
+}
+
+/*
+ * ethtool support
+ */
+
+#define EEPROM_SIZE 0x40
+
+static u16 eeprom_data[EEPROM_SIZE] = { 0 };
+
+#define ADVERTISED_ALL \
+ (ADVERTISED_10baseT_Half | \
+ ADVERTISED_10baseT_Full | \
+ ADVERTISED_100baseT_Half | \
+ ADVERTISED_100baseT_Full)
+
+/* These functions use the MII functions in mii.c. */
+
+/**
+ * netdev_get_settings - get network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function queries the PHY and returns its state in the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ mutex_lock(&hw_priv->lock);
+ mii_ethtool_gset(&priv->mii_if, cmd);
+ cmd->advertising |= SUPPORTED_TP;
+ mutex_unlock(&hw_priv->lock);
+
+ /* Save advertised settings for workaround in next function. */
+ priv->advertising = cmd->advertising;
+ return 0;
+}
+
+/**
+ * netdev_set_settings - set network device settings
+ * @dev: Network device.
+ * @cmd: Ethtool command.
+ *
+ * This function sets the PHY according to the ethtool command.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_port *port = &priv->port;
+ int rc;
+
+ /*
+ * The ethtool utility does not change the advertised settings if
+ * auto-negotiation is not specified explicitly.
+ */
+ if (cmd->autoneg && priv->advertising == cmd->advertising) {
+ cmd->advertising |= ADVERTISED_ALL;
+ if (10 == cmd->speed)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_100baseT_Half);
+ else if (100 == cmd->speed)
+ cmd->advertising &=
+ ~(ADVERTISED_10baseT_Full |
+ ADVERTISED_10baseT_Half);
+ if (0 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ else if (1 == cmd->duplex)
+ cmd->advertising &=
+ ~(ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ }
+ mutex_lock(&hw_priv->lock);
+ if (cmd->autoneg &&
+ (cmd->advertising & ADVERTISED_ALL) ==
+ ADVERTISED_ALL) {
+ port->duplex = 0;
+ port->speed = 0;
+ port->force_link = 0;
+ } else {
+ port->duplex = cmd->duplex + 1;
+ if (cmd->speed != 1000)
+ port->speed = cmd->speed;
+ if (cmd->autoneg)
+ port->force_link = 0;
+ else
+ port->force_link = 1;
+ }
+ rc = mii_ethtool_sset(&priv->mii_if, cmd);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_nway_reset - restart auto-negotiation
+ * @dev: Network device.
+ *
+ * This function restarts the PHY for auto-negotiation.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_nway_reset(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ int rc;
+
+ mutex_lock(&hw_priv->lock);
+ rc = mii_nway_restart(&priv->mii_if);
+ mutex_unlock(&hw_priv->lock);
+ return rc;
+}
+
+/**
+ * netdev_get_link - get network device link status
+ * @dev: Network device.
+ *
+ * This function gets the link status from the PHY.
+ *
+ * Return true if PHY is linked and false otherwise.
+ */
+static u32 netdev_get_link(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ int rc;
+
+ rc = mii_link_ok(&priv->mii_if);
+ return rc;
+}
+
+/**
+ * netdev_get_drvinfo - get network driver information
+ * @dev: Network device.
+ * @info: Ethtool driver info data structure.
+ *
+ * This procedure returns the driver information.
+ */
+static void netdev_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ strcpy(info->driver, DRV_NAME);
+ strcpy(info->version, DRV_VERSION);
+ strcpy(info->bus_info, pci_name(hw_priv->pdev));
+}
+
+static struct hw_regs {
+ int start;
+ int end;
+} hw_regs_range[] = {
+ { KS_DMA_TX_CTRL, KS884X_INTERRUPTS_STATUS },
+ { KS_ADD_ADDR_0_LO, KS_ADD_ADDR_F_HI },
+ { KS884X_ADDR_0_OFFSET, KS8841_WOL_FRAME_BYTE2_OFFSET },
+ { KS884X_SIDER_P, KS8842_SGCR7_P },
+ { KS8842_MACAR1_P, KS8842_TOSR8_P },
+ { KS884X_P1MBCR_P, KS8842_P3ERCR_P },
+ { 0, 0 }
+};
+
+/**
+ * netdev_get_regs_len - get length of register dump
+ * @dev: Network device.
+ *
+ * This function returns the length of the register dump.
+ *
+ * Return length of the register dump.
+ */
+static int netdev_get_regs_len(struct net_device *dev)
+{
+ struct hw_regs *range = hw_regs_range;
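+ /*
+ * 0x10 * sizeof(u32) accounts for the 0x40 bytes of PCI configuration
+ * space that netdev_get_regs() dumps before the register ranges below.
+ */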
+ int regs_len = 0x10 * sizeof(u32);
+
+ while (range->end > range->start) {
+ regs_len += (range->end - range->start + 3) / 4 * 4;
+ range++;
+ }
+ return regs_len;
+}
+
+/**
+ * netdev_get_regs - get register dump
+ * @dev: Network device.
+ * @regs: Ethtool registers data structure.
+ * @ptr: Buffer to store the register values.
+ *
+ * This procedure dumps the register values in the provided buffer.
+ */
+static void netdev_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *ptr)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ int *buf = (int *) ptr;
+ struct hw_regs *range = hw_regs_range;
+ int len;
+
+ mutex_lock(&hw_priv->lock);
+ regs->version = 0;
+ for (len = 0; len < 0x40; len += 4) {
+ pci_read_config_dword(hw_priv->pdev, len, buf);
+ buf++;
+ }
+ while (range->end > range->start) {
+ for (len = range->start; len < range->end; len += 4) {
+ *buf = readl(hw->io + len);
+ buf++;
+ }
+ range++;
+ }
+ mutex_unlock(&hw_priv->lock);
+}
+
+#define WOL_SUPPORT \
+ (WAKE_PHY | WAKE_MAGIC | \
+ WAKE_UCAST | WAKE_MCAST | \
+ WAKE_BCAST | WAKE_ARP)
+
+/**
+ * netdev_get_wol - get Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This procedure returns Wake-on-LAN support.
+ */
+static void netdev_get_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ wol->supported = hw_priv->wol_support;
+ wol->wolopts = hw_priv->wol_enable;
+ memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+/**
+ * netdev_set_wol - set Wake-on-LAN support
+ * @dev: Network device.
+ * @wol: Ethtool Wake-on-LAN data structure.
+ *
+ * This function sets Wake-on-LAN support.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_wol(struct net_device *dev,
+ struct ethtool_wolinfo *wol)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+
+ /* Need to find a way to retrieve the device IP address. */
+ u8 net_addr[] = { 192, 168, 1, 1 };
+
+ if (wol->wolopts & ~hw_priv->wol_support)
+ return -EINVAL;
+
+ hw_priv->wol_enable = wol->wolopts;
+
+ /* Link wakeup cannot really be disabled. */
+ if (wol->wolopts)
+ hw_priv->wol_enable |= WAKE_PHY;
+ hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
+ return 0;
+}
+
+/**
+ * netdev_get_msglevel - get debug message level
+ * @dev: Network device.
+ *
+ * This function returns current debug message level.
+ *
+ * Return current debug message flags.
+ */
+static u32 netdev_get_msglevel(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+/**
+ * netdev_set_msglevel - set debug message level
+ * @dev: Network device.
+ * @value: Debug message flags.
+ *
+ * This procedure sets debug message level.
+ */
+static void netdev_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = value;
+}
+
+/**
+ * netdev_get_eeprom_len - get EEPROM length
+ * @dev: Network device.
+ *
+ * This function returns the length of the EEPROM.
+ *
+ * Return length of the EEPROM.
+ */
+static int netdev_get_eeprom_len(struct net_device *dev)
+{
+ return EEPROM_SIZE * 2;
+}
+
+#define EEPROM_MAGIC 0x10A18842
+
+/**
+ * netdev_get_eeprom - get EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Buffer to store the EEPROM data.
+ *
+ * This function dumps the EEPROM data in the provided buffer.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u8 *eeprom_byte = (u8 *) eeprom_data;
+ int i;
+ int len;
+
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ eeprom->magic = EEPROM_MAGIC;
+ memcpy(data, &eeprom_byte[eeprom->offset], eeprom->len);
+
+ return 0;
+}
+
+/**
+ * netdev_set_eeprom - write EEPROM data
+ * @dev: Network device.
+ * @eeprom: Ethtool EEPROM data structure.
+ * @data: Data buffer.
+ *
+ * This function modifies the EEPROM data; only the words that actually
+ * change are written back.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ u16 eeprom_word[EEPROM_SIZE];
+ u8 *eeprom_byte = (u8 *) eeprom_word;
+ int i;
+ int len;
+
+ if (eeprom->magic != EEPROM_MAGIC)
+ return -EINVAL;
+
+ len = (eeprom->offset + eeprom->len + 1) / 2;
+ for (i = eeprom->offset / 2; i < len; i++)
+ eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
+ memcpy(eeprom_word, eeprom_data, EEPROM_SIZE * 2);
+ memcpy(&eeprom_byte[eeprom->offset], data, eeprom->len);
+ for (i = 0; i < EEPROM_SIZE; i++)
+ if (eeprom_word[i] != eeprom_data[i]) {
+ eeprom_data[i] = eeprom_word[i];
+ eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
+ }
+
+ return 0;
+}
+
+/**
+ * netdev_get_pauseparam - get flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This procedure returns the PAUSE control flow settings.
+ */
+static void netdev_get_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pause->autoneg = (hw->overrides & PAUSE_FLOW_CTRL) ? 0 : 1;
+ if (!hw->ksz_switch) {
+ pause->rx_pause =
+ (hw->rx_cfg & DMA_RX_FLOW_ENABLE) ? 1 : 0;
+ pause->tx_pause =
+ (hw->tx_cfg & DMA_TX_FLOW_ENABLE) ? 1 : 0;
+ } else {
+ pause->rx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL)) ? 1 : 0;
+ pause->tx_pause =
+ (sw_chk(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL)) ? 1 : 0;
+ }
+}
+
+/**
+ * netdev_set_pauseparam - set flow control parameters
+ * @dev: Network device.
+ * @pause: Ethtool PAUSE settings data structure.
+ *
+ * This function sets the PAUSE control flow settings.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_pauseparam(struct net_device *dev,
+ struct ethtool_pauseparam *pause)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ mutex_lock(&hw_priv->lock);
+ if (pause->autoneg) {
+ if (!pause->rx_pause && !pause->tx_pause)
+ port->flow_ctrl = PHY_NO_FLOW_CTRL;
+ else
+ port->flow_ctrl = PHY_FLOW_CTRL;
+ hw->overrides &= ~PAUSE_FLOW_CTRL;
+ port->force_link = 0;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, 1);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, 1);
+ }
+ port_set_link_speed(port);
+ } else {
+ hw->overrides |= PAUSE_FLOW_CTRL;
+ if (hw->ksz_switch) {
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_RX_FLOW_CTRL, pause->rx_pause);
+ sw_cfg(hw, KS8842_SWITCH_CTRL_1_OFFSET,
+ SWITCH_TX_FLOW_CTRL, pause->tx_pause);
+ } else
+ set_flow_ctrl(hw, pause->rx_pause, pause->tx_pause);
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ return 0;
+}
+
+/**
+ * netdev_get_ringparam - get tx/rx ring parameters
+ * @dev: Network device.
+ * @ring: Ethtool RING settings data structure.
+ *
+ * This procedure returns the TX/RX ring settings.
+ */
+static void netdev_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ ring->tx_max_pending = (1 << 9);
+ ring->tx_pending = hw->tx_desc_info.alloc;
+ ring->rx_max_pending = (1 << 9);
+ ring->rx_pending = hw->rx_desc_info.alloc;
+}
+
+#define STATS_LEN (TOTAL_PORT_COUNTER_NUM)
+
+static struct {
+ char string[ETH_GSTRING_LEN];
+} ethtool_stats_keys[STATS_LEN] = {
+ { "rx_lo_priority_octets" },
+ { "rx_hi_priority_octets" },
+ { "rx_undersize_packets" },
+ { "rx_fragments" },
+ { "rx_oversize_packets" },
+ { "rx_jabbers" },
+ { "rx_symbol_errors" },
+ { "rx_crc_errors" },
+ { "rx_align_errors" },
+ { "rx_mac_ctrl_packets" },
+ { "rx_pause_packets" },
+ { "rx_bcast_packets" },
+ { "rx_mcast_packets" },
+ { "rx_ucast_packets" },
+ { "rx_64_or_less_octet_packets" },
+ { "rx_65_to_127_octet_packets" },
+ { "rx_128_to_255_octet_packets" },
+ { "rx_256_to_511_octet_packets" },
+ { "rx_512_to_1023_octet_packets" },
+ { "rx_1024_to_1522_octet_packets" },
+
+ { "tx_lo_priority_octets" },
+ { "tx_hi_priority_octets" },
+ { "tx_late_collisions" },
+ { "tx_pause_packets" },
+ { "tx_bcast_packets" },
+ { "tx_mcast_packets" },
+ { "tx_ucast_packets" },
+ { "tx_deferred" },
+ { "tx_total_collisions" },
+ { "tx_excessive_collisions" },
+ { "tx_single_collisions" },
+ { "tx_mult_collisions" },
+
+ { "rx_discards" },
+ { "tx_discards" },
+};
+
+/**
+ * netdev_get_strings - get statistics identity strings
+ * @dev: Network device.
+ * @stringset: String set identifier.
+ * @buf: Buffer to store the strings.
+ *
+ * This procedure returns the strings used to identify the statistics.
+ */
+static void netdev_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ if (ETH_SS_STATS == stringset)
+ memcpy(buf, &ethtool_stats_keys,
+ ETH_GSTRING_LEN * hw->mib_cnt);
+}
+
+/**
+ * netdev_get_sset_count - get statistics size
+ * @dev: Network device.
+ * @sset: The statistics set number.
+ *
+ * This function returns the size of the statistics to be reported.
+ *
+ * Return size of the statistics to be reported.
+ */
+static int netdev_get_sset_count(struct net_device *dev, int sset)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return hw->mib_cnt;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * netdev_get_ethtool_stats - get network device statistics
+ * @dev: Network device.
+ * @stats: Ethtool statistics data structure.
+ * @data: Buffer to store the statistics.
+ *
+ * This procedure returns the statistics.
+ */
+static void netdev_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+ int n_stats = stats->n_stats;
+ int i;
+ int n;
+ int p;
+ int rc;
+ u64 counter[TOTAL_PORT_COUNTER_NUM];
+
+ mutex_lock(&hw_priv->lock);
+ n = SWITCH_PORT_NUM;
+ for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
+ if (media_connected == hw->port_mib[p].state) {
+ hw_priv->counter[p].read = 1;
+
+ /* Remember first port that requests read. */
+ if (n == SWITCH_PORT_NUM)
+ n = p;
+ }
+ }
+ mutex_unlock(&hw_priv->lock);
+
+ if (n < SWITCH_PORT_NUM)
+ schedule_work(&hw_priv->mib_read);
+
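+ /*
+ * Wait for the MIB read work to mark the counters as refreshed
+ * (read == 2) before copying them out; give up after one or two
+ * seconds so a stalled port does not block the ethtool call
+ * indefinitely.
+ */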
+ if (1 == port->mib_port_cnt && n < SWITCH_PORT_NUM) {
+ p = n;
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ } else
+ for (i = 0, p = n; i < port->mib_port_cnt - n; i++, p++) {
+ if (0 == i) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 2);
+ } else if (hw->port_mib[p].cnt_ptr) {
+ rc = wait_event_interruptible_timeout(
+ hw_priv->counter[p].counter,
+ 2 == hw_priv->counter[p].read,
+ HZ * 1);
+ }
+ }
+
+ get_mib_counters(hw, port->first_port, port->mib_port_cnt, counter);
+ n = hw->mib_cnt;
+ if (n > n_stats)
+ n = n_stats;
+ n_stats -= n;
+ for (i = 0; i < n; i++)
+ *data++ = counter[i];
+}
+
+/**
+ * netdev_get_rx_csum - get receive checksum support
+ * @dev: Network device.
+ *
+ * This function gets receive checksum support setting.
+ *
+ * Return true if receive checksum is enabled; false otherwise.
+ */
+static u32 netdev_get_rx_csum(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ return hw->rx_cfg &
+ (DMA_RX_CSUM_UDP |
+ DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
+}
+
+/**
+ * netdev_set_rx_csum - set receive checksum support
+ * @dev: Network device.
+ * @data: Zero to disable receive checksum support.
+ *
+ * This function sets receive checksum support setting.
+ *
+ * Return 0 if successful; otherwise an error code.
+ */
+static int netdev_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ u32 new_setting = hw->rx_cfg;
+
+ if (data)
+ new_setting |=
+ (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
+ else
+ new_setting &=
+ ~(DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP |
+ DMA_RX_CSUM_IP);
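+ /*
+ * UDP receive checksum offload is always left disabled here,
+ * presumably because of a hardware limitation; only the TCP and IP
+ * checksum bits are honored.
+ */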
+ new_setting &= ~DMA_RX_CSUM_UDP;
+ mutex_lock(&hw_priv->lock);
+ if (new_setting != hw->rx_cfg) {
+ hw->rx_cfg = new_setting;
+ if (hw->enabled)
+ writel(hw->rx_cfg, hw->io + KS_DMA_RX_CTRL);
+ }
+ mutex_unlock(&hw_priv->lock);
+ return 0;
+}
+
+static struct ethtool_ops netdev_ethtool_ops = {
+ .get_settings = netdev_get_settings,
+ .set_settings = netdev_set_settings,
+ .nway_reset = netdev_nway_reset,
+ .get_link = netdev_get_link,
+ .get_drvinfo = netdev_get_drvinfo,
+ .get_regs_len = netdev_get_regs_len,
+ .get_regs = netdev_get_regs,
+ .get_wol = netdev_get_wol,
+ .set_wol = netdev_set_wol,
+ .get_msglevel = netdev_get_msglevel,
+ .set_msglevel = netdev_set_msglevel,
+ .get_eeprom_len = netdev_get_eeprom_len,
+ .get_eeprom = netdev_get_eeprom,
+ .set_eeprom = netdev_set_eeprom,
+ .get_pauseparam = netdev_get_pauseparam,
+ .set_pauseparam = netdev_set_pauseparam,
+ .get_ringparam = netdev_get_ringparam,
+ .get_strings = netdev_get_strings,
+ .get_sset_count = netdev_get_sset_count,
+ .get_ethtool_stats = netdev_get_ethtool_stats,
+ .get_rx_csum = netdev_get_rx_csum,
+ .set_rx_csum = netdev_set_rx_csum,
+ .get_tx_csum = ethtool_op_get_tx_csum,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .get_sg = ethtool_op_get_sg,
+ .set_sg = ethtool_op_set_sg,
+};
+
+/*
+ * Hardware monitoring
+ */
+
+static void update_link(struct net_device *dev, struct dev_priv *priv,
+ struct ksz_port *port)
+{
+ if (priv->media_state != port->linked->state) {
+ priv->media_state = port->linked->state;
+ if (netif_running(dev)) {
+ if (media_connected == priv->media_state)
+ netif_carrier_on(dev);
+ else
+ netif_carrier_off(dev);
+ if (netif_msg_link(priv))
+ printk(KERN_INFO "%s link %s\n", dev->name,
+ (media_connected == priv->media_state ?
+ "on" : "off"));
+ }
+ }
+}
+
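+/*
+ * The per-port counter[].read field acts as a small handshake with
+ * netdev_get_ethtool_stats(): 1 means a counter read has been requested,
+ * and 2 means the counters have been refreshed and any waiters are woken.
+ */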
+static void mib_read_work(struct work_struct *work)
+{
+ struct dev_info *hw_priv =
+ container_of(work, struct dev_info, mib_read);
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port_mib *mib;
+ int i;
+
+ next_jiffies = jiffies;
+ for (i = 0; i < hw->mib_port_cnt; i++) {
+ mib = &hw->port_mib[i];
+
+ /* Reading MIB counters or requested to read. */
+ if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {
+
+ /* Need to process receive interrupt. */
+ if (port_r_cnt(hw, i))
+ break;
+ hw_priv->counter[i].read = 0;
+
+ /* Finish reading counters. */
+ if (0 == mib->cnt_ptr) {
+ hw_priv->counter[i].read = 2;
+ wake_up_interruptible(
+ &hw_priv->counter[i].counter);
+ }
+ } else if (jiffies >= hw_priv->counter[i].time) {
+ /* Only read MIB counters when the port is connected. */
+ if (media_connected == mib->state)
+ hw_priv->counter[i].read = 1;
+ next_jiffies += HZ * 1 * hw->mib_port_cnt;
+ hw_priv->counter[i].time = next_jiffies;
+
+ /* Port is just disconnected. */
+ } else if (mib->link_down) {
+ mib->link_down = 0;
+
+ /* Read counters one last time after link is lost. */
+ hw_priv->counter[i].read = 1;
+ }
+ }
+}
+
+static void mib_monitor(unsigned long ptr)
+{
+ struct dev_info *hw_priv = (struct dev_info *) ptr;
+
+ mib_read_work(&hw_priv->mib_read);
+
+ /* This is used to verify Wake-on-LAN is working. */
+ if (hw_priv->pme_wait) {
+ if (hw_priv->pme_wait <= jiffies) {
+ hw_clr_wol_pme_status(&hw_priv->hw);
+ hw_priv->pme_wait = 0;
+ }
+ } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
+
+ /* PME is asserted. Wait 2 seconds to clear it. */
+ hw_priv->pme_wait = jiffies + HZ * 2;
+ }
+
+ ksz_update_timer(&hw_priv->mib_timer_info);
+}
+
+/**
+ * dev_monitor - periodic monitoring
+ * @ptr: Network device pointer.
+ *
+ * This routine is run in a kernel timer to monitor the network device.
+ */
+static void dev_monitor(unsigned long ptr)
+{
+ struct net_device *dev = (struct net_device *) ptr;
+ struct dev_priv *priv = netdev_priv(dev);
+ struct dev_info *hw_priv = priv->adapter;
+ struct ksz_hw *hw = &hw_priv->hw;
+ struct ksz_port *port = &priv->port;
+
+ if (!(hw->features & LINK_INT_WORKING))
+ port_get_link_speed(port);
+ update_link(dev, priv, port);
+
+ ksz_update_timer(&priv->monitor_timer_info);
+}
+
+/*
+ * Linux network device interface functions
+ */
+
+/* Driver exported variables */
+
+static int msg_enable;
+
+static char *macaddr = ":";
+static char *mac1addr = ":";
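+/*
+ * A bare ":" means the parameter was not supplied; pcidev_init() only
+ * parses these strings when the first character is not ':'.
+ */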
+
+/*
+ * This enables multiple network device mode for KSZ8842, which contains a
+ * switch with two physical ports. Some users like to take control of the
+ * ports for running Spanning Tree Protocol. The driver will create an
+ * additional eth? device for the other port.
+ *
+ * One limitation is that the network devices cannot have different MTUs or
+ * multicast hash tables.
+ */
+static int multi_dev;
+
+/*
+ * As most users select multiple network device mode to use Spanning Tree
+ * Protocol, this enables a feature in which most unicast and multicast packets
+ * are forwarded inside the switch and not passed to the host. Only packets
+ * that need the host's attention are passed to it. This prevents the host
+ * from wasting CPU time examining each and every incoming packet and doing
+ * the forwarding itself.
+ *
+ * As the hack requires the private bridge header, the driver cannot compile
+ * with just the kernel headers.
+ *
+ * Enabling STP support also turns on multiple network device mode.
+ */
+static int stp;
+
+/*
+ * This enables fast aging in the KSZ8842 switch. It is not clear which
+ * situations need it; however, fast aging is used to flush the dynamic MAC
+ * table when STP support is enabled.
+ */
+static int fast_aging;
+
+/**
+ * netdev_init - initialize network device.
+ * @dev: Network device.
+ *
+ * This function initializes the network device.
+ *
+ * Return 0 if successful; otherwise an error code indicating failure.
+ */
+static int __init netdev_init(struct net_device *dev)
+{
+ struct dev_priv *priv = netdev_priv(dev);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&priv->monitor_timer_info, 500 * HZ / 1000,
+ dev_monitor, dev);
+
+ /* 500 ms timeout */
+ dev->watchdog_timeo = HZ / 2;
+
+ dev->features |= NETIF_F_IP_CSUM;
+
+ /*
+ * The hardware does not really support IPv6 checksum generation, but the
+ * driver actually runs faster with this feature on. Refer to
+ * IPV6_CSUM_GEN_HACK.
+ */
+ dev->features |= NETIF_F_IPV6_CSUM;
+ dev->features |= NETIF_F_SG;
+
+ sema_init(&priv->proc_sem, 1);
+
+ priv->mii_if.phy_id_mask = 0x1;
+ priv->mii_if.reg_num_mask = 0x7;
+ priv->mii_if.dev = dev;
+ priv->mii_if.mdio_read = mdio_read;
+ priv->mii_if.mdio_write = mdio_write;
+ priv->mii_if.phy_id = priv->port.first_port + 1;
+
+ priv->msg_enable = netif_msg_init(msg_enable,
+ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK));
+
+ return 0;
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_init = netdev_init,
+ .ndo_open = netdev_open,
+ .ndo_stop = netdev_close,
+ .ndo_get_stats = netdev_query_statistics,
+ .ndo_start_xmit = netdev_tx,
+ .ndo_tx_timeout = netdev_tx_timeout,
+ .ndo_change_mtu = netdev_change_mtu,
+ .ndo_set_mac_address = netdev_set_mac_address,
+ .ndo_do_ioctl = netdev_ioctl,
+ .ndo_set_rx_mode = netdev_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = netdev_netpoll,
+#endif
+};
+
+static void netdev_free(struct net_device *dev)
+{
+ if (dev->watchdog_timeo)
+ unregister_netdev(dev);
+
+ free_netdev(dev);
+}
+
+struct platform_info {
+ struct dev_info dev_info;
+ struct net_device *netdev[SWITCH_PORT_NUM];
+};
+
+static int net_device_present;
+
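+/*
+ * Parse a MAC address supplied as a colon-separated string of hexadecimal
+ * bytes (for example "00:10:A1:88:42:01") and store it as the override
+ * address for the given port.
+ */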
+static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
+{
+ int i;
+ int j;
+ int got_num;
+ int num;
+
+ i = j = num = got_num = 0;
+ while (j < MAC_ADDR_LEN) {
+ if (macaddr[i]) {
+ got_num = 1;
+ if ('0' <= macaddr[i] && macaddr[i] <= '9')
+ num = num * 16 + macaddr[i] - '0';
+ else if ('A' <= macaddr[i] && macaddr[i] <= 'F')
+ num = num * 16 + 10 + macaddr[i] - 'A';
+ else if ('a' <= macaddr[i] && macaddr[i] <= 'f')
+ num = num * 16 + 10 + macaddr[i] - 'a';
+ else if (':' == macaddr[i])
+ got_num = 2;
+ else
+ break;
+ } else if (got_num)
+ got_num = 2;
+ else
+ break;
+ if (2 == got_num) {
+ if (MAIN_PORT == port) {
+ hw_priv->hw.override_addr[j++] = (u8) num;
+ hw_priv->hw.override_addr[5] +=
+ hw_priv->hw.id;
+ } else {
+ hw_priv->hw.ksz_switch->other_addr[j++] =
+ (u8) num;
+ hw_priv->hw.ksz_switch->other_addr[5] +=
+ hw_priv->hw.id;
+ }
+ num = got_num = 0;
+ }
+ i++;
+ }
+ if (MAC_ADDR_LEN == j) {
+ if (MAIN_PORT == port)
+ hw_priv->hw.mac_override = 1;
+ }
+}
+
+#define KS884X_DMA_MASK (~0x0UL)
+
+static void read_other_addr(struct ksz_hw *hw)
+{
+ int i;
+ u16 data[3];
+ struct ksz_switch *sw = hw->ksz_switch;
+
+ for (i = 0; i < 3; i++)
+ data[i] = eeprom_read(hw, i + EEPROM_DATA_OTHER_MAC_ADDR);
+ if ((data[0] || data[1] || data[2]) && data[0] != 0xffff) {
+ sw->other_addr[5] = (u8) data[0];
+ sw->other_addr[4] = (u8)(data[0] >> 8);
+ sw->other_addr[3] = (u8) data[1];
+ sw->other_addr[2] = (u8)(data[1] >> 8);
+ sw->other_addr[1] = (u8) data[2];
+ sw->other_addr[0] = (u8)(data[2] >> 8);
+ }
+}
+
+#ifndef PCI_VENDOR_ID_MICREL_KS
+#define PCI_VENDOR_ID_MICREL_KS 0x16c6
+#endif
+
+static int __init pcidev_init(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct net_device *dev;
+ struct dev_priv *priv;
+ struct dev_info *hw_priv;
+ struct ksz_hw *hw;
+ struct platform_info *info;
+ struct ksz_port *port;
+ unsigned long reg_base;
+ unsigned long reg_len;
+ int cnt;
+ int i;
+ int mib_port_count;
+ int pi;
+ int port_count;
+ int result;
+ char banner[80];
+ struct ksz_switch *sw = NULL;
+
+ result = pci_enable_device(pdev);
+ if (result)
+ return result;
+
+ result = -ENODEV;
+
+ if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) ||
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+ return result;
+
+ reg_base = pci_resource_start(pdev, 0);
+ reg_len = pci_resource_len(pdev, 0);
+ if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0)
+ return result;
+
+ if (!request_mem_region(reg_base, reg_len, DRV_NAME))
+ return result;
+ pci_set_master(pdev);
+
+ result = -ENOMEM;
+
+ info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
+ if (!info)
+ goto pcidev_init_dev_err;
+ memset(info, 0, sizeof(struct platform_info));
+
+ hw_priv = &info->dev_info;
+ hw_priv->pdev = pdev;
+
+ hw = &hw_priv->hw;
+
+ hw->io = ioremap(reg_base, reg_len);
+ if (!hw->io)
+ goto pcidev_init_io_err;
+
+ cnt = hw_init(hw);
+ if (!cnt) {
+ if (msg_enable & NETIF_MSG_PROBE)
+ printk(KERN_ALERT "chip not detected\n");
+ result = -ENODEV;
+ goto pcidev_init_alloc_err;
+ }
+
+ sprintf(banner, "%s\n", version);
+ banner[13] = cnt + '0';
+ ks_info(hw_priv, "%s", banner);
+ ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
+
+ /* Assume device is KSZ8841. */
+ hw->dev_count = 1;
+ port_count = 1;
+ mib_port_count = 1;
+ hw->addr_list_size = 0;
+ hw->mib_cnt = PORT_COUNTER_NUM;
+ hw->mib_port_cnt = 1;
+
+ /* KSZ8842 has a switch with multiple ports. */
+ if (2 == cnt) {
+ if (fast_aging)
+ hw->overrides |= FAST_AGING;
+
+ hw->mib_cnt = TOTAL_PORT_COUNTER_NUM;
+
+ /* Multiple network device interfaces are required. */
+ if (multi_dev) {
+ hw->dev_count = SWITCH_PORT_NUM;
+ hw->addr_list_size = SWITCH_PORT_NUM - 1;
+ }
+
+ /* Single network device has multiple ports. */
+ if (1 == hw->dev_count) {
+ port_count = SWITCH_PORT_NUM;
+ mib_port_count = SWITCH_PORT_NUM;
+ }
+ hw->mib_port_cnt = TOTAL_PORT_NUM;
+ hw->ksz_switch = kmalloc(sizeof(struct ksz_switch), GFP_KERNEL);
+ if (!hw->ksz_switch)
+ goto pcidev_init_alloc_err;
+ memset(hw->ksz_switch, 0, sizeof(struct ksz_switch));
+
+ sw = hw->ksz_switch;
+ }
+ for (i = 0; i < hw->mib_port_cnt; i++)
+ hw->port_mib[i].mib_start = 0;
+
+ hw->parent = hw_priv;
+
+ /* Default MTU is 1500. */
+ hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
+
+ if (ksz_alloc_mem(hw_priv))
+ goto pcidev_init_mem_err;
+
+ hw_priv->hw.id = net_device_present;
+
+ spin_lock_init(&hw_priv->hwlock);
+ mutex_init(&hw_priv->lock);
+
+ /* The tasklets are created in the enabled state. */
+ tasklet_init(&hw_priv->rx_tasklet, rx_proc_task,
+ (unsigned long) hw_priv);
+ tasklet_init(&hw_priv->tx_tasklet, tx_proc_task,
+ (unsigned long) hw_priv);
+
+ /* Keep them disabled until tasklet_enable decrements the atomic counter. */
+ tasklet_disable(&hw_priv->rx_tasklet);
+ tasklet_disable(&hw_priv->tx_tasklet);
+
+ for (i = 0; i < TOTAL_PORT_NUM; i++)
+ init_waitqueue_head(&hw_priv->counter[i].counter);
+
+ if (macaddr[0] != ':')
+ get_mac_addr(hw_priv, macaddr, MAIN_PORT);
+
+ /* Read the MAC address and initialize the override address if not overridden. */
+ hw_read_addr(hw);
+
+ /* Multiple device interfaces mode requires a second MAC address. */
+ if (hw->dev_count > 1) {
+ memcpy(sw->other_addr, hw->override_addr, MAC_ADDR_LEN);
+ read_other_addr(hw);
+ if (mac1addr[0] != ':')
+ get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
+ }
+
+ hw_setup(hw);
+ if (hw->ksz_switch)
+ sw_setup(hw);
+ else {
+ hw_priv->wol_support = WOL_SUPPORT;
+ hw_priv->wol_enable = 0;
+ }
+
+ INIT_WORK(&hw_priv->mib_read, mib_read_work);
+
+ /* 500 ms timeout */
+ ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
+ mib_monitor, hw_priv);
+
+ for (i = 0; i < hw->dev_count; i++) {
+ dev = alloc_etherdev(sizeof(struct dev_priv));
+ if (!dev)
+ goto pcidev_init_reg_err;
+ info->netdev[i] = dev;
+
+ priv = netdev_priv(dev);
+ priv->adapter = hw_priv;
+ priv->id = net_device_present++;
+
+ port = &priv->port;
+ port->port_cnt = port_count;
+ port->mib_port_cnt = mib_port_count;
+ port->first_port = i;
+ port->flow_ctrl = PHY_FLOW_CTRL;
+
+ port->hw = hw;
+ port->linked = &hw->port_info[port->first_port];
+
+ for (cnt = 0, pi = i; cnt < port_count; cnt++, pi++) {
+ hw->port_info[pi].port_id = pi;
+ hw->port_info[pi].pdev = dev;
+ hw->port_info[pi].state = media_disconnected;
+ }
+
+ dev->mem_start = (unsigned long) hw->io;
+ dev->mem_end = dev->mem_start + reg_len - 1;
+ dev->irq = pdev->irq;
+ if (MAIN_PORT == i)
+ memcpy(dev->dev_addr, hw_priv->hw.override_addr,
+ MAC_ADDR_LEN);
+ else {
+ memcpy(dev->dev_addr, sw->other_addr,
+ MAC_ADDR_LEN);
+ if (!memcmp(sw->other_addr, hw->override_addr,
+ MAC_ADDR_LEN))
+ dev->dev_addr[5] += port->first_port;
+ }
+
+ dev->netdev_ops = &netdev_ops;
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ if (register_netdev(dev))
+ goto pcidev_init_reg_err;
+ port_set_power_saving(port, true);
+ }
+
+ pci_dev_get(hw_priv->pdev);
+ pci_set_drvdata(pdev, info);
+ return 0;
+
+pcidev_init_reg_err:
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ netdev_free(info->netdev[i]);
+ info->netdev[i] = NULL;
+ }
+ }
+
+pcidev_init_mem_err:
+ ksz_free_mem(hw_priv);
+ kfree(hw->ksz_switch);
+
+pcidev_init_alloc_err:
+ iounmap(hw->io);
+
+pcidev_init_io_err:
+ kfree(info);
+
+pcidev_init_dev_err:
+ release_mem_region(reg_base, reg_len);
+
+ return result;
+}
+
+static void pcidev_exit(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+
+ pci_set_drvdata(pdev, NULL);
+
+ release_mem_region(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ for (i = 0; i < hw_priv->hw.dev_count; i++) {
+ if (info->netdev[i])
+ netdev_free(info->netdev[i]);
+ }
+ if (hw_priv->hw.io)
+ iounmap(hw_priv->hw.io);
+ ksz_free_mem(hw_priv);
+ kfree(hw_priv->hw.ksz_switch);
+ pci_dev_put(hw_priv->pdev);
+ kfree(info);
+}
+
+#ifdef CONFIG_PM
+static int pcidev_resume(struct pci_dev *pdev)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ pci_enable_wake(pdev, PCI_D0, 0);
+
+ if (hw_priv->wol_enable)
+ hw_cfg_wol_pme(hw, 0);
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netdev_open(dev);
+ netif_device_attach(dev);
+ }
+ }
+ }
+ return 0;
+}
+
+static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int i;
+ struct platform_info *info = pci_get_drvdata(pdev);
+ struct dev_info *hw_priv = &info->dev_info;
+ struct ksz_hw *hw = &hw_priv->hw;
+
+ /* Need to find a way to retrieve the device IP address. */
+ u8 net_addr[] = { 192, 168, 1, 1 };
+
+ for (i = 0; i < hw->dev_count; i++) {
+ if (info->netdev[i]) {
+ struct net_device *dev = info->netdev[i];
+
+ if (netif_running(dev)) {
+ netif_device_detach(dev);
+ netdev_close(dev);
+ }
+ }
+ }
+ if (hw_priv->wol_enable) {
+ hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
+ hw_cfg_wol_pme(hw, 1);
+ }
+
+ pci_save_state(pdev);
+ pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+#endif
+
+static char pcidev_name[] = "ksz884xp";
+
+static struct pci_device_id pcidev_table[] = {
+ { PCI_VENDOR_ID_MICREL_KS, 0x8841,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_MICREL_KS, 0x8842,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { 0 }
+};
+
+MODULE_DEVICE_TABLE(pci, pcidev_table);
+
+static struct pci_driver pci_device_driver = {
+#ifdef CONFIG_PM
+ .suspend = pcidev_suspend,
+ .resume = pcidev_resume,
+#endif
+ .name = pcidev_name,
+ .id_table = pcidev_table,
+ .probe = pcidev_init,
+ .remove = pcidev_exit
+};
+
+static int __init ksz884x_init_module(void)
+{
+ return pci_register_driver(&pci_device_driver);
+}
+
+static void __exit ksz884x_cleanup_module(void)
+{
+ pci_unregister_driver(&pci_device_driver);
+}
+
+module_init(ksz884x_init_module);
+module_exit(ksz884x_cleanup_module);
+
+MODULE_DESCRIPTION("KSZ8841/2 PCI network driver");
+MODULE_AUTHOR("Tristram Ha <Tristram.Ha@micrel.com>");
+MODULE_LICENSE("GPL");
+
+module_param_named(message, msg_enable, int, 0);
+MODULE_PARM_DESC(message, "Message verbosity level (0=none, 31=all)");
+
+module_param(macaddr, charp, 0);
+module_param(mac1addr, charp, 0);
+module_param(fast_aging, int, 0);
+module_param(multi_dev, int, 0);
+module_param(stp, int, 0);
+MODULE_PARM_DESC(macaddr, "MAC address");
+MODULE_PARM_DESC(mac1addr, "Second MAC address");
+MODULE_PARM_DESC(fast_aging, "Fast aging");
+MODULE_PARM_DESC(multi_dev, "Multiple device interfaces");
+MODULE_PARM_DESC(stp, "STP support");
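+
+/*
+ * A hypothetical invocation, assuming the module is built as ksz884x:
+ * "modprobe ksz884x macaddr=00:10:A1:88:42:01 multi_dev=1 stp=1" would
+ * create one network interface per switch port with STP support enabled.
+ */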
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index 8d7d3d4625f..7b9447646f8 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -1288,7 +1288,7 @@ static void set_multicast_list(struct net_device *dev)
} else {
short multicast_table[4];
int i;
- int num_addrs=dev->mc_count;
+ int num_addrs=netdev_mc_count(dev);
if(dev->flags&IFF_ALLMULTI)
num_addrs=1;
/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index b117f7f8b19..443c39a3732 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -1094,11 +1094,9 @@ static int __devinit i82596_probe(struct net_device *dev)
return i;
};
- DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
- dev->name, dev->base_addr));
- for (i = 0; i < 6; i++)
- DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
- DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
+ DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx, %pM IRQ %d.\n",
+ dev->name, dev->base_addr, dev->dev_addr,
+ dev->irq));
DEB(DEB_INIT, printk(KERN_INFO
"%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
dev->name, dma, (int)sizeof(struct i596_dma),
@@ -1382,31 +1380,32 @@ static void set_multicast_list(struct net_device *dev)
}
}
- cnt = dev->mc_count;
+ cnt = netdev_mc_count(dev);
if (cnt > MAX_MC_CNT) {
cnt = MAX_MC_CNT;
printk(KERN_NOTICE "%s: Only %d multicast addresses supported",
dev->name, cnt);
}
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
unsigned char *cp;
struct mc_cmd *cmd;
cmd = &dma->mc_cmd;
cmd->cmd.command = SWAP16(CmdMulticastList);
- cmd->mc_cnt = SWAP16(dev->mc_count * 6);
+ cmd->mc_cnt = SWAP16(netdev_mc_count(dev) * 6);
cp = cmd->mc_addrs;
- for (dmi = dev->mc_list;
- cnt && dmi != NULL;
- dmi = dmi->next, cnt--, cp += 6) {
+ netdev_for_each_mc_addr(dmi, dev) {
+ if (!cnt--)
+ break;
memcpy(cp, dmi->dmi_addr, 6);
if (i596_debug > 1)
DEB(DEB_MULTI,
printk(KERN_DEBUG
"%s: Adding address %pM\n",
dev->name, cp));
+ cp += 6;
}
DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
i596_add_cmd(dev, &cmd->cmd);
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index 57f25848fe8..56f66f48540 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -907,15 +907,8 @@ static inline void make_mc_bits(u8 *bits, struct net_device *dev)
{
struct dev_mc_list *dmi;
- for (dmi=dev->mc_list; dmi; dmi=dmi->next)
- {
- u32 crc;
- if (dmi->dmi_addrlen != ETH_ALEN)
- {
- printk(KERN_INFO "%s: invalid multicast address length given.\n", dev->name);
- continue;
- }
- crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
+ netdev_for_each_mc_addr(dmi, dev) {
+ u32 crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
/*
* The 8390 uses the 6 most significant bits of the
* CRC to index the multicast table.
@@ -941,7 +934,7 @@ static void do_set_multicast_list(struct net_device *dev)
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
{
memset(ei_local->mcfilter, 0, 8);
- if (dev->mc_list)
+ if (!netdev_mc_empty(dev))
make_mc_bits(ei_local->mcfilter, dev);
}
else
@@ -975,7 +968,7 @@ static void do_set_multicast_list(struct net_device *dev)
if(dev->flags&IFF_PROMISC)
ei_outb_p(E8390_RXCONFIG | 0x18, e8390_base + EN0_RXCR);
- else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+ else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
ei_outb_p(E8390_RXCONFIG | 0x08, e8390_base + EN0_RXCR);
else
ei_outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR);
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index a8522bd73ae..a18e3485476 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -224,6 +224,13 @@ static int temac_set_mac_address(struct net_device *ndev, void *address)
return 0;
}
+static int netdev_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ return temac_set_mac_address(ndev, addr->sa_data);
+}
+
static void temac_set_multicast_list(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
@@ -232,7 +239,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
mutex_lock(&lp->indirect_mutex);
if (ndev->flags & (IFF_ALLMULTI | IFF_PROMISC) ||
- ndev->mc_count > MULTICAST_CAM_TABLE_NUM) {
+ netdev_mc_count(ndev) > MULTICAST_CAM_TABLE_NUM) {
/*
* We must make the kernel realise we had to move
* into promisc mode or we start all out war on
@@ -242,10 +249,11 @@ static void temac_set_multicast_list(struct net_device *ndev)
ndev->flags |= IFF_PROMISC;
temac_indirect_out32(lp, XTE_AFM_OFFSET, XTE_AFM_EPPRM_MASK);
dev_info(&ndev->dev, "Promiscuous mode enabled.\n");
- } else if (ndev->mc_count) {
- struct dev_mc_list *mclist = ndev->mc_list;
- for (i = 0; mclist && i < ndev->mc_count; i++) {
+ } else if (!netdev_mc_empty(ndev)) {
+ struct dev_mc_list *mclist;
+ i = 0;
+ netdev_for_each_mc_addr(mclist, ndev) {
if (i >= MULTICAST_CAM_TABLE_NUM)
break;
multi_addr_msw = ((mclist->dmi_addr[3] << 24) |
@@ -258,7 +266,7 @@ static void temac_set_multicast_list(struct net_device *ndev)
(mclist->dmi_addr[4]) | (i << 16));
temac_indirect_out32(lp, XTE_MAW1_OFFSET,
multi_addr_lsw);
- mclist = mclist->next;
+ i++;
}
} else {
val = temac_indirect_in32(lp, XTE_AFM_OFFSET);
@@ -615,7 +623,7 @@ static void ll_temac_recv(struct net_device *ndev)
while ((bdstat & STS_CTRL_APP0_CMPLT)) {
skb = lp->rx_skb[lp->rx_bd_ci];
- length = cur_p->app4;
+ length = cur_p->app4 & 0x3FFF;
skb_vaddr = virt_to_bus(skb->data);
dma_unmap_single(ndev->dev.parent, skb_vaddr, length,
@@ -768,7 +776,7 @@ static const struct net_device_ops temac_netdev_ops = {
.ndo_open = temac_open,
.ndo_stop = temac_stop,
.ndo_start_xmit = temac_start_xmit,
- .ndo_set_mac_address = temac_set_mac_address,
+ .ndo_set_mac_address = netdev_set_mac_address,
//.ndo_set_multicast_list = temac_set_multicast_list,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = temac_poll_controller,
@@ -938,6 +946,9 @@ static int __devexit temac_of_remove(struct of_device *op)
static struct of_device_id temac_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-ll-temac-1.01.b", },
+ { .compatible = "xlnx,xps-ll-temac-2.00.a", },
+ { .compatible = "xlnx,xps-ll-temac-2.02.a", },
+ { .compatible = "xlnx,xps-ll-temac-2.03.a", },
{},
};
MODULE_DEVICE_TABLE(of, temac_of_match);
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index b9fcc981983..72b7949c91b 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -72,7 +72,8 @@ struct pcpu_lstats {
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct pcpu_lstats *pcpu_lstats, *lb_stats;
+ struct pcpu_lstats __percpu *pcpu_lstats;
+ struct pcpu_lstats *lb_stats;
int len;
skb_orphan(skb);
@@ -80,7 +81,7 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
skb->protocol = eth_type_trans(skb, dev);
/* it's OK to use per_cpu_ptr() because BHs are off */
- pcpu_lstats = dev->ml_priv;
+ pcpu_lstats = (void __percpu __force *)dev->ml_priv;
lb_stats = this_cpu_ptr(pcpu_lstats);
len = skb->len;
@@ -95,14 +96,14 @@ static netdev_tx_t loopback_xmit(struct sk_buff *skb,
static struct net_device_stats *loopback_get_stats(struct net_device *dev)
{
- const struct pcpu_lstats *pcpu_lstats;
+ const struct pcpu_lstats __percpu *pcpu_lstats;
struct net_device_stats *stats = &dev->stats;
unsigned long bytes = 0;
unsigned long packets = 0;
unsigned long drops = 0;
int i;
- pcpu_lstats = dev->ml_priv;
+ pcpu_lstats = (void __percpu __force *)dev->ml_priv;
for_each_possible_cpu(i) {
const struct pcpu_lstats *lb_stats;
@@ -135,19 +136,20 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
- struct pcpu_lstats *lstats;
+ struct pcpu_lstats __percpu *lstats;
lstats = alloc_percpu(struct pcpu_lstats);
if (!lstats)
return -ENOMEM;
- dev->ml_priv = lstats;
+ dev->ml_priv = (void __force *)lstats;
return 0;
}
static void loopback_dev_free(struct net_device *dev)
{
- struct pcpu_lstats *lstats = dev->ml_priv;
+ struct pcpu_lstats __percpu *lstats =
+ (void __percpu __force *)dev->ml_priv;
free_percpu(lstats);
free_netdev(dev);
diff --git a/drivers/net/lp486e.c b/drivers/net/lp486e.c
index e20fefc73c8..3e3cc04defd 100644
--- a/drivers/net/lp486e.c
+++ b/drivers/net/lp486e.c
@@ -1253,21 +1253,22 @@ static void set_multicast_list(struct net_device *dev) {
if (i596_debug > 1)
printk ("%s: set multicast list %d\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
- if (dev->mc_count > 0) {
+ if (!netdev_mc_empty(dev)) {
struct dev_mc_list *dmi;
char *cp;
- cmd = kmalloc(sizeof(struct i596_cmd)+2+dev->mc_count*6, GFP_ATOMIC);
+ cmd = kmalloc(sizeof(struct i596_cmd) + 2 +
+ netdev_mc_count(dev) * 6, GFP_ATOMIC);
if (cmd == NULL) {
printk (KERN_ERR "%s: set_multicast Memory squeeze.\n", dev->name);
return;
}
cmd->command = CmdMulticastList;
- *((unsigned short *) (cmd + 1)) = dev->mc_count * 6;
+ *((unsigned short *) (cmd + 1)) = netdev_mc_count(dev) * 6;
cp = ((char *)(cmd + 1))+2;
- for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
- memcpy(cp, dmi,6);
+ netdev_for_each_mc_addr(dmi, dev) {
+ memcpy(cp, dmi->dmi_addr, 6);
cp += 6;
}
if (i596_debug & LOG_SRCDST)
@@ -1277,7 +1278,8 @@ static void set_multicast_list(struct net_device *dev) {
if (lp->set_conf.pa_next != I596_NULL) {
return;
}
- if (dev->mc_count == 0 && !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
+ if (netdev_mc_empty(dev) &&
+ !(dev->flags & (IFF_PROMISC | IFF_ALLMULTI))) {
lp->i596_config[8] &= ~0x01;
} else {
lp->i596_config[8] |= 0x01;
diff --git a/drivers/net/mac8390.c b/drivers/net/mac8390.c
index f8fa0c3f0f6..a8768672dc5 100644
--- a/drivers/net/mac8390.c
+++ b/drivers/net/mac8390.c
@@ -17,6 +17,8 @@
/* 2002-12-30: Try to support more cards, some clues from NetBSD driver */
/* 2003-12-26: Make sure Asante cards always work. */
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -34,31 +36,36 @@
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
+#include <linux/io.h>
#include <asm/system.h>
-#include <asm/io.h>
#include <asm/dma.h>
#include <asm/hwtest.h>
#include <asm/macints.h>
static char version[] =
- "mac8390.c: v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
+ "v0.4 2001-05-15 David Huggins-Daines <dhd@debian.org> and others\n";
#define EI_SHIFT(x) (ei_local->reg_offset[x])
-#define ei_inb(port) in_8(port)
-#define ei_outb(val,port) out_8(port,val)
-#define ei_inb_p(port) in_8(port)
-#define ei_outb_p(val,port) out_8(port,val)
+#define ei_inb(port) in_8(port)
+#define ei_outb(val, port) out_8(port, val)
+#define ei_inb_p(port) in_8(port)
+#define ei_outb_p(val, port) out_8(port, val)
#include "lib8390.c"
#define WD_START_PG 0x00 /* First page of TX buffer */
#define CABLETRON_RX_START_PG 0x00 /* First page of RX buffer */
#define CABLETRON_RX_STOP_PG 0x30 /* Last page +1 of RX ring */
-#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG /* First page of TX buffer */
+#define CABLETRON_TX_START_PG CABLETRON_RX_STOP_PG
+ /* First page of TX buffer */
-/* Unfortunately it seems we have to hardcode these for the moment */
-/* Shouldn't the card know about this? Does anyone know where to read it off the card? Do we trust the data provided by the card? */
+/*
+ * Unfortunately it seems we have to hardcode these for the moment
+ * Shouldn't the card know about this?
+ * Does anyone know where to read it off the card?
+ * Do we trust the data provided by the card?
+ */
#define DAYNA_8390_BASE 0x80000
#define DAYNA_8390_MEM 0x00000
@@ -80,7 +87,7 @@ enum mac8390_type {
MAC8390_KINETICS,
};
-static const char * cardname[] = {
+static const char *cardname[] = {
"apple",
"asante",
"farallon",
@@ -90,7 +97,7 @@ static const char * cardname[] = {
"kinetics",
};
-static int word16[] = {
+static const int word16[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -101,7 +108,7 @@ static int word16[] = {
};
/* on which cards do we use NuBus resources? */
-static int useresources[] = {
+static const int useresources[] = {
1, /* apple */
1, /* asante */
1, /* farallon */
@@ -117,22 +124,22 @@ enum mac8390_access {
ACCESS_16,
};
-extern int mac8390_memtest(struct net_device * dev);
-static int mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
+extern int mac8390_memtest(struct net_device *dev);
+static int mac8390_initdev(struct net_device *dev, struct nubus_dev *ndev,
enum mac8390_type type);
-static int mac8390_open(struct net_device * dev);
-static int mac8390_close(struct net_device * dev);
+static int mac8390_open(struct net_device *dev);
+static int mac8390_close(struct net_device *dev);
static void mac8390_no_reset(struct net_device *dev);
static void interlan_reset(struct net_device *dev);
/* Sane (32-bit chunk memory read/write) - Some Farallon and Apple do this*/
static void sane_get_8390_hdr(struct net_device *dev,
struct e8390_pkt_hdr *hdr, int ring_page);
-static void sane_block_input(struct net_device * dev, int count,
- struct sk_buff * skb, int ring_offset);
-static void sane_block_output(struct net_device * dev, int count,
- const unsigned char * buf, const int start_page);
+static void sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset);
+static void sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, const int start_page);
/* dayna_memcpy to and from card */
static void dayna_memcpy_fromcard(struct net_device *dev, void *to,
@@ -148,8 +155,8 @@ static void dayna_block_input(struct net_device *dev, int count,
static void dayna_block_output(struct net_device *dev, int count,
const unsigned char *buf, int start_page);
-#define memcpy_fromio(a,b,c) memcpy((a),(void *)(b),(c))
-#define memcpy_toio(a,b,c) memcpy((void *)(a),(b),(c))
+#define memcpy_fromio(a, b, c) memcpy((a), (void *)(b), (c))
+#define memcpy_toio(a, b, c) memcpy((void *)(a), (b), (c))
/* Slow Sane (16-bit chunk memory read/write) Cabletron uses this */
static void slow_sane_get_8390_hdr(struct net_device *dev,
@@ -164,70 +171,72 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count);
static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
{
switch (dev->dr_sw) {
- case NUBUS_DRSW_3COM:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_APPLE_SONIC_NB:
- case NUBUS_DRHW_APPLE_SONIC_LC:
- case NUBUS_DRHW_SONNET:
- return MAC8390_NONE;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ case NUBUS_DRSW_3COM:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_APPLE_SONIC_NB:
+ case NUBUS_DRHW_APPLE_SONIC_LC:
+ case NUBUS_DRHW_SONNET:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_APPLE:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_ASANTE_LC:
- return MAC8390_NONE;
- break;
- case NUBUS_DRHW_CABLETRON:
- return MAC8390_CABLETRON;
- break;
- default:
- return MAC8390_APPLE;
- break;
- }
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_ASANTE:
- return MAC8390_ASANTE;
+ case NUBUS_DRSW_APPLE:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_ASANTE_LC:
+ return MAC8390_NONE;
break;
-
- case NUBUS_DRSW_TECHWORKS:
- case NUBUS_DRSW_DAYNA2:
- case NUBUS_DRSW_DAYNA_LC:
- if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
- return MAC8390_CABLETRON;
- else
- return MAC8390_APPLE;
+ case NUBUS_DRHW_CABLETRON:
+ return MAC8390_CABLETRON;
break;
-
- case NUBUS_DRSW_FARALLON:
- return MAC8390_FARALLON;
+ default:
+ return MAC8390_APPLE;
break;
+ }
+ break;
- case NUBUS_DRSW_KINETICS:
- switch (dev->dr_hw) {
- case NUBUS_DRHW_INTERLAN:
- return MAC8390_INTERLAN;
- break;
- default:
- return MAC8390_KINETICS;
- break;
- }
- break;
+ case NUBUS_DRSW_ASANTE:
+ return MAC8390_ASANTE;
+ break;
- case NUBUS_DRSW_DAYNA:
- // These correspond to Dayna Sonic cards
- // which use the macsonic driver
- if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
- dev->dr_hw == NUBUS_DRHW_INTERLAN )
- return MAC8390_NONE;
- else
- return MAC8390_DAYNA;
+ case NUBUS_DRSW_TECHWORKS:
+ case NUBUS_DRSW_DAYNA2:
+ case NUBUS_DRSW_DAYNA_LC:
+ if (dev->dr_hw == NUBUS_DRHW_CABLETRON)
+ return MAC8390_CABLETRON;
+ else
+ return MAC8390_APPLE;
+ break;
+
+ case NUBUS_DRSW_FARALLON:
+ return MAC8390_FARALLON;
+ break;
+
+ case NUBUS_DRSW_KINETICS:
+ switch (dev->dr_hw) {
+ case NUBUS_DRHW_INTERLAN:
+ return MAC8390_INTERLAN;
+ break;
+ default:
+ return MAC8390_KINETICS;
break;
+ }
+ break;
+
+ case NUBUS_DRSW_DAYNA:
+ /*
+ * These correspond to Dayna Sonic cards
+ * which use the macsonic driver
+ */
+ if (dev->dr_hw == NUBUS_DRHW_SMC9194 ||
+ dev->dr_hw == NUBUS_DRHW_INTERLAN)
+ return MAC8390_NONE;
+ else
+ return MAC8390_DAYNA;
+ break;
}
return MAC8390_NONE;
}
@@ -237,14 +246,14 @@ static enum mac8390_access __init mac8390_testio(volatile unsigned long membase)
unsigned long outdata = 0xA5A0B5B0;
unsigned long indata = 0x00000000;
/* Try writing 32 bits */
- memcpy((char *)membase, (char *)&outdata, 4);
+ memcpy(membase, &outdata, 4);
/* Now compare them */
if (memcmp((char *)&outdata, (char *)membase, 4) == 0)
return ACCESS_32;
/* Write 16 bit output */
- word_memcpy_tocard((char *)membase, (char *)&outdata, 4);
+ word_memcpy_tocard(membase, &outdata, 4);
/* Now read it back */
- word_memcpy_fromcard((char *)&indata, (char *)membase, 4);
+ word_memcpy_fromcard(&indata, membase, 4);
if (outdata == indata)
return ACCESS_16;
return ACCESS_UNKNOWN;
@@ -258,7 +267,7 @@ static int __init mac8390_memsize(unsigned long membase)
local_irq_save(flags);
/* Check up to 32K in 4K increments */
for (i = 0; i < 8; i++) {
- volatile unsigned short *m = (unsigned short *) (membase + (i * 0x1000));
+ volatile unsigned short *m = (unsigned short *)(membase + (i * 0x1000));
/* Unwriteable - we have a fully decoded card and the
RAM end located */
@@ -273,28 +282,127 @@ static int __init mac8390_memsize(unsigned long membase)
/* check for partial decode and wrap */
for (j = 0; j < i; j++) {
- volatile unsigned short *p = (unsigned short *) (membase + (j * 0x1000));
+ volatile unsigned short *p = (unsigned short *)(membase + (j * 0x1000));
if (*p != (0xA5A0 | j))
break;
- }
- }
+ }
+ }
local_irq_restore(flags);
- /* in any case, we stopped once we tried one block too many,
- or once we reached 32K */
- return i * 0x1000;
+ /*
+ * in any case, we stopped once we tried one block too many,
+ * or once we reached 32K
+ */
+ return i * 0x1000;
+}
+
+static bool __init mac8390_init(struct net_device *dev, struct nubus_dev *ndev,
+ enum mac8390_type cardtype)
+{
+ struct nubus_dir dir;
+ struct nubus_dirent ent;
+ int offset;
+ volatile unsigned short *i;
+
+ printk_once(KERN_INFO pr_fmt("%s"), version);
+
+ dev->irq = SLOT2IRQ(ndev->board->slot);
+ /* This is getting to be a habit */
+ dev->base_addr = (ndev->board->slot_addr |
+ ((ndev->board->slot & 0xf) << 20));
+
+ /*
+ * Get some Nubus info - we will trust the card's idea
+ * of where its memory and registers are.
+ */
+
+ if (nubus_get_func_dir(ndev, &dir) == -1) {
+ pr_err("%s: Unable to get Nubus functional directory for slot %X!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+
+ /* Get the MAC address */
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent) == -1) {
+ pr_info("%s: Couldn't get MAC address!\n", dev->name);
+ return false;
+ }
+
+ nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
+
+ if (useresources[cardtype] == 1) {
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS,
+ &ent) == -1) {
+ pr_err("%s: Memory offset resource for slot %X not found!\n",
+ dev->name, ndev->board->slot);
+ return false;
+ }
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ dev->mem_start = dev->base_addr + offset;
+ /* yes, this is how the Apple driver does it */
+ dev->base_addr = dev->mem_start + 0x10000;
+ nubus_rewinddir(&dir);
+ if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH,
+ &ent) == -1) {
+ pr_info("%s: Memory length resource for slot %X not found, probing\n",
+ dev->name, ndev->board->slot);
+ offset = mac8390_memsize(dev->mem_start);
+ } else {
+ nubus_get_rsrc_mem(&offset, &ent, 4);
+ }
+ dev->mem_end = dev->mem_start + offset;
+ } else {
+ switch (cardtype) {
+ case MAC8390_KINETICS:
+ case MAC8390_DAYNA: /* it's the same */
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ DAYNA_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ DAYNA_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_INTERLAN:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ INTERLAN_8390_MEM);
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+ case MAC8390_CABLETRON:
+ dev->base_addr = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_BASE);
+ dev->mem_start = (int)(ndev->board->slot_addr +
+ CABLETRON_8390_MEM);
+ /* The base address is unreadable if 0x00
+ * has been written to the command register
+ * Reset the chip by writing E8390_NODMA +
+ * E8390_PAGE0 + E8390_STOP just to be
+ * sure
+ */
+ i = (void *)dev->base_addr;
+ *i = 0x21;
+ dev->mem_end = dev->mem_start +
+ mac8390_memsize(dev->mem_start);
+ break;
+
+ default:
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
+ return false;
+ }
+ }
+
+ return true;
}
struct net_device * __init mac8390_probe(int unit)
{
struct net_device *dev;
- volatile unsigned short *i;
- int version_disp = 0;
- struct nubus_dev * ndev = NULL;
+ struct nubus_dev *ndev = NULL;
int err = -ENODEV;
- struct nubus_dir dir;
- struct nubus_dirent ent;
- int offset;
static unsigned int slots;
enum mac8390_type cardtype;
@@ -311,118 +419,19 @@ struct net_device * __init mac8390_probe(int unit)
if (unit >= 0)
sprintf(dev->name, "eth%d", unit);
- while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET, ndev))) {
+ while ((ndev = nubus_find_type(NUBUS_CAT_NETWORK, NUBUS_TYPE_ETHERNET,
+ ndev))) {
/* Have we seen it already? */
- if (slots & (1<<ndev->board->slot))
+ if (slots & (1 << ndev->board->slot))
continue;
- slots |= 1<<ndev->board->slot;
+ slots |= 1 << ndev->board->slot;
- if ((cardtype = mac8390_ident(ndev)) == MAC8390_NONE)
+ cardtype = mac8390_ident(ndev);
+ if (cardtype == MAC8390_NONE)
continue;
- if (version_disp == 0) {
- version_disp = 1;
- printk(version);
- }
-
- dev->irq = SLOT2IRQ(ndev->board->slot);
- /* This is getting to be a habit */
- dev->base_addr = ndev->board->slot_addr | ((ndev->board->slot&0xf) << 20);
-
- /* Get some Nubus info - we will trust the card's idea
- of where its memory and registers are. */
-
- if (nubus_get_func_dir(ndev, &dir) == -1) {
- printk(KERN_ERR "%s: Unable to get Nubus functional"
- " directory for slot %X!\n",
- dev->name, ndev->board->slot);
+ if (!mac8390_init(dev, ndev, cardtype))
continue;
- }
-
- /* Get the MAC address */
- if ((nubus_find_rsrc(&dir, NUBUS_RESID_MAC_ADDRESS, &ent)) == -1) {
- printk(KERN_INFO "%s: Couldn't get MAC address!\n",
- dev->name);
- continue;
- } else {
- nubus_get_rsrc_mem(dev->dev_addr, &ent, 6);
- }
-
- if (useresources[cardtype] == 1) {
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_BASEOS, &ent) == -1) {
- printk(KERN_ERR "%s: Memory offset resource"
- " for slot %X not found!\n",
- dev->name, ndev->board->slot);
- continue;
- }
- nubus_get_rsrc_mem(&offset, &ent, 4);
- dev->mem_start = dev->base_addr + offset;
- /* yes, this is how the Apple driver does it */
- dev->base_addr = dev->mem_start + 0x10000;
- nubus_rewinddir(&dir);
- if (nubus_find_rsrc(&dir, NUBUS_RESID_MINOR_LENGTH, &ent) == -1) {
- printk(KERN_INFO "%s: Memory length resource"
- " for slot %X not found"
- ", probing\n",
- dev->name, ndev->board->slot);
- offset = mac8390_memsize(dev->mem_start);
- } else {
- nubus_get_rsrc_mem(&offset, &ent, 4);
- }
- dev->mem_end = dev->mem_start + offset;
- } else {
- switch (cardtype) {
- case MAC8390_KINETICS:
- case MAC8390_DAYNA: /* it's the same */
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- DAYNA_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_INTERLAN:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- INTERLAN_8390_MEM);
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
- case MAC8390_CABLETRON:
- dev->base_addr =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_BASE);
- dev->mem_start =
- (int)(ndev->board->slot_addr +
- CABLETRON_8390_MEM);
- /* The base address is unreadable if 0x00
- * has been written to the command register
- * Reset the chip by writing E8390_NODMA +
- * E8390_PAGE0 + E8390_STOP just to be
- * sure
- */
- i = (void *)dev->base_addr;
- *i = 0x21;
- dev->mem_end =
- dev->mem_start +
- mac8390_memsize(dev->mem_start);
- break;
-
- default:
- printk(KERN_ERR "Card type %s is"
- " unsupported, sorry\n",
- ndev->board->name);
- continue;
- }
- }
/* Do the nasty 8390 stuff */
if (!mac8390_initdev(dev, ndev, cardtype))
@@ -458,7 +467,7 @@ int init_module(void)
dev_mac890[i] = dev;
}
if (!i) {
- printk(KERN_NOTICE "mac8390.c: No useable cards found, driver NOT installed.\n");
+ pr_notice("No usable cards found, driver NOT installed.\n");
return -ENODEV;
}
return 0;
@@ -493,22 +502,23 @@ static const struct net_device_ops mac8390_netdev_ops = {
#endif
};
-static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * ndev,
- enum mac8390_type type)
+static int __init mac8390_initdev(struct net_device *dev,
+ struct nubus_dev *ndev,
+ enum mac8390_type type)
{
- static u32 fwrd4_offsets[16]={
+ static u32 fwrd4_offsets[16] = {
0, 4, 8, 12,
16, 20, 24, 28,
32, 36, 40, 44,
48, 52, 56, 60
};
- static u32 back4_offsets[16]={
+ static u32 back4_offsets[16] = {
60, 56, 52, 48,
44, 40, 36, 32,
28, 24, 20, 16,
12, 8, 4, 0
};
- static u32 fwrd2_offsets[16]={
+ static u32 fwrd2_offsets[16] = {
0, 2, 4, 6,
8, 10, 12, 14,
16, 18, 20, 22,
@@ -526,47 +536,47 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
/* Cabletron's TX/RX buffers are backwards */
if (type == MAC8390_CABLETRON) {
- ei_status.tx_start_page = CABLETRON_TX_START_PG;
- ei_status.rx_start_page = CABLETRON_RX_START_PG;
- ei_status.stop_page = CABLETRON_RX_STOP_PG;
- ei_status.rmem_start = dev->mem_start;
- ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
+ ei_status.tx_start_page = CABLETRON_TX_START_PG;
+ ei_status.rx_start_page = CABLETRON_RX_START_PG;
+ ei_status.stop_page = CABLETRON_RX_STOP_PG;
+ ei_status.rmem_start = dev->mem_start;
+ ei_status.rmem_end = dev->mem_start + CABLETRON_RX_STOP_PG*256;
} else {
- ei_status.tx_start_page = WD_START_PG;
- ei_status.rx_start_page = WD_START_PG + TX_PAGES;
- ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
- ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
- ei_status.rmem_end = dev->mem_end;
+ ei_status.tx_start_page = WD_START_PG;
+ ei_status.rx_start_page = WD_START_PG + TX_PAGES;
+ ei_status.stop_page = (dev->mem_end - dev->mem_start)/256;
+ ei_status.rmem_start = dev->mem_start + TX_PAGES*256;
+ ei_status.rmem_end = dev->mem_end;
}
/* Fill in model-specific information and functions */
- switch(type) {
+ switch (type) {
case MAC8390_FARALLON:
case MAC8390_APPLE:
- switch(mac8390_testio(dev->mem_start)) {
- case ACCESS_UNKNOWN:
- printk("Don't know how to access card memory!\n");
- return -ENODEV;
- break;
+ switch (mac8390_testio(dev->mem_start)) {
+ case ACCESS_UNKNOWN:
+ pr_info("Don't know how to access card memory!\n");
+ return -ENODEV;
+ break;
- case ACCESS_16:
- /* 16 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &slow_sane_block_input;
- ei_status.block_output = &slow_sane_block_output;
- ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- break;
+ case ACCESS_16:
+ /* 16 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &slow_sane_block_input;
+ ei_status.block_output = &slow_sane_block_output;
+ ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ break;
- case ACCESS_32:
- /* 32 bit card, register map is reversed */
- ei_status.reset_8390 = &mac8390_no_reset;
- ei_status.block_input = &sane_block_input;
- ei_status.block_output = &sane_block_output;
- ei_status.get_8390_hdr = &sane_get_8390_hdr;
- ei_status.reg_offset = back4_offsets;
- access_bitmode = 1;
- break;
+ case ACCESS_32:
+ /* 32 bit card, register map is reversed */
+ ei_status.reset_8390 = &mac8390_no_reset;
+ ei_status.block_input = &sane_block_input;
+ ei_status.block_output = &sane_block_output;
+ ei_status.get_8390_hdr = &sane_get_8390_hdr;
+ ei_status.reg_offset = back4_offsets;
+ access_bitmode = 1;
+ break;
}
break;
@@ -608,24 +618,25 @@ static int __init mac8390_initdev(struct net_device * dev, struct nubus_dev * nd
ei_status.block_input = &slow_sane_block_input;
ei_status.block_output = &slow_sane_block_output;
ei_status.get_8390_hdr = &slow_sane_get_8390_hdr;
- ei_status.reg_offset = fwrd4_offsets;
- break;
+ ei_status.reg_offset = fwrd4_offsets;
+ break;
default:
- printk(KERN_ERR "Card type %s is unsupported, sorry\n", ndev->board->name);
+ pr_err("Card type %s is unsupported, sorry\n",
+ ndev->board->name);
return -ENODEV;
}
__NS8390_init(dev, 0);
/* Good, done, now spit out some messages */
- printk(KERN_INFO "%s: %s in slot %X (type %s)\n",
- dev->name, ndev->board->name, ndev->board->slot, cardname[type]);
- printk(KERN_INFO
- "MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
- dev->dev_addr, dev->irq,
- (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
- dev->mem_start, access_bitmode ? 32 : 16);
+ pr_info("%s: %s in slot %X (type %s)\n",
+ dev->name, ndev->board->name, ndev->board->slot,
+ cardname[type]);
+ pr_info("MAC %pM IRQ %d, %d KB shared memory at %#lx, %d-bit access.\n",
+ dev->dev_addr, dev->irq,
+ (unsigned int)(dev->mem_end - dev->mem_start) >> 10,
+ dev->mem_start, access_bitmode ? 32 : 16);
return 0;
}
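In the non-Cabletron branch of mac8390_initdev above, the ring layout falls out of the shared-memory size alone: stop_page is just the number of 256-byte pages, and the receive ring starts after the transmit pages. A rough worked example, assuming a 16 KB card and a placeholder TX_PAGES value (the real one comes from 8390.h, not from this patch):

/* Sketch only: TX_PAGES of 12 is an assumption, see 8390.h for the real value. */
#include <stdio.h>

#define PAGE_BYTES 256
#define TX_PAGES   12

int main(void)
{
	unsigned long mem_start = 0;		/* pretend shared RAM starts at 0 */
	unsigned long mem_end = 16 * 1024;	/* 16 KB card */

	unsigned stop_page = (mem_end - mem_start) / PAGE_BYTES;	/* 64 */
	unsigned long rmem_start = mem_start + TX_PAGES * PAGE_BYTES;
	unsigned long rmem_end = mem_end;

	printf("stop_page=%u rx ring=%lu..%lu\n", stop_page, rmem_start, rmem_end);
	return 0;
}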
@@ -633,7 +644,7 @@ static int mac8390_open(struct net_device *dev)
{
__ei_open(dev);
if (request_irq(dev->irq, __ei_interrupt, 0, "8390 Ethernet", dev)) {
- printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ pr_info("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
return -EAGAIN;
}
return 0;
@@ -650,72 +661,71 @@ static void mac8390_no_reset(struct net_device *dev)
{
ei_status.txing = 0;
if (ei_debug > 1)
- printk("reset not supported\n");
+ pr_info("reset not supported\n");
return;
}
static void interlan_reset(struct net_device *dev)
{
- unsigned char *target=nubus_slot_addr(IRQ2SLOT(dev->irq));
+ unsigned char *target = nubus_slot_addr(IRQ2SLOT(dev->irq));
if (ei_debug > 1)
- printk("Need to reset the NS8390 t=%lu...", jiffies);
+ pr_info("Need to reset the NS8390 t=%lu...", jiffies);
ei_status.txing = 0;
target[0xC0000] = 0;
if (ei_debug > 1)
- printk("reset complete\n");
+ pr_cont("reset complete\n");
return;
}
/* dayna_memcpy_fromio/dayna_memcpy_toio */
/* directly from daynaport.c by Alan Cox */
-static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from, int count)
+static void dayna_memcpy_fromcard(struct net_device *dev, void *to, int from,
+ int count)
{
volatile unsigned char *ptr;
- unsigned char *target=to;
- from<<=1; /* word, skip overhead */
- ptr=(unsigned char *)(dev->mem_start+from);
+ unsigned char *target = to;
+ from <<= 1; /* word, skip overhead */
+ ptr = (unsigned char *)(dev->mem_start+from);
/* Leading byte? */
- if (from&2) {
+ if (from & 2) {
*target++ = ptr[-1];
ptr += 2;
count--;
}
- while(count>=2)
- {
+ while (count >= 2) {
*(unsigned short *)target = *(unsigned short volatile *)ptr;
ptr += 4; /* skip cruft */
target += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
+ if (count)
*target = *ptr;
}
-static void dayna_memcpy_tocard(struct net_device *dev, int to, const void *from, int count)
+static void dayna_memcpy_tocard(struct net_device *dev, int to,
+ const void *from, int count)
{
volatile unsigned short *ptr;
- const unsigned char *src=from;
- to<<=1; /* word, skip overhead */
- ptr=(unsigned short *)(dev->mem_start+to);
+ const unsigned char *src = from;
+ to <<= 1; /* word, skip overhead */
+ ptr = (unsigned short *)(dev->mem_start+to);
/* Leading byte? */
- if (to&2) { /* avoid a byte write (stomps on other data) */
+ if (to & 2) { /* avoid a byte write (stomps on other data) */
ptr[-1] = (ptr[-1]&0xFF00)|*src++;
ptr++;
count--;
}
- while(count>=2)
- {
- *ptr++=*(unsigned short *)src; /* Copy and */
+ while (count >= 2) {
+ *ptr++ = *(unsigned short *)src; /* Copy and */
ptr++; /* skip cruft */
src += 2;
- count-=2;
+ count -= 2;
}
/* Trailing byte? */
- if(count)
- {
+ if (count) {
/* card doesn't like byte writes */
- *ptr=(*ptr&0x00FF)|(*src << 8);
+ *ptr = (*ptr & 0x00FF) | (*src << 8);
}
}
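dayna_memcpy_fromcard()/dayna_memcpy_tocard() above address the Dayna card as one 16-bit word per host longword, which is why the card offset is shifted left once and the pointer is bumped past two bytes of "cruft" after every useful word. A toy user-space model of that address mapping:

/* Toy model of the Dayna mapping used by the copy loops above. */
#include <stdio.h>

/* card byte offset -> host byte offset: each card word occupies 4 host bytes */
static unsigned long dayna_host_offset(unsigned long card_off)
{
	unsigned long word = card_off / 2;	/* which 16-bit word on the card */
	unsigned long byte = card_off % 2;	/* byte within that word */

	return word * 4 + byte;
}

int main(void)
{
	unsigned long off;

	for (off = 0; off < 8; off++)
		printf("card %lu -> host %lu\n", off, dayna_host_offset(off));
	return 0;
}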
@@ -738,11 +748,14 @@ static void sane_block_input(struct net_device *dev, int count,
if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, semi_count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
- memcpy_toio(skb->data + semi_count, (char *)ei_status.rmem_start, count);
+ memcpy_toio(skb->data + semi_count,
+ (char *)ei_status.rmem_start, count);
} else {
- memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base, count);
+ memcpy_fromio(skb->data, (char *)dev->mem_start + xfer_base,
+ count);
}
}
@@ -755,16 +768,18 @@ static void sane_block_output(struct net_device *dev, int count,
}
/* dayna block input/output */
-static void dayna_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
+static void dayna_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr, int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- dayna_memcpy_fromcard(dev, (void *)hdr, hdr_start, 4);
+ dayna_memcpy_fromcard(dev, hdr, hdr_start, 4);
/* Fix endianness */
- hdr->count=(hdr->count&0xFF)<<8|(hdr->count>>8);
+ hdr->count = (hdr->count & 0xFF) << 8 | (hdr->count >> 8);
}
-static void dayna_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
+static void dayna_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
@@ -772,8 +787,7 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
/* Note the offset math is done in card memory space which is word
per long onto our space. */
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
dayna_memcpy_fromcard(dev, skb->data, xfer_base, semi_count);
@@ -781,15 +795,14 @@ static void dayna_block_input(struct net_device *dev, int count, struct sk_buff
dayna_memcpy_fromcard(dev, skb->data + semi_count,
ei_status.rmem_start - dev->mem_start,
count);
- }
- else
- {
+ } else {
dayna_memcpy_fromcard(dev, skb->data, xfer_base, count);
}
}
-static void dayna_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void dayna_block_output(struct net_device *dev, int count,
+ const unsigned char *buf,
+ int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -797,40 +810,39 @@ static void dayna_block_output(struct net_device *dev, int count, const unsigned
}
/* Cabletron block I/O */
-static void slow_sane_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr,
- int ring_page)
+static void slow_sane_get_8390_hdr(struct net_device *dev,
+ struct e8390_pkt_hdr *hdr,
+ int ring_page)
{
unsigned long hdr_start = (ring_page - WD_START_PG)<<8;
- word_memcpy_fromcard((void *)hdr, (char *)dev->mem_start+hdr_start, 4);
+ word_memcpy_fromcard(hdr, (char *)dev->mem_start + hdr_start, 4);
/* Register endianism - fix here rather than 8390.c */
hdr->count = (hdr->count&0xFF)<<8|(hdr->count>>8);
}
-static void slow_sane_block_input(struct net_device *dev, int count, struct sk_buff *skb,
- int ring_offset)
+static void slow_sane_block_input(struct net_device *dev, int count,
+ struct sk_buff *skb, int ring_offset)
{
unsigned long xfer_base = ring_offset - (WD_START_PG<<8);
unsigned long xfer_start = xfer_base+dev->mem_start;
- if (xfer_start + count > ei_status.rmem_end)
- {
+ if (xfer_start + count > ei_status.rmem_end) {
/* We must wrap the input move. */
int semi_count = ei_status.rmem_end - xfer_start;
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, semi_count);
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base,
+ semi_count);
count -= semi_count;
word_memcpy_fromcard(skb->data + semi_count,
(char *)ei_status.rmem_start, count);
- }
- else
- {
- word_memcpy_fromcard(skb->data, (char *)dev->mem_start +
- xfer_base, count);
+ } else {
+ word_memcpy_fromcard(skb->data,
+ (char *)dev->mem_start + xfer_base, count);
}
}
-static void slow_sane_block_output(struct net_device *dev, int count, const unsigned char *buf,
- int start_page)
+static void slow_sane_block_output(struct net_device *dev, int count,
+ const unsigned char *buf, int start_page)
{
long shmem = (start_page - WD_START_PG)<<8;
@@ -843,10 +855,10 @@ static void word_memcpy_tocard(void *tp, const void *fp, int count)
const unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
static void word_memcpy_fromcard(void *tp, const void *fp, int count)
@@ -855,10 +867,10 @@ static void word_memcpy_fromcard(void *tp, const void *fp, int count)
const volatile unsigned short *from = fp;
count++;
- count/=2;
+ count /= 2;
- while(count--)
- *to++=*from++;
+ while (count--)
+ *to++ = *from++;
}
diff --git a/drivers/net/mac89x0.c b/drivers/net/mac89x0.c
index 23b633e2ac4..c292a608f9a 100644
--- a/drivers/net/mac89x0.c
+++ b/drivers/net/mac89x0.c
@@ -568,9 +568,7 @@ static void set_multicast_list(struct net_device *dev)
if(dev->flags&IFF_PROMISC)
{
lp->rx_mode = RX_ALL_ACCEPT;
- }
- else if((dev->flags&IFF_ALLMULTI)||dev->mc_list)
- {
+ } else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
/* The multicast-accept list is initialized to accept-all, and we
rely on higher-level filtering for now. */
lp->rx_mode = RX_MULTCAST_ACCEPT;
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index 1d0d4d9ab62..c8a18a6203c 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -189,18 +189,11 @@ static void macb_handle_link_change(struct net_device *dev)
static int macb_mii_probe(struct net_device *dev)
{
struct macb *bp = netdev_priv(dev);
- struct phy_device *phydev = NULL;
+ struct phy_device *phydev;
struct eth_platform_data *pdata;
- int phy_addr;
-
- /* find the first phy */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (bp->mii_bus->phy_map[phy_addr]) {
- phydev = bp->mii_bus->phy_map[phy_addr];
- break;
- }
- }
+ int ret;
+ phydev = phy_find_first(bp->mii_bus);
if (!phydev) {
printk (KERN_ERR "%s: no PHY found\n", dev->name);
return -1;
@@ -210,17 +203,13 @@ static int macb_mii_probe(struct net_device *dev)
/* TODO : add pin_irq */
/* attach the mac to the phy */
- if (pdata && pdata->is_rmii) {
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &macb_handle_link_change, 0, PHY_INTERFACE_MODE_RMII);
- } else {
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &macb_handle_link_change, 0, PHY_INTERFACE_MODE_MII);
- }
-
- if (IS_ERR(phydev)) {
+ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0,
+ pdata && pdata->is_rmii ?
+ PHY_INTERFACE_MODE_RMII :
+ PHY_INTERFACE_MODE_MII);
+ if (ret) {
printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
+ return ret;
}
/* mask with MAC supported features */
@@ -895,15 +884,12 @@ static void macb_sethashtable(struct net_device *dev)
{
struct dev_mc_list *curr;
unsigned long mc_filter[2];
- unsigned int i, bitnr;
+ unsigned int bitnr;
struct macb *bp = netdev_priv(dev);
mc_filter[0] = mc_filter[1] = 0;
- curr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, curr = curr->next) {
- if (!curr) break; /* unexpected end of list */
-
+ netdev_for_each_mc_addr(curr, dev) {
bitnr = hash_get_index(curr->dmi_addr);
mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
}
@@ -934,7 +920,7 @@ static void macb_set_rx_mode(struct net_device *dev)
macb_writel(bp, HRB, -1);
macb_writel(bp, HRT, -1);
cfg |= MACB_BIT(NCFGR_MTI);
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enable specific multicasts */
macb_sethashtable(dev);
cfg |= MACB_BIT(NCFGR_MTI);
diff --git a/drivers/net/mace.c b/drivers/net/mace.c
index 43aea91e336..ab5f0bf6d1a 100644
--- a/drivers/net/mace.c
+++ b/drivers/net/mace.c
@@ -588,7 +588,7 @@ static void mace_set_multicast(struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace __iomem *mb = mp->mace;
- int i, j;
+ int i;
u32 crc;
unsigned long flags;
@@ -598,7 +598,7 @@ static void mace_set_multicast(struct net_device *dev)
mp->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++)
@@ -606,11 +606,10 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
crc = ether_crc_le(6, dmi->dmi_addr);
- j = crc >> 26; /* bit number in multicast_filter */
- multicast_filter[j >> 3] |= 1 << (j & 7);
- dmi = dmi->next;
+ i = crc >> 26; /* bit number in multicast_filter */
+ multicast_filter[i >> 3] |= 1 << (i & 7);
}
}
#if 0
diff --git a/drivers/net/macmace.c b/drivers/net/macmace.c
index 79408c37787..13ba8f4afb7 100644
--- a/drivers/net/macmace.c
+++ b/drivers/net/macmace.c
@@ -495,7 +495,7 @@ static void mace_set_multicast(struct net_device *dev)
{
struct mace_data *mp = netdev_priv(dev);
volatile struct mace *mb = mp->mace;
- int i, j;
+ int i;
u32 crc;
u8 maccc;
unsigned long flags;
@@ -508,7 +508,7 @@ static void mace_set_multicast(struct net_device *dev)
mb->maccc |= PROM;
} else {
unsigned char multicast_filter[8];
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 8; i++) {
@@ -517,11 +517,11 @@ static void mace_set_multicast(struct net_device *dev)
} else {
for (i = 0; i < 8; i++)
multicast_filter[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
crc = ether_crc_le(6, dmi->dmi_addr);
- j = crc >> 26; /* bit number in multicast_filter */
- multicast_filter[j >> 3] |= 1 << (j & 7);
- dmi = dmi->next;
+ /* bit number in multicast_filter */
+ i = crc >> 26;
+ multicast_filter[i >> 3] |= 1 << (i & 7);
}
}
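Both multicast loops above index the MACE 64-bit logical address filter with the top six bits of the little-endian CRC of each address. A small example of just the indexing arithmetic; the CRC value is made up and merely stands in for ether_crc_le():

/* Illustration of the filter-bit indexing only; 0xDEADBEEF is not a real CRC. */
#include <stdio.h>

int main(void)
{
	unsigned int crc = 0xDEADBEEF;	/* pretend ether_crc_le() result */
	unsigned char filter[8] = { 0 };
	unsigned int bit = crc >> 26;	/* top 6 bits -> bit number 0..63 */

	filter[bit >> 3] |= 1 << (bit & 7);	/* set that bit in the filter */

	printf("bit %u -> filter[%u] = 0x%02x\n", bit, bit >> 3, filter[bit >> 3]);
	return 0;
}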
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 21a9c9ab4b3..40faa368b07 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -39,31 +39,6 @@ struct macvlan_port {
struct list_head vlans;
};
-/**
- * struct macvlan_rx_stats - MACVLAN percpu rx stats
- * @rx_packets: number of received packets
- * @rx_bytes: number of received bytes
- * @multicast: number of received multicast packets
- * @rx_errors: number of errors
- */
-struct macvlan_rx_stats {
- unsigned long rx_packets;
- unsigned long rx_bytes;
- unsigned long multicast;
- unsigned long rx_errors;
-};
-
-struct macvlan_dev {
- struct net_device *dev;
- struct list_head list;
- struct hlist_node hlist;
- struct macvlan_port *port;
- struct net_device *lowerdev;
- struct macvlan_rx_stats *rx_stats;
- enum macvlan_mode mode;
-};
-
-
static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port,
const unsigned char *addr)
{
@@ -118,31 +93,17 @@ static int macvlan_addr_busy(const struct macvlan_port *port,
return 0;
}
-static inline void macvlan_count_rx(const struct macvlan_dev *vlan,
- unsigned int len, bool success,
- bool multicast)
-{
- struct macvlan_rx_stats *rx_stats;
-
- rx_stats = per_cpu_ptr(vlan->rx_stats, smp_processor_id());
- if (likely(success)) {
- rx_stats->rx_packets++;;
- rx_stats->rx_bytes += len;
- if (multicast)
- rx_stats->multicast++;
- } else {
- rx_stats->rx_errors++;
- }
-}
-static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
+static int macvlan_broadcast_one(struct sk_buff *skb,
+ const struct macvlan_dev *vlan,
const struct ethhdr *eth, bool local)
{
+ struct net_device *dev = vlan->dev;
if (!skb)
return NET_RX_DROP;
if (local)
- return dev_forward_skb(dev, skb);
+ return vlan->forward(dev, skb);
skb->dev = dev;
if (!compare_ether_addr_64bits(eth->h_dest,
@@ -151,7 +112,7 @@ static int macvlan_broadcast_one(struct sk_buff *skb, struct net_device *dev,
else
skb->pkt_type = PACKET_MULTICAST;
- return netif_rx(skb);
+ return vlan->receive(skb);
}
static void macvlan_broadcast(struct sk_buff *skb,
@@ -175,7 +136,7 @@ static void macvlan_broadcast(struct sk_buff *skb,
continue;
nskb = skb_clone(skb, GFP_ATOMIC);
- err = macvlan_broadcast_one(nskb, vlan->dev, eth,
+ err = macvlan_broadcast_one(nskb, vlan, eth,
mode == MACVLAN_MODE_BRIDGE);
macvlan_count_rx(vlan, skb->len + ETH_HLEN,
err == NET_RX_SUCCESS, 1);
@@ -238,7 +199,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
skb->dev = dev;
skb->pkt_type = PACKET_HOST;
- netif_rx(skb);
+ vlan->receive(skb);
return NULL;
}
@@ -260,7 +221,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
dest = macvlan_hash_lookup(port, eth->h_dest);
if (dest && dest->mode == MACVLAN_MODE_BRIDGE) {
unsigned int length = skb->len + ETH_HLEN;
- int ret = dev_forward_skb(dest->dev, skb);
+ int ret = dest->forward(dest->dev, skb);
macvlan_count_rx(dest, length,
ret == NET_RX_SUCCESS, 0);
@@ -269,12 +230,12 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
}
xmit_world:
- skb->dev = vlan->lowerdev;
+ skb_set_dev(skb, vlan->lowerdev);
return dev_queue_xmit(skb);
}
-static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
- struct net_device *dev)
+netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
{
int i = skb_get_queue_mapping(skb);
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
@@ -290,6 +251,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
return ret;
}
+EXPORT_SYMBOL_GPL(macvlan_start_xmit);
static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *daddr,
@@ -418,7 +380,7 @@ static struct lock_class_key macvlan_netdev_addr_lock_key;
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
- NETIF_F_TSO_ECN | NETIF_F_TSO6)
+ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO)
#define MACVLAN_STATE_MASK \
((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
@@ -623,8 +585,11 @@ static int macvlan_get_tx_queues(struct net *net,
return 0;
}
-static int macvlan_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
+int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ int (*receive)(struct sk_buff *skb),
+ int (*forward)(struct net_device *dev,
+ struct sk_buff *skb))
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port;
@@ -664,6 +629,8 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
vlan->lowerdev = lowerdev;
vlan->dev = dev;
vlan->port = port;
+ vlan->receive = receive;
+ vlan->forward = forward;
vlan->mode = MACVLAN_MODE_VEPA;
if (data && data[IFLA_MACVLAN_MODE])
@@ -677,8 +644,17 @@ static int macvlan_newlink(struct net *src_net, struct net_device *dev,
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
}
+EXPORT_SYMBOL_GPL(macvlan_common_newlink);
-static void macvlan_dellink(struct net_device *dev, struct list_head *head)
+static int macvlan_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ return macvlan_common_newlink(src_net, dev, tb, data,
+ netif_rx,
+ dev_forward_skb);
+}
+
+void macvlan_dellink(struct net_device *dev, struct list_head *head)
{
struct macvlan_dev *vlan = netdev_priv(dev);
struct macvlan_port *port = vlan->port;
@@ -689,6 +665,7 @@ static void macvlan_dellink(struct net_device *dev, struct list_head *head)
if (list_empty(&port->vlans))
macvlan_port_destroy(port->dev);
}
+EXPORT_SYMBOL_GPL(macvlan_dellink);
static int macvlan_changelink(struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
@@ -720,19 +697,27 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
[IFLA_MACVLAN_MODE] = { .type = NLA_U32 },
};
-static struct rtnl_link_ops macvlan_link_ops __read_mostly = {
+int macvlan_link_register(struct rtnl_link_ops *ops)
+{
+ /* common fields */
+ ops->priv_size = sizeof(struct macvlan_dev);
+ ops->get_tx_queues = macvlan_get_tx_queues;
+ ops->setup = macvlan_setup;
+ ops->validate = macvlan_validate;
+ ops->maxtype = IFLA_MACVLAN_MAX;
+ ops->policy = macvlan_policy;
+ ops->changelink = macvlan_changelink;
+ ops->get_size = macvlan_get_size;
+ ops->fill_info = macvlan_fill_info;
+
+ return rtnl_link_register(ops);
+}
+EXPORT_SYMBOL_GPL(macvlan_link_register);
+
+static struct rtnl_link_ops macvlan_link_ops = {
.kind = "macvlan",
- .priv_size = sizeof(struct macvlan_dev),
- .get_tx_queues = macvlan_get_tx_queues,
- .setup = macvlan_setup,
- .validate = macvlan_validate,
.newlink = macvlan_newlink,
.dellink = macvlan_dellink,
- .maxtype = IFLA_MACVLAN_MAX,
- .policy = macvlan_policy,
- .changelink = macvlan_changelink,
- .get_size = macvlan_get_size,
- .fill_info = macvlan_fill_info,
};
static int macvlan_device_event(struct notifier_block *unused,
@@ -761,7 +746,7 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_UNREGISTER:
list_for_each_entry_safe(vlan, next, &port->vlans, list)
- macvlan_dellink(vlan->dev, NULL);
+ vlan->dev->rtnl_link_ops->dellink(vlan->dev, NULL);
break;
}
return NOTIFY_DONE;
@@ -778,7 +763,7 @@ static int __init macvlan_init_module(void)
register_netdevice_notifier(&macvlan_notifier_block);
macvlan_handle_frame_hook = macvlan_handle_frame;
- err = rtnl_link_register(&macvlan_link_ops);
+ err = macvlan_link_register(&macvlan_link_ops);
if (err < 0)
goto err1;
return 0;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
new file mode 100644
index 00000000000..55ceae09738
--- /dev/null
+++ b/drivers/net/macvtap.c
@@ -0,0 +1,803 @@
+#include <linux/etherdevice.h>
+#include <linux/if_macvlan.h>
+#include <linux/interrupt.h>
+#include <linux/nsproxy.h>
+#include <linux/compat.h>
+#include <linux/if_tun.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/wait.h>
+#include <linux/cdev.h>
+#include <linux/fs.h>
+
+#include <net/net_namespace.h>
+#include <net/rtnetlink.h>
+#include <net/sock.h>
+#include <linux/virtio_net.h>
+
+/*
+ * A macvtap queue is the central object of this driver, it connects
+ * an open character device to a macvlan interface. There can be
+ * multiple queues on one interface, which map back to queues
+ * implemented in hardware on the underlying device.
+ *
+ * macvtap_proto is used to allocate queues through the sock allocation
+ * mechanism.
+ *
+ * TODO: multiqueue support is currently not implemented, even though
+ * macvtap is basically prepared for that. We will need to add this
+ * here as well as in virtio-net and qemu to get line rate on 10gbit
+ * adapters from a guest.
+ */
+struct macvtap_queue {
+ struct sock sk;
+ struct socket sock;
+ struct macvlan_dev *vlan;
+ struct file *file;
+ unsigned int flags;
+};
+
+static struct proto macvtap_proto = {
+ .name = "macvtap",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct macvtap_queue),
+};
+
+/*
+ * Minor number matches netdev->ifindex, so we need a potentially
+ * large value. This also makes it possible to split the
+ * tap functionality out again in the future by offering it
+ * from other drivers besides macvtap. As long as every device
+ * only has one tap, the interface numbers assure that the
+ * device nodes are unique.
+ */
+static unsigned int macvtap_major;
+#define MACVTAP_NUM_DEVS 65536
+static struct class *macvtap_class;
+static struct cdev macvtap_cdev;
+
+static const struct proto_ops macvtap_socket_ops;
+
+/*
+ * RCU usage:
+ * The macvtap_queue and the macvlan_dev are loosely coupled, the
+ * pointers from one to the other can only be read while rcu_read_lock
+ * or macvtap_lock is held.
+ *
+ * Both the file and the macvlan_dev hold a reference on the macvtap_queue
+ * through sock_hold(&q->sk). When the macvlan_dev goes away first,
+ * q->vlan becomes inaccessible. When the file gets closed,
+ * macvtap_get_queue() fails.
+ *
+ * There may still be references to the struct sock inside of the
+ * queue from outbound SKBs, but these never reference back to the
+ * file or the dev. The data structure is freed through __sk_free
+ * when both our references and any pending SKBs are gone.
+ */
+static DEFINE_SPINLOCK(macvtap_lock);
+
+/*
+ * Choose the next free queue, for now there is only one
+ */
+static int macvtap_set_queue(struct net_device *dev, struct file *file,
+ struct macvtap_queue *q)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ int err = -EBUSY;
+
+ spin_lock(&macvtap_lock);
+ if (rcu_dereference(vlan->tap))
+ goto out;
+
+ err = 0;
+ rcu_assign_pointer(q->vlan, vlan);
+ rcu_assign_pointer(vlan->tap, q);
+ sock_hold(&q->sk);
+
+ q->file = file;
+ file->private_data = q;
+
+out:
+ spin_unlock(&macvtap_lock);
+ return err;
+}
+
+/*
+ * The file owning the queue got closed, give up both
+ * the reference that the file holds as well as the
+ * one from the macvlan_dev if that still exists.
+ *
+ * Using the spinlock makes sure that we don't get
+ * to the queue again after destroying it.
+ */
+static void macvtap_put_queue(struct macvtap_queue *q)
+{
+ struct macvlan_dev *vlan;
+
+ spin_lock(&macvtap_lock);
+ vlan = rcu_dereference(q->vlan);
+ if (vlan) {
+ rcu_assign_pointer(vlan->tap, NULL);
+ rcu_assign_pointer(q->vlan, NULL);
+ sock_put(&q->sk);
+ }
+
+ spin_unlock(&macvtap_lock);
+
+ synchronize_rcu();
+ sock_put(&q->sk);
+}
+
+/*
+ * Since we only support one queue, just dereference the pointer.
+ */
+static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+
+ return rcu_dereference(vlan->tap);
+}
+
+/*
+ * The net_device is going away, give up the reference
+ * that it holds on the queue (all the queues one day)
+ * and safely set the pointer from the queues to NULL.
+ */
+static void macvtap_del_queues(struct net_device *dev)
+{
+ struct macvlan_dev *vlan = netdev_priv(dev);
+ struct macvtap_queue *q;
+
+ spin_lock(&macvtap_lock);
+ q = rcu_dereference(vlan->tap);
+ if (!q) {
+ spin_unlock(&macvtap_lock);
+ return;
+ }
+
+ rcu_assign_pointer(vlan->tap, NULL);
+ rcu_assign_pointer(q->vlan, NULL);
+ spin_unlock(&macvtap_lock);
+
+ synchronize_rcu();
+ sock_put(&q->sk);
+}
+
+/*
+ * Forward happens for data that gets sent from one macvlan
+ * endpoint to another one in bridge mode. We just take
+ * the skb and put it into the receive queue.
+ */
+static int macvtap_forward(struct net_device *dev, struct sk_buff *skb)
+{
+ struct macvtap_queue *q = macvtap_get_queue(dev, skb);
+ if (!q)
+ return -ENOLINK;
+
+ skb_queue_tail(&q->sk.sk_receive_queue, skb);
+ wake_up_interruptible_poll(q->sk.sk_sleep, POLLIN | POLLRDNORM | POLLRDBAND);
+ return 0;
+}
+
+/*
+ * Receive is for data from the external interface (lowerdev),
+ * in the case of macvtap, we can treat it the same way as
+ * forward, which macvlan cannot.
+ */
+static int macvtap_receive(struct sk_buff *skb)
+{
+ skb_push(skb, ETH_HLEN);
+ return macvtap_forward(skb->dev, skb);
+}
+
+static int macvtap_newlink(struct net *src_net,
+ struct net_device *dev,
+ struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct device *classdev;
+ dev_t devt;
+ int err;
+
+ err = macvlan_common_newlink(src_net, dev, tb, data,
+ macvtap_receive, macvtap_forward);
+ if (err)
+ goto out;
+
+ devt = MKDEV(MAJOR(macvtap_major), dev->ifindex);
+
+ classdev = device_create(macvtap_class, &dev->dev, devt,
+ dev, "tap%d", dev->ifindex);
+ if (IS_ERR(classdev)) {
+ err = PTR_ERR(classdev);
+ macvtap_del_queues(dev);
+ }
+
+out:
+ return err;
+}
+
+static void macvtap_dellink(struct net_device *dev,
+ struct list_head *head)
+{
+ device_destroy(macvtap_class,
+ MKDEV(MAJOR(macvtap_major), dev->ifindex));
+
+ macvtap_del_queues(dev);
+ macvlan_dellink(dev, head);
+}
+
+static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
+ .kind = "macvtap",
+ .newlink = macvtap_newlink,
+ .dellink = macvtap_dellink,
+};
+
+
+static void macvtap_sock_write_space(struct sock *sk)
+{
+ if (!sock_writeable(sk) ||
+ !test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
+ return;
+
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_poll(sk->sk_sleep, POLLOUT | POLLWRNORM | POLLWRBAND);
+}
+
+static int macvtap_open(struct inode *inode, struct file *file)
+{
+ struct net *net = current->nsproxy->net_ns;
+ struct net_device *dev = dev_get_by_index(net, iminor(inode));
+ struct macvtap_queue *q;
+ int err;
+
+ err = -ENODEV;
+ if (!dev)
+ goto out;
+
+ /* check if this is a macvtap device */
+ err = -EINVAL;
+ if (dev->rtnl_link_ops != &macvtap_link_ops)
+ goto out;
+
+ err = -ENOMEM;
+ q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
+ &macvtap_proto);
+ if (!q)
+ goto out;
+
+ init_waitqueue_head(&q->sock.wait);
+ q->sock.type = SOCK_RAW;
+ q->sock.state = SS_CONNECTED;
+ q->sock.file = file;
+ q->sock.ops = &macvtap_socket_ops;
+ sock_init_data(&q->sock, &q->sk);
+ q->sk.sk_write_space = macvtap_sock_write_space;
+ q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
+
+ err = macvtap_set_queue(dev, file, q);
+ if (err)
+ sock_put(&q->sk);
+
+out:
+ if (dev)
+ dev_put(dev);
+
+ return err;
+}
+
+static int macvtap_release(struct inode *inode, struct file *file)
+{
+ struct macvtap_queue *q = file->private_data;
+ macvtap_put_queue(q);
+ return 0;
+}
+
+static unsigned int macvtap_poll(struct file *file, poll_table *wait)
+{
+ struct macvtap_queue *q = file->private_data;
+ unsigned int mask = POLLERR;
+
+ if (!q)
+ goto out;
+
+ mask = 0;
+ poll_wait(file, &q->sock.wait, wait);
+
+ if (!skb_queue_empty(&q->sk.sk_receive_queue))
+ mask |= POLLIN | POLLRDNORM;
+
+ if (sock_writeable(&q->sk) ||
+ (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &q->sock.flags) &&
+ sock_writeable(&q->sk)))
+ mask |= POLLOUT | POLLWRNORM;
+
+out:
+ return mask;
+}
+
+static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
+ size_t len, size_t linear,
+ int noblock, int *err)
+{
+ struct sk_buff *skb;
+
+ /* Under a page? Don't bother with paged skb. */
+ if (prepad + len < PAGE_SIZE || !linear)
+ linear = len;
+
+ skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
+ err);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, prepad);
+ skb_put(skb, linear);
+ skb->data_len = len - linear;
+ skb->len += len - linear;
+
+ return skb;
+}
+
+/*
+ * macvtap_skb_from_vnet_hdr and macvtap_skb_to_vnet_hdr should
+ * be shared with the tun/tap driver.
+ */
+static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
+ struct virtio_net_hdr *vnet_hdr)
+{
+ unsigned short gso_type = 0;
+ if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ switch (vnet_hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
+ case VIRTIO_NET_HDR_GSO_TCPV4:
+ gso_type = SKB_GSO_TCPV4;
+ break;
+ case VIRTIO_NET_HDR_GSO_TCPV6:
+ gso_type = SKB_GSO_TCPV6;
+ break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (vnet_hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
+ gso_type |= SKB_GSO_TCP_ECN;
+
+ if (vnet_hdr->gso_size == 0)
+ return -EINVAL;
+ }
+
+ if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
+ if (!skb_partial_csum_set(skb, vnet_hdr->csum_start,
+ vnet_hdr->csum_offset))
+ return -EINVAL;
+ }
+
+ if (vnet_hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
+ skb_shinfo(skb)->gso_size = vnet_hdr->gso_size;
+ skb_shinfo(skb)->gso_type = gso_type;
+
+ /* Header must be checked, and gso_segs computed. */
+ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
+ skb_shinfo(skb)->gso_segs = 0;
+ }
+ return 0;
+}
+
+static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
+ struct virtio_net_hdr *vnet_hdr)
+{
+ memset(vnet_hdr, 0, sizeof(*vnet_hdr));
+
+ if (skb_is_gso(skb)) {
+ struct skb_shared_info *sinfo = skb_shinfo(skb);
+
+ /* This is a hint as to how much should be linear. */
+ vnet_hdr->hdr_len = skb_headlen(skb);
+ vnet_hdr->gso_size = sinfo->gso_size;
+ if (sinfo->gso_type & SKB_GSO_TCPV4)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
+ else if (sinfo->gso_type & SKB_GSO_TCPV6)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
+ else if (sinfo->gso_type & SKB_GSO_UDP)
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
+ else
+ BUG();
+ if (sinfo->gso_type & SKB_GSO_TCP_ECN)
+ vnet_hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
+ } else
+ vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
+ vnet_hdr->csum_start = skb->csum_start -
+ skb_headroom(skb);
+ vnet_hdr->csum_offset = skb->csum_offset;
+ } /* else everything is zero */
+
+ return 0;
+}
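macvtap_skb_to_vnet_hdr() above produces the struct virtio_net_hdr that userspace sees in front of every frame when IFF_VNET_HDR is set. A hedged illustration of what such a header might carry for a checksum-offloaded TCPv4 GSO frame; only the field names come from <linux/virtio_net.h>, the numbers are invented:

/* Example values only - not taken from a real capture. */
#include <linux/virtio_net.h>
#include <stdio.h>

int main(void)
{
	struct virtio_net_hdr hdr = {
		.flags       = VIRTIO_NET_HDR_F_NEEDS_CSUM,
		.gso_type    = VIRTIO_NET_HDR_GSO_TCPV4,
		.hdr_len     = 66,	/* eth + IPv4 + TCP headers, e.g. 14+20+32 */
		.gso_size    = 1448,	/* MSS the receiver should segment to */
		.csum_start  = 34,	/* checksum coverage starts at the TCP header */
		.csum_offset = 16,	/* offset of the checksum field within TCP */
	};

	printf("gso_type=%u gso_size=%u\n", hdr.gso_type, hdr.gso_size);
	return 0;
}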
+
+
+/* Get packet from user space buffer */
+static ssize_t macvtap_get_user(struct macvtap_queue *q,
+ const struct iovec *iv, size_t count,
+ int noblock)
+{
+ struct sk_buff *skb;
+ struct macvlan_dev *vlan;
+ size_t len = count;
+ int err;
+ struct virtio_net_hdr vnet_hdr = { 0 };
+ int vnet_hdr_len = 0;
+
+ if (q->flags & IFF_VNET_HDR) {
+ vnet_hdr_len = sizeof(vnet_hdr);
+
+ err = -EINVAL;
+ /* len is a size_t, so test before subtracting to avoid wrapping */
+ if (len < vnet_hdr_len)
+ goto err;
+ len -= vnet_hdr_len;
+
+ err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0,
+ vnet_hdr_len);
+ if (err < 0)
+ goto err;
+ if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
+ vnet_hdr.csum_start + vnet_hdr.csum_offset + 2 >
+ vnet_hdr.hdr_len)
+ vnet_hdr.hdr_len = vnet_hdr.csum_start +
+ vnet_hdr.csum_offset + 2;
+ err = -EINVAL;
+ if (vnet_hdr.hdr_len > len)
+ goto err;
+ }
+
+ err = -EINVAL;
+ if (unlikely(len < ETH_HLEN))
+ goto err;
+
+ skb = macvtap_alloc_skb(&q->sk, NET_IP_ALIGN, len, vnet_hdr.hdr_len,
+ noblock, &err);
+ if (!skb)
+ goto err;
+
+ err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len, len);
+ if (err)
+ goto err_kfree;
+
+ skb_set_network_header(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+ skb->protocol = eth_hdr(skb)->h_proto;
+
+ if (vnet_hdr_len) {
+ err = macvtap_skb_from_vnet_hdr(skb, &vnet_hdr);
+ if (err)
+ goto err_kfree;
+ }
+
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ macvlan_start_xmit(skb, vlan->dev);
+ else
+ kfree_skb(skb);
+ rcu_read_unlock_bh();
+
+ return count;
+
+err_kfree:
+ kfree_skb(skb);
+
+err:
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ netdev_get_tx_queue(vlan->dev, 0)->tx_dropped++;
+ rcu_read_unlock_bh();
+
+ return err;
+}
+
+static ssize_t macvtap_aio_write(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ ssize_t result = -ENOLINK;
+ struct macvtap_queue *q = file->private_data;
+
+ result = macvtap_get_user(q, iv, iov_length(iv, count),
+ file->f_flags & O_NONBLOCK);
+ return result;
+}
+
+/* Put packet to the user space buffer */
+static ssize_t macvtap_put_user(struct macvtap_queue *q,
+ const struct sk_buff *skb,
+ const struct iovec *iv, int len)
+{
+ struct macvlan_dev *vlan;
+ int ret;
+ int vnet_hdr_len = 0;
+
+ if (q->flags & IFF_VNET_HDR) {
+ struct virtio_net_hdr vnet_hdr;
+ vnet_hdr_len = sizeof(vnet_hdr);
+ if ((len -= vnet_hdr_len) < 0)
+ return -EINVAL;
+
+ ret = macvtap_skb_to_vnet_hdr(skb, &vnet_hdr);
+ if (ret)
+ return ret;
+
+ if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, vnet_hdr_len))
+ return -EFAULT;
+ }
+
+ len = min_t(int, skb->len, len);
+
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, vnet_hdr_len, len);
+
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ macvlan_count_rx(vlan, len, ret == 0, 0);
+ rcu_read_unlock_bh();
+
+ return ret ? ret : (len + vnet_hdr_len);
+}
+
+static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
+ const struct iovec *iv, unsigned long len,
+ int noblock)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ struct sk_buff *skb;
+ ssize_t ret = 0;
+
+ add_wait_queue(q->sk.sk_sleep, &wait);
+ while (len) {
+ current->state = TASK_INTERRUPTIBLE;
+
+ /* Read frames from the queue */
+ skb = skb_dequeue(&q->sk.sk_receive_queue);
+ if (!skb) {
+ if (noblock) {
+ ret = -EAGAIN;
+ break;
+ }
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+ /* Nothing to read, let's sleep */
+ schedule();
+ continue;
+ }
+ ret = macvtap_put_user(q, skb, iv, len);
+ kfree_skb(skb);
+ break;
+ }
+
+ current->state = TASK_RUNNING;
+ remove_wait_queue(q->sk.sk_sleep, &wait);
+ return ret;
+}
+
+static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct macvtap_queue *q = file->private_data;
+ ssize_t len, ret = 0;
+
+ len = iov_length(iv, count);
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
+out:
+ return ret;
+}
+
+/*
+ * provide compatibility with generic tun/tap interface
+ */
+static long macvtap_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct macvtap_queue *q = file->private_data;
+ struct macvlan_dev *vlan;
+ void __user *argp = (void __user *)arg;
+ struct ifreq __user *ifr = argp;
+ unsigned int __user *up = argp;
+ unsigned int u;
+ int ret;
+
+ switch (cmd) {
+ case TUNSETIFF:
+ /* ignore the name, just look at flags */
+ if (get_user(u, &ifr->ifr_flags))
+ return -EFAULT;
+
+ ret = 0;
+ if ((u & ~IFF_VNET_HDR) != (IFF_NO_PI | IFF_TAP))
+ ret = -EINVAL;
+ else
+ q->flags = u;
+
+ return ret;
+
+ case TUNGETIFF:
+ rcu_read_lock_bh();
+ vlan = rcu_dereference(q->vlan);
+ if (vlan)
+ dev_hold(vlan->dev);
+ rcu_read_unlock_bh();
+
+ if (!vlan)
+ return -ENOLINK;
+
+ ret = 0;
+ if (copy_to_user(&ifr->ifr_name, q->vlan->dev->name, IFNAMSIZ) ||
+ put_user(q->flags, &ifr->ifr_flags))
+ ret = -EFAULT;
+ dev_put(vlan->dev);
+ return ret;
+
+ case TUNGETFEATURES:
+ if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR, up))
+ return -EFAULT;
+ return 0;
+
+ case TUNSETSNDBUF:
+ if (get_user(u, up))
+ return -EFAULT;
+
+ q->sk.sk_sndbuf = u;
+ return 0;
+
+ case TUNSETOFFLOAD:
+ /* let the user check for future flags */
+ if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
+ TUN_F_TSO_ECN | TUN_F_UFO))
+ return -EINVAL;
+
+ /* TODO: only accept frames with the features that
+ got enabled for forwarded frames */
+ if (!(q->flags & IFF_VNET_HDR))
+ return -EINVAL;
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations macvtap_fops = {
+ .owner = THIS_MODULE,
+ .open = macvtap_open,
+ .release = macvtap_release,
+ .aio_read = macvtap_aio_read,
+ .aio_write = macvtap_aio_write,
+ .poll = macvtap_poll,
+ .llseek = no_llseek,
+ .unlocked_ioctl = macvtap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = macvtap_compat_ioctl,
+#endif
+};
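The ioctl handler and file operations above deliberately mimic the generic tun/tap character-device interface. A hedged user-space sketch of opening one of the tapN nodes created by macvtap_newlink() and setting the flags; the /dev/tap5 path is an assumption about how the device node ends up being named:

/* Sketch under assumptions: /dev/tap5 is the node for ifindex 5. */
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <string.h>
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/tap5", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI | IFF_VNET_HDR;
	if (ioctl(fd, TUNSETIFF, &ifr) < 0)	/* name is ignored, only flags matter */
		perror("TUNSETIFF");

	/* read()/write() now move whole ethernet frames, each prefixed with a
	 * struct virtio_net_hdr because IFF_VNET_HDR is set */
	close(fd);
	return 0;
}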
+
+static int macvtap_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
+ return macvtap_get_user(q, m->msg_iov, total_len,
+ m->msg_flags & MSG_DONTWAIT);
+}
+
+static int macvtap_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags)
+{
+ struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
+ int ret;
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+ return -EINVAL;
+ ret = macvtap_do_read(q, iocb, m->msg_iov, total_len,
+ flags & MSG_DONTWAIT);
+ if (ret > total_len) {
+ m->msg_flags |= MSG_TRUNC;
+ ret = flags & MSG_TRUNC ? ret : total_len;
+ }
+ return ret;
+}
+
+/* Ops structure to mimic raw sockets with tun */
+static const struct proto_ops macvtap_socket_ops = {
+ .sendmsg = macvtap_sendmsg,
+ .recvmsg = macvtap_recvmsg,
+};
+
+/* Get an underlying socket object from a macvtap file. Returns error unless file is
+ * attached to a device. The returned object works like a packet socket, it
+ * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
+ * holding a reference to the file for as long as the socket is in use. */
+struct socket *macvtap_get_socket(struct file *file)
+{
+ struct macvtap_queue *q;
+ if (file->f_op != &macvtap_fops)
+ return ERR_PTR(-EINVAL);
+ q = file->private_data;
+ if (!q)
+ return ERR_PTR(-EBADFD);
+ return &q->sock;
+}
+EXPORT_SYMBOL_GPL(macvtap_get_socket);
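macvtap_get_socket() above lets an in-kernel consumer (a vhost-net style backend) drive the queue through the socket ops instead of read/write on the fd. A hedged, incomplete sketch of that calling pattern; it is not a working module, the declaration and call site are assumptions, and the caller is assumed to already hold a reference on the file:

/* Hedged sketch of an in-kernel consumer; not a complete module. */
#include <linux/file.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/err.h>

extern struct socket *macvtap_get_socket(struct file *file);

static int pull_one_frame(struct file *tapfile, struct msghdr *msg, size_t len)
{
	struct socket *sock = macvtap_get_socket(tapfile);

	if (IS_ERR(sock))
		return PTR_ERR(sock);

	/* The queue behaves like a packet socket: recvmsg dequeues one frame.
	 * A NULL kiocb is fine here since macvtap_recvmsg() above ignores it. */
	return sock->ops->recvmsg(NULL, sock, msg, len, MSG_DONTWAIT);
}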
+
+static int macvtap_init(void)
+{
+ int err;
+
+ err = alloc_chrdev_region(&macvtap_major, 0,
+ MACVTAP_NUM_DEVS, "macvtap");
+ if (err)
+ goto out1;
+
+ cdev_init(&macvtap_cdev, &macvtap_fops);
+ err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
+ if (err)
+ goto out2;
+
+ macvtap_class = class_create(THIS_MODULE, "macvtap");
+ if (IS_ERR(macvtap_class)) {
+ err = PTR_ERR(macvtap_class);
+ goto out3;
+ }
+
+ err = macvlan_link_register(&macvtap_link_ops);
+ if (err)
+ goto out4;
+
+ return 0;
+
+out4:
+ class_unregister(macvtap_class);
+out3:
+ cdev_del(&macvtap_cdev);
+out2:
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+out1:
+ return err;
+}
+module_init(macvtap_init);
+
+static void macvtap_exit(void)
+{
+ rtnl_link_unregister(&macvtap_link_ops);
+ class_unregister(macvtap_class);
+ cdev_del(&macvtap_cdev);
+ unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
+}
+module_exit(macvtap_exit);
+
+MODULE_ALIAS_RTNL_LINK("macvtap");
+MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/meth.c b/drivers/net/meth.c
index 2af81735386..9f72cb45f4a 100644
--- a/drivers/net/meth.c
+++ b/drivers/net/meth.c
@@ -51,14 +51,11 @@
static const char *meth_str="SGI O2 Fast Ethernet";
-#define HAVE_TX_TIMEOUT
/* The maximum time waited (in jiffies) before assuming a Tx failed. (400ms) */
#define TX_TIMEOUT (400*HZ/1000)
-#ifdef HAVE_TX_TIMEOUT
static int timeout = TX_TIMEOUT;
module_param(timeout, int, 0);
-#endif
/*
* This structure is private to each device. It is used to pass
diff --git a/drivers/net/mlx4/en_rx.c b/drivers/net/mlx4/en_rx.c
index 829b9ec9ff6..64394647ddd 100644
--- a/drivers/net/mlx4/en_rx.c
+++ b/drivers/net/mlx4/en_rx.c
@@ -508,11 +508,11 @@ static struct sk_buff *mlx4_en_rx_skb(struct mlx4_en_priv *priv,
/* We are copying all relevant data to the skb - temporarily
* synch buffers for the copy */
dma = be64_to_cpu(rx_desc->data[0].addr);
- dma_sync_single_range_for_cpu(&mdev->pdev->dev, dma, 0,
- length, DMA_FROM_DEVICE);
+ dma_sync_single_for_cpu(&mdev->pdev->dev, dma, length,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb, va, length);
- dma_sync_single_range_for_device(&mdev->pdev->dev, dma, 0,
- length, DMA_FROM_DEVICE);
+ dma_sync_single_for_device(&mdev->pdev->dev, dma, length,
+ DMA_FROM_DEVICE);
skb->tail += length;
} else {
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 3cf56d90d85..8f6e816a739 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1271,7 +1271,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
return __mlx4_init_one(pdev, NULL);
}
-static struct pci_device_id mlx4_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index af67af55efe..c97b6e4365a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -55,7 +55,6 @@
#include <linux/types.h>
#include <linux/inet_lro.h>
#include <asm/system.h>
-#include <linux/list.h>
static char mv643xx_eth_driver_name[] = "mv643xx_eth";
static char mv643xx_eth_driver_version[] = "1.4";
@@ -1697,7 +1696,7 @@ static u32 uc_addr_filter_mask(struct net_device *dev)
return 0;
nibbles = 1 << (dev->dev_addr[5] & 0x0f);
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
if (memcmp(dev->dev_addr, ha->addr, 5))
return 0;
if ((dev->dev_addr[5] ^ ha->addr[5]) & 0xf0)
@@ -1795,7 +1794,7 @@ oom:
memset(mc_spec, 0, 0x100);
memset(mc_other, 0, 0x100);
- for (addr = dev->mc_list; addr != NULL; addr = addr->next) {
+ netdev_for_each_mc_addr(addr, dev) {
u8 *a = addr->da_addr;
u32 *table;
int entry;
@@ -2847,6 +2846,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = {
.ndo_start_xmit = mv643xx_eth_xmit,
.ndo_set_rx_mode = mv643xx_eth_set_rx_mode,
.ndo_set_mac_address = mv643xx_eth_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mv643xx_eth_ioctl,
.ndo_change_mtu = mv643xx_eth_change_mtu,
.ndo_tx_timeout = mv643xx_eth_tx_timeout,
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 3fcb1c356e0..676c513e12f 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -38,6 +38,8 @@
* Myricom, Inc., 325N Santa Anita Avenue, Arcadia, CA 91006
*************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
@@ -75,7 +77,7 @@
#include "myri10ge_mcp.h"
#include "myri10ge_mcp_gen_header.h"
-#define MYRI10GE_VERSION_STR "1.5.1-1.453"
+#define MYRI10GE_VERSION_STR "1.5.2-1.459"
MODULE_DESCRIPTION("Myricom 10G driver (10GbE)");
MODULE_AUTHOR("Maintainer: help@myri.com");
@@ -819,9 +821,7 @@ static int myri10ge_change_pause(struct myri10ge_priv *mgp, int pause)
status = myri10ge_send_cmd(mgp, ctl, &cmd, 0);
if (status) {
- printk(KERN_ERR
- "myri10ge: %s: Failed to set flow control mode\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "Failed to set flow control mode\n");
return status;
}
mgp->pause = pause;
@@ -837,8 +837,7 @@ myri10ge_change_promisc(struct myri10ge_priv *mgp, int promisc, int atomic)
ctl = promisc ? MXGEFW_ENABLE_PROMISC : MXGEFW_DISABLE_PROMISC;
status = myri10ge_send_cmd(mgp, ctl, &cmd, atomic);
if (status)
- printk(KERN_ERR "myri10ge: %s: Failed to set promisc mode\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "Failed to set promisc mode\n");
}
static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type)
@@ -1201,6 +1200,9 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
{
struct page *page;
int idx;
+#if MYRI10GE_ALLOC_SIZE > 4096
+ int end_offset;
+#endif
if (unlikely(rx->watchdog_needed && !watchdog))
return;
@@ -1242,9 +1244,9 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx,
#if MYRI10GE_ALLOC_SIZE > 4096
/* don't cross a 4KB boundary */
- if ((rx->page_offset >> 12) !=
- ((rx->page_offset + bytes - 1) >> 12))
- rx->page_offset = (rx->page_offset + 4096) & ~4095;
+ end_offset = rx->page_offset + bytes - 1;
+ if ((unsigned)(rx->page_offset ^ end_offset) > 4095)
+ rx->page_offset = end_offset & ~4095;
#endif
rx->fill_cnt++;
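The rewritten check above relies on the fact that two offsets sit in the same 4 KB page exactly when their XOR has no bits set at bit 12 or above. A worked example with made-up numbers:

/* Worked example of the page-crossing test used above. */
#include <stdio.h>

int main(void)
{
	unsigned page_offset = 4000;			/* current fill position */
	unsigned bytes = 200;				/* receive buffer size */
	unsigned end_offset = page_offset + bytes - 1;	/* 4199 */

	if ((page_offset ^ end_offset) > 4095) {
		/* 4000..4199 straddles the boundary at 4096, so snap forward */
		page_offset = end_offset & ~4095;	/* becomes 4096 */
	}

	printf("page_offset=%u\n", page_offset);	/* prints 4096 */
	return 0;
}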
@@ -1482,19 +1484,15 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
if (mgp->link_state == MXGEFW_LINK_UP) {
if (netif_msg_link(mgp))
- printk(KERN_INFO
- "myri10ge: %s: link up\n",
- mgp->dev->name);
+ netdev_info(mgp->dev, "link up\n");
netif_carrier_on(mgp->dev);
mgp->link_changes++;
} else {
if (netif_msg_link(mgp))
- printk(KERN_INFO
- "myri10ge: %s: link %s\n",
- mgp->dev->name,
- (link_up == MXGEFW_LINK_MYRINET ?
- "mismatch (Myrinet detected)" :
- "down"));
+ netdev_info(mgp->dev, "link %s\n",
+ link_up == MXGEFW_LINK_MYRINET ?
+ "mismatch (Myrinet detected)" :
+ "down");
netif_carrier_off(mgp->dev);
mgp->link_changes++;
}
@@ -1503,9 +1501,8 @@ static inline void myri10ge_check_statblock(struct myri10ge_priv *mgp)
ntohl(stats->rdma_tags_available)) {
mgp->rdma_tags_available =
ntohl(stats->rdma_tags_available);
- printk(KERN_WARNING "myri10ge: %s: RDMA timed out! "
- "%d tags left\n", mgp->dev->name,
- mgp->rdma_tags_available);
+ netdev_warn(mgp->dev, "RDMA timed out! %d tags left\n",
+ mgp->rdma_tags_available);
}
mgp->down_cnt += stats->link_down;
if (stats->link_down)
@@ -1576,8 +1573,7 @@ static irqreturn_t myri10ge_intr(int irq, void *arg)
if (send_done_count != tx->pkt_done)
myri10ge_tx_done(ss, (int)send_done_count);
if (unlikely(i > myri10ge_max_irq_loops)) {
- printk(KERN_WARNING "myri10ge: %s: irq stuck?\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "irq stuck?\n");
stats->valid = 0;
schedule_work(&mgp->watchdog_work);
}
@@ -1614,16 +1610,14 @@ myri10ge_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
*/
ptr = mgp->product_code_string;
if (ptr == NULL) {
- printk(KERN_ERR "myri10ge: %s: Missing product code\n",
- netdev->name);
+ netdev_err(netdev, "Missing product code\n");
return 0;
}
for (i = 0; i < 3; i++, ptr++) {
ptr = strchr(ptr, '-');
if (ptr == NULL) {
- printk(KERN_ERR "myri10ge: %s: Invalid product "
- "code %s\n", netdev->name,
- mgp->product_code_string);
+ netdev_err(netdev, "Invalid product code %s\n",
+ mgp->product_code_string);
return 0;
}
}
@@ -2009,17 +2003,15 @@ static int myri10ge_allocate_rings(struct myri10ge_slice_state *ss)
mgp->small_bytes + MXGEFW_PAD, 0);
if (ss->rx_small.fill_cnt < ss->rx_small.mask + 1) {
- printk(KERN_ERR
- "myri10ge: %s:slice-%d: alloced only %d small bufs\n",
- dev->name, slice, ss->rx_small.fill_cnt);
+ netdev_err(dev, "slice-%d: alloced only %d small bufs\n",
+ slice, ss->rx_small.fill_cnt);
goto abort_with_rx_small_ring;
}
myri10ge_alloc_rx_pages(mgp, &ss->rx_big, mgp->big_bytes, 0);
if (ss->rx_big.fill_cnt < ss->rx_big.mask + 1) {
- printk(KERN_ERR
- "myri10ge: %s:slice-%d: alloced only %d big bufs\n",
- dev->name, slice, ss->rx_big.fill_cnt);
+ netdev_err(dev, "slice-%d: alloced only %d big bufs\n",
+ slice, ss->rx_big.fill_cnt);
goto abort_with_rx_big_ring;
}
@@ -2358,7 +2350,7 @@ static int myri10ge_open(struct net_device *dev)
mgp->running = MYRI10GE_ETH_STARTING;
status = myri10ge_reset(mgp);
if (status != 0) {
- printk(KERN_ERR "myri10ge: %s: failed reset\n", dev->name);
+ netdev_err(dev, "failed reset\n");
goto abort_with_nothing;
}
@@ -2370,9 +2362,7 @@ static int myri10ge_open(struct net_device *dev)
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ENABLE_RSS_QUEUES,
&cmd, 0);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: failed to set number of slices\n",
- dev->name);
+ netdev_err(dev, "failed to set number of slices\n");
goto abort_with_nothing;
}
/* setup the indirection table */
@@ -2384,9 +2374,7 @@ static int myri10ge_open(struct net_device *dev)
MXGEFW_CMD_GET_RSS_TABLE_OFFSET,
&cmd, 0);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: failed to setup rss tables\n",
- dev->name);
+ netdev_err(dev, "failed to setup rss tables\n");
goto abort_with_nothing;
}
@@ -2400,9 +2388,7 @@ static int myri10ge_open(struct net_device *dev)
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_RSS_ENABLE,
&cmd, 0);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: failed to enable slices\n",
- dev->name);
+ netdev_err(dev, "failed to enable slices\n");
goto abort_with_nothing;
}
}
@@ -2450,9 +2436,7 @@ static int myri10ge_open(struct net_device *dev)
status = myri10ge_get_txrx(mgp, slice);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: failed to get ring sizes or locations\n",
- dev->name);
+ netdev_err(dev, "failed to get ring sizes or locations\n");
goto abort_with_rings;
}
status = myri10ge_allocate_rings(ss);
@@ -2465,9 +2449,7 @@ static int myri10ge_open(struct net_device *dev)
if (slice == 0 || mgp->dev->real_num_tx_queues > 1)
status = myri10ge_set_stats(mgp, slice);
if (status) {
- printk(KERN_ERR
- "myri10ge: %s: Couldn't set stats DMA\n",
- dev->name);
+ netdev_err(dev, "Couldn't set stats DMA\n");
goto abort_with_rings;
}
@@ -2498,8 +2480,7 @@ static int myri10ge_open(struct net_device *dev)
status |=
myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd, 0);
if (status) {
- printk(KERN_ERR "myri10ge: %s: Couldn't set buffer sizes\n",
- dev->name);
+ netdev_err(dev, "Couldn't set buffer sizes\n");
goto abort_with_rings;
}
@@ -2511,8 +2492,7 @@ static int myri10ge_open(struct net_device *dev)
cmd.data0 = 0;
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_SET_TSO_MODE, &cmd, 0);
if (status && status != -ENOSYS) {
- printk(KERN_ERR "myri10ge: %s: Couldn't set TSO mode\n",
- dev->name);
+ netdev_err(dev, "Couldn't set TSO mode\n");
goto abort_with_rings;
}
@@ -2521,8 +2501,7 @@ static int myri10ge_open(struct net_device *dev)
status = myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_UP, &cmd, 0);
if (status) {
- printk(KERN_ERR "myri10ge: %s: Couldn't bring up link\n",
- dev->name);
+ netdev_err(dev, "Couldn't bring up link\n");
goto abort_with_rings;
}
@@ -2575,15 +2554,12 @@ static int myri10ge_close(struct net_device *dev)
status =
myri10ge_send_cmd(mgp, MXGEFW_CMD_ETHERNET_DOWN, &cmd, 0);
if (status)
- printk(KERN_ERR
- "myri10ge: %s: Couldn't bring down link\n",
- dev->name);
+ netdev_err(dev, "Couldn't bring down link\n");
wait_event_timeout(mgp->down_wq, old_down_cnt != mgp->down_cnt,
HZ);
if (old_down_cnt == mgp->down_cnt)
- printk(KERN_ERR "myri10ge: %s never got down irq\n",
- dev->name);
+ netdev_err(dev, "never got down irq\n");
}
netif_tx_disable(dev);
myri10ge_free_irq(mgp);
@@ -2944,9 +2920,7 @@ abort_linearize:
idx = (idx + 1) & tx->mask;
} while (idx != last_idx);
if (skb_is_gso(skb)) {
- printk(KERN_ERR
- "myri10ge: %s: TSO but wanted to linearize?!?!?\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "TSO but wanted to linearize?!?!?\n");
goto drop;
}
@@ -3043,8 +3017,8 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
err = myri10ge_send_cmd(mgp, MXGEFW_ENABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
- printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_ENABLE_ALLMULTI,"
- " error status: %d\n", dev->name, err);
+ netdev_err(dev, "Failed MXGEFW_ENABLE_ALLMULTI, error status: %d\n",
+ err);
goto abort;
}
@@ -3058,14 +3032,13 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
err = myri10ge_send_cmd(mgp, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS,
&cmd, 1);
if (err != 0) {
- printk(KERN_ERR
- "myri10ge: %s: Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS"
- ", error status: %d\n", dev->name, err);
+ netdev_err(dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, error status: %d\n",
+ err);
goto abort;
}
/* Walk the multicast list, and add each address */
- for (mc_list = dev->mc_list; mc_list != NULL; mc_list = mc_list->next) {
+ netdev_for_each_mc_addr(mc_list, dev) {
memcpy(data, &mc_list->dmi_addr, 6);
cmd.data0 = ntohl(data[0]);
cmd.data1 = ntohl(data[1]);
@@ -3073,18 +3046,16 @@ static void myri10ge_set_multicast_list(struct net_device *dev)
&cmd, 1);
if (err != 0) {
- printk(KERN_ERR "myri10ge: %s: Failed "
- "MXGEFW_JOIN_MULTICAST_GROUP, error status:"
- "%d\t", dev->name, err);
- printk(KERN_ERR "MAC %pM\n", mc_list->dmi_addr);
+ netdev_err(dev, "Failed MXGEFW_JOIN_MULTICAST_GROUP, error status:%d %pM\n",
+ err, mc_list->dmi_addr);
goto abort;
}
}
/* Enable multicast filtering */
err = myri10ge_send_cmd(mgp, MXGEFW_DISABLE_ALLMULTI, &cmd, 1);
if (err != 0) {
- printk(KERN_ERR "myri10ge: %s: Failed MXGEFW_DISABLE_ALLMULTI,"
- "error status: %d\n", dev->name, err);
+ netdev_err(dev, "Failed MXGEFW_DISABLE_ALLMULTI, error status: %d\n",
+ err);
goto abort;
}
@@ -3105,9 +3076,8 @@ static int myri10ge_set_mac_address(struct net_device *dev, void *addr)
status = myri10ge_update_mac_address(mgp, sa->sa_data);
if (status != 0) {
- printk(KERN_ERR
- "myri10ge: %s: changing mac address failed with %d\n",
- dev->name, status);
+ netdev_err(dev, "changing mac address failed with %d\n",
+ status);
return status;
}
@@ -3122,12 +3092,10 @@ static int myri10ge_change_mtu(struct net_device *dev, int new_mtu)
int error = 0;
if ((new_mtu < 68) || (ETH_HLEN + new_mtu > MYRI10GE_MAX_ETHER_MTU)) {
- printk(KERN_ERR "myri10ge: %s: new mtu (%d) is not valid\n",
- dev->name, new_mtu);
+ netdev_err(dev, "new mtu (%d) is not valid\n", new_mtu);
return -EINVAL;
}
- printk(KERN_INFO "%s: changing mtu from %d to %d\n",
- dev->name, dev->mtu, new_mtu);
+ netdev_info(dev, "changing mtu from %d to %d\n", dev->mtu, new_mtu);
if (mgp->running) {
/* if we change the mtu on an active device, we must
* reset the device so the firmware sees the change */
@@ -3356,7 +3324,7 @@ static int myri10ge_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(netdev);
if (netif_running(netdev)) {
- printk(KERN_INFO "myri10ge: closing %s\n", netdev->name);
+ netdev_info(netdev, "closing\n");
rtnl_lock();
myri10ge_close(netdev);
rtnl_unlock();
@@ -3383,8 +3351,7 @@ static int myri10ge_resume(struct pci_dev *pdev)
msleep(5); /* give card time to respond */
pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
if (vendor == 0xffff) {
- printk(KERN_ERR "myri10ge: %s: device disappeared!\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "device disappeared!\n");
return -EIO;
}
@@ -3463,10 +3430,9 @@ static void myri10ge_watchdog(struct work_struct *work)
* if the card rebooted due to a parity error
* For now, just report it */
reboot = myri10ge_read_reboot(mgp);
- printk(KERN_ERR
- "myri10ge: %s: NIC rebooted (0x%x),%s resetting\n",
- mgp->dev->name, reboot,
- myri10ge_reset_recover ? " " : " not");
+ netdev_err(mgp->dev, "NIC rebooted (0x%x),%s resetting\n",
+ reboot,
+ myri10ge_reset_recover ? "" : " not");
if (myri10ge_reset_recover == 0)
return;
rtnl_lock();
@@ -3494,31 +3460,26 @@ static void myri10ge_watchdog(struct work_struct *work)
if (cmd == 0xffff) {
pci_read_config_word(mgp->pdev, PCI_VENDOR_ID, &vendor);
if (vendor == 0xffff) {
- printk(KERN_ERR
- "myri10ge: %s: device disappeared!\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "device disappeared!\n");
return;
}
}
/* Perhaps it is a software error. Try to reset */
- printk(KERN_ERR "myri10ge: %s: device timeout, resetting\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "device timeout, resetting\n");
for (i = 0; i < mgp->num_slices; i++) {
tx = &mgp->ss[i].tx;
- printk(KERN_INFO
- "myri10ge: %s: (%d): %d %d %d %d %d %d\n",
- mgp->dev->name, i, tx->queue_active, tx->req,
- tx->done, tx->pkt_start, tx->pkt_done,
- (int)ntohl(mgp->ss[i].fw_stats->
- send_done_count));
+ netdev_err(mgp->dev, "(%d): %d %d %d %d %d %d\n",
+ i, tx->queue_active, tx->req,
+ tx->done, tx->pkt_start, tx->pkt_done,
+ (int)ntohl(mgp->ss[i].fw_stats->
+ send_done_count));
msleep(2000);
- printk(KERN_INFO
- "myri10ge: %s: (%d): %d %d %d %d %d %d\n",
- mgp->dev->name, i, tx->queue_active, tx->req,
- tx->done, tx->pkt_start, tx->pkt_done,
- (int)ntohl(mgp->ss[i].fw_stats->
- send_done_count));
+ netdev_info(mgp->dev, "(%d): %d %d %d %d %d %d\n",
+ i, tx->queue_active, tx->req,
+ tx->done, tx->pkt_start, tx->pkt_done,
+ (int)ntohl(mgp->ss[i].fw_stats->
+ send_done_count));
}
}
@@ -3528,8 +3489,7 @@ static void myri10ge_watchdog(struct work_struct *work)
}
status = myri10ge_load_firmware(mgp, 1);
if (status != 0)
- printk(KERN_ERR "myri10ge: %s: failed to load firmware\n",
- mgp->dev->name);
+ netdev_err(mgp->dev, "failed to load firmware\n");
else
myri10ge_open(mgp->dev);
rtnl_unlock();
@@ -3580,14 +3540,10 @@ static void myri10ge_watchdog_timer(unsigned long arg)
/* nic seems like it might be stuck.. */
if (rx_pause_cnt != mgp->watchdog_pause) {
if (net_ratelimit())
- printk(KERN_WARNING
- "myri10ge %s slice %d:"
- "TX paused, check link partner\n",
- mgp->dev->name, i);
+ netdev_err(mgp->dev, "slice %d: TX paused, check link partner\n",
+ i);
} else {
- printk(KERN_WARNING
- "myri10ge %s slice %d stuck:",
- mgp->dev->name, i);
+ netdev_warn(mgp->dev, "slice %d stuck:", i);
reset_needed = 1;
}
}
@@ -4085,7 +4041,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E 0x0008
#define PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9 0x0009
-static struct pci_device_id myri10ge_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(myri10ge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E)},
{PCI_DEVICE
(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E_9)},
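The PCI ID table hunk above, repeated for the other drivers in this patch, switches the open-coded pci_device_id arrays over to DEFINE_PCI_DEVICE_TABLE(). At this kernel vintage the macro just adds const and the devinit-const section annotation; an approximate sketch of the definition plus a hypothetical table using it:

/* from include/linux/pci.h of this era (approximate form) */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst

/* hypothetical usage: a two-entry table named example_pci_tbl */
static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MYRICOM, PCI_DEVICE_ID_MYRICOM_MYRI10GE_Z8E) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);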
@@ -4127,13 +4083,11 @@ static struct notifier_block myri10ge_dca_notifier = {
static __init int myri10ge_init_module(void)
{
- printk(KERN_INFO "%s: Version %s\n", myri10ge_driver.name,
- MYRI10GE_VERSION_STR);
+ pr_info("Version %s\n", MYRI10GE_VERSION_STR);
if (myri10ge_rss_hash > MXGEFW_RSS_HASH_TYPE_MAX) {
- printk(KERN_ERR
- "%s: Illegal rssh hash type %d, defaulting to source port\n",
- myri10ge_driver.name, myri10ge_rss_hash);
+ pr_err("Illegal rssh hash type %d, defaulting to source port\n",
+ myri10ge_rss_hash);
myri10ge_rss_hash = MXGEFW_RSS_HASH_TYPE_SRC_PORT;
}
#ifdef CONFIG_MYRI10GE_DCA
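The multicast hunks above, and the same change repeated in the drivers that follow, all swap open-coded walks of dev->mc_list for the netdev_for_each_mc_addr() iterator, with netdev_mc_count()/netdev_mc_empty() replacing direct dev->mc_count reads. A minimal sketch of the conversion, where program_mc_addr() and MAX_MC_FILTERS are hypothetical stand-ins for the driver-specific pieces:

	struct dev_mc_list *mc;
	int i = 0;

	/* old style: follow the ->next pointers and trust dev->mc_count */
	for (mc = dev->mc_list; mc != NULL; mc = mc->next)
		program_mc_addr(adapter, i++, mc->dmi_addr);

	/* new style: iterator macro plus count helpers, no manual pointer walking */
	if (!netdev_mc_empty(dev) && netdev_mc_count(dev) <= MAX_MC_FILTERS) {
		i = 0;
		netdev_for_each_mc_addr(mc, dev)
			program_mc_addr(adapter, i++, mc->dmi_addr);
	}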
diff --git a/drivers/net/myri_sbus.c b/drivers/net/myri_sbus.c
index b3513ad3b70..8b431308535 100644
--- a/drivers/net/myri_sbus.c
+++ b/drivers/net/myri_sbus.c
@@ -716,10 +716,10 @@ static int myri_header(struct sk_buff *skb, struct net_device *dev,
pad[0] = MYRI_PAD_LEN;
pad[1] = 0xab;
- /* Set the protocol type. For a packet of type ETH_P_802_3 we put the length
- * in here instead. It is up to the 802.2 layer to carry protocol information.
+ /* Set the protocol type. For a packet of type ETH_P_802_3/2 we put the
+ * length in here instead.
*/
- if (type != ETH_P_802_3)
+ if (type != ETH_P_802_3 && type != ETH_P_802_2)
eth->h_proto = htons(type);
else
eth->h_proto = htons(len);
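The comment change above leans on the Ethernet type/length overlap: on the wire, values of 0x0600 (1536) and up in this field are EtherTypes, while values up to ETH_DATA_LEN (1500) are 802.3 length fields. ETH_P_802_3 (0x0001) and ETH_P_802_2 (0x0004) are in-kernel dummy types, so for those the driver must store the payload length rather than a protocol number; a minimal restatement of the branch with the relevant values spelled out:

	if (type != ETH_P_802_3 && type != ETH_P_802_2)
		eth->h_proto = htons(type);	/* a real EtherType, e.g. ETH_P_IP == 0x0800 */
	else
		eth->h_proto = htons(len);	/* 802.3/802.2: length field, <= ETH_DATA_LEN */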
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 797fe164ce2..e5203878324 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -247,7 +247,7 @@ static struct {
{ "NatSemi DP8381[56]", 0, 24 },
};
-static struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(natsemi_pci_tbl) = {
{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9, 0x000c, 0, 0, 0 },
{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ } /* terminate list */
@@ -2488,16 +2488,16 @@ static void __set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
rx_mode = RxFilterEnable | AcceptBroadcast
| AcceptAllMulticast | AcceptMyPhys;
} else {
struct dev_mc_list *mclist;
int i;
+
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int b = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 23) & 0x1ff;
mc_filter[b/8] |= (1 << (b & 0x07));
}
diff --git a/drivers/net/ne2k-pci.c b/drivers/net/ne2k-pci.c
index 3fcebb70151..85aec4f1013 100644
--- a/drivers/net/ne2k-pci.c
+++ b/drivers/net/ne2k-pci.c
@@ -136,7 +136,7 @@ static struct {
};
-static struct pci_device_id ne2k_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ne2k_pci_tbl) = {
{ 0x10ec, 0x8029, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_RealTek_RTL_8029 },
{ 0x1050, 0x0940, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Winbond_89C940 },
{ 0x11f6, 0x1401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_Compex_RL2000 },
diff --git a/drivers/net/netxen/Makefile b/drivers/net/netxen/Makefile
index 11d94e2434e..861a0590b1f 100644
--- a/drivers/net/netxen/Makefile
+++ b/drivers/net/netxen/Makefile
@@ -18,7 +18,7 @@
# MA 02111-1307, USA.
#
# The full GNU General Public License is included in this distribution
-# in the file called LICENSE.
+# in the file called "COPYING".
#
#
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 9bc5bd1d538..144d2e88042 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -420,7 +420,7 @@ struct status_desc {
} __attribute__ ((aligned(16)));
/* UNIFIED ROMIMAGE *************************/
-#define NX_UNI_FW_MIN_SIZE 0x3eb000
+#define NX_UNI_FW_MIN_SIZE 0xc8000
#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
#define NX_UNI_DIR_SECT_BOOTLD 0x6
#define NX_UNI_DIR_SECT_FW 0x7
@@ -1427,8 +1427,8 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring)
}
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
-int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac);
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac);
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac);
extern void netxen_change_ringparam(struct netxen_adapter *adapter);
extern int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr,
int *valp);
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 9cb8f687804..2a8ef5fc966 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 542f408333f..f8499e56cbe 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index d138fc22927..622e4c8be93 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -969,7 +969,8 @@ enum {
#define NX_DEV_READY 3
#define NX_DEV_NEED_RESET 4
#define NX_DEV_NEED_QUISCENT 5
-#define NX_DEV_FAILED 6
+#define NX_DEV_NEED_AER 6
+#define NX_DEV_FAILED 7
#define NX_RCODE_DRIVER_INFO 0x20000000
#define NX_RCODE_DRIVER_CAN_RELOAD 0x40000000
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 85e28e60ecf..a945591298a 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -539,7 +539,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
struct netxen_adapter *adapter = netdev_priv(netdev);
struct dev_mc_list *mc_ptr;
u8 null_addr[6];
- int index = 0;
+ int i;
memset(null_addr, 0, 6);
@@ -554,7 +554,7 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
return;
}
- if (netdev->mc_count == 0) {
+ if (netdev_mc_empty(netdev)) {
adapter->set_promisc(adapter,
NETXEN_NIU_NON_PROMISC_MODE);
netxen_nic_disable_mcast_filter(adapter);
@@ -563,23 +563,20 @@ void netxen_p2_nic_set_multi(struct net_device *netdev)
adapter->set_promisc(adapter, NETXEN_NIU_ALLMULTI_MODE);
if (netdev->flags & IFF_ALLMULTI ||
- netdev->mc_count > adapter->max_mc_count) {
+ netdev_mc_count(netdev) > adapter->max_mc_count) {
netxen_nic_disable_mcast_filter(adapter);
return;
}
netxen_nic_enable_mcast_filter(adapter);
- for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next, index++)
- netxen_nic_set_mcast_addr(adapter, index, mc_ptr->dmi_addr);
-
- if (index != netdev->mc_count)
- printk(KERN_WARNING "%s: %s multicast address count mismatch\n",
- netxen_nic_driver_name, netdev->name);
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, netdev)
+ netxen_nic_set_mcast_addr(adapter, i++, mc_ptr->dmi_addr);
/* Clear out remaining addresses */
- for (; index < adapter->max_mc_count; index++)
- netxen_nic_set_mcast_addr(adapter, index, null_addr);
+ while (i < adapter->max_mc_count)
+ netxen_nic_set_mcast_addr(adapter, i++, null_addr);
}
static int
@@ -704,16 +701,14 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
}
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > adapter->max_mc_count)) {
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
goto send_fw_cmd;
}
- if (netdev->mc_count > 0) {
- for (mc_ptr = netdev->mc_list; mc_ptr;
- mc_ptr = mc_ptr->next) {
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(mc_ptr, netdev)
nx_p3_nic_add_mac(adapter, mc_ptr->dmi_addr, &del_list);
- }
}
send_fw_cmd:
@@ -777,17 +772,20 @@ int netxen_p3_nic_set_mac_addr(struct netxen_adapter *adapter, u8 *addr)
int netxen_config_intr_coalesce(struct netxen_adapter *adapter)
{
nx_nic_req_t req;
- u64 word;
- int rv;
+ u64 word[6];
+ int rv, i;
memset(&req, 0, sizeof(nx_nic_req_t));
+ memset(word, 0, sizeof(word));
req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
- word = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
- req.req_hdr = cpu_to_le64(word);
+ word[0] = NETXEN_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
- memcpy(&req.words[0], &adapter->coal, sizeof(adapter->coal));
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
rv = netxen_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
if (rv != 0) {
@@ -1033,7 +1031,7 @@ static int netxen_get_flash_block(struct netxen_adapter *adapter, int base,
return 0;
}
-int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
+int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac)
{
__le32 *pmac = (__le32 *) mac;
u32 offset;
@@ -1058,7 +1056,7 @@ int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
return 0;
}
-int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
+int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac)
{
uint32_t crbaddr, mac_hi, mac_lo;
int pci_func = adapter->ahw.pci_func;
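The netxen_config_intr_coalesce() hunk above addresses byte order: the coalescing parameters used to be memcpy'd directly into req.words[], which the cpu_to_le64() conversion shows are little-endian on the wire, so big-endian hosts handed the firmware byte-swapped values. The fix stages the parameters in a native u64 array and converts each word explicitly; a minimal sketch of that staging pattern, assuming the six-word request body used by the driver:

	u64 word[6];	/* native-endian staging copy of the parameter block */
	int i;

	memset(word, 0, sizeof(word));
	memcpy(word, &adapter->coal, sizeof(adapter->coal));

	/* convert each word once, at the boundary to the little-endian request */
	for (i = 0; i < 6; i++)
		req.words[i] = cpu_to_le64(word[i]);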
diff --git a/drivers/net/netxen/netxen_nic_hw.h b/drivers/net/netxen/netxen_nic_hw.h
index 3fd1dcb3583..e2c5b6f2df0 100644
--- a/drivers/net/netxen/netxen_nic_hw.h
+++ b/drivers/net/netxen/netxen_nic_hw.h
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 64cff68d372..1c63610ead4 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -780,6 +780,9 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 1;
+ if (adapter->need_fw_reset)
+ return 1;
+
/* last attempt had failed */
if (NXRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
return 1;
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 24279e6e55f..08780ef1c1f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -19,7 +19,7 @@
* MA 02111-1307, USA.
*
* The full GNU General Public License is included in this distribution
- * in the file called LICENSE.
+ * in the file called "COPYING".
*
*/
@@ -35,6 +35,7 @@
#include <linux/ipv6.h>
#include <linux/inetdevice.h>
#include <linux/sysfs.h>
+#include <linux/aer.h>
MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
MODULE_LICENSE("GPL");
@@ -84,6 +85,7 @@ static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
static void netxen_create_diag_entries(struct netxen_adapter *adapter);
static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
+static int nx_dev_request_aer(struct netxen_adapter *adapter);
static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
static int netxen_can_start_firmware(struct netxen_adapter *adapter);
@@ -98,7 +100,7 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
{PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
.class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
-static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(netxen_pci_tbl) = {
ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
ENTRY(PCI_DEVICE_ID_NX2031_10GCX4),
ENTRY(PCI_DEVICE_ID_NX2031_4GCU),
@@ -430,7 +432,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
{
int i;
unsigned char *p;
- __le64 mac_addr;
+ u64 mac_addr;
struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
@@ -1262,6 +1264,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((err = pci_request_regions(pdev, netxen_nic_driver_name)))
goto err_out_disable_pdev;
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_enable_pcie_error_reporting(pdev);
+
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct netxen_adapter));
@@ -1409,17 +1414,19 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
netxen_release_firmware(adapter);
+ if (NX_IS_REVISION_P3(pdev->revision))
+ pci_disable_pcie_error_reporting(pdev);
+
pci_release_regions(pdev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
free_netdev(netdev);
}
-static int __netxen_nic_shutdown(struct pci_dev *pdev)
+
+static void netxen_nic_detach_func(struct netxen_adapter *adapter)
{
- struct netxen_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
- int retval;
netif_device_detach(netdev);
@@ -1438,53 +1445,22 @@ static int __netxen_nic_shutdown(struct pci_dev *pdev)
nx_decr_dev_ref_cnt(adapter);
clear_bit(__NX_RESETTING, &adapter->state);
-
- retval = pci_save_state(pdev);
- if (retval)
- return retval;
-
- if (netxen_nic_wol_supported(adapter)) {
- pci_enable_wake(pdev, PCI_D3cold, 1);
- pci_enable_wake(pdev, PCI_D3hot, 1);
- }
-
- pci_disable_device(pdev);
-
- return 0;
}
-static void netxen_nic_shutdown(struct pci_dev *pdev)
-{
- if (__netxen_nic_shutdown(pdev))
- return;
-}
-#ifdef CONFIG_PM
-static int
-netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
-{
- int retval;
-
- retval = __netxen_nic_shutdown(pdev);
- if (retval)
- return retval;
- pci_set_power_state(pdev, pci_choose_state(pdev, state));
- return 0;
-}
-
-static int
-netxen_nic_resume(struct pci_dev *pdev)
+static int netxen_nic_attach_func(struct pci_dev *pdev)
{
struct netxen_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
int err;
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
err = pci_enable_device(pdev);
if (err)
return err;
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
adapter->ahw.crb_win = -1;
adapter->ahw.ocm_win = -1;
@@ -1503,11 +1479,10 @@ netxen_nic_resume(struct pci_dev *pdev)
if (err)
goto err_out_detach;
- netif_device_attach(netdev);
-
netxen_config_indev_addr(netdev, NETDEV_UP);
}
+ netif_device_attach(netdev);
netxen_schedule_work(adapter, netxen_fw_poll_work, FW_POLL_DELAY);
return 0;
@@ -1517,6 +1492,85 @@ err_out:
nx_decr_dev_ref_cnt(adapter);
return err;
}
+
+static pci_ers_result_t netxen_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (nx_dev_request_aer(adapter))
+ return PCI_ERS_RESULT_RECOVERED;
+
+ netxen_nic_detach_func(adapter);
+
+ pci_disable_device(pdev);
+
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t netxen_io_slot_reset(struct pci_dev *pdev)
+{
+ int err = 0;
+
+ err = netxen_nic_attach_func(pdev);
+
+ return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
+}
+
+static void netxen_io_resume(struct pci_dev *pdev)
+{
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+}
+
+static void netxen_nic_shutdown(struct pci_dev *pdev)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+
+ netxen_nic_detach_func(adapter);
+
+ if (pci_save_state(pdev))
+ return;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+netxen_nic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct netxen_adapter *adapter = pci_get_drvdata(pdev);
+ int retval;
+
+ netxen_nic_detach_func(adapter);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (netxen_nic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+ return 0;
+}
+
+static int
+netxen_nic_resume(struct pci_dev *pdev)
+{
+ return netxen_nic_attach_func(pdev);
+}
#endif
static int netxen_nic_open(struct net_device *netdev)
@@ -2104,20 +2158,49 @@ nx_decr_dev_ref_cnt(struct netxen_adapter *adapter)
return count;
}
-static void
+static int
+nx_dev_request_aer(struct netxen_adapter *adapter)
+{
+ u32 state;
+ int ret = -EINVAL;
+
+ if (netxen_api_lock(adapter))
+ return ret;
+
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+
+ if (state == NX_DEV_NEED_AER)
+ ret = 0;
+ else if (state == NX_DEV_READY) {
+ NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_AER);
+ ret = 0;
+ }
+
+ netxen_api_unlock(adapter);
+ return ret;
+}
+
+static int
nx_dev_request_reset(struct netxen_adapter *adapter)
{
u32 state;
+ int ret = -EINVAL;
if (netxen_api_lock(adapter))
- return;
+ return ret;
state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (state != NX_DEV_INITALIZING)
+ if (state == NX_DEV_NEED_RESET)
+ ret = 0;
+ else if (state != NX_DEV_INITALIZING && state != NX_DEV_NEED_AER) {
NXWR32(adapter, NX_CRB_DEV_STATE, NX_DEV_NEED_RESET);
+ ret = 0;
+ }
netxen_api_unlock(adapter);
+
+ return ret;
}
static int
@@ -2271,17 +2354,29 @@ netxen_check_health(struct netxen_adapter *adapter)
u32 state, heartbit;
struct net_device *netdev = adapter->netdev;
+ state = NXRD32(adapter, NX_CRB_DEV_STATE);
+ if (state == NX_DEV_NEED_AER)
+ return 0;
+
if (netxen_nic_check_temp(adapter))
goto detach;
if (adapter->need_fw_reset) {
- nx_dev_request_reset(adapter);
+ if (nx_dev_request_reset(adapter))
+ return 0;
goto detach;
}
- state = NXRD32(adapter, NX_CRB_DEV_STATE);
- if (state == NX_DEV_NEED_RESET)
- goto detach;
+ /* NX_DEV_NEED_RESET: this state can be set in two cases,
+ * 1. Tx timeout  2. Fw hang.
+ * A request to destroy the context is sent only for a Tx timeout;
+ * it is not required for a Fw hang.
+ */
+ if (state == NX_DEV_NEED_RESET) {
+ adapter->need_fw_reset = 1;
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
+ goto detach;
+ }
if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
return 0;
@@ -2290,12 +2385,17 @@ netxen_check_health(struct netxen_adapter *adapter)
if (heartbit != adapter->heartbit) {
adapter->heartbit = heartbit;
adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
return 0;
}
if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
return 0;
+ if (nx_dev_request_reset(adapter))
+ return 0;
+
clear_bit(__NX_FW_ATTACHED, &adapter->state);
dev_info(&netdev->dev, "firmware hang detected\n");
@@ -2498,7 +2598,7 @@ netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
return size;
}
-ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
+static ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
struct bin_attribute *attr, char *buf,
loff_t offset, size_t size)
{
@@ -2725,6 +2825,12 @@ netxen_config_indev_addr(struct net_device *dev, unsigned long event)
{ }
#endif
+static struct pci_error_handlers netxen_err_handler = {
+ .error_detected = netxen_io_error_detected,
+ .slot_reset = netxen_io_slot_reset,
+ .resume = netxen_io_resume,
+};
+
static struct pci_driver netxen_driver = {
.name = netxen_nic_driver_name,
.id_table = netxen_pci_tbl,
@@ -2734,7 +2840,8 @@ static struct pci_driver netxen_driver = {
.suspend = netxen_nic_suspend,
.resume = netxen_nic_resume,
#endif
- .shutdown = netxen_nic_shutdown
+ .shutdown = netxen_nic_shutdown,
+ .err_handler = &netxen_err_handler
};
static int __init netxen_init_module(void)
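The err_handler wiring above gives netxen a standard PCI AER recovery path: .error_detected runs when an uncorrectable error is reported (here the driver either defers to firmware via NX_DEV_NEED_AER or detaches and asks for a slot reset), .slot_reset runs after the link has been reset (re-attach the device), and .resume runs once recovery finishes (clear the logged AER status). A minimal sketch of the registration pattern, with hypothetical mydrv_* callbacks standing in for the driver's own:

static pci_ers_result_t mydrv_error_detected(struct pci_dev *pdev,
					     pci_channel_state_t state)
{
	if (state == pci_channel_io_perm_failure)
		return PCI_ERS_RESULT_DISCONNECT;	/* give up on the device */
	/* quiesce the device here ... */
	return PCI_ERS_RESULT_NEED_RESET;		/* ask the core for a slot reset */
}

static pci_ers_result_t mydrv_slot_reset(struct pci_dev *pdev)
{
	/* re-enable and re-initialize the device here ... */
	return PCI_ERS_RESULT_RECOVERED;
}

static void mydrv_resume(struct pci_dev *pdev)
{
	pci_cleanup_aer_uncorrect_error_status(pdev);	/* clear logged AER status */
}

static struct pci_error_handlers mydrv_err_handler = {
	.error_detected	= mydrv_error_detected,
	.slot_reset	= mydrv_slot_reset,
	.resume		= mydrv_resume,
};
/* hooked up via the pci_driver's .err_handler field, as in the patch above */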
diff --git a/drivers/net/ni5010.c b/drivers/net/ni5010.c
index 6a87d810e59..c16cbfb4061 100644
--- a/drivers/net/ni5010.c
+++ b/drivers/net/ni5010.c
@@ -651,7 +651,8 @@ static void ni5010_set_multicast_list(struct net_device *dev)
PRINTK2((KERN_DEBUG "%s: entering set_multicast_list\n", dev->name));
- if (dev->flags&IFF_PROMISC || dev->flags&IFF_ALLMULTI || dev->mc_list) {
+ if (dev->flags & IFF_PROMISC || dev->flags & IFF_ALLMULTI ||
+ !netdev_mc_empty(dev)) {
outb(RMD_PROMISC, EDLC_RMODE); /* Enable promiscuous mode */
PRINTK((KERN_DEBUG "%s: Entering promiscuous mode\n", dev->name));
} else {
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index b42f5e522f9..05c29c2cef2 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -596,8 +596,8 @@ static int init586(struct net_device *dev)
struct iasetup_cmd_struct __iomem *ias_cmd;
struct tdr_cmd_struct __iomem *tdr_cmd;
struct mcsetup_cmd_struct __iomem *mc_cmd;
- struct dev_mc_list *dmi = dev->mc_list;
- int num_addrs = dev->mc_count;
+ struct dev_mc_list *dmi;
+ int num_addrs = netdev_mc_count(dev);
ptr = p->scb + 1;
@@ -724,9 +724,9 @@ static int init586(struct net_device *dev)
writew(0xffff, &mc_cmd->cmd_link);
writew(num_addrs * 6, &mc_cmd->mc_cnt);
- for (i = 0; i < num_addrs; i++, dmi = dmi->next)
- memcpy_toio(mc_cmd->mc_list[i],
- dmi->dmi_addr, 6);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy_toio(mc_cmd->mc_list[i++], dmi->dmi_addr, 6);
writew(make16(mc_cmd), &p->scb->cbl_offset);
writeb(CUC_START, &p->scb->cmd_cuc);
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index ae19aafd2c7..9225c76cac4 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -849,7 +849,7 @@ static int ni65_lance_reinit(struct net_device *dev)
if(dev->flags & IFF_PROMISC)
ni65_init_lance(p,dev->dev_addr,0x00,M_PROM);
- else if(dev->mc_count || dev->flags & IFF_ALLMULTI)
+ else if (netdev_mc_count(dev) || dev->flags & IFF_ALLMULTI)
ni65_init_lance(p,dev->dev_addr,0xff,0x0);
else
ni65_init_lance(p,dev->dev_addr,0x00,0x00);
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 2aed2b382c4..0678f3106cb 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -3,6 +3,8 @@
* Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
@@ -33,7 +35,6 @@
#include "niu.h"
#define DRV_MODULE_NAME "niu"
-#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.0"
#define DRV_MODULE_RELDATE "Nov 14, 2008"
@@ -58,7 +59,7 @@ static void writeq(u64 val, void __iomem *reg)
}
#endif
-static struct pci_device_id niu_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(niu_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_SUN, 0xabcd)},
{}
};
@@ -89,21 +90,6 @@ static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "NIU debug level");
-#define niudbg(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_DEBUG PFX f, ## a); \
-} while (0)
-
-#define niuinfo(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_INFO PFX f, ## a); \
-} while (0)
-
-#define niuwarn(TYPE, f, a...) \
-do { if ((np)->msg_enable & NETIF_MSG_##TYPE) \
- printk(KERN_WARNING PFX f, ## a); \
-} while (0)
-
#define niu_lock_parent(np, flags) \
spin_lock_irqsave(&np->parent->lock, flags)
#define niu_unlock_parent(np, flags) \
@@ -135,10 +121,9 @@ static int __niu_set_and_wait_clear_mac(struct niu *np, unsigned long reg,
nw64_mac(reg, bits);
err = __niu_wait_bits_clear_mac(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64_mac(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_mac(reg));
return err;
}
@@ -175,10 +160,9 @@ static int __niu_set_and_wait_clear_ipp(struct niu *np, unsigned long reg,
err = __niu_wait_bits_clear_ipp(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64_ipp(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64_ipp(reg));
return err;
}
@@ -216,10 +200,9 @@ static int __niu_set_and_wait_clear(struct niu *np, unsigned long reg,
nw64(reg, bits);
err = __niu_wait_bits_clear(np, reg, bits, limit, delay);
if (err)
- dev_err(np->device, PFX "%s: bits (%llx) of register %s "
- "would not clear, val[%llx]\n",
- np->dev->name, (unsigned long long) bits, reg_name,
- (unsigned long long) nr64(reg));
+ netdev_err(np->dev, "bits (%llx) of register %s would not clear, val[%llx]\n",
+ (unsigned long long)bits, reg_name,
+ (unsigned long long)nr64(reg));
return err;
}
@@ -475,9 +458,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_CFG_L, pll_cfg);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_1g_serdes: "
- "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
return err;
}
@@ -486,9 +468,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_STS_L, pll_sts);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_1g_serdes: "
- "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
return err;
}
@@ -531,8 +512,8 @@ static int serdes_init_niu_1g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
@@ -569,9 +550,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_CFG_L, pll_cfg & 0xffff);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_10g_serdes: "
- "mdio write to ESR2_TI_PLL_CFG_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_CFG_L failed\n",
+ np->port, __func__);
return err;
}
@@ -580,9 +560,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
err = mdio_write(np, np->port, NIU_ESR2_DEV_ADDR,
ESR2_TI_PLL_STS_L, pll_sts & 0xffff);
if (err) {
- dev_err(np->device, PFX "NIU Port %d "
- "serdes_init_niu_10g_serdes: "
- "mdio write to ESR2_TI_PLL_STS_L failed", np->port);
+ netdev_err(np->dev, "NIU Port %d %s() mdio write to ESR2_TI_PLL_STS_L failed\n",
+ np->port, __func__);
return err;
}
@@ -639,9 +618,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- pr_info(PFX "NIU Port %u signal bits [%08x] are not "
- "[%08x] for 10G...trying 1G\n",
- np->port, (int) (sig & mask), (int) val);
+ pr_info("NIU Port %u signal bits [%08x] are not [%08x] for 10G...trying 1G\n",
+ np->port, (int)(sig & mask), (int)val);
/* 10G failed, try initializing at 1G */
err = serdes_init_niu_1g_serdes(np);
@@ -649,8 +627,8 @@ static int serdes_init_niu_10g_serdes(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
np->mac_xcvr = MAC_XCVR_PCS;
} else {
- dev_err(np->device, PFX "Port %u 10G/1G SERDES "
- "Link Failed \n", np->port);
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
return -ENODEV;
}
}
@@ -764,9 +742,8 @@ static int esr_reset(struct niu *np)
if (err)
return err;
if (reset != 0) {
- dev_err(np->device, PFX "Port %u ESR_RESET "
- "did not clear [%08x]\n",
- np->port, reset);
+ netdev_err(np->dev, "Port %u ESR_RESET did not clear [%08x]\n",
+ np->port, reset);
return -ENODEV;
}
@@ -890,8 +867,8 @@ static int serdes_init_10g(struct niu *np)
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
return 0;
}
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
if (np->flags & NIU_FLAGS_HOTPLUG_PHY)
@@ -1039,8 +1016,8 @@ static int serdes_init_1g_serdes(struct niu *np)
}
if ((sig & mask) != val) {
- dev_err(np->device, PFX "Port %u signal bits [%08x] are not "
- "[%08x]\n", np->port, (int) (sig & mask), (int) val);
+ netdev_err(np->dev, "Port %u signal bits [%08x] are not [%08x]\n",
+ np->port, (int)(sig & mask), (int)val);
return -ENODEV;
}
@@ -1332,8 +1309,8 @@ static int bcm8704_reset(struct niu *np)
break;
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u PHY will not reset "
- "(bmcr=%04x)\n", np->port, (err & 0xffff));
+ netdev_err(np->dev, "Port %u PHY will not reset (bmcr=%04x)\n",
+ np->port, (err & 0xffff));
return -ENODEV;
}
return 0;
@@ -1515,21 +1492,18 @@ static int xcvr_diag_bcm870x(struct niu *np)
MII_STAT1000);
if (err < 0)
return err;
- pr_info(PFX "Port %u PMA_PMD(MII_STAT1000) [%04x]\n",
- np->port, err);
+ pr_info("Port %u PMA_PMD(MII_STAT1000) [%04x]\n", np->port, err);
err = mdio_read(np, np->phy_addr, BCM8704_USER_DEV3_ADDR, 0x20);
if (err < 0)
return err;
- pr_info(PFX "Port %u USER_DEV3(0x20) [%04x]\n",
- np->port, err);
+ pr_info("Port %u USER_DEV3(0x20) [%04x]\n", np->port, err);
err = mdio_read(np, np->phy_addr, BCM8704_PHYXS_DEV_ADDR,
MII_NWAYTEST);
if (err < 0)
return err;
- pr_info(PFX "Port %u PHYXS(MII_NWAYTEST) [%04x]\n",
- np->port, err);
+ pr_info("Port %u PHYXS(MII_NWAYTEST) [%04x]\n", np->port, err);
#endif
/* XXX dig this out it might not be so useful XXX */
@@ -1555,11 +1529,11 @@ static int xcvr_diag_bcm870x(struct niu *np)
if (analog_stat0 != 0x03fc) {
if ((analog_stat0 == 0x43bc) && (tx_alarm_status != 0)) {
- pr_info(PFX "Port %u cable not connected "
- "or bad cable.\n", np->port);
+ pr_info("Port %u cable not connected or bad cable\n",
+ np->port);
} else if (analog_stat0 == 0x639c) {
- pr_info(PFX "Port %u optical module is bad "
- "or missing.\n", np->port);
+ pr_info("Port %u optical module is bad or missing\n",
+ np->port);
}
}
@@ -1699,8 +1673,8 @@ static int mii_reset(struct niu *np)
break;
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u MII would not reset, "
- "bmcr[%04x]\n", np->port, err);
+ netdev_err(np->dev, "Port %u MII would not reset, bmcr[%04x]\n",
+ np->port, err);
return -ENODEV;
}
@@ -1895,7 +1869,7 @@ static int mii_init_common(struct niu *np)
return err;
bmsr = err;
- pr_info(PFX "Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
+ pr_info("Port %u after MII init bmcr[%04x] bmsr[%04x]\n",
np->port, bmcr, bmsr);
#endif
@@ -1948,16 +1922,12 @@ static int niu_link_status_common(struct niu *np, int link_up)
unsigned long flags;
if (!netif_carrier_ok(dev) && link_up) {
- niuinfo(LINK, "%s: Link is up at %s, %s duplex\n",
- dev->name,
- (lp->active_speed == SPEED_10000 ?
- "10Gb/sec" :
- (lp->active_speed == SPEED_1000 ?
- "1Gb/sec" :
- (lp->active_speed == SPEED_100 ?
- "100Mbit/sec" : "10Mbit/sec"))),
- (lp->active_duplex == DUPLEX_FULL ?
- "full" : "half"));
+ netif_info(np, link, dev, "Link is up at %s, %s duplex\n",
+ lp->active_speed == SPEED_10000 ? "10Gb/sec" :
+ lp->active_speed == SPEED_1000 ? "1Gb/sec" :
+ lp->active_speed == SPEED_100 ? "100Mbit/sec" :
+ "10Mbit/sec",
+ lp->active_duplex == DUPLEX_FULL ? "full" : "half");
spin_lock_irqsave(&np->lock, flags);
niu_init_xif(np);
@@ -1966,7 +1936,7 @@ static int niu_link_status_common(struct niu *np, int link_up)
netif_carrier_on(dev);
} else if (netif_carrier_ok(dev) && !link_up) {
- niuwarn(LINK, "%s: Link is down\n", dev->name);
+ netif_warn(np, link, dev, "Link is down\n");
spin_lock_irqsave(&np->lock, flags);
niu_handle_led(np, 0);
spin_unlock_irqrestore(&np->lock, flags);
@@ -2232,8 +2202,8 @@ static int link_status_10g_hotplug(struct niu *np, int *link_up_p)
} else {
np->flags &= ~NIU_FLAGS_HOTPLUG_PHY_PRESENT;
*link_up_p = 0;
- niuwarn(LINK, "%s: Hotplug PHY Removed\n",
- np->dev->name);
+ netif_warn(np, link, np->dev,
+ "Hotplug PHY Removed\n");
}
}
out:
@@ -2531,8 +2501,8 @@ static int serdes_init_10g_serdes(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
np->mac_xcvr = MAC_XCVR_PCS;
} else {
- dev_err(np->device, PFX "Port %u 10G/1G SERDES Link Failed \n",
- np->port);
+ netdev_err(np->dev, "Port %u 10G/1G SERDES Link Failed\n",
+ np->port);
return -ENODEV;
}
}
@@ -3234,23 +3204,22 @@ static int fflp_early_init(struct niu *np)
parent = np->parent;
err = 0;
if (!(parent->flags & PARENT_FLGS_CLS_HWINIT)) {
- niudbg(PROBE, "fflp_early_init: Initting hw on port %u\n",
- np->port);
if (np->parent->plat_type != PLAT_TYPE_NIU) {
fflp_reset(np);
fflp_set_timings(np);
err = fflp_disable_all_partitions(np);
if (err) {
- niudbg(PROBE, "fflp_disable_all_partitions "
- "failed, err=%d\n", err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_disable_all_partitions failed, err=%d\n",
+ err);
goto out;
}
}
err = tcam_early_init(np);
if (err) {
- niudbg(PROBE, "tcam_early_init failed, err=%d\n",
- err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_early_init failed, err=%d\n", err);
goto out;
}
fflp_llcsnap_enable(np, 1);
@@ -3260,22 +3229,22 @@ static int fflp_early_init(struct niu *np)
err = tcam_flush_all(np);
if (err) {
- niudbg(PROBE, "tcam_flush_all failed, err=%d\n",
- err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "tcam_flush_all failed, err=%d\n", err);
goto out;
}
if (np->parent->plat_type != PLAT_TYPE_NIU) {
err = fflp_hash_clear(np);
if (err) {
- niudbg(PROBE, "fflp_hash_clear failed, "
- "err=%d\n", err);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "fflp_hash_clear failed, err=%d\n",
+ err);
goto out;
}
}
vlan_tbl_clear(np);
- niudbg(PROBE, "fflp_early_init: Success\n");
parent->flags |= PARENT_FLGS_CLS_HWINIT;
}
out:
@@ -3665,8 +3634,8 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
cons = rp->cons;
- niudbg(TX_DONE, "%s: niu_tx_work() pkt_cnt[%u] cons[%d]\n",
- np->dev->name, pkt_cnt, cons);
+ netif_printk(np, tx_done, KERN_DEBUG, np->dev,
+ "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
while (pkt_cnt--)
cons = release_tx_packet(np, rp, cons);
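The niudbg()/niuinfo()/niuwarn() macros removed earlier gated their printk on np->msg_enable; the netif_printk()/netif_info()/netif_warn() replacements used through the rest of this patch carry the same NETIF_MSG_* test internally, so call sites only name the message class. A minimal sketch of the equivalence (msg_enable is the usual per-device bitmask controlled via ethtool):

	/* old pattern: open-coded msg_enable test around a prefixed printk */
	if (np->msg_enable & NETIF_MSG_TX_DONE)
		printk(KERN_DEBUG "niu: %s: pkt_cnt[%u]\n", np->dev->name, pkt_cnt);

	/* new pattern: helper checks NETIF_MSG_TX_DONE and prefixes the device name */
	netif_printk(np, tx_done, KERN_DEBUG, np->dev, "pkt_cnt[%u]\n", pkt_cnt);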
@@ -3714,11 +3683,12 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
rp->rx_errors += misc & RXMISC_COUNT;
if (unlikely(misc & RXMISC_OFLOW))
- dev_err(np->device, "rx-%d: Counter overflow "
- "RXMISC discard\n", rx_channel);
+ dev_err(np->device, "rx-%d: Counter overflow RXMISC discard\n",
+ rx_channel);
- niudbg(RX_ERR, "%s-rx-%d: MISC drop=%u over=%u\n",
- np->dev->name, rx_channel, misc, misc-limit);
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: MISC drop=%u over=%u\n",
+ rx_channel, misc, misc-limit);
}
/* WRED (Weighted Random Early Discard) by hardware */
@@ -3728,11 +3698,11 @@ static inline void niu_sync_rx_discard_stats(struct niu *np,
rp->rx_dropped += wred & RED_DIS_CNT_COUNT;
if (unlikely(wred & RED_DIS_CNT_OFLOW))
- dev_err(np->device, "rx-%d: Counter overflow "
- "WRED discard\n", rx_channel);
+ dev_err(np->device, "rx-%d: Counter overflow WRED discard\n", rx_channel);
- niudbg(RX_ERR, "%s-rx-%d: WRED drop=%u over=%u\n",
- np->dev->name, rx_channel, wred, wred-limit);
+ netif_printk(np, rx_err, KERN_DEBUG, np->dev,
+ "rx-%d: WRED drop=%u over=%u\n",
+ rx_channel, wred, wred-limit);
}
}
@@ -3753,8 +3723,9 @@ static int niu_rx_work(struct napi_struct *napi, struct niu *np,
mbox->rx_dma_ctl_stat = 0;
mbox->rcrstat_a = 0;
- niudbg(RX_STATUS, "%s: niu_rx_work(chan[%d]), stat[%llx] qlen=%d\n",
- np->dev->name, rp->rx_channel, (unsigned long long) stat, qlen);
+ netif_printk(np, rx_status, KERN_DEBUG, np->dev,
+ "%s(chan[%d]), stat[%llx] qlen=%d\n",
+ __func__, rp->rx_channel, (unsigned long long)stat, qlen);
rcr_done = work_done = 0;
qlen = min(qlen, budget);
@@ -3791,8 +3762,8 @@ static int niu_poll_core(struct niu *np, struct niu_ldg *lp, int budget)
u32 rx_vec = (v0 & 0xffffffff);
int i, work_done = 0;
- niudbg(INTR, "%s: niu_poll_core() v0[%016llx]\n",
- np->dev->name, (unsigned long long) v0);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() v0[%016llx]\n", __func__, (unsigned long long)v0);
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -3837,39 +3808,38 @@ static int niu_poll(struct napi_struct *napi, int budget)
static void niu_log_rxchan_errors(struct niu *np, struct rx_ring_info *rp,
u64 stat)
{
- dev_err(np->device, PFX "%s: RX channel %u errors ( ",
- np->dev->name, rp->rx_channel);
+ netdev_err(np->dev, "RX channel %u errors ( ", rp->rx_channel);
if (stat & RX_DMA_CTL_STAT_RBR_TMOUT)
- printk("RBR_TMOUT ");
+ pr_cont("RBR_TMOUT ");
if (stat & RX_DMA_CTL_STAT_RSP_CNT_ERR)
- printk("RSP_CNT ");
+ pr_cont("RSP_CNT ");
if (stat & RX_DMA_CTL_STAT_BYTE_EN_BUS)
- printk("BYTE_EN_BUS ");
+ pr_cont("BYTE_EN_BUS ");
if (stat & RX_DMA_CTL_STAT_RSP_DAT_ERR)
- printk("RSP_DAT ");
+ pr_cont("RSP_DAT ");
if (stat & RX_DMA_CTL_STAT_RCR_ACK_ERR)
- printk("RCR_ACK ");
+ pr_cont("RCR_ACK ");
if (stat & RX_DMA_CTL_STAT_RCR_SHA_PAR)
- printk("RCR_SHA_PAR ");
+ pr_cont("RCR_SHA_PAR ");
if (stat & RX_DMA_CTL_STAT_RBR_PRE_PAR)
- printk("RBR_PRE_PAR ");
+ pr_cont("RBR_PRE_PAR ");
if (stat & RX_DMA_CTL_STAT_CONFIG_ERR)
- printk("CONFIG ");
+ pr_cont("CONFIG ");
if (stat & RX_DMA_CTL_STAT_RCRINCON)
- printk("RCRINCON ");
+ pr_cont("RCRINCON ");
if (stat & RX_DMA_CTL_STAT_RCRFULL)
- printk("RCRFULL ");
+ pr_cont("RCRFULL ");
if (stat & RX_DMA_CTL_STAT_RBRFULL)
- printk("RBRFULL ");
+ pr_cont("RBRFULL ");
if (stat & RX_DMA_CTL_STAT_RBRLOGPAGE)
- printk("RBRLOGPAGE ");
+ pr_cont("RBRLOGPAGE ");
if (stat & RX_DMA_CTL_STAT_CFIGLOGPAGE)
- printk("CFIGLOGPAGE ");
+ pr_cont("CFIGLOGPAGE ");
if (stat & RX_DMA_CTL_STAT_DC_FIFO_ERR)
- printk("DC_FIDO ");
+ pr_cont("DC_FIDO ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
@@ -3883,9 +3853,9 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
err = -EINVAL;
if (err) {
- dev_err(np->device, PFX "%s: RX channel %u error, stat[%llx]\n",
- np->dev->name, rp->rx_channel,
- (unsigned long long) stat);
+ netdev_err(np->dev, "RX channel %u error, stat[%llx]\n",
+ rp->rx_channel,
+ (unsigned long long) stat);
niu_log_rxchan_errors(np, rp, stat);
}
@@ -3899,27 +3869,26 @@ static int niu_rx_error(struct niu *np, struct rx_ring_info *rp)
static void niu_log_txchan_errors(struct niu *np, struct tx_ring_info *rp,
u64 cs)
{
- dev_err(np->device, PFX "%s: TX channel %u errors ( ",
- np->dev->name, rp->tx_channel);
+ netdev_err(np->dev, "TX channel %u errors ( ", rp->tx_channel);
if (cs & TX_CS_MBOX_ERR)
- printk("MBOX ");
+ pr_cont("MBOX ");
if (cs & TX_CS_PKT_SIZE_ERR)
- printk("PKT_SIZE ");
+ pr_cont("PKT_SIZE ");
if (cs & TX_CS_TX_RING_OFLOW)
- printk("TX_RING_OFLOW ");
+ pr_cont("TX_RING_OFLOW ");
if (cs & TX_CS_PREF_BUF_PAR_ERR)
- printk("PREF_BUF_PAR ");
+ pr_cont("PREF_BUF_PAR ");
if (cs & TX_CS_NACK_PREF)
- printk("NACK_PREF ");
+ pr_cont("NACK_PREF ");
if (cs & TX_CS_NACK_PKT_RD)
- printk("NACK_PKT_RD ");
+ pr_cont("NACK_PKT_RD ");
if (cs & TX_CS_CONF_PART_ERR)
- printk("CONF_PART ");
+ pr_cont("CONF_PART ");
if (cs & TX_CS_PKT_PRT_ERR)
- printk("PKT_PTR ");
+ pr_cont("PKT_PTR ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
@@ -3930,12 +3899,11 @@ static int niu_tx_error(struct niu *np, struct tx_ring_info *rp)
logh = nr64(TX_RNG_ERR_LOGH(rp->tx_channel));
logl = nr64(TX_RNG_ERR_LOGL(rp->tx_channel));
- dev_err(np->device, PFX "%s: TX channel %u error, "
- "cs[%llx] logh[%llx] logl[%llx]\n",
- np->dev->name, rp->tx_channel,
- (unsigned long long) cs,
- (unsigned long long) logh,
- (unsigned long long) logl);
+ netdev_err(np->dev, "TX channel %u error, cs[%llx] logh[%llx] logl[%llx]\n",
+ rp->tx_channel,
+ (unsigned long long)cs,
+ (unsigned long long)logh,
+ (unsigned long long)logl);
niu_log_txchan_errors(np, rp, cs);
@@ -3954,9 +3922,8 @@ static int niu_mif_interrupt(struct niu *np)
phy_mdint = 1;
}
- dev_err(np->device, PFX "%s: MIF interrupt, "
- "stat[%llx] phy_mdint(%d)\n",
- np->dev->name, (unsigned long long) mif_status, phy_mdint);
+ netdev_err(np->dev, "MIF interrupt, stat[%llx] phy_mdint(%d)\n",
+ (unsigned long long)mif_status, phy_mdint);
return -ENODEV;
}
@@ -4081,41 +4048,40 @@ static int niu_mac_interrupt(struct niu *np)
static void niu_log_device_error(struct niu *np, u64 stat)
{
- dev_err(np->device, PFX "%s: Core device errors ( ",
- np->dev->name);
+ netdev_err(np->dev, "Core device errors ( ");
if (stat & SYS_ERR_MASK_META2)
- printk("META2 ");
+ pr_cont("META2 ");
if (stat & SYS_ERR_MASK_META1)
- printk("META1 ");
+ pr_cont("META1 ");
if (stat & SYS_ERR_MASK_PEU)
- printk("PEU ");
+ pr_cont("PEU ");
if (stat & SYS_ERR_MASK_TXC)
- printk("TXC ");
+ pr_cont("TXC ");
if (stat & SYS_ERR_MASK_RDMC)
- printk("RDMC ");
+ pr_cont("RDMC ");
if (stat & SYS_ERR_MASK_TDMC)
- printk("TDMC ");
+ pr_cont("TDMC ");
if (stat & SYS_ERR_MASK_ZCP)
- printk("ZCP ");
+ pr_cont("ZCP ");
if (stat & SYS_ERR_MASK_FFLP)
- printk("FFLP ");
+ pr_cont("FFLP ");
if (stat & SYS_ERR_MASK_IPP)
- printk("IPP ");
+ pr_cont("IPP ");
if (stat & SYS_ERR_MASK_MAC)
- printk("MAC ");
+ pr_cont("MAC ");
if (stat & SYS_ERR_MASK_SMX)
- printk("SMX ");
+ pr_cont("SMX ");
- printk(")\n");
+ pr_cont(")\n");
}
static int niu_device_error(struct niu *np)
{
u64 stat = nr64(SYS_ERR_STAT);
- dev_err(np->device, PFX "%s: Core device error, stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
+ netdev_err(np->dev, "Core device error, stat[%llx]\n",
+ (unsigned long long)stat);
niu_log_device_error(np, stat);
@@ -4197,8 +4163,8 @@ static void niu_rxchan_intr(struct niu *np, struct rx_ring_info *rp,
RX_DMA_CTL_STAT_RCRTO);
nw64(RX_DMA_CTL_STAT(rp->rx_channel), stat_write);
- niudbg(INTR, "%s: rxchan_intr stat[%llx]\n",
- np->dev->name, (unsigned long long) stat);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() stat[%llx]\n", __func__, (unsigned long long)stat);
}
static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
@@ -4206,8 +4172,8 @@ static void niu_txchan_intr(struct niu *np, struct tx_ring_info *rp,
{
rp->tx_cs = nr64(TX_CS(rp->tx_channel));
- niudbg(INTR, "%s: txchan_intr cs[%llx]\n",
- np->dev->name, (unsigned long long) rp->tx_cs);
+ netif_printk(np, intr, KERN_DEBUG, np->dev,
+ "%s() cs[%llx]\n", __func__, (unsigned long long)rp->tx_cs);
}
static void __niu_fastpath_interrupt(struct niu *np, int ldg, u64 v0)
@@ -4265,8 +4231,8 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
u64 v0, v1, v2;
if (netif_msg_intr(np))
- printk(KERN_DEBUG PFX "niu_interrupt() ldg[%p](%d) ",
- lp, ldg);
+ printk(KERN_DEBUG KBUILD_MODNAME ": " "%s() ldg[%p](%d)",
+ __func__, lp, ldg);
spin_lock_irqsave(&np->lock, flags);
@@ -4275,7 +4241,7 @@ static irqreturn_t niu_interrupt(int irq, void *dev_id)
v2 = nr64(LDSV2(ldg));
if (netif_msg_intr(np))
- printk("v0[%llx] v1[%llx] v2[%llx]\n",
+ pr_cont(" v0[%llx] v1[%llx] v2[%llx]\n",
(unsigned long long) v0,
(unsigned long long) v1,
(unsigned long long) v2);
@@ -4400,8 +4366,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->mbox)
return -ENOMEM;
if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA mailbox %p\n",
+ rp->mbox);
return -EINVAL;
}
@@ -4411,8 +4377,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->rcr)
return -ENOMEM;
if ((unsigned long)rp->rcr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA RCR table %p\n", np->dev->name, rp->rcr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RCR table %p\n",
+ rp->rcr);
return -EINVAL;
}
rp->rcr_table_size = MAX_RCR_RING_SIZE;
@@ -4424,8 +4390,8 @@ static int niu_alloc_rx_ring_info(struct niu *np,
if (!rp->rbr)
return -ENOMEM;
if ((unsigned long)rp->rbr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "RXDMA RBR table %p\n", np->dev->name, rp->rbr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned RXDMA RBR table %p\n",
+ rp->rbr);
return -EINVAL;
}
rp->rbr_table_size = MAX_RBR_RING_SIZE;
@@ -4458,8 +4424,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
if (!rp->mbox)
return -ENOMEM;
if ((unsigned long)rp->mbox & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA mailbox %p\n", np->dev->name, rp->mbox);
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA mailbox %p\n",
+ rp->mbox);
return -EINVAL;
}
@@ -4469,8 +4435,8 @@ static int niu_alloc_tx_ring_info(struct niu *np,
if (!rp->descr)
return -ENOMEM;
if ((unsigned long)rp->descr & (64UL - 1)) {
- dev_err(np->device, PFX "%s: Coherent alloc gives misaligned "
- "TXDMA descr table %p\n", np->dev->name, rp->descr);
+ netdev_err(np->dev, "Coherent alloc gives misaligned TXDMA descr table %p\n",
+ rp->descr);
return -EINVAL;
}
@@ -4726,10 +4692,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
if (rp->descr_dma & ~(TX_RNG_CFIG_STADDR_BASE |
TX_RNG_CFIG_STADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "DMA addr (%llx) is not aligned.\n",
- np->dev->name, channel,
- (unsigned long long) rp->descr_dma);
+ netdev_err(np->dev, "TX ring channel %d DMA addr (%llx) is not aligned\n",
+ channel, (unsigned long long)rp->descr_dma);
return -EINVAL;
}
@@ -4746,10 +4710,8 @@ static int niu_init_one_tx_channel(struct niu *np, struct tx_ring_info *rp)
if (((rp->mbox_dma >> 32) & ~TXDMA_MBH_MBADDR) ||
((u32)rp->mbox_dma & ~TXDMA_MBL_MBADDR)) {
- dev_err(np->device, PFX "%s: TX ring channel %d "
- "MBOX addr (%llx) is has illegal bits.\n",
- np->dev->name, channel,
- (unsigned long long) rp->mbox_dma);
+ netdev_err(np->dev, "TX ring channel %d MBOX addr (%llx) has invalid bits\n",
+ channel, (unsigned long long)rp->mbox_dma);
return -EINVAL;
}
nw64(TXDMA_MBH(channel), rp->mbox_dma >> 32);
@@ -5146,9 +5108,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
1000, 100);
if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
+ netdev_err(np->dev, "ZCP read busy won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
return err;
}
@@ -5160,9 +5121,8 @@ static int niu_zcp_read(struct niu *np, int index, u64 *data)
err = niu_wait_bits_clear(np, ZCP_RAM_ACC, ZCP_RAM_ACC_BUSY,
1000, 100);
if (err) {
- dev_err(np->device, PFX "%s: ZCP read busy2 won't clear, "
- "ZCP_RAM_ACC[%llx]\n", np->dev->name,
- (unsigned long long) nr64(ZCP_RAM_ACC));
+ netdev_err(np->dev, "ZCP read busy2 won't clear, ZCP_RAM_ACC[%llx]\n",
+ (unsigned long long)nr64(ZCP_RAM_ACC));
return err;
}
@@ -5527,8 +5487,7 @@ static int niu_reset_tx_bmac(struct niu *np)
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u TX BMAC would not reset, "
- "BTXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u TX BMAC would not reset, BTXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(BTXMAC_SW_RST));
return -ENODEV;
@@ -5629,12 +5588,11 @@ static int niu_reset_rx_xmac(struct niu *np)
while (--limit >= 0) {
if (!(nr64_mac(XRXMAC_SW_RST) & (XRXMAC_SW_RST_REG_RS |
XRXMAC_SW_RST_SOFT_RST)))
- break;
+ break;
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX XMAC would not reset, "
- "XRXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u RX XMAC would not reset, XRXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(XRXMAC_SW_RST));
return -ENODEV;
@@ -5655,8 +5613,7 @@ static int niu_reset_rx_bmac(struct niu *np)
udelay(100);
}
if (limit < 0) {
- dev_err(np->device, PFX "Port %u RX BMAC would not reset, "
- "BRXMAC_SW_RST[%llx]\n",
+ dev_err(np->device, "Port %u RX BMAC would not reset, BRXMAC_SW_RST[%llx]\n",
np->port,
(unsigned long long) nr64_mac(BRXMAC_SW_RST));
return -ENODEV;
@@ -5960,11 +5917,9 @@ static void niu_disable_ipp(struct niu *np)
}
if (limit < 0 &&
(rd != 0 && wr != 1)) {
- dev_err(np->device, PFX "%s: IPP would not quiesce, "
- "rd_ptr[%llx] wr_ptr[%llx]\n",
- np->dev->name,
- (unsigned long long) nr64_ipp(IPP_DFIFO_RD_PTR),
- (unsigned long long) nr64_ipp(IPP_DFIFO_WR_PTR));
+ netdev_err(np->dev, "IPP would not quiesce, rd_ptr[%llx] wr_ptr[%llx]\n",
+ (unsigned long long)nr64_ipp(IPP_DFIFO_RD_PTR),
+ (unsigned long long)nr64_ipp(IPP_DFIFO_WR_PTR));
}
val = nr64_ipp(IPP_CFIG);
@@ -5981,12 +5936,12 @@ static int niu_init_hw(struct niu *np)
{
int i, err;
- niudbg(IFUP, "%s: Initialize TXC\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TXC\n");
niu_txc_enable_port(np, 1);
niu_txc_port_dma_enable(np, 1);
niu_txc_set_imask(np, 0);
- niudbg(IFUP, "%s: Initialize TX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize TX channels\n");
for (i = 0; i < np->num_tx_rings; i++) {
struct tx_ring_info *rp = &np->tx_rings[i];
@@ -5995,27 +5950,27 @@ static int niu_init_hw(struct niu *np)
return err;
}
- niudbg(IFUP, "%s: Initialize RX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize RX channels\n");
err = niu_init_rx_channels(np);
if (err)
goto out_uninit_tx_channels;
- niudbg(IFUP, "%s: Initialize classifier\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize classifier\n");
err = niu_init_classifier_hw(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize ZCP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize ZCP\n");
err = niu_init_zcp(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize IPP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize IPP\n");
err = niu_init_ipp(np);
if (err)
goto out_uninit_rx_channels;
- niudbg(IFUP, "%s: Initialize MAC\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Initialize MAC\n");
err = niu_init_mac(np);
if (err)
goto out_uninit_ipp;
@@ -6023,16 +5978,16 @@ static int niu_init_hw(struct niu *np)
return 0;
out_uninit_ipp:
- niudbg(IFUP, "%s: Uninit IPP\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit IPP\n");
niu_disable_ipp(np);
out_uninit_rx_channels:
- niudbg(IFUP, "%s: Uninit RX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit RX channels\n");
niu_stop_rx_channels(np);
niu_reset_rx_channels(np);
out_uninit_tx_channels:
- niudbg(IFUP, "%s: Uninit TX channels\n", np->dev->name);
+ netif_printk(np, ifup, KERN_DEBUG, np->dev, "Uninit TX channels\n");
niu_stop_tx_channels(np);
niu_reset_tx_channels(np);
@@ -6041,25 +5996,25 @@ out_uninit_tx_channels:
static void niu_stop_hw(struct niu *np)
{
- niudbg(IFDOWN, "%s: Disable interrupts\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable interrupts\n");
niu_enable_interrupts(np, 0);
- niudbg(IFDOWN, "%s: Disable RX MAC\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable RX MAC\n");
niu_enable_rx_mac(np, 0);
- niudbg(IFDOWN, "%s: Disable IPP\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Disable IPP\n");
niu_disable_ipp(np);
- niudbg(IFDOWN, "%s: Stop TX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop TX channels\n");
niu_stop_tx_channels(np);
- niudbg(IFDOWN, "%s: Stop RX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Stop RX channels\n");
niu_stop_rx_channels(np);
- niudbg(IFDOWN, "%s: Reset TX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset TX channels\n");
niu_reset_tx_channels(np);
- niudbg(IFDOWN, "%s: Reset RX channels\n", np->dev->name);
+ netif_printk(np, ifdown, KERN_DEBUG, np->dev, "Reset RX channels\n");
niu_reset_rx_channels(np);
}
@@ -6369,10 +6324,10 @@ static void niu_set_rx_mode(struct net_device *dev)
np->flags &= ~(NIU_FLAGS_MCAST | NIU_FLAGS_PROMISC);
if (dev->flags & IFF_PROMISC)
np->flags |= NIU_FLAGS_PROMISC;
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 0))
+ if ((dev->flags & IFF_ALLMULTI) || (!netdev_mc_empty(dev)))
np->flags |= NIU_FLAGS_MCAST;
- alt_cnt = dev->uc.count;
+ alt_cnt = netdev_uc_count(dev);
if (alt_cnt > niu_num_alt_addr(np)) {
alt_cnt = 0;
np->flags |= NIU_FLAGS_PROMISC;
@@ -6381,17 +6336,15 @@ static void niu_set_rx_mode(struct net_device *dev)
if (alt_cnt) {
int index = 0;
- list_for_each_entry(ha, &dev->uc.list, list) {
+ netdev_for_each_uc_addr(ha, dev) {
err = niu_set_alt_mac(np, index, ha->addr);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "adding alt mac %d\n",
- dev->name, err, index);
+ netdev_warn(dev, "Error %d adding alt mac %d\n",
+ err, index);
err = niu_enable_alt_mac(np, index, 1);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "enabling alt mac %d\n",
- dev->name, err, index);
+ netdev_warn(dev, "Error %d enabling alt mac %d\n",
+ err, index);
index++;
}
@@ -6404,16 +6357,15 @@ static void niu_set_rx_mode(struct net_device *dev)
for (i = alt_start; i < niu_num_alt_addr(np); i++) {
err = niu_enable_alt_mac(np, i, 0);
if (err)
- printk(KERN_WARNING PFX "%s: Error %d "
- "disabling alt mac %d\n",
- dev->name, err, i);
+ netdev_warn(dev, "Error %d disabling alt mac %d\n",
+ err, i);
}
}
if (dev->flags & IFF_ALLMULTI) {
for (i = 0; i < 16; i++)
hash[i] = 0xffff;
- } else if (dev->mc_count > 0) {
- for (addr = dev->mc_list; addr; addr = addr->next) {
+ } else if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(addr, dev) {
u32 crc = ether_crc_le(ETH_ALEN, addr->da_addr);
crc >>= 24;
@@ -6570,7 +6522,7 @@ static void niu_tx_timeout(struct net_device *dev)
{
struct niu *np = netdev_priv(dev);
- dev_err(np->device, PFX "%s: Transmit timed out, resetting\n",
+ dev_err(np->device, "%s: Transmit timed out, resetting\n",
dev->name);
schedule_work(&np->reset_task);
@@ -6672,8 +6624,7 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
if (niu_tx_avail(rp) <= (skb_shinfo(skb)->nr_frags + 1)) {
netif_tx_stop_queue(txq);
- dev_err(np->device, PFX "%s: BUG! Tx ring full when "
- "queue awake!\n", dev->name);
+ dev_err(np->device, "%s: BUG! Tx ring full when queue awake!\n", dev->name);
rp->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -7237,8 +7188,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
tp = &parent->tcam[idx];
if (!tp->valid) {
- pr_info(PFX "niu%d: %s entry [%d] invalid for idx[%d]\n",
- parent->index, np->dev->name, (u16)nfc->fs.location, idx);
+ netdev_info(np->dev, "niu%d: entry [%d] invalid for idx[%d]\n",
+ parent->index, (u16)nfc->fs.location, idx);
return -EINVAL;
}
@@ -7248,8 +7199,8 @@ static int niu_get_ethtool_tcam_entry(struct niu *np,
ret = niu_class_to_ethflow(class, &fsp->flow_type);
if (ret < 0) {
- pr_info(PFX "niu%d: %s niu_class_to_ethflow failed\n",
- parent->index, np->dev->name);
+ netdev_info(np->dev, "niu%d: niu_class_to_ethflow failed\n",
+ parent->index);
ret = -EINVAL;
goto out;
}
@@ -7332,9 +7283,8 @@ static int niu_get_ethtool_tcam_all(struct niu *np,
if (n_entries != cnt) {
/* print warning, this should not happen */
- pr_info(PFX "niu%d: %s In niu_get_ethtool_tcam_all, "
- "n_entries[%d] != cnt[%d]!!!\n\n",
- np->parent->index, np->dev->name, n_entries, cnt);
+ netdev_info(np->dev, "niu%d: In %s(): n_entries[%d] != cnt[%d]!!!\n",
+ np->parent->index, __func__, n_entries, cnt);
}
return 0;
@@ -7561,9 +7511,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
}
}
if (!add_usr_cls) {
- pr_info(PFX "niu%d: %s niu_add_ethtool_tcam_entry: "
- "Could not find/insert class for pid %d\n",
- parent->index, np->dev->name, uspec->proto);
+ netdev_info(np->dev, "niu%d: %s(): Could not find/insert class for pid %d\n",
+ parent->index, __func__, uspec->proto);
ret = -EINVAL;
goto out;
}
@@ -7596,9 +7545,8 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
case AH_V6_FLOW:
case ESP_V6_FLOW:
/* Not yet implemented */
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "flow %d for IPv6 not implemented\n\n",
- parent->index, np->dev->name, fsp->flow_type);
+ netdev_info(np->dev, "niu%d: In %s(): flow %d for IPv6 not implemented\n",
+ parent->index, __func__, fsp->flow_type);
ret = -EINVAL;
goto out;
case IP_USER_FLOW:
@@ -7607,17 +7555,15 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
class);
} else {
/* Not yet implemented */
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "usr flow for IPv6 not implemented\n\n",
- parent->index, np->dev->name);
+ netdev_info(np->dev, "niu%d: In %s(): usr flow for IPv6 not implemented\n",
+ parent->index, __func__);
ret = -EINVAL;
goto out;
}
break;
default:
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "Unknown flow type %d\n\n",
- parent->index, np->dev->name, fsp->flow_type);
+ netdev_info(np->dev, "niu%d: In %s(): Unknown flow type %d\n",
+ parent->index, __func__, fsp->flow_type);
ret = -EINVAL;
goto out;
}
@@ -7627,10 +7573,9 @@ static int niu_add_ethtool_tcam_entry(struct niu *np,
tp->assoc_data = TCAM_ASSOCDATA_DISC;
} else {
if (fsp->ring_cookie >= np->num_rx_rings) {
- pr_info(PFX "niu%d: %s In niu_add_ethtool_tcam_entry: "
- "Invalid RX ring %lld\n\n",
- parent->index, np->dev->name,
- (long long) fsp->ring_cookie);
+ netdev_info(np->dev, "niu%d: In %s(): Invalid RX ring %lld\n",
+ parent->index, __func__,
+ (long long)fsp->ring_cookie);
ret = -EINVAL;
goto out;
}
@@ -7699,10 +7644,9 @@ static int niu_del_ethtool_tcam_entry(struct niu *np, u32 loc)
}
}
if (i == NIU_L3_PROG_CLS) {
- pr_info(PFX "niu%d: %s In niu_del_ethtool_tcam_entry,"
- "Usr class 0x%llx not found \n",
- parent->index, np->dev->name,
- (unsigned long long) class);
+ netdev_info(np->dev, "niu%d: In %s(): Usr class 0x%llx not found\n",
+ parent->index, __func__,
+ (unsigned long long)class);
ret = -EINVAL;
goto out;
}
@@ -8001,9 +7945,7 @@ static int niu_ldg_assign_ldn(struct niu *np, struct niu_parent *parent,
* won't get any interrupts and that's painful to debug.
*/
if (nr64(LDG_NUM(ldn)) != ldg) {
- dev_err(np->device, PFX "Port %u, mis-matched "
- "LDG assignment "
- "for ldn %d, should be %d is %llu\n",
+ dev_err(np->device, "Port %u, mis-matched LDG assignment for ldn %d, should be %d is %llu\n",
np->port, ldn, ldg,
(unsigned long long) nr64(LDG_NUM(ldn)));
return -EINVAL;
@@ -8056,7 +7998,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
break;
} while (limit--);
if (!(frame & ESPC_PIO_STAT_READ_END)) {
- dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
(unsigned long long) frame);
return -ENODEV;
}
@@ -8071,7 +8013,7 @@ static int __devinit niu_pci_eeprom_read(struct niu *np, u32 addr)
break;
} while (limit--);
if (!(frame & ESPC_PIO_STAT_READ_END)) {
- dev_err(np->device, PFX "EEPROM read timeout frame[%llx]\n",
+ dev_err(np->device, "EEPROM read timeout frame[%llx]\n",
(unsigned long long) frame);
return -ENODEV;
}
@@ -8152,8 +8094,9 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
s += i + 5;
sscanf(s, "%d.%d", &vpd->fcode_major, &vpd->fcode_minor);
- niudbg(PROBE, "VPD_SCAN: FCODE major(%d) minor(%d)\n",
- vpd->fcode_major, vpd->fcode_minor);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: FCODE major(%d) minor(%d)\n",
+ vpd->fcode_major, vpd->fcode_minor);
if (vpd->fcode_major > NIU_VPD_MIN_MAJOR ||
(vpd->fcode_major == NIU_VPD_MIN_MAJOR &&
vpd->fcode_minor >= NIU_VPD_MIN_MINOR))
@@ -8173,8 +8116,8 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
#define FOUND_MASK_PHY 0x00000020
#define FOUND_MASK_ALL 0x0000003f
- niudbg(PROBE, "VPD_SCAN: start[%x] end[%x]\n",
- start, end);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: start[%x] end[%x]\n", start, end);
while (start < end) {
int len, err, instance, type, prop_len;
char namebuf[64];
@@ -8228,8 +8171,7 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
}
if (max_len && prop_len > max_len) {
- dev_err(np->device, PFX "Property '%s' length (%d) is "
- "too long.\n", namebuf, prop_len);
+ dev_err(np->device, "Property '%s' length (%d) is too long\n", namebuf, prop_len);
return -EINVAL;
}
@@ -8237,8 +8179,9 @@ static int __devinit niu_pci_vpd_scan_props(struct niu *np,
u32 off = start + 5 + err;
int i;
- niudbg(PROBE, "VPD_SCAN: Reading in property [%s] "
- "len[%d]\n", namebuf, prop_len);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "VPD_SCAN: Reading in property [%s] len[%d]\n",
+ namebuf, prop_len);
for (i = 0; i < prop_len; i++)
*prop_buf++ = niu_pci_eeprom_read(np, off + i);
}
@@ -8402,8 +8345,7 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
u8 val8;
if (!is_valid_ether_addr(&vpd->local_mac[0])) {
- dev_err(np->device, PFX "VPD MAC invalid, "
- "falling back to SPROM.\n");
+ dev_err(np->device, "VPD MAC invalid, falling back to SPROM\n");
np->flags &= ~NIU_FLAGS_VPD_VALID;
return;
@@ -8420,14 +8362,14 @@ static void __devinit niu_pci_vpd_validate(struct niu *np)
np->flags &= ~NIU_FLAGS_10G;
}
if (np->flags & NIU_FLAGS_10G)
- np->mac_xcvr = MAC_XCVR_XPCS;
+ np->mac_xcvr = MAC_XCVR_XPCS;
} else if (!strcmp(np->vpd.model, NIU_FOXXY_MDL_STR)) {
np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER |
NIU_FLAGS_HOTPLUG_PHY);
} else if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
- dev_err(np->device, PFX "Illegal phy string [%s].\n",
+ dev_err(np->device, "Illegal phy string [%s]\n",
np->vpd.phy_type);
- dev_err(np->device, PFX "Falling back to SPROM.\n");
+ dev_err(np->device, "Falling back to SPROM\n");
np->flags &= ~NIU_FLAGS_VPD_VALID;
return;
}
@@ -8455,7 +8397,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->eeprom_len = len;
- niudbg(PROBE, "SPROM: Image size %llu\n", (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Image size %llu\n", (unsigned long long)val);
sum = 0;
for (i = 0; i < len; i++) {
@@ -8465,10 +8408,10 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
sum += (val >> 16) & 0xff;
sum += (val >> 24) & 0xff;
}
- niudbg(PROBE, "SPROM: Checksum %x\n", (int)(sum & 0xff));
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: Checksum %x\n", (int)(sum & 0xff));
if ((sum & 0xff) != 0xab) {
- dev_err(np->device, PFX "Bad SPROM checksum "
- "(%x, should be 0xab)\n", (int) (sum & 0xff));
+ dev_err(np->device, "Bad SPROM checksum (%x, should be 0xab)\n", (int)(sum & 0xff));
return -EINVAL;
}
@@ -8491,11 +8434,12 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
ESPC_PHY_TYPE_PORT3_SHIFT;
break;
default:
- dev_err(np->device, PFX "Bogus port number %u\n",
+ dev_err(np->device, "Bogus port number %u\n",
np->port);
return -EINVAL;
}
- niudbg(PROBE, "SPROM: PHY type %x\n", val8);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: PHY type %x\n", val8);
switch (val8) {
case ESPC_PHY_TYPE_1G_COPPER:
@@ -8527,30 +8471,27 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
break;
default:
- dev_err(np->device, PFX "Bogus SPROM phy type %u\n", val8);
+ dev_err(np->device, "Bogus SPROM phy type %u\n", val8);
return -EINVAL;
}
val = nr64(ESPC_MAC_ADDR0);
- niudbg(PROBE, "SPROM: MAC_ADDR0[%08llx]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR0[%08llx]\n", (unsigned long long)val);
dev->perm_addr[0] = (val >> 0) & 0xff;
dev->perm_addr[1] = (val >> 8) & 0xff;
dev->perm_addr[2] = (val >> 16) & 0xff;
dev->perm_addr[3] = (val >> 24) & 0xff;
val = nr64(ESPC_MAC_ADDR1);
- niudbg(PROBE, "SPROM: MAC_ADDR1[%08llx]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MAC_ADDR1[%08llx]\n", (unsigned long long)val);
dev->perm_addr[4] = (val >> 0) & 0xff;
dev->perm_addr[5] = (val >> 8) & 0xff;
if (!is_valid_ether_addr(&dev->perm_addr[0])) {
- dev_err(np->device, PFX "SPROM MAC address invalid\n");
- dev_err(np->device, PFX "[ \n");
- for (i = 0; i < 6; i++)
- printk("%02x ", dev->perm_addr[i]);
- printk("]\n");
+ dev_err(np->device, "SPROM MAC address invalid [ %pM ]\n",
+ dev->perm_addr);
return -EINVAL;
}
@@ -8562,8 +8503,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
memcpy(dev->dev_addr, dev->perm_addr, dev->addr_len);
val = nr64(ESPC_MOD_STR_LEN);
- niudbg(PROBE, "SPROM: MOD_STR_LEN[%llu]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: MOD_STR_LEN[%llu]\n", (unsigned long long)val);
if (val >= 8 * 4)
return -EINVAL;
@@ -8578,8 +8519,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->vpd.model[val] = '\0';
val = nr64(ESPC_BD_MOD_STR_LEN);
- niudbg(PROBE, "SPROM: BD_MOD_STR_LEN[%llu]\n",
- (unsigned long long) val);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: BD_MOD_STR_LEN[%llu]\n", (unsigned long long)val);
if (val >= 4 * 4)
return -EINVAL;
@@ -8595,8 +8536,8 @@ static int __devinit niu_pci_probe_sprom(struct niu *np)
np->vpd.mac_num =
nr64(ESPC_NUM_PORTS_MACS) & ESPC_NUM_PORTS_MACS_VAL;
- niudbg(PROBE, "SPROM: NUM_PORTS_MACS[%d]\n",
- np->vpd.mac_num);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "SPROM: NUM_PORTS_MACS[%d]\n", np->vpd.mac_num);
return 0;
}
@@ -8629,8 +8570,6 @@ static int __devinit niu_get_and_validate_port(struct niu *np)
}
}
- niudbg(PROBE, "niu_get_and_validate_port: port[%d] num_ports[%d]\n",
- np->port, parent->num_ports);
if (np->port >= parent->num_ports)
return -ENODEV;
@@ -8659,14 +8598,12 @@ static int __devinit phy_record(struct niu_parent *parent,
pr_info("niu%d: Found PHY %08x type %s at phy_port %u\n",
parent->index, id,
- (type == PHY_TYPE_PMA_PMD ?
- "PMA/PMD" :
- (type == PHY_TYPE_PCS ?
- "PCS" : "MII")),
+ type == PHY_TYPE_PMA_PMD ? "PMA/PMD" :
+ type == PHY_TYPE_PCS ? "PCS" : "MII",
phy_port);
if (p->cur[type] >= NIU_MAX_PORTS) {
- printk(KERN_ERR PFX "Too many PHY ports.\n");
+ pr_err("Too many PHY ports\n");
return -EINVAL;
}
idx = p->cur[type];
@@ -8727,8 +8664,7 @@ static void __devinit niu_n2_divide_channels(struct niu_parent *parent)
parent->rxchan_per_port[i] = (16 / num_ports);
parent->txchan_per_port[i] = (16 / num_ports);
- pr_info(PFX "niu%d: Port %u [%u RX chans] "
- "[%u TX chans]\n",
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
parent->index, i,
parent->rxchan_per_port[i],
parent->txchan_per_port[i]);
@@ -8771,8 +8707,7 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
parent->rxchan_per_port[i] = rx_chans_per_1g;
parent->txchan_per_port[i] = tx_chans_per_1g;
}
- pr_info(PFX "niu%d: Port %u [%u RX chans] "
- "[%u TX chans]\n",
+ pr_info("niu%d: Port %u [%u RX chans] [%u TX chans]\n",
parent->index, i,
parent->rxchan_per_port[i],
parent->txchan_per_port[i]);
@@ -8781,23 +8716,20 @@ static void __devinit niu_divide_channels(struct niu_parent *parent,
}
if (tot_rx > NIU_NUM_RXCHAN) {
- printk(KERN_ERR PFX "niu%d: Too many RX channels (%d), "
- "resetting to one per port.\n",
+ pr_err("niu%d: Too many RX channels (%d), resetting to one per port\n",
parent->index, tot_rx);
for (i = 0; i < num_ports; i++)
parent->rxchan_per_port[i] = 1;
}
if (tot_tx > NIU_NUM_TXCHAN) {
- printk(KERN_ERR PFX "niu%d: Too many TX channels (%d), "
- "resetting to one per port.\n",
+ pr_err("niu%d: Too many TX channels (%d), resetting to one per port\n",
parent->index, tot_tx);
for (i = 0; i < num_ports; i++)
parent->txchan_per_port[i] = 1;
}
if (tot_rx < NIU_NUM_RXCHAN || tot_tx < NIU_NUM_TXCHAN) {
- printk(KERN_WARNING PFX "niu%d: Driver bug, wasted channels, "
- "RX[%d] TX[%d]\n",
- parent->index, tot_rx, tot_tx);
+ pr_warning("niu%d: Driver bug, wasted channels, RX[%d] TX[%d]\n",
+ parent->index, tot_rx, tot_tx);
}
}
@@ -8825,18 +8757,18 @@ static void __devinit niu_divide_rdc_groups(struct niu_parent *parent,
struct rdc_table *rt = &tp->tables[grp];
int slot;
- pr_info(PFX "niu%d: Port %d RDC tbl(%d) [ ",
+ pr_info("niu%d: Port %d RDC tbl(%d) [ ",
parent->index, i, tp->first_table_num + grp);
for (slot = 0; slot < NIU_RDC_TABLE_SLOTS; slot++) {
rt->rxdma_channel[slot] =
rdc_channel_base + this_channel_offset;
- printk("%d ", rt->rxdma_channel[slot]);
+ pr_cont("%d ", rt->rxdma_channel[slot]);
if (++this_channel_offset == num_channels)
this_channel_offset = 0;
}
- printk("]\n");
+ pr_cont("]\n");
}
parent->rdc_default[i] = rdc_channel_base;
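The RDC-table hunk above switches open-coded printk() continuations to pr_cont(). A minimal sketch of that pattern; example_dump_slots() and its data are made up, only the pr_info()/pr_cont() shape is taken from the hunk:

	/* Sketch only: pr_info() opens the logical line, pr_cont() extends it,
	 * so the log level set by the first call is not lost mid-line the way
	 * a bare printk() continuation could lose it. */
	static void example_dump_slots(const int *slot, int n)
	{
		int i;

		pr_info("slots [ ");
		for (i = 0; i < n; i++)
			pr_cont("%d ", slot[i]);
		pr_cont("]\n");
	}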
@@ -8996,8 +8928,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
break;
default:
- printk(KERN_ERR PFX "Unsupported port config "
- "10G[%d] 1G[%d]\n",
+ pr_err("Unsupported port config 10G[%d] 1G[%d]\n",
num_10g, num_1g);
return -EINVAL;
}
@@ -9015,8 +8946,7 @@ static int __devinit walk_phys(struct niu *np, struct niu_parent *parent)
return 0;
unknown_vg_1g_port:
- printk(KERN_ERR PFX "Cannot identify platform type, 1gport=%d\n",
- lowest_1g);
+ pr_err("Cannot identify platform type, 1gport=%d\n", lowest_1g);
return -EINVAL;
}
@@ -9025,9 +8955,6 @@ static int __devinit niu_probe_ports(struct niu *np)
struct niu_parent *parent = np->parent;
int err, i;
- niudbg(PROBE, "niu_probe_ports(): port_phy[%08x]\n",
- parent->port_phy);
-
if (parent->port_phy == PORT_PHY_UNKNOWN) {
err = walk_phys(np, parent);
if (err)
@@ -9048,9 +8975,6 @@ static int __devinit niu_classifier_swstate_init(struct niu *np)
{
struct niu_classifier *cp = &np->clas;
- niudbg(PROBE, "niu_classifier_swstate_init: num_tcam(%d)\n",
- np->parent->tcam_num_entries);
-
cp->tcam_top = (u16) np->port;
cp->tcam_sz = np->parent->tcam_num_entries / np->parent->num_ports;
cp->h1_init = 0xffffffff;
@@ -9116,8 +9040,7 @@ static int __devinit niu_init_mac_ipp_pcs_base(struct niu *np)
break;
default:
- dev_err(np->device, PFX "Port %u is invalid, cannot "
- "compute MAC block offset.\n", np->port);
+ dev_err(np->device, "Port %u is invalid, cannot compute MAC block offset\n", np->port);
return -EINVAL;
}
@@ -9327,9 +9250,8 @@ static int __devinit niu_get_of_props(struct niu *np)
phy_type = of_get_property(dp, "phy-type", &prop_len);
if (!phy_type) {
- dev_err(np->device, PFX "%s: OF node lacks "
- "phy-type property\n",
- dp->full_name);
+ netdev_err(dev, "%s: OF node lacks phy-type property\n",
+ dp->full_name);
return -EINVAL;
}
@@ -9339,34 +9261,26 @@ static int __devinit niu_get_of_props(struct niu *np)
strcpy(np->vpd.phy_type, phy_type);
if (niu_phy_type_prop_decode(np, np->vpd.phy_type)) {
- dev_err(np->device, PFX "%s: Illegal phy string [%s].\n",
- dp->full_name, np->vpd.phy_type);
+ netdev_err(dev, "%s: Illegal phy string [%s]\n",
+ dp->full_name, np->vpd.phy_type);
return -EINVAL;
}
mac_addr = of_get_property(dp, "local-mac-address", &prop_len);
if (!mac_addr) {
- dev_err(np->device, PFX "%s: OF node lacks "
- "local-mac-address property\n",
- dp->full_name);
+ netdev_err(dev, "%s: OF node lacks local-mac-address property\n",
+ dp->full_name);
return -EINVAL;
}
if (prop_len != dev->addr_len) {
- dev_err(np->device, PFX "%s: OF MAC address prop len (%d) "
- "is wrong.\n",
- dp->full_name, prop_len);
+ netdev_err(dev, "%s: OF MAC address prop len (%d) is wrong\n",
+ dp->full_name, prop_len);
}
memcpy(dev->perm_addr, mac_addr, dev->addr_len);
if (!is_valid_ether_addr(&dev->perm_addr[0])) {
- int i;
-
- dev_err(np->device, PFX "%s: OF MAC address is invalid\n",
- dp->full_name);
- dev_err(np->device, PFX "%s: [ \n",
- dp->full_name);
- for (i = 0; i < 6; i++)
- printk("%02x ", dev->perm_addr[i]);
- printk("]\n");
+ netdev_err(dev, "%s: OF MAC address is invalid\n",
+ dp->full_name);
+ netdev_err(dev, "%s: [ %pM ]\n", dp->full_name, dev->perm_addr);
return -EINVAL;
}
@@ -9414,8 +9328,8 @@ static int __devinit niu_get_invariants(struct niu *np)
nw64(ESPC_PIO_EN, ESPC_PIO_EN_ENABLE);
offset = niu_pci_vpd_offset(np);
- niudbg(PROBE, "niu_get_invariants: VPD offset [%08x]\n",
- offset);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() VPD offset [%08x]\n", __func__, offset);
if (offset)
niu_pci_vpd_fetch(np, offset);
nw64(ESPC_PIO_EN, 0);
@@ -9575,8 +9489,6 @@ static struct niu_parent * __devinit niu_new_parent(struct niu *np,
struct niu_parent *p;
int i;
- niudbg(PROBE, "niu_new_parent: Creating new parent.\n");
-
plat_dev = platform_device_register_simple("niu", niu_parent_index,
NULL, 0);
if (IS_ERR(plat_dev))
@@ -9641,9 +9553,6 @@ static struct niu_parent * __devinit niu_get_parent(struct niu *np,
struct niu_parent *p, *tmp;
int port = np->port;
- niudbg(PROBE, "niu_get_parent: platform_type[%u] port[%u]\n",
- ptype, port);
-
mutex_lock(&niu_parent_lock);
p = NULL;
list_for_each_entry(tmp, &niu_parent_list, list) {
@@ -9681,7 +9590,8 @@ static void niu_put_parent(struct niu *np)
BUG_ON(!p || p->ports[port] != np);
- niudbg(PROBE, "niu_put_parent: port[%u]\n", port);
+ netif_printk(np, probe, KERN_DEBUG, np->dev,
+ "%s() port[%u]\n", __func__, port);
sprintf(port_name, "port%d", port);
@@ -9772,7 +9682,7 @@ static struct net_device * __devinit niu_alloc_and_init(
dev = alloc_etherdev_mq(sizeof(struct niu), NIU_NUM_TXCHAN);
if (!dev) {
- dev_err(gen_dev, PFX "Etherdev alloc failed, aborting.\n");
+ dev_err(gen_dev, "Etherdev alloc failed, aborting\n");
return NULL;
}
@@ -9858,30 +9768,26 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, PFX "Cannot find proper PCI device "
- "base addresses, aborting.\n");
+ dev_err(&pdev->dev, "Cannot find proper PCI device base addresses, aborting\n");
err = -ENODEV;
goto err_out_disable_pdev;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (pos <= 0) {
- dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot find PCI Express capability, aborting\n");
goto err_out_free_res;
}
@@ -9920,17 +9826,14 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
dev->features |= NETIF_F_HIGHDMA;
err = pci_set_consistent_dma_mask(pdev, dma_mask);
if (err) {
- dev_err(&pdev->dev, PFX "Unable to obtain 44 bit "
- "DMA for consistent allocations, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n");
goto err_out_release_parent;
}
}
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, PFX "No usable DMA configuration, "
- "aborting.\n");
+ dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
goto err_out_release_parent;
}
}
@@ -9939,8 +9842,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
np->regs = pci_ioremap_bar(pdev, 0);
if (!np->regs) {
- dev_err(&pdev->dev, PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_release_parent;
}
@@ -9955,15 +9857,13 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
err = niu_get_invariants(np);
if (err) {
if (err != -ENODEV)
- dev_err(&pdev->dev, PFX "Problem fetching invariants "
- "of chip, aborting.\n");
+ dev_err(&pdev->dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
err = register_netdev(dev);
if (err) {
- dev_err(&pdev->dev, PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot register net device, aborting\n");
goto err_out_iounmap;
}
@@ -10157,7 +10057,7 @@ static int __devinit niu_of_probe(struct of_device *op,
reg = of_get_property(op->node, "reg", NULL);
if (!reg) {
- dev_err(&op->dev, PFX "%s: No 'reg' property, aborting.\n",
+ dev_err(&op->dev, "%s: No 'reg' property, aborting\n",
op->node->full_name);
return -ENODEV;
}
@@ -10186,8 +10086,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[1]),
"niu regs");
if (!np->regs) {
- dev_err(&op->dev, PFX "Cannot map device registers, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_release_parent;
}
@@ -10196,8 +10095,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[2]),
"niu vregs-1");
if (!np->vir_regs_1) {
- dev_err(&op->dev, PFX "Cannot map device vir registers 1, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device vir registers 1, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -10206,8 +10104,7 @@ static int __devinit niu_of_probe(struct of_device *op,
resource_size(&op->resource[3]),
"niu vregs-2");
if (!np->vir_regs_2) {
- dev_err(&op->dev, PFX "Cannot map device vir registers 2, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot map device vir registers 2, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -10217,15 +10114,13 @@ static int __devinit niu_of_probe(struct of_device *op,
err = niu_get_invariants(np);
if (err) {
if (err != -ENODEV)
- dev_err(&op->dev, PFX "Problem fetching invariants "
- "of chip, aborting.\n");
+ dev_err(&op->dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
err = register_netdev(dev);
if (err) {
- dev_err(&op->dev, PFX "Cannot register net device, "
- "aborting.\n");
+ dev_err(&op->dev, "Cannot register net device, aborting\n");
goto err_out_iounmap;
}
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index 1f6327d4153..8dd509c09bc 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -1719,7 +1719,7 @@ static void ns83820_set_multicast(struct net_device *ndev)
else
and_mask &= ~(RFCR_AAU | RFCR_AAM);
- if (ndev->flags & IFF_ALLMULTI || ndev->mc_count)
+ if (ndev->flags & IFF_ALLMULTI || netdev_mc_count(ndev))
or_mask |= RFCR_AAM;
else
and_mask &= ~RFCR_AAM;
@@ -2292,7 +2292,7 @@ static void __devexit ns83820_remove_one(struct pci_dev *pci_dev)
pci_set_drvdata(pci_dev, NULL);
}
-static struct pci_device_id ns83820_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(ns83820_pci_tbl) = {
{ 0x100b, 0x0022, PCI_ANY_ID, PCI_ANY_ID, 0, .driver_data = 0, },
{ 0, },
};
diff --git a/drivers/net/octeon/octeon_mgmt.c b/drivers/net/octeon/octeon_mgmt.c
index 050538bf155..be368e5cbf7 100644
--- a/drivers/net/octeon/octeon_mgmt.c
+++ b/drivers/net/octeon/octeon_mgmt.c
@@ -467,7 +467,6 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
struct octeon_mgmt *p = netdev_priv(netdev);
int port = p->port;
- int i;
union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
unsigned long flags;
@@ -493,8 +492,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
}
if (netdev->flags & IFF_MULTICAST) {
- if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI)
- || netdev->mc_count > available_cam_entries)
+ if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(netdev) > available_cam_entries)
multicast_mode = 2; /* 1 - Accept all multicast. */
else
multicast_mode = 0; /* 0 - Use CAM. */
@@ -511,12 +510,8 @@ static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
}
}
if (multicast_mode == 0) {
- i = netdev->mc_count;
- list = netdev->mc_list;
- while (i--) {
+ netdev_for_each_mc_addr(list, netdev)
octeon_mgmt_cam_state_add(&cam_state, list->da_addr);
- list = list->next;
- }
}
@@ -1119,11 +1114,8 @@ static int __init octeon_mgmt_probe(struct platform_device *pdev)
if (p->port >= octeon_bootinfo->mac_addr_count)
dev_err(&pdev->dev,
- "Error %s: Using MAC outside of the assigned range: "
- "%02x:%02x:%02x:%02x:%02x:%02x\n", netdev->name,
- netdev->dev_addr[0], netdev->dev_addr[1],
- netdev->dev_addr[2], netdev->dev_addr[3],
- netdev->dev_addr[4], netdev->dev_addr[5]);
+ "Error %s: Using MAC outside of the assigned range: %pM\n",
+ netdev->name, netdev->dev_addr);
if (register_netdev(netdev))
goto err;
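Both the niu.c and octeon_mgmt.c hunks replace the hand-rolled dev->mc_count / dev->mc_list walk with netdev_mc_count(), netdev_mc_empty() and netdev_for_each_mc_addr(). A minimal sketch of the new loop shape; the iterator type and the ->da_addr field follow the usage in these hunks (later kernels iterate struct netdev_hw_addr with ->addr), the threshold is arbitrary, and the pr_info() stands in for the real per-address work:

	#include <linux/netdevice.h>

	static void example_walk_mc_list(struct net_device *dev)
	{
		struct dev_addr_list *addr;

		/* accept-all shortcut mirrors the IFF_ALLMULTI checks above */
		if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 16)
			return;

		netdev_for_each_mc_addr(addr, dev)
			pr_info("mc addr %pM\n", addr->da_addr);
	}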
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 1673eb045e1..d44d4a208bb 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -1875,7 +1875,7 @@ static void __devexit pasemi_mac_remove(struct pci_dev *pdev)
free_netdev(netdev);
}
-static struct pci_device_id pasemi_mac_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pasemi_mac_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa005) },
{ PCI_DEVICE(PCI_VENDOR_ID_PASEMI, 0xa006) },
{ },
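The ns83820.c and pasemi_mac.c hunks swap an open-coded static struct pci_device_id table for DEFINE_PCI_DEVICE_TABLE(), which at the time of this patch expanded to a const __devinitconst table so it could be placed with other probe-time data. A minimal sketch with placeholder IDs; example_pci_tbl and the 0x1234/0x5678 pair are made up:

	#include <linux/module.h>
	#include <linux/pci.h>

	static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
		{ PCI_DEVICE(0x1234, 0x5678) },	/* hypothetical vendor/device */
		{ 0, },
	};
	MODULE_DEVICE_TABLE(pci, example_pci_tbl);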
diff --git a/drivers/net/pci-skeleton.c b/drivers/net/pci-skeleton.c
index 480af402aff..36785853a14 100644
--- a/drivers/net/pci-skeleton.c
+++ b/drivers/net/pci-skeleton.c
@@ -11,7 +11,7 @@
-----<snip>-----
- Written 1997-2000 by Donald Becker.
+ Written 1997-2000 by Donald Becker.
This software may be used and distributed according to the
terms of the GNU General Public License (GPL), incorporated
herein by reference. Drivers based on or derived from this
@@ -85,6 +85,8 @@ IVc. Errata
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
@@ -96,16 +98,15 @@ IVc. Errata
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
-#include <asm/io.h>
+#include <linux/io.h>
#define NETDRV_VERSION "1.0.1"
#define MODNAME "netdrv"
#define NETDRV_DRIVER_LOAD_MSG "MyVendor Fast Ethernet driver " NETDRV_VERSION " loaded"
-#define PFX MODNAME ": "
static char version[] __devinitdata =
-KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
-" Support available from http://foo.com/bar/baz.html\n";
+ KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
+ " Support available from http://foo.com/bar/baz.html\n";
/* define to 1 to enable PIO instead of MMIO */
#undef USE_IO_OPS
@@ -119,19 +120,24 @@ KERN_INFO NETDRV_DRIVER_LOAD_MSG "\n"
#ifdef NETDRV_DEBUG
/* note: prints function name for you */
-# define DPRINTK(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
+#define DPRINTK(fmt, args...) \
+ printk(KERN_DEBUG "%s: " fmt, __func__ , ## args)
#else
-# define DPRINTK(fmt, args...)
+#define DPRINTK(fmt, args...) \
+do { \
+ if (0) \
+ printk(KERN_DEBUG fmt, ##args); \
+} while (0)
#endif
#ifdef NETDRV_NDEBUG
-# define assert(expr) do {} while (0)
+#define assert(expr) do {} while (0)
#else
-# define assert(expr) \
- if(!(expr)) { \
- printk( "Assertion failed! %s,%s,%s,line=%d\n", \
- #expr,__FILE__,__func__,__LINE__); \
- }
+#define assert(expr) \
+ if (!(expr)) { \
+ printk("Assertion failed! %s,%s,%s,line=%d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ }
#endif
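The rewritten no-op DPRINTK() above keeps the printk() inside an "if (0)" rather than expanding to nothing: the arguments stay visible to the compiler, so format strings are still type-checked and variables used only in debug output do not turn into "unused" warnings when NETDRV_DEBUG is off. A minimal sketch of the same idiom with a made-up EXAMPLE_DEBUG switch:

	#ifdef EXAMPLE_DEBUG
	#define EX_DBG(fmt, args...) \
		printk(KERN_DEBUG "%s: " fmt, __func__, ##args)
	#else
	#define EX_DBG(fmt, args...) \
	do { \
		if (0) \
			printk(KERN_DEBUG "%s: " fmt, __func__, ##args); \
	} while (0)
	#endif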
@@ -148,10 +154,10 @@ static int multicast_filter_limit = 32;
/* Size of the in-memory receive ring. */
#define RX_BUF_LEN_IDX 2 /* 0==8K, 1==16K, 2==32K, 3==64K */
-#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
-#define RX_BUF_PAD 16
+#define RX_BUF_LEN (8192 << RX_BUF_LEN_IDX)
+#define RX_BUF_PAD 16
#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
-#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
+#define RX_BUF_TOT_LEN (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
/* Number of Tx descriptor registers. */
#define NUM_TX_DESC 4
@@ -165,9 +171,11 @@ static int multicast_filter_limit = 32;
/* PCI Tuning Parameters
Threshold is bytes transferred to chip before transmission starts. */
-#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
+#define TX_FIFO_THRESH 256 /* In bytes, rounded down to 32 byte units. */
-/* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6==1024, 7==end of packet. */
+/* The following settings are log_2(bytes)-4:
+ 0==16 bytes 1==32 2==64 3==128 4==256 5==512 6==1024 7==end of packet.
+*/
#define RX_FIFO_THRESH 6 /* Rx buffer level before first PCI xfer. */
#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
@@ -175,8 +183,7 @@ static int multicast_filter_limit = 32;
/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
-#define TX_TIMEOUT (6*HZ)
-
+#define TX_TIMEOUT (6 * HZ)
enum {
HAS_CHIP_XCVR = 0x020000,
@@ -186,7 +193,7 @@ enum {
#define NETDRV_MIN_IO_SIZE 0x80
#define RTL8139B_IO_SIZE 256
-#define NETDRV_CAPS HAS_CHIP_XCVR|HAS_LNK_CHNG
+#define NETDRV_CAPS (HAS_CHIP_XCVR | HAS_LNK_CHNG)
typedef enum {
RTL8139 = 0,
@@ -211,7 +218,7 @@ static struct {
};
-static struct pci_device_id netdrv_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(netdrv_pci_tbl) = {
{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, NETDRV_CB },
{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMC1211TX },
@@ -220,7 +227,7 @@ static struct pci_device_id netdrv_pci_tbl[] = {
{0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ADDTRON8139 },
{0,}
};
-MODULE_DEVICE_TABLE (pci, netdrv_pci_tbl);
+MODULE_DEVICE_TABLE(pci, netdrv_pci_tbl);
/* The rest of these values should never change. */
@@ -270,7 +277,7 @@ enum NETDRV_registers {
enum ClearBitMasks {
MultiIntrClear = 0xF000,
ChipCmdClear = 0xE2,
- Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
+ Config1Clear = (1 << 7) | (1 << 6) | (1 << 3) | (1 << 2) | (1 << 1),
};
enum ChipCmdBits {
@@ -329,7 +336,7 @@ enum tx_config_bits {
TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
TxCRC = (1 << 16), /* DISABLE appending CRC to end of Tx packets */
TxClearAbt = (1 << 0), /* Clear abort (WO) */
- TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
+ TxDMAShift = 8, /* DMA burst value(0-7) is shift this many bits */
TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
};
@@ -481,41 +488,44 @@ struct netdrv_private {
chip_t chipset;
};
-MODULE_AUTHOR ("Jeff Garzik <jgarzik@pobox.com>");
-MODULE_DESCRIPTION ("Skeleton for a PCI Fast Ethernet driver");
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Skeleton for a PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
module_param(multicast_filter_limit, int, 0);
module_param(max_interrupt_work, int, 0);
module_param_array(media, int, NULL, 0);
-MODULE_PARM_DESC (multicast_filter_limit, "pci-skeleton maximum number of filtered multicast addresses");
-MODULE_PARM_DESC (max_interrupt_work, "pci-skeleton maximum events handled per interrupt");
-MODULE_PARM_DESC (media, "pci-skeleton: Bits 0-3: media type, bit 17: full duplex");
-
-static int read_eeprom (void *ioaddr, int location, int addr_len);
-static int netdrv_open (struct net_device *dev);
-static int mdio_read (struct net_device *dev, int phy_id, int location);
-static void mdio_write (struct net_device *dev, int phy_id, int location,
- int val);
-static void netdrv_timer (unsigned long data);
-static void netdrv_tx_timeout (struct net_device *dev);
-static void netdrv_init_ring (struct net_device *dev);
-static int netdrv_start_xmit (struct sk_buff *skb,
- struct net_device *dev);
-static irqreturn_t netdrv_interrupt (int irq, void *dev_instance);
-static int netdrv_close (struct net_device *dev);
-static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
-static void netdrv_set_rx_mode (struct net_device *dev);
-static void netdrv_hw_start (struct net_device *dev);
+MODULE_PARM_DESC(multicast_filter_limit,
+ MODNAME " maximum number of filtered multicast addresses");
+MODULE_PARM_DESC(max_interrupt_work,
+ MODNAME " maximum events handled per interrupt");
+MODULE_PARM_DESC(media,
+ MODNAME " Bits 0-3: media type, bit 17: full duplex");
+
+static int read_eeprom(void *ioaddr, int location, int addr_len);
+static int netdrv_open(struct net_device *dev);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int val);
+static void netdrv_timer(unsigned long data);
+static void netdrv_tx_timeout(struct net_device *dev);
+static void netdrv_init_ring(struct net_device *dev);
+static int netdrv_start_xmit(struct sk_buff *skb,
+ struct net_device *dev);
+static irqreturn_t netdrv_interrupt(int irq, void *dev_instance);
+static int netdrv_close(struct net_device *dev);
+static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static void netdrv_set_rx_mode(struct net_device *dev);
+static void netdrv_hw_start(struct net_device *dev);
#ifdef USE_IO_OPS
-#define NETDRV_R8(reg) inb (((unsigned long)ioaddr) + (reg))
-#define NETDRV_R16(reg) inw (((unsigned long)ioaddr) + (reg))
-#define NETDRV_R32(reg) ((unsigned long) inl (((unsigned long)ioaddr) + (reg)))
-#define NETDRV_W8(reg, val8) outb ((val8), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W16(reg, val16) outw ((val16), ((unsigned long)ioaddr) + (reg))
-#define NETDRV_W32(reg, val32) outl ((val32), ((unsigned long)ioaddr) + (reg))
+#define NETDRV_R8(reg) inb(((unsigned long)ioaddr) + (reg))
+#define NETDRV_R16(reg) inw(((unsigned long)ioaddr) + (reg))
+#define NETDRV_R32(reg) ((unsigned long)inl(((unsigned long)ioaddr) + (reg)))
+#define NETDRV_W8(reg, val8) outb((val8), ((unsigned long)ioaddr) + (reg))
+#define NETDRV_W16(reg, val16) outw((val16), ((unsigned long)ioaddr) + (reg))
+#define NETDRV_W32(reg, val32) outl((val32), ((unsigned long)ioaddr) + (reg))
#define NETDRV_W8_F NETDRV_W8
#define NETDRV_W16_F NETDRV_W16
#define NETDRV_W32_F NETDRV_W32
@@ -528,25 +538,37 @@ static void netdrv_hw_start (struct net_device *dev);
#define readb(addr) inb((unsigned long)(addr))
#define readw(addr) inw((unsigned long)(addr))
#define readl(addr) inl((unsigned long)(addr))
-#define writeb(val,addr) outb((val),(unsigned long)(addr))
-#define writew(val,addr) outw((val),(unsigned long)(addr))
-#define writel(val,addr) outl((val),(unsigned long)(addr))
+#define writeb(val, addr) outb((val), (unsigned long)(addr))
+#define writew(val, addr) outw((val), (unsigned long)(addr))
+#define writel(val, addr) outl((val), (unsigned long)(addr))
#else
/* write MMIO register, with flush */
/* Flush avoids rtl8139 bug w/ posted MMIO writes */
-#define NETDRV_W8_F(reg, val8) do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0)
-#define NETDRV_W16_F(reg, val16) do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0)
-#define NETDRV_W32_F(reg, val32) do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
+#define NETDRV_W8_F(reg, val8) \
+do { \
+ writeb((val8), ioaddr + (reg)); \
+ readb(ioaddr + (reg)); \
+} while (0)
+#define NETDRV_W16_F(reg, val16) \
+do { \
+ writew((val16), ioaddr + (reg)); \
+ readw(ioaddr + (reg)); \
+} while (0)
+#define NETDRV_W32_F(reg, val32) \
+do { \
+ writel((val32), ioaddr + (reg)); \
+ readl(ioaddr + (reg)); \
+} while (0)
#ifdef MMIO_FLUSH_AUDIT_COMPLETE
/* write MMIO register */
-#define NETDRV_W8(reg, val8) writeb ((val8), ioaddr + (reg))
-#define NETDRV_W16(reg, val16) writew ((val16), ioaddr + (reg))
-#define NETDRV_W32(reg, val32) writel ((val32), ioaddr + (reg))
+#define NETDRV_W8(reg, val8) writeb((val8), ioaddr + (reg))
+#define NETDRV_W16(reg, val16) writew((val16), ioaddr + (reg))
+#define NETDRV_W32(reg, val32) writel((val32), ioaddr + (reg))
#else
@@ -558,9 +580,9 @@ static void netdrv_hw_start (struct net_device *dev);
#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
/* read MMIO register */
-#define NETDRV_R8(reg) readb (ioaddr + (reg))
-#define NETDRV_R16(reg) readw (ioaddr + (reg))
-#define NETDRV_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
+#define NETDRV_R8(reg) readb(ioaddr + (reg))
+#define NETDRV_R16(reg) readw(ioaddr + (reg))
+#define NETDRV_R32(reg) ((unsigned long) readl(ioaddr + (reg)))
#endif /* USE_IO_OPS */
@@ -570,14 +592,14 @@ static const u16 netdrv_intr_mask =
TxErr | TxOK | RxErr | RxOK;
static const unsigned int netdrv_rx_config =
- RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
- (RX_FIFO_THRESH << RxCfgFIFOShift) |
- (RX_DMA_BURST << RxCfgDMAShift);
+ RxCfgEarlyRxNone | RxCfgRcv32K | RxNoWrap |
+ (RX_FIFO_THRESH << RxCfgFIFOShift) |
+ (RX_DMA_BURST << RxCfgDMAShift);
-static int __devinit netdrv_init_board (struct pci_dev *pdev,
- struct net_device **dev_out,
- void **ioaddr_out)
+static int __devinit netdrv_init_board(struct pci_dev *pdev,
+ struct net_device **dev_out,
+ void **ioaddr_out)
{
void *ioaddr = NULL;
struct net_device *dev;
@@ -587,43 +609,43 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
u32 tmp;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- assert (pdev != NULL);
- assert (ioaddr_out != NULL);
+ assert(pdev != NULL);
+ assert(ioaddr_out != NULL);
*ioaddr_out = NULL;
*dev_out = NULL;
/* dev zeroed in alloc_etherdev */
- dev = alloc_etherdev (sizeof (*tp));
+ dev = alloc_etherdev(sizeof(*tp));
if (dev == NULL) {
dev_err(&pdev->dev, "unable to alloc new ethernet\n");
- DPRINTK ("EXIT, returning -ENOMEM\n");
+ DPRINTK("EXIT, returning -ENOMEM\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
tp = netdev_priv(dev);
- /* enable device (incl. PCI PM wakeup), and bus-mastering */
- rc = pci_enable_device (pdev);
+ /* enable device(incl. PCI PM wakeup), and bus-mastering */
+ rc = pci_enable_device(pdev);
if (rc)
goto err_out;
- pio_start = pci_resource_start (pdev, 0);
- pio_end = pci_resource_end (pdev, 0);
- pio_flags = pci_resource_flags (pdev, 0);
- pio_len = pci_resource_len (pdev, 0);
+ pio_start = pci_resource_start(pdev, 0);
+ pio_end = pci_resource_end(pdev, 0);
+ pio_flags = pci_resource_flags(pdev, 0);
+ pio_len = pci_resource_len(pdev, 0);
- mmio_start = pci_resource_start (pdev, 1);
- mmio_end = pci_resource_end (pdev, 1);
- mmio_flags = pci_resource_flags (pdev, 1);
- mmio_len = pci_resource_len (pdev, 1);
+ mmio_start = pci_resource_start(pdev, 1);
+ mmio_end = pci_resource_end(pdev, 1);
+ mmio_flags = pci_resource_flags(pdev, 1);
+ mmio_len = pci_resource_len(pdev, 1);
/* set this immediately, we need to know before
* we talk to the chip directly */
- DPRINTK("PIO region size == 0x%02X\n", pio_len);
- DPRINTK("MMIO region size == 0x%02lX\n", mmio_len);
+ DPRINTK("PIO region size == %#02X\n", pio_len);
+ DPRINTK("MMIO region size == %#02lX\n", mmio_len);
/* make sure PCI base addr 0 is PIO */
if (!(pio_flags & IORESOURCE_IO)) {
@@ -647,17 +669,17 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
goto err_out;
}
- rc = pci_request_regions (pdev, MODNAME);
+ rc = pci_request_regions(pdev, MODNAME);
if (rc)
goto err_out;
- pci_set_master (pdev);
+ pci_set_master(pdev);
#ifdef USE_IO_OPS
- ioaddr = (void *) pio_start;
+ ioaddr = (void *)pio_start;
#else
/* ioremap MMIO region */
- ioaddr = ioremap (mmio_start, mmio_len);
+ ioaddr = ioremap(mmio_start, mmio_len);
if (ioaddr == NULL) {
dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
@@ -666,52 +688,50 @@ static int __devinit netdrv_init_board (struct pci_dev *pdev,
#endif /* USE_IO_OPS */
/* Soft reset the chip. */
- NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
+ NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
/* Check that the chip has finished the reset. */
for (i = 1000; i > 0; i--)
- if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0)
+ if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
break;
else
- udelay (10);
+ udelay(10);
/* Bring the chip out of low-power mode. */
/* <insert device-specific code here> */
#ifndef USE_IO_OPS
/* sanity checks -- ensure PIO and MMIO registers agree */
- assert (inb (pio_start+Config0) == readb (ioaddr+Config0));
- assert (inb (pio_start+Config1) == readb (ioaddr+Config1));
- assert (inb (pio_start+TxConfig) == readb (ioaddr+TxConfig));
- assert (inb (pio_start+RxConfig) == readb (ioaddr+RxConfig));
+ assert(inb(pio_start+Config0) == readb(ioaddr+Config0));
+ assert(inb(pio_start+Config1) == readb(ioaddr+Config1));
+ assert(inb(pio_start+TxConfig) == readb(ioaddr+TxConfig));
+ assert(inb(pio_start+RxConfig) == readb(ioaddr+RxConfig));
#endif /* !USE_IO_OPS */
/* identify chip attached to board */
- tmp = NETDRV_R8 (ChipVersion);
- for (i = ARRAY_SIZE (rtl_chip_info) - 1; i >= 0; i--)
+ tmp = NETDRV_R8(ChipVersion);
+ for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--)
if (tmp == rtl_chip_info[i].version) {
tp->chipset = i;
goto match;
}
/* if unknown chip, assume array element #0, original RTL-8139 in this case */
- dev_printk (KERN_DEBUG, &pdev->dev,
- "unknown chip version, assuming RTL-8139\n");
- dev_printk (KERN_DEBUG, &pdev->dev, "TxConfig = 0x%lx\n",
- NETDRV_R32 (TxConfig));
+ dev_printk(KERN_DEBUG, &pdev->dev,
+ "unknown chip version, assuming RTL-8139\n");
+ dev_printk(KERN_DEBUG, &pdev->dev, "TxConfig = %#lx\n",
+ NETDRV_R32(TxConfig));
tp->chipset = 0;
match:
- DPRINTK ("chipset id (%d) == index %d, '%s'\n",
- tmp,
- tp->chipset,
- rtl_chip_info[tp->chipset].name);
+ DPRINTK("chipset id(%d) == index %d, '%s'\n",
+ tmp, tp->chipset, rtl_chip_info[tp->chipset].name);
- rc = register_netdev (dev);
+ rc = register_netdev(dev);
if (rc)
goto err_out_unmap;
- DPRINTK ("EXIT, returning 0\n");
+ DPRINTK("EXIT, returning 0\n");
*ioaddr_out = ioaddr;
*dev_out = dev;
return 0;
@@ -721,10 +741,10 @@ err_out_unmap:
iounmap(ioaddr);
err_out_free_res:
#endif
- pci_release_regions (pdev);
+ pci_release_regions(pdev);
err_out:
- free_netdev (dev);
- DPRINTK ("EXIT, returning %d\n", rc);
+ free_netdev(dev);
+ DPRINTK("EXIT, returning %d\n", rc);
return rc;
}
@@ -740,8 +760,8 @@ static const struct net_device_ops netdrv_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
};
-static int __devinit netdrv_init_one (struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int __devinit netdrv_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
struct net_device *dev = NULL;
struct netdrv_private *tp;
@@ -756,29 +776,29 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
printk(version);
#endif
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- assert (pdev != NULL);
- assert (ent != NULL);
+ assert(pdev != NULL);
+ assert(ent != NULL);
board_idx++;
- i = netdrv_init_board (pdev, &dev, &ioaddr);
+ i = netdrv_init_board(pdev, &dev, &ioaddr);
if (i < 0) {
- DPRINTK ("EXIT, returning %d\n", i);
+ DPRINTK("EXIT, returning %d\n", i);
return i;
}
tp = netdev_priv(dev);
- assert (ioaddr != NULL);
- assert (dev != NULL);
- assert (tp != NULL);
+ assert(ioaddr != NULL);
+ assert(dev != NULL);
+ assert(tp != NULL);
- addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+ addr_len = read_eeprom(ioaddr, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
- ((u16 *) (dev->dev_addr))[i] =
- le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+ ((u16 *)(dev->dev_addr))[i] =
+ le16_to_cpu(read_eeprom(ioaddr, i + 7, addr_len));
dev->netdev_ops = &netdrv_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
@@ -791,7 +811,7 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
/* note: tp->chipset set in netdrv_init_board */
tp->drv_flags = PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
- PCI_COMMAND_MASTER | NETDRV_CAPS;
+ PCI_COMMAND_MASTER | NETDRV_CAPS;
tp->pci_dev = pdev;
tp->board = ent->driver_data;
tp->mmio_addr = ioaddr;
@@ -801,18 +821,15 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
tp->phys[0] = 32;
- printk (KERN_INFO "%s: %s at 0x%lx, %pM IRQ %d\n",
- dev->name,
- board_info[ent->driver_data].name,
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ netdev_info(dev, "%s at %#lx, %pM IRQ %d\n",
+ board_info[ent->driver_data].name,
+ dev->base_addr, dev->dev_addr, dev->irq);
- printk (KERN_DEBUG "%s: Identified 8139 chip type '%s'\n",
- dev->name, rtl_chip_info[tp->chipset].name);
+ netdev_printk(KERN_DEBUG, dev, "Identified 8139 chip type '%s'\n",
+ rtl_chip_info[tp->chipset].name);
/* Put the chip into low-power mode. */
- NETDRV_W8_F (Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
/* The lower four bits are the media type. */
option = (board_idx > 7) ? 0 : media[board_idx];
@@ -824,45 +841,43 @@ static int __devinit netdrv_init_one (struct pci_dev *pdev,
}
if (tp->full_duplex) {
- printk (KERN_INFO
- "%s: Media type forced to Full Duplex.\n",
- dev->name);
- mdio_write (dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
+ netdev_info(dev, "Media type forced to Full Duplex\n");
+ mdio_write(dev, tp->phys[0], MII_ADVERTISE, ADVERTISE_FULL);
tp->duplex_lock = 1;
}
- DPRINTK ("EXIT - returning 0\n");
+ DPRINTK("EXIT - returning 0\n");
return 0;
}
-static void __devexit netdrv_remove_one (struct pci_dev *pdev)
+static void __devexit netdrv_remove_one(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
struct netdrv_private *np;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- assert (dev != NULL);
+ assert(dev != NULL);
np = netdev_priv(dev);
- assert (np != NULL);
+ assert(np != NULL);
- unregister_netdev (dev);
+ unregister_netdev(dev);
#ifndef USE_IO_OPS
- iounmap (np->mmio_addr);
+ iounmap(np->mmio_addr);
#endif /* !USE_IO_OPS */
- pci_release_regions (pdev);
+ pci_release_regions(pdev);
- free_netdev (dev);
+ free_netdev(dev);
- pci_set_drvdata (pdev, NULL);
+ pci_set_drvdata(pdev, NULL);
- pci_disable_device (pdev);
+ pci_disable_device(pdev);
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
@@ -870,63 +885,63 @@ static void __devexit netdrv_remove_one (struct pci_dev *pdev)
/* EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
-#define EE_CS 0x08 /* EEPROM chip select. */
+#define EE_CS 0x08 /* EEPROM chip select. */
#define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
-#define EE_WRITE_0 0x00
-#define EE_WRITE_1 0x02
+#define EE_WRITE_0 0x00
+#define EE_WRITE_1 0x02
#define EE_DATA_READ 0x01 /* EEPROM chip data out. */
-#define EE_ENB (0x80 | EE_CS)
+#define EE_ENB (0x80 | EE_CS)
/* Delay between EEPROM clock transitions.
No extra delay is needed with 33Mhz PCI, but 66Mhz may change this.
- */
+*/
#define eeprom_delay() readl(ee_addr)
/* The EEPROM commands include the alway-set leading bit. */
#define EE_WRITE_CMD (5)
-#define EE_READ_CMD (6)
+#define EE_READ_CMD (6)
#define EE_ERASE_CMD (7)
-static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
+static int __devinit read_eeprom(void *ioaddr, int location, int addr_len)
{
int i;
unsigned retval = 0;
void *ee_addr = ioaddr + Cfg9346;
int read_cmd = location | (EE_READ_CMD << addr_len);
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- writeb (EE_ENB & ~EE_CS, ee_addr);
- writeb (EE_ENB, ee_addr);
- eeprom_delay ();
+ writeb(EE_ENB & ~EE_CS, ee_addr);
+ writeb(EE_ENB, ee_addr);
+ eeprom_delay();
/* Shift the read command bits out. */
for (i = 4 + addr_len; i >= 0; i--) {
int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
- writeb (EE_ENB | dataval, ee_addr);
- eeprom_delay ();
- writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
- eeprom_delay ();
+ writeb(EE_ENB | dataval, ee_addr);
+ eeprom_delay();
+ writeb(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
}
- writeb (EE_ENB, ee_addr);
- eeprom_delay ();
+ writeb(EE_ENB, ee_addr);
+ eeprom_delay();
for (i = 16; i > 0; i--) {
- writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
- eeprom_delay ();
+ writeb(EE_ENB | EE_SHIFT_CLK, ee_addr);
+ eeprom_delay();
retval =
- (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
- 0);
- writeb (EE_ENB, ee_addr);
- eeprom_delay ();
+ (retval << 1) | ((readb(ee_addr) & EE_DATA_READ) ? 1 :
+ 0);
+ writeb(EE_ENB, ee_addr);
+ eeprom_delay();
}
/* Terminate the EEPROM access. */
- writeb (~EE_CS, ee_addr);
- eeprom_delay ();
+ writeb(~EE_CS, ee_addr);
+ eeprom_delay();
- DPRINTK ("EXIT - returning %d\n", retval);
+ DPRINTK("EXIT - returning %d\n", retval);
return retval;
}
@@ -936,12 +951,12 @@ static int __devinit read_eeprom (void *ioaddr, int location, int addr_len)
The maximum data clock rate is 2.5 Mhz. The minimum timing is usually
met by back-to-back PCI I/O cycles, but we insert a delay to avoid
"overclocking" issues. */
-#define MDIO_DIR 0x80
+#define MDIO_DIR 0x80
#define MDIO_DATA_OUT 0x04
#define MDIO_DATA_IN 0x02
-#define MDIO_CLK 0x01
-#define MDIO_WRITE0 (MDIO_DIR)
-#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+#define MDIO_CLK 0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
#define mdio_delay() readb(mdio_addr)
@@ -959,24 +974,24 @@ static char mii_2_8139_map[8] = {
/* Syncronize the MII management interface by shifting 32 one bits out. */
-static void mdio_sync (void *mdio_addr)
+static void mdio_sync(void *mdio_addr)
{
int i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
for (i = 32; i >= 0; i--) {
- writeb (MDIO_WRITE1, mdio_addr);
- mdio_delay ();
- writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr);
- mdio_delay ();
+ writeb(MDIO_WRITE1, mdio_addr);
+ mdio_delay();
+ writeb(MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+ mdio_delay();
}
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
-static int mdio_read (struct net_device *dev, int phy_id, int location)
+static int mdio_read(struct net_device *dev, int phy_id, int location)
{
struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
@@ -984,97 +999,94 @@ static int mdio_read (struct net_device *dev, int phy_id, int location)
int retval = 0;
int i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
if (phy_id > 31) { /* Really a 8139. Use internal registers. */
- DPRINTK ("EXIT after directly using 8139 internal regs\n");
+ DPRINTK("EXIT after directly using 8139 internal regs\n");
return location < 8 && mii_2_8139_map[location] ?
- readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
+ readw(tp->mmio_addr + mii_2_8139_map[location]) : 0;
}
- mdio_sync (mdio_addr);
+ mdio_sync(mdio_addr);
/* Shift the read command bits out. */
for (i = 15; i >= 0; i--) {
int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
- writeb (MDIO_DIR | dataval, mdio_addr);
- mdio_delay ();
- writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
- mdio_delay ();
+ writeb(MDIO_DIR | dataval, mdio_addr);
+ mdio_delay();
+ writeb(MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+ mdio_delay();
}
/* Read the two transition, 16 data, and wire-idle bits. */
for (i = 19; i > 0; i--) {
- writeb (0, mdio_addr);
- mdio_delay ();
- retval =
- (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1
- : 0);
- writeb (MDIO_CLK, mdio_addr);
- mdio_delay ();
+ writeb(0, mdio_addr);
+ mdio_delay();
+		retval = (retval << 1) |
+			 ((readb(mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+ writeb(MDIO_CLK, mdio_addr);
+ mdio_delay();
}
- DPRINTK ("EXIT, returning %d\n", (retval >> 1) & 0xffff);
+ DPRINTK("EXIT, returning %d\n", (retval >> 1) & 0xffff);
return (retval >> 1) & 0xffff;
}
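
One detail worth noting in the read loop above: the ternary has to apply to the sampled MDIO_DATA_IN bit alone before it is OR'd into the shifted accumulator, otherwise retval collapses to 0 or 1. A tiny standalone sketch of the intended accumulation, with made-up sample values:

/*
 * Illustrative sketch only: shifting sampled MDIO bits into an
 * accumulator.  The sample values below are invented for the example.
 */
#include <assert.h>

#define MDIO_DATA_IN	0x02

static unsigned int shift_in_bit(unsigned int retval, unsigned int sample)
{
	/* keep the ternary bound to the sampled bit, not the whole expression */
	return (retval << 1) | ((sample & MDIO_DATA_IN) ? 1 : 0);
}

int main(void)
{
	unsigned int retval = 0;

	retval = shift_in_bit(retval, 0x02);	/* 0b1   */
	retval = shift_in_bit(retval, 0x00);	/* 0b10  */
	retval = shift_in_bit(retval, 0x02);	/* 0b101 */
	assert(retval == 0x5);
	return 0;
}
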
-static void mdio_write (struct net_device *dev, int phy_id, int location,
- int value)
+static void mdio_write(struct net_device *dev, int phy_id, int location,
+ int value)
{
struct netdrv_private *tp = netdev_priv(dev);
void *mdio_addr = tp->mmio_addr + Config4;
int mii_cmd =
- (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+ (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
int i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
if (phy_id > 31) { /* Really an 8139. Use internal registers. */
if (location < 8 && mii_2_8139_map[location]) {
- writew (value,
- tp->mmio_addr + mii_2_8139_map[location]);
- readw (tp->mmio_addr + mii_2_8139_map[location]);
+ writew(value,
+ tp->mmio_addr + mii_2_8139_map[location]);
+ readw(tp->mmio_addr + mii_2_8139_map[location]);
}
- DPRINTK ("EXIT after directly using 8139 internal regs\n");
+ DPRINTK("EXIT after directly using 8139 internal regs\n");
return;
}
- mdio_sync (mdio_addr);
+ mdio_sync(mdio_addr);
/* Shift the command bits out. */
for (i = 31; i >= 0; i--) {
int dataval =
- (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
- writeb (dataval, mdio_addr);
- mdio_delay ();
- writeb (dataval | MDIO_CLK, mdio_addr);
- mdio_delay ();
+ (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+ writeb(dataval, mdio_addr);
+ mdio_delay();
+ writeb(dataval | MDIO_CLK, mdio_addr);
+ mdio_delay();
}
/* Clear out extra bits. */
for (i = 2; i > 0; i--) {
- writeb (0, mdio_addr);
- mdio_delay ();
- writeb (MDIO_CLK, mdio_addr);
- mdio_delay ();
+ writeb(0, mdio_addr);
+ mdio_delay();
+ writeb(MDIO_CLK, mdio_addr);
+ mdio_delay();
}
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
-static int netdrv_open (struct net_device *dev)
+static int netdrv_open(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
int retval;
-#ifdef NETDRV_DEBUG
void *ioaddr = tp->mmio_addr;
-#endif
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- retval = request_irq (dev->irq, netdrv_interrupt, IRQF_SHARED, dev->name, dev);
+ retval = request_irq(dev->irq, netdrv_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
- DPRINTK ("EXIT, returning %d\n", retval);
+ DPRINTK("EXIT, returning %d\n", retval);
return retval;
}
@@ -1092,7 +1104,7 @@ static int netdrv_open (struct net_device *dev)
pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
- DPRINTK ("EXIT, returning -ENOMEM\n");
+ DPRINTK("EXIT, returning -ENOMEM\n");
return -ENOMEM;
}
@@ -1100,109 +1112,108 @@ static int netdrv_open (struct net_device *dev)
tp->full_duplex = tp->duplex_lock;
tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
- netdrv_init_ring (dev);
- netdrv_hw_start (dev);
+ netdrv_init_ring(dev);
+ netdrv_hw_start(dev);
- DPRINTK ("%s: netdrv_open() ioaddr %#lx IRQ %d"
- " GP Pins %2.2x %s-duplex.\n",
- dev->name, pci_resource_start (tp->pci_dev, 1),
- dev->irq, NETDRV_R8 (MediaStatus),
- tp->full_duplex ? "full" : "half");
+ netdev_dbg(dev, "ioaddr %#llx IRQ %d GP Pins %02x %s-duplex\n",
+ (unsigned long long)pci_resource_start(tp->pci_dev, 1),
+ dev->irq, NETDRV_R8(MediaStatus),
+ tp->full_duplex ? "full" : "half");
/* Set the timer to check for link beat and perhaps switch
to an alternate media type. */
- init_timer (&tp->timer);
+ init_timer(&tp->timer);
tp->timer.expires = jiffies + 3 * HZ;
tp->timer.data = (unsigned long) dev;
tp->timer.function = &netdrv_timer;
- add_timer (&tp->timer);
+ add_timer(&tp->timer);
- DPRINTK ("EXIT, returning 0\n");
+ DPRINTK("EXIT, returning 0\n");
return 0;
}
/* Start the hardware at open or resume. */
-static void netdrv_hw_start (struct net_device *dev)
+static void netdrv_hw_start(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
/* Soft reset the chip. */
- NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) | CmdReset);
- udelay (100);
+ NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) | CmdReset);
+ udelay(100);
/* Check that the chip has finished the reset. */
for (i = 1000; i > 0; i--)
- if ((NETDRV_R8 (ChipCmd) & CmdReset) == 0)
+ if ((NETDRV_R8(ChipCmd) & CmdReset) == 0)
break;
/* Restore our idea of the MAC address. */
- NETDRV_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
- NETDRV_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
+ NETDRV_W32_F(MAC0 + 0, cpu_to_le32(*(u32 *)(dev->dev_addr + 0)));
+ NETDRV_W32_F(MAC0 + 4, cpu_to_le32(*(u32 *)(dev->dev_addr + 4)));
/* Must enable Tx/Rx before setting transfer thresholds! */
- NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) |
- CmdRxEnb | CmdTxEnb);
+ NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
+ CmdRxEnb | CmdTxEnb);
i = netdrv_rx_config |
- (NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
- NETDRV_W32_F (RxConfig, i);
+ (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ NETDRV_W32_F(RxConfig, i);
/* Check this value: the documentation for IFG contradicts itself. */
- NETDRV_W32 (TxConfig, (TX_DMA_BURST << TxDMAShift));
+ NETDRV_W32(TxConfig, (TX_DMA_BURST << TxDMAShift));
/* unlock Config[01234] and BMCR register writes */
- NETDRV_W8_F (Cfg9346, Cfg9346_Unlock);
- udelay (10);
+ NETDRV_W8_F(Cfg9346, Cfg9346_Unlock);
+ udelay(10);
tp->cur_rx = 0;
/* Lock Config[01234] and BMCR register writes */
- NETDRV_W8_F (Cfg9346, Cfg9346_Lock);
- udelay (10);
+ NETDRV_W8_F(Cfg9346, Cfg9346_Lock);
+ udelay(10);
/* init Rx ring buffer DMA address */
- NETDRV_W32_F (RxBuf, tp->rx_ring_dma);
+ NETDRV_W32_F(RxBuf, tp->rx_ring_dma);
/* init Tx buffer DMA addresses */
for (i = 0; i < NUM_TX_DESC; i++)
- NETDRV_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
+ NETDRV_W32_F(TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
- NETDRV_W32_F (RxMissed, 0);
+ NETDRV_W32_F(RxMissed, 0);
- netdrv_set_rx_mode (dev);
+ netdrv_set_rx_mode(dev);
/* no early-rx interrupts */
- NETDRV_W16 (MultiIntr, NETDRV_R16 (MultiIntr) & MultiIntrClear);
+ NETDRV_W16(MultiIntr, NETDRV_R16(MultiIntr) & MultiIntrClear);
/* make sure RxTx has started */
- NETDRV_W8_F (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear) |
- CmdRxEnb | CmdTxEnb);
+ NETDRV_W8_F(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear) |
+ CmdRxEnb | CmdTxEnb);
/* Enable all known interrupts by setting the interrupt mask. */
- NETDRV_W16_F (IntrMask, netdrv_intr_mask);
+ NETDRV_W16_F(IntrMask, netdrv_intr_mask);
- netif_start_queue (dev);
+ netif_start_queue(dev);
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void netdrv_init_ring (struct net_device *dev)
+static void netdrv_init_ring(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
int i;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
tp->cur_rx = 0;
- atomic_set (&tp->cur_tx, 0);
- atomic_set (&tp->dirty_tx, 0);
+ atomic_set(&tp->cur_tx, 0);
+ atomic_set(&tp->dirty_tx, 0);
for (i = 0; i < NUM_TX_DESC; i++) {
tp->tx_info[i].skb = NULL;
@@ -1210,11 +1221,11 @@ static void netdrv_init_ring (struct net_device *dev)
tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
}
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
-static void netdrv_timer (unsigned long data)
+static void netdrv_timer(unsigned long data)
{
struct net_device *dev = (struct net_device *) data;
struct netdrv_private *tp = netdev_priv(dev);
@@ -1222,58 +1233,54 @@ static void netdrv_timer (unsigned long data)
int next_tick = 60 * HZ;
int mii_lpa;
- mii_lpa = mdio_read (dev, tp->phys[0], MII_LPA);
+ mii_lpa = mdio_read(dev, tp->phys[0], MII_LPA);
if (!tp->duplex_lock && mii_lpa != 0xffff) {
int duplex = ((mii_lpa & LPA_100FULL) ||
- (mii_lpa & 0x01C0) == 0x0040);
+ (mii_lpa & 0x01C0) == 0x0040);
if (tp->full_duplex != duplex) {
tp->full_duplex = duplex;
- printk (KERN_INFO
- "%s: Setting %s-duplex based on MII #%d link"
- " partner ability of %4.4x.\n", dev->name,
- tp->full_duplex ? "full" : "half",
- tp->phys[0], mii_lpa);
- NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
- NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
- NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+ netdev_info(dev, "Setting %s-duplex based on MII #%d link partner ability of %04x\n",
+ tp->full_duplex ? "full" : "half",
+ tp->phys[0], mii_lpa);
+ NETDRV_W8(Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
+ NETDRV_W8(Cfg9346, Cfg9346_Lock);
}
}
- DPRINTK ("%s: Media selection tick, Link partner %4.4x.\n",
- dev->name, NETDRV_R16 (NWayLPAR));
- DPRINTK ("%s: Other registers are IntMask %4.4x IntStatus %4.4x"
- " RxStatus %4.4x.\n", dev->name,
- NETDRV_R16 (IntrMask),
- NETDRV_R16 (IntrStatus),
- NETDRV_R32 (RxEarlyStatus));
- DPRINTK ("%s: Chip config %2.2x %2.2x.\n",
- dev->name, NETDRV_R8 (Config0),
- NETDRV_R8 (Config1));
+ netdev_dbg(dev, "Media selection tick, Link partner %04x\n",
+ NETDRV_R16(NWayLPAR));
+ netdev_dbg(dev, "Other registers are IntMask %04x IntStatus %04x RxStatus %04lx\n",
+ NETDRV_R16(IntrMask),
+ NETDRV_R16(IntrStatus),
+ NETDRV_R32(RxEarlyStatus));
+ netdev_dbg(dev, "Chip config %02x %02x\n",
+ NETDRV_R8(Config0), NETDRV_R8(Config1));
tp->timer.expires = jiffies + next_tick;
- add_timer (&tp->timer);
+ add_timer(&tp->timer);
}
-static void netdrv_tx_clear (struct net_device *dev)
+static void netdrv_tx_clear(struct net_device *dev)
{
int i;
struct netdrv_private *tp = netdev_priv(dev);
- atomic_set (&tp->cur_tx, 0);
- atomic_set (&tp->dirty_tx, 0);
+ atomic_set(&tp->cur_tx, 0);
+ atomic_set(&tp->dirty_tx, 0);
/* Dump the unsent Tx packets. */
for (i = 0; i < NUM_TX_DESC; i++) {
struct ring_info *rp = &tp->tx_info[i];
if (rp->mapping != 0) {
- pci_unmap_single (tp->pci_dev, rp->mapping,
- rp->skb->len, PCI_DMA_TODEVICE);
+ pci_unmap_single(tp->pci_dev, rp->mapping,
+ rp->skb->len, PCI_DMA_TODEVICE);
rp->mapping = 0;
}
if (rp->skb) {
- dev_kfree_skb (rp->skb);
+ dev_kfree_skb(rp->skb);
rp->skb = NULL;
dev->stats.tx_dropped++;
}
@@ -1281,7 +1288,7 @@ static void netdrv_tx_clear (struct net_device *dev)
}
-static void netdrv_tx_timeout (struct net_device *dev)
+static void netdrv_tx_timeout(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
@@ -1289,96 +1296,95 @@ static void netdrv_tx_timeout (struct net_device *dev)
u8 tmp8;
unsigned long flags;
- DPRINTK ("%s: Transmit timeout, status %2.2x %4.4x "
- "media %2.2x.\n", dev->name,
- NETDRV_R8 (ChipCmd),
- NETDRV_R16 (IntrStatus),
- NETDRV_R8 (MediaStatus));
+ netdev_dbg(dev, "Transmit timeout, status %02x %04x media %02x\n",
+ NETDRV_R8(ChipCmd),
+ NETDRV_R16(IntrStatus),
+ NETDRV_R8(MediaStatus));
/* disable Tx ASAP, if not already */
- tmp8 = NETDRV_R8 (ChipCmd);
+ tmp8 = NETDRV_R8(ChipCmd);
if (tmp8 & CmdTxEnb)
- NETDRV_W8 (ChipCmd, tmp8 & ~CmdTxEnb);
+ NETDRV_W8(ChipCmd, tmp8 & ~CmdTxEnb);
/* Disable interrupts by clearing the interrupt mask. */
- NETDRV_W16 (IntrMask, 0x0000);
+ NETDRV_W16(IntrMask, 0x0000);
/* Emit info to figure out what went wrong. */
- printk (KERN_DEBUG "%s: Tx queue start entry %d dirty entry %d.\n",
- dev->name, atomic_read (&tp->cur_tx),
- atomic_read (&tp->dirty_tx));
+ netdev_dbg(dev, "Tx queue start entry %d dirty entry %d\n",
+ atomic_read(&tp->cur_tx),
+ atomic_read(&tp->dirty_tx));
for (i = 0; i < NUM_TX_DESC; i++)
- printk (KERN_DEBUG "%s: Tx descriptor %d is %8.8lx.%s\n",
- dev->name, i, NETDRV_R32 (TxStatus0 + (i * 4)),
- i == atomic_read (&tp->dirty_tx) % NUM_TX_DESC ?
- " (queue head)" : "");
+ netdev_dbg(dev, "Tx descriptor %d is %08lx%s\n",
+ i, NETDRV_R32(TxStatus0 + (i * 4)),
+ i == atomic_read(&tp->dirty_tx) % NUM_TX_DESC ?
+ "(queue head)" : "");
/* Stop a shared interrupt from scavenging while we are. */
- spin_lock_irqsave (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
- netdrv_tx_clear (dev);
+ netdrv_tx_clear(dev);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_unlock_irqrestore(&tp->lock, flags);
/* ...and finally, reset everything */
- netdrv_hw_start (dev);
+ netdrv_hw_start(dev);
- netif_wake_queue (dev);
+ netif_wake_queue(dev);
}
-static int netdrv_start_xmit (struct sk_buff *skb, struct net_device *dev)
+static int netdrv_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
int entry;
/* Calculate the next Tx descriptor entry. */
- entry = atomic_read (&tp->cur_tx) % NUM_TX_DESC;
+ entry = atomic_read(&tp->cur_tx) % NUM_TX_DESC;
- assert (tp->tx_info[entry].skb == NULL);
- assert (tp->tx_info[entry].mapping == 0);
+ assert(tp->tx_info[entry].skb == NULL);
+ assert(tp->tx_info[entry].mapping == 0);
tp->tx_info[entry].skb = skb;
/* tp->tx_info[entry].mapping = 0; */
skb_copy_from_linear_data(skb, tp->tx_buf[entry], skb->len);
/* Note: the chip doesn't have auto-pad! */
- NETDRV_W32 (TxStatus0 + (entry * sizeof(u32)),
- tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+ NETDRV_W32(TxStatus0 + (entry * sizeof(u32)),
+ tp->tx_flag | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
dev->trans_start = jiffies;
- atomic_inc (&tp->cur_tx);
- if ((atomic_read (&tp->cur_tx) - atomic_read (&tp->dirty_tx)) >= NUM_TX_DESC)
- netif_stop_queue (dev);
+ atomic_inc(&tp->cur_tx);
+ if ((atomic_read(&tp->cur_tx) - atomic_read(&tp->dirty_tx)) >= NUM_TX_DESC)
+ netif_stop_queue(dev);
- DPRINTK ("%s: Queued Tx packet at %p size %u to slot %d.\n",
- dev->name, skb->data, skb->len, entry);
+ netdev_dbg(dev, "Queued Tx packet at %p size %u to slot %d\n",
+ skb->data, skb->len, entry);
return NETDEV_TX_OK;
}
-static void netdrv_tx_interrupt (struct net_device *dev,
- struct netdrv_private *tp,
- void *ioaddr)
+static void netdrv_tx_interrupt(struct net_device *dev,
+ struct netdrv_private *tp,
+ void *ioaddr)
{
int cur_tx, dirty_tx, tx_left;
- assert (dev != NULL);
- assert (tp != NULL);
- assert (ioaddr != NULL);
+ assert(dev != NULL);
+ assert(tp != NULL);
+ assert(ioaddr != NULL);
- dirty_tx = atomic_read (&tp->dirty_tx);
+ dirty_tx = atomic_read(&tp->dirty_tx);
- cur_tx = atomic_read (&tp->cur_tx);
+ cur_tx = atomic_read(&tp->cur_tx);
tx_left = cur_tx - dirty_tx;
while (tx_left > 0) {
int entry = dirty_tx % NUM_TX_DESC;
int txstatus;
- txstatus = NETDRV_R32 (TxStatus0 + (entry * sizeof (u32)));
+ txstatus = NETDRV_R32(TxStatus0 + (entry * sizeof(u32)));
if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
break; /* It still hasn't been Txed */
@@ -1386,12 +1392,12 @@ static void netdrv_tx_interrupt (struct net_device *dev,
/* Note: TxCarrierLost is always asserted at 100mbps. */
if (txstatus & (TxOutOfWindow | TxAborted)) {
/* There was a major error, log it. */
- DPRINTK ("%s: Transmit error, Tx status %8.8x.\n",
- dev->name, txstatus);
+ netdev_dbg(dev, "Transmit error, Tx status %#08x\n",
+ txstatus);
dev->stats.tx_errors++;
if (txstatus & TxAborted) {
dev->stats.tx_aborted_errors++;
- NETDRV_W32 (TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
+ NETDRV_W32(TxConfig, TxClearAbt | (TX_DMA_BURST << TxDMAShift));
}
if (txstatus & TxCarrierLost)
dev->stats.tx_carrier_errors++;
@@ -1417,48 +1423,45 @@ static void netdrv_tx_interrupt (struct net_device *dev,
PCI_DMA_TODEVICE);
tp->tx_info[entry].mapping = 0;
}
- dev_kfree_skb_irq (tp->tx_info[entry].skb);
+ dev_kfree_skb_irq(tp->tx_info[entry].skb);
tp->tx_info[entry].skb = NULL;
dirty_tx++;
if (dirty_tx < 0) { /* handle signed int overflow */
- atomic_sub (cur_tx, &tp->cur_tx); /* XXX racy? */
+ atomic_sub(cur_tx, &tp->cur_tx); /* XXX racy? */
dirty_tx = cur_tx - tx_left + 1;
}
- if (netif_queue_stopped (dev))
- netif_wake_queue (dev);
+ if (netif_queue_stopped(dev))
+ netif_wake_queue(dev);
- cur_tx = atomic_read (&tp->cur_tx);
+ cur_tx = atomic_read(&tp->cur_tx);
tx_left = cur_tx - dirty_tx;
}
#ifndef NETDRV_NDEBUG
- if (atomic_read (&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
- printk (KERN_ERR
- "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
- dev->name, dirty_tx, atomic_read (&tp->cur_tx));
+ if (atomic_read(&tp->cur_tx) - dirty_tx > NUM_TX_DESC) {
+ netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d\n",
+ dirty_tx, atomic_read(&tp->cur_tx));
dirty_tx += NUM_TX_DESC;
}
#endif /* NETDRV_NDEBUG */
- atomic_set (&tp->dirty_tx, dirty_tx);
+ atomic_set(&tp->dirty_tx, dirty_tx);
}
/* TODO: clean this up! Rx reset need not be this intensive */
-static void netdrv_rx_err (u32 rx_status, struct net_device *dev,
- struct netdrv_private *tp, void *ioaddr)
+static void netdrv_rx_err(u32 rx_status, struct net_device *dev,
+ struct netdrv_private *tp, void *ioaddr)
{
u8 tmp8;
int tmp_work = 1000;
- DPRINTK ("%s: Ethernet frame had errors, status %8.8x.\n",
- dev->name, rx_status);
- if (rx_status & RxTooLong) {
- DPRINTK ("%s: Oversized Ethernet frame, status %4.4x!\n",
- dev->name, rx_status);
+ netdev_dbg(dev, "Ethernet frame had errors, status %08x\n", rx_status);
+ if (rx_status & RxTooLong)
+ netdev_dbg(dev, "Oversized Ethernet frame, status %04x!\n",
+ rx_status);
/* A.C.: The chip hangs here. */
- }
dev->stats.rx_errors++;
if (rx_status & (RxBadSymbol | RxBadAlign))
dev->stats.rx_frame_errors++;
@@ -1466,56 +1469,55 @@ static void netdrv_rx_err (u32 rx_status, struct net_device *dev,
dev->stats.rx_length_errors++;
if (rx_status & RxCRCErr)
dev->stats.rx_crc_errors++;
- /* Reset the receiver, based on RealTek recommendation. (Bug?) */
+	/* Reset the receiver, based on RealTek recommendation. (Bug?) */
tp->cur_rx = 0;
/* disable receive */
- tmp8 = NETDRV_R8 (ChipCmd) & ChipCmdClear;
- NETDRV_W8_F (ChipCmd, tmp8 | CmdTxEnb);
+ tmp8 = NETDRV_R8(ChipCmd) & ChipCmdClear;
+ NETDRV_W8_F(ChipCmd, tmp8 | CmdTxEnb);
/* A.C.: Reset the multicast list. */
- netdrv_set_rx_mode (dev);
+ netdrv_set_rx_mode(dev);
/* XXX potentially temporary hack to
* restart hung receiver */
while (--tmp_work > 0) {
- tmp8 = NETDRV_R8 (ChipCmd);
+ tmp8 = NETDRV_R8(ChipCmd);
if ((tmp8 & CmdRxEnb) && (tmp8 & CmdTxEnb))
break;
- NETDRV_W8_F (ChipCmd,
- (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
+ NETDRV_W8_F(ChipCmd,
+ (tmp8 & ChipCmdClear) | CmdRxEnb | CmdTxEnb);
}
/* G.S.: Re-enable receiver */
/* XXX temporary hack to work around receiver hang */
- netdrv_set_rx_mode (dev);
+ netdrv_set_rx_mode(dev);
if (tmp_work <= 0)
- printk (KERN_WARNING PFX "tx/rx enable wait too long\n");
+ netdev_warn(dev, "tx/rx enable wait too long\n");
}
/* The data sheet doesn't describe the Rx ring at all, so I'm guessing at the
field alignments and semantics. */
-static void netdrv_rx_interrupt (struct net_device *dev,
- struct netdrv_private *tp, void *ioaddr)
+static void netdrv_rx_interrupt(struct net_device *dev,
+ struct netdrv_private *tp, void *ioaddr)
{
unsigned char *rx_ring;
u16 cur_rx;
- assert (dev != NULL);
- assert (tp != NULL);
- assert (ioaddr != NULL);
+ assert(dev != NULL);
+ assert(tp != NULL);
+ assert(ioaddr != NULL);
rx_ring = tp->rx_ring;
cur_rx = tp->cur_rx;
- DPRINTK ("%s: In netdrv_rx(), current %4.4x BufAddr %4.4x,"
- " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
- NETDRV_R16 (RxBufAddr),
- NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
+ netdev_dbg(dev, "In netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ cur_rx, NETDRV_R16(RxBufAddr),
+ NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
- while ((NETDRV_R8 (ChipCmd) & RxBufEmpty) == 0) {
+ while ((NETDRV_R8(ChipCmd) & RxBufEmpty) == 0) {
int ring_offset = cur_rx % RX_BUF_LEN;
u32 rx_status;
unsigned int rx_size;
@@ -1523,32 +1525,25 @@ static void netdrv_rx_interrupt (struct net_device *dev,
struct sk_buff *skb;
/* read size+status of next frame from DMA ring buffer */
- rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
+ rx_status = le32_to_cpu(*(u32 *)(rx_ring + ring_offset));
rx_size = rx_status >> 16;
pkt_size = rx_size - 4;
- DPRINTK ("%s: netdrv_rx() status %4.4x, size %4.4x,"
- " cur %4.4x.\n", dev->name, rx_status,
- rx_size, cur_rx);
+ netdev_dbg(dev, "netdrv_rx() status %04x, size %04x, cur %04x\n",
+ rx_status, rx_size, cur_rx);
#if defined(NETDRV_DEBUG) && (NETDRV_DEBUG > 2)
- {
- int i;
- DPRINTK ("%s: Frame contents ", dev->name);
- for (i = 0; i < 70; i++)
- printk (" %2.2x",
- rx_ring[ring_offset + i]);
- printk (".\n");
- }
+ print_hex_dump_bytes("Frame contents: ", HEX_DUMP_OFFSET,
+ &rx_ring[ring_offset], 70);
#endif
/* If Rx err or invalid rx_size/rx_status received
- * (which happens if we get lost in the ring),
+	 * (which happens if we get lost in the ring),
* Rx process gets reset, so we abort any further
* Rx processing.
*/
if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
(!(rx_status & RxStatusOK))) {
- netdrv_rx_err (rx_status, dev, tp, ioaddr);
+ netdrv_rx_err(rx_status, dev, tp, ioaddr);
return;
}
@@ -1561,71 +1556,67 @@ static void netdrv_rx_interrupt (struct net_device *dev,
* drop packets here under memory pressure.
*/
- skb = dev_alloc_skb (pkt_size + 2);
+ skb = dev_alloc_skb(pkt_size + 2);
if (skb) {
- skb_reserve (skb, 2); /* 16 byte align the IP fields. */
+ skb_reserve(skb, 2); /* 16 byte align the IP fields. */
- skb_copy_to_linear_data (skb, &rx_ring[ring_offset + 4], pkt_size);
- skb_put (skb, pkt_size);
+ skb_copy_to_linear_data(skb, &rx_ring[ring_offset + 4], pkt_size);
+ skb_put(skb, pkt_size);
- skb->protocol = eth_type_trans (skb, dev);
- netif_rx (skb);
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
dev->stats.rx_bytes += pkt_size;
dev->stats.rx_packets++;
} else {
- printk (KERN_WARNING
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netdev_warn(dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
}
cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
- NETDRV_W16_F (RxBufPtr, cur_rx - 16);
+ NETDRV_W16_F(RxBufPtr, cur_rx - 16);
}
- DPRINTK ("%s: Done netdrv_rx(), current %4.4x BufAddr %4.4x,"
- " free to %4.4x, Cmd %2.2x.\n", dev->name, cur_rx,
- NETDRV_R16 (RxBufAddr),
- NETDRV_R16 (RxBufPtr), NETDRV_R8 (ChipCmd));
+ netdev_dbg(dev, "Done netdrv_rx(), current %04x BufAddr %04x, free to %04x, Cmd %02x\n",
+ cur_rx, NETDRV_R16(RxBufAddr),
+ NETDRV_R16(RxBufPtr), NETDRV_R8(ChipCmd));
tp->cur_rx = cur_rx;
}
-static void netdrv_weird_interrupt (struct net_device *dev,
- struct netdrv_private *tp,
- void *ioaddr,
- int status, int link_changed)
+static void netdrv_weird_interrupt(struct net_device *dev,
+ struct netdrv_private *tp,
+ void *ioaddr,
+ int status, int link_changed)
{
- printk (KERN_DEBUG "%s: Abnormal interrupt, status %8.8x.\n",
- dev->name, status);
+ netdev_printk(KERN_DEBUG, dev, "Abnormal interrupt, status %08x\n",
+ status);
- assert (dev != NULL);
- assert (tp != NULL);
- assert (ioaddr != NULL);
+ assert(dev != NULL);
+ assert(tp != NULL);
+ assert(ioaddr != NULL);
/* Update the error count. */
- dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
- NETDRV_W32 (RxMissed, 0);
+ dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
+ NETDRV_W32(RxMissed, 0);
if ((status & RxUnderrun) && link_changed &&
(tp->drv_flags & HAS_LNK_CHNG)) {
/* Really link-change on new chips. */
- int lpar = NETDRV_R16 (NWayLPAR);
+ int lpar = NETDRV_R16(NWayLPAR);
int duplex = ((lpar & 0x0100) || (lpar & 0x01C0) == 0x0040 ||
- tp->duplex_lock);
+ tp->duplex_lock);
if (tp->full_duplex != duplex) {
tp->full_duplex = duplex;
- NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
- NETDRV_W8 (Config1, tp->full_duplex ? 0x60 : 0x20);
- NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+ NETDRV_W8(Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8(Config1, tp->full_duplex ? 0x60 : 0x20);
+ NETDRV_W8(Cfg9346, Cfg9346_Lock);
}
status &= ~RxUnderrun;
}
/* XXX along with netdrv_rx_err, are we double-counting errors? */
- if (status &
- (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
+ if (status & (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
dev->stats.rx_errors++;
if (status & (PCSTimeout))
@@ -1634,22 +1625,21 @@ static void netdrv_weird_interrupt (struct net_device *dev,
dev->stats.rx_fifo_errors++;
if (status & RxOverflow) {
dev->stats.rx_over_errors++;
- tp->cur_rx = NETDRV_R16 (RxBufAddr) % RX_BUF_LEN;
- NETDRV_W16_F (RxBufPtr, tp->cur_rx - 16);
+ tp->cur_rx = NETDRV_R16(RxBufAddr) % RX_BUF_LEN;
+ NETDRV_W16_F(RxBufPtr, tp->cur_rx - 16);
}
if (status & PCIErr) {
u16 pci_cmd_status;
- pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
+ pci_read_config_word(tp->pci_dev, PCI_STATUS, &pci_cmd_status);
- printk (KERN_ERR "%s: PCI Bus error %4.4x.\n",
- dev->name, pci_cmd_status);
+ netdev_err(dev, "PCI Bus error %04x\n", pci_cmd_status);
}
}
/* The interrupt handler does all of the Rx thread work and cleans up
after the Tx thread. */
-static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
+static irqreturn_t netdrv_interrupt(int irq, void *dev_instance)
{
struct net_device *dev = (struct net_device *) dev_instance;
struct netdrv_private *tp = netdev_priv(dev);
@@ -1658,22 +1648,21 @@ static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
int status = 0, link_changed = 0; /* avoid bogus "uninit" warning */
int handled = 0;
- spin_lock (&tp->lock);
+ spin_lock(&tp->lock);
do {
- status = NETDRV_R16 (IntrStatus);
+ status = NETDRV_R16(IntrStatus);
- /* h/w no longer present (hotplug?) or major error, bail */
+		/* h/w no longer present (hotplug?) or major error, bail */
if (status == 0xFFFF)
break;
handled = 1;
/* Acknowledge all of the current interrupt sources ASAP */
- NETDRV_W16_F (IntrStatus, status);
+ NETDRV_W16_F(IntrStatus, status);
- DPRINTK ("%s: interrupt status=%#4.4x new intstat=%#4.4x.\n",
- dev->name, status,
- NETDRV_R16 (IntrStatus));
+ netdev_dbg(dev, "interrupt status=%#04x new intstat=%#04x\n",
+ status, NETDRV_R16(IntrStatus));
if ((status &
(PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
@@ -1682,69 +1671,67 @@ static irqreturn_t netdrv_interrupt (int irq, void *dev_instance)
/* Check uncommon events with one test. */
if (status & (PCIErr | PCSTimeout | RxUnderrun | RxOverflow |
- RxFIFOOver | TxErr | RxErr))
- netdrv_weird_interrupt (dev, tp, ioaddr,
- status, link_changed);
+ RxFIFOOver | TxErr | RxErr))
+ netdrv_weird_interrupt(dev, tp, ioaddr,
+ status, link_changed);
if (status & (RxOK | RxUnderrun | RxOverflow | RxFIFOOver)) /* Rx interrupt */
- netdrv_rx_interrupt (dev, tp, ioaddr);
+ netdrv_rx_interrupt(dev, tp, ioaddr);
if (status & (TxOK | TxErr))
- netdrv_tx_interrupt (dev, tp, ioaddr);
+ netdrv_tx_interrupt(dev, tp, ioaddr);
boguscnt--;
} while (boguscnt > 0);
if (boguscnt <= 0) {
- printk (KERN_WARNING
- "%s: Too much work at interrupt, "
- "IntrStatus=0x%4.4x.\n", dev->name,
- status);
+ netdev_warn(dev, "Too much work at interrupt, IntrStatus=%#04x\n",
+ status);
/* Clear all interrupt sources. */
- NETDRV_W16 (IntrStatus, 0xffff);
+ NETDRV_W16(IntrStatus, 0xffff);
}
- spin_unlock (&tp->lock);
+ spin_unlock(&tp->lock);
- DPRINTK ("%s: exiting interrupt, intr_status=%#4.4x.\n",
- dev->name, NETDRV_R16 (IntrStatus));
+ netdev_dbg(dev, "exiting interrupt, intr_status=%#04x\n",
+ NETDRV_R16(IntrStatus));
return IRQ_RETVAL(handled);
}
-static int netdrv_close (struct net_device *dev)
+static int netdrv_close(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- netif_stop_queue (dev);
+ netif_stop_queue(dev);
- DPRINTK ("%s: Shutting down ethercard, status was 0x%4.4x.\n",
- dev->name, NETDRV_R16 (IntrStatus));
+ netdev_dbg(dev, "Shutting down ethercard, status was %#04x\n",
+ NETDRV_R16(IntrStatus));
- del_timer_sync (&tp->timer);
+ del_timer_sync(&tp->timer);
- spin_lock_irqsave (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
/* Stop the chip's Tx and Rx DMA processes. */
- NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear));
+ NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
/* Disable interrupts by clearing the interrupt mask. */
- NETDRV_W16 (IntrMask, 0x0000);
+ NETDRV_W16(IntrMask, 0x0000);
/* Update the error counts. */
- dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
- NETDRV_W32 (RxMissed, 0);
+ dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
+ NETDRV_W32(RxMissed, 0);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_unlock_irqrestore(&tp->lock, flags);
- free_irq (dev->irq, dev);
+ free_irq(dev->irq, dev);
- netdrv_tx_clear (dev);
+ netdrv_tx_clear(dev);
pci_free_consistent(tp->pci_dev, RX_BUF_TOT_LEN,
tp->rx_ring, tp->rx_ring_dma);
@@ -1754,23 +1741,23 @@ static int netdrv_close (struct net_device *dev)
tp->tx_bufs = NULL;
/* Green! Put the chip in low-power mode. */
- NETDRV_W8 (Cfg9346, Cfg9346_Unlock);
- NETDRV_W8 (Config1, 0x03);
- NETDRV_W8 (Cfg9346, Cfg9346_Lock);
+ NETDRV_W8(Cfg9346, Cfg9346_Unlock);
+ NETDRV_W8(Config1, 0x03);
+ NETDRV_W8(Cfg9346, Cfg9346_Lock);
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
return 0;
}
-static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
+static int netdrv_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct netdrv_private *tp = netdev_priv(dev);
struct mii_ioctl_data *data = if_mii(rq);
unsigned long flags;
int rc = 0;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
switch (cmd) {
case SIOCGMIIPHY: /* Get address of MII PHY in use. */
@@ -1778,15 +1765,15 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
/* Fall Through */
case SIOCGMIIREG: /* Read MII PHY register. */
- spin_lock_irqsave (&tp->lock, flags);
- data->val_out = mdio_read (dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
+ data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+ spin_unlock_irqrestore(&tp->lock, flags);
break;
case SIOCSMIIREG: /* Write MII PHY register. */
- spin_lock_irqsave (&tp->lock, flags);
- mdio_write (dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
+ mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+ spin_unlock_irqrestore(&tp->lock, flags);
break;
default:
@@ -1794,43 +1781,43 @@ static int netdrv_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
break;
}
- DPRINTK ("EXIT, returning %d\n", rc);
+ DPRINTK("EXIT, returning %d\n", rc);
return rc;
}
/* Set or clear the multicast filter for this adaptor.
This routine is not state sensitive and need not be SMP locked. */
-static void netdrv_set_rx_mode (struct net_device *dev)
+static void netdrv_set_rx_mode(struct net_device *dev)
{
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
u32 mc_filter[2]; /* Multicast hash filter */
- int i, rx_mode;
+ int rx_mode;
u32 tmp;
- DPRINTK ("ENTER\n");
+ DPRINTK("ENTER\n");
- DPRINTK ("%s: netdrv_set_rx_mode(%4.4x) done -- Rx config %8.8x.\n",
- dev->name, dev->flags, NETDRV_R32 (RxConfig));
+ netdev_dbg(dev, "%s(%04x) done -- Rx config %08lx\n",
+ __func__, dev->flags, NETDRV_R32(RxConfig));
/* Note: do not reorder, GCC is clever about common statements. */
if (dev->flags & IFF_PROMISC) {
rx_mode =
- AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
- AcceptAllPhys;
+ AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
+ AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct dev_mc_list *mclist;
+
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -1838,66 +1825,66 @@ static void netdrv_set_rx_mode (struct net_device *dev)
}
/* if called from irq handler, lock already acquired */
- if (!in_irq ())
- spin_lock_irq (&tp->lock);
+ if (!in_irq())
+ spin_lock_irq(&tp->lock);
/* We can safely update without stopping the chip. */
tmp = netdrv_rx_config | rx_mode |
- (NETDRV_R32 (RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
- NETDRV_W32_F (RxConfig, tmp);
- NETDRV_W32_F (MAR0 + 0, mc_filter[0]);
- NETDRV_W32_F (MAR0 + 4, mc_filter[1]);
+ (NETDRV_R32(RxConfig) & rtl_chip_info[tp->chipset].RxConfigMask);
+ NETDRV_W32_F(RxConfig, tmp);
+ NETDRV_W32_F(MAR0 + 0, mc_filter[0]);
+ NETDRV_W32_F(MAR0 + 4, mc_filter[1]);
- if (!in_irq ())
- spin_unlock_irq (&tp->lock);
+ if (!in_irq())
+ spin_unlock_irq(&tp->lock);
- DPRINTK ("EXIT\n");
+ DPRINTK("EXIT\n");
}
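
For reference, the imperfect-filter branch above hashes each multicast address with ether_crc() and uses the top six bits of the CRC to select one of the 64 MAR filter bits. A userspace sketch that mirrors that bit ordering; the example address is invented and ether_crc_sketch() is only a stand-in for the kernel helper:

/*
 * Illustrative sketch only: picking a MAR hash-filter bit from a
 * multicast address.  ether_crc_sketch() mimics the bit ordering of the
 * kernel's ether_crc(); the address below is made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t ether_crc_sketch(int length, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (--length >= 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, octet >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ octet) & 1) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t mc_filter[2] = { 0, 0 };
	int bit_nr = ether_crc_sketch(6, addr) >> 26;	/* top 6 CRC bits */

	mc_filter[bit_nr >> 5] |= 1u << (bit_nr & 31);	/* one of 64 bits */
	printf("bit %d -> MAR0+%d, mask %08x\n", bit_nr,
	       (bit_nr >> 5) * 4, (unsigned int)mc_filter[bit_nr >> 5]);
	return 0;
}
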
#ifdef CONFIG_PM
-static int netdrv_suspend (struct pci_dev *pdev, pm_message_t state)
+static int netdrv_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
struct netdrv_private *tp = netdev_priv(dev);
void *ioaddr = tp->mmio_addr;
unsigned long flags;
if (!netif_running(dev))
return 0;
- netif_device_detach (dev);
+ netif_device_detach(dev);
- spin_lock_irqsave (&tp->lock, flags);
+ spin_lock_irqsave(&tp->lock, flags);
/* Disable interrupts, stop Tx and Rx. */
- NETDRV_W16 (IntrMask, 0x0000);
- NETDRV_W8 (ChipCmd, (NETDRV_R8 (ChipCmd) & ChipCmdClear));
+ NETDRV_W16(IntrMask, 0x0000);
+ NETDRV_W8(ChipCmd, (NETDRV_R8(ChipCmd) & ChipCmdClear));
/* Update the error counts. */
- dev->stats.rx_missed_errors += NETDRV_R32 (RxMissed);
- NETDRV_W32 (RxMissed, 0);
+ dev->stats.rx_missed_errors += NETDRV_R32(RxMissed);
+ NETDRV_W32(RxMissed, 0);
- spin_unlock_irqrestore (&tp->lock, flags);
+ spin_unlock_irqrestore(&tp->lock, flags);
- pci_save_state (pdev);
- pci_set_power_state (pdev, PCI_D3hot);
+ pci_save_state(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
return 0;
}
-static int netdrv_resume (struct pci_dev *pdev)
+static int netdrv_resume(struct pci_dev *pdev)
{
- struct net_device *dev = pci_get_drvdata (pdev);
+ struct net_device *dev = pci_get_drvdata(pdev);
/*struct netdrv_private *tp = netdev_priv(dev);*/
if (!netif_running(dev))
return 0;
- pci_set_power_state (pdev, PCI_D0);
- pci_restore_state (pdev);
- netif_device_attach (dev);
- netdrv_hw_start (dev);
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+ netif_device_attach(dev);
+ netdrv_hw_start(dev);
return 0;
}
@@ -1917,7 +1904,7 @@ static struct pci_driver netdrv_pci_driver = {
};
-static int __init netdrv_init_module (void)
+static int __init netdrv_init_module(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
@@ -1927,9 +1914,9 @@ static int __init netdrv_init_module (void)
}
-static void __exit netdrv_cleanup_module (void)
+static void __exit netdrv_cleanup_module(void)
{
- pci_unregister_driver (&netdrv_pci_driver);
+ pci_unregister_driver(&netdrv_pci_driver);
}
diff --git a/drivers/net/pcmcia/3c574_cs.c b/drivers/net/pcmcia/3c574_cs.c
index 98938ea9e0b..3d1d3a7b7ed 100644
--- a/drivers/net/pcmcia/3c574_cs.c
+++ b/drivers/net/pcmcia/3c574_cs.c
@@ -1148,7 +1148,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
outw(SetRxFilter | RxStation | RxMulticast | RxBroadcast | RxProm,
ioaddr + EL3_CMD);
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
else
outw(SetRxFilter | RxStation | RxBroadcast, ioaddr + EL3_CMD);
diff --git a/drivers/net/pcmcia/3c589_cs.c b/drivers/net/pcmcia/3c589_cs.c
index 322e11df009..091e0b00043 100644
--- a/drivers/net/pcmcia/3c589_cs.c
+++ b/drivers/net/pcmcia/3c589_cs.c
@@ -886,7 +886,7 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC)
opts |= RxMulticast | RxProm;
- else if (dev->mc_count || (dev->flags & IFF_ALLMULTI))
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
opts |= RxMulticast;
outw(opts, ioaddr + EL3_CMD);
}
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index d431b59e7d1..09291e60d30 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -779,6 +779,7 @@ static struct pcmcia_device_id axnet_ids[] = {
PCMCIA_DEVICE_PROD_ID12("CNet", "CNF301", 0xbc477dde, 0x78c5f40b),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXD", 0x5261440f, 0x436768c5),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEtherII PCC-TXD", 0x5261440f, 0x730df72e),
+ PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega FEther PCC-TXM", 0x5261440f, 0x3abbd061),
PCMCIA_DEVICE_PROD_ID12("Dynalink", "L100C16", 0x55632fd5, 0x66bc2a90),
PCMCIA_DEVICE_PROD_ID12("IO DATA", "ETXPCM", 0x547e66dc, 0x233adac2),
PCMCIA_DEVICE_PROD_ID12("Linksys", "EtherFast 10/100 PC Card (PCMPC100 V3)", 0x0733cc81, 0x232019a8),
@@ -1065,14 +1066,11 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&ei_local->page_lock, flags);
outb_p(0x00, e8390_base + EN0_IMR);
- spin_unlock_irqrestore(&ei_local->page_lock, flags);
/*
* Slow phase with lock held.
*/
- spin_lock_irqsave(&ei_local->page_lock, flags);
-
ei_local->irqlock = 1;
send_length = max(length, ETH_ZLEN);
@@ -1628,8 +1626,7 @@ static inline void make_mc_bits(u8 *bits, struct net_device *dev)
struct dev_mc_list *dmi;
u32 crc;
- for (dmi=dev->mc_list; dmi; dmi=dmi->next) {
-
+ netdev_for_each_mc_addr(dmi, dev) {
crc = ether_crc(ETH_ALEN, dmi->dmi_addr);
/*
* The 8390 uses the 6 most significant bits of the
@@ -1655,7 +1652,7 @@ static void do_set_multicast_list(struct net_device *dev)
if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
memset(ei_local->mcfilter, 0, 8);
- if (dev->mc_list)
+ if (!netdev_mc_empty(dev))
make_mc_bits(ei_local->mcfilter, dev);
} else {
/* set to accept-all */
@@ -1671,7 +1668,7 @@ static void do_set_multicast_list(struct net_device *dev)
if(dev->flags&IFF_PROMISC)
outb_p(E8390_RXCONFIG | 0x58, e8390_base + EN0_RXCR);
- else if(dev->flags&IFF_ALLMULTI || dev->mc_list)
+ else if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev))
outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR);
else
outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR);
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 7b17404d085..b9dc80b9d04 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -1187,22 +1187,20 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(3, ioaddr + RX_MODE); /* Enable promiscuous mode */
- } else if (dev->mc_count > MC_FILTERBREAK ||
+ } else if (netdev_mc_count(dev) > MC_FILTERBREAK ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
- } else if (dev->mc_count == 0) {
+ } else if (netdev_mc_empty(dev)) {
memset(mc_filter, 0x00, sizeof(mc_filter));
outb(1, ioaddr + RX_MODE); /* Ignore almost all multicasts. */
} else {
struct dev_mc_list *mclist;
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
- unsigned int bit =
- ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
+ netdev_for_each_mc_addr(mclist, dev) {
+ unsigned int bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit >> 3] |= (1 << (bit & 7));
}
outb(2, ioaddr + RX_MODE); /* Use normal mode. */
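
The same conversion repeats across these PCMCIA drivers: the open-coded walk of dev->mc_list becomes the netdev_for_each_mc_addr() helper. A userspace stand-in that only mimics the shape of the kernel API, for illustration:

/*
 * Illustrative sketch only: the structures and the macro below are
 * userspace stand-ins that mimic the shape of the kernel API, just to
 * show the iteration pattern the patches switch to.
 */
#include <stdio.h>
#include <stddef.h>

struct dev_mc_list {
	unsigned char dmi_addr[6];
	struct dev_mc_list *next;
};

struct net_device {
	struct dev_mc_list *mc_list;
};

/* stand-in for the kernel helper: visit every multicast entry */
#define netdev_for_each_mc_addr(mclist, dev) \
	for ((mclist) = (dev)->mc_list; (mclist); (mclist) = (mclist)->next)

int main(void)
{
	struct dev_mc_list b = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 }, NULL };
	struct dev_mc_list a = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, &b };
	struct net_device dev = { &a };
	struct dev_mc_list *mclist;

	netdev_for_each_mc_addr(mclist, &dev)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mclist->dmi_addr[0], mclist->dmi_addr[1],
		       mclist->dmi_addr[2], mclist->dmi_addr[3],
		       mclist->dmi_addr[4], mclist->dmi_addr[5]);
	return 0;
}
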
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 12e3233868e..c717b143f11 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1475,14 +1475,13 @@ static void set_multicast_list(struct net_device *dev)
{
mace_private *lp = netdev_priv(dev);
int adr[ETHER_ADDR_LEN] = {0}; /* Ethernet address */
- int i;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
@@ -1490,15 +1489,14 @@ static void set_multicast_list(struct net_device *dev)
#endif
/* Set multicast_num_addrs. */
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
/* Set multicast_ladrf. */
if (num_addrs > 0) {
/* Calculate multicast logical address filter */
memset(lp->multicast_ladrf, 0, MACE_LADRF_LEN);
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
memcpy(adr, dmi->dmi_addr, ETHER_ADDR_LEN);
- dmi = dmi->next;
BuildLAF(lp->multicast_ladrf, adr);
}
}
@@ -1537,15 +1535,15 @@ static void set_multicast_list(struct net_device *dev)
#ifdef PCMCIA_DEBUG
{
static int old;
- if (dev->mc_count != old) {
- old = dev->mc_count;
+ if (netdev_mc_count(dev) != old) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
dev->name, old);
}
}
#endif
- lp->multicast_num_addrs = dev->mc_count;
+ lp->multicast_num_addrs = netdev_mc_count(dev);
restore_multicast_list(dev);
} /* set_multicast_list */
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index aa57cfd1e3f..5adc662c4bf 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -1593,27 +1593,6 @@ static void smc_rx(struct net_device *dev)
/*======================================================================
- Calculate values for the hardware multicast filter hash table.
-
-======================================================================*/
-
-static void fill_multicast_tbl(int count, struct dev_mc_list *addrs,
- u_char *multicast_table)
-{
- struct dev_mc_list *mc_addr;
-
- for (mc_addr = addrs; mc_addr && count-- > 0; mc_addr = mc_addr->next) {
- u_int position = ether_crc(6, mc_addr->dmi_addr);
-#ifndef final_version /* Verify multicast address. */
- if ((mc_addr->dmi_addr[0] & 1) == 0)
- continue;
-#endif
- multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
- }
-}
-
-/*======================================================================
-
Set the receive mode.
This routine is used by both the protocol level to notify us of
@@ -1636,9 +1615,17 @@ static void set_rx_mode(struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI)
rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
else {
- if (dev->mc_count) {
- fill_multicast_tbl(dev->mc_count, dev->mc_list,
- (u_char *)multicast_table);
+ if (!netdev_mc_empty(dev)) {
+ struct dev_mc_list *mc_addr;
+
+ netdev_for_each_mc_addr(mc_addr, dev) {
+ u_int position = ether_crc(6, mc_addr->dmi_addr);
+#ifndef final_version /* Verify multicast address. */
+ if ((mc_addr->dmi_addr[0] & 1) == 0)
+ continue;
+#endif
+ multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
+ }
}
rx_cfg_setting = RxStripCRC | RxEnable;
}
diff --git a/drivers/net/pcmcia/xirc2ps_cs.c b/drivers/net/pcmcia/xirc2ps_cs.c
index 466fc72698c..4d1802e457b 100644
--- a/drivers/net/pcmcia/xirc2ps_cs.c
+++ b/drivers/net/pcmcia/xirc2ps_cs.c
@@ -1364,47 +1364,63 @@ do_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_OK;
}
+struct set_address_info {
+ int reg_nr;
+ int page_nr;
+ int mohawk;
+ unsigned int ioaddr;
+};
+
+static void set_address(struct set_address_info *sa_info, char *addr)
+{
+ unsigned int ioaddr = sa_info->ioaddr;
+ int i;
+
+ for (i = 0; i < 6; i++) {
+ if (sa_info->reg_nr > 15) {
+ sa_info->reg_nr = 8;
+ sa_info->page_nr++;
+ SelectPage(sa_info->page_nr);
+ }
+ if (sa_info->mohawk)
+ PutByte(sa_info->reg_nr++, addr[5 - i]);
+ else
+ PutByte(sa_info->reg_nr++, addr[i]);
+ }
+}
+
/****************
* Set all addresses: This first one is the individual address,
* the next 9 addresses are taken from the multicast list and
* the rest is filled with the individual address.
*/
-static void
-set_addresses(struct net_device *dev)
+static void set_addresses(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- local_info_t *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
- unsigned char *addr;
- int i,j,k,n;
-
- SelectPage(k=0x50);
- for (i=0,j=8,n=0; ; i++, j++) {
- if (i > 5) {
- if (++n > 9)
- break;
- i = 0;
- if (n > 1 && n <= dev->mc_count && dmi) {
- dmi = dmi->next;
- }
- }
- if (j > 15) {
- j = 8;
- k++;
- SelectPage(k);
- }
-
- if (n && n <= dev->mc_count && dmi)
- addr = dmi->dmi_addr;
- else
- addr = dev->dev_addr;
+ unsigned int ioaddr = dev->base_addr;
+ local_info_t *lp = netdev_priv(dev);
+ struct dev_mc_list *dmi;
+ struct set_address_info sa_info;
+ int i;
- if (lp->mohawk)
- PutByte(j, addr[5-i]);
- else
- PutByte(j, addr[i]);
- }
- SelectPage(0);
+ /*
+	 * Set up the info structure so that the first set_address() call
+	 * selects the right page. Hence the initial values below.
+ */
+ sa_info.reg_nr = 15 + 1;
+ sa_info.page_nr = 0x50 - 1;
+ sa_info.mohawk = lp->mohawk;
+ sa_info.ioaddr = ioaddr;
+
+ set_address(&sa_info, dev->dev_addr);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev) {
+ if (i++ == 9)
+ break;
+ set_address(&sa_info, dmi->dmi_addr);
+ }
+ while (i++ < 9)
+ set_address(&sa_info, dev->dev_addr);
+ SelectPage(0);
}
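
The new set_address() helper keeps a register/page cursor across calls, so successive six-byte addresses fall into registers 8..15 of pages 0x50, 0x51, and so on. A small sketch of that cursor logic, with stand-in names rather than the driver's types:

/*
 * Illustrative sketch only: how a register/page cursor advances while
 * writing ten six-byte addresses.  "struct cursor" is a stand-in for the
 * driver's set_address_info; no hardware access is modelled here.
 */
#include <stdio.h>

struct cursor {
	int reg_nr;
	int page_nr;
};

static void write_one_address(struct cursor *c, int addr_index)
{
	int i;

	for (i = 0; i < 6; i++) {
		if (c->reg_nr > 15) {	/* page full: move to the next one */
			c->reg_nr = 8;
			c->page_nr++;
		}
		printf("addr %d byte %d -> page 0x%02x reg %d\n",
		       addr_index, i, c->page_nr, c->reg_nr++);
	}
}

int main(void)
{
	/* start just past the end so the first call selects page 0x50 */
	struct cursor c = { .reg_nr = 15 + 1, .page_nr = 0x50 - 1 };
	int n;

	for (n = 0; n < 10; n++)	/* individual address plus nine slots */
		write_one_address(&c, n);
	return 0;
}
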
/****************
@@ -1424,9 +1440,9 @@ set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* snoop */
PutByte(XIRCREG42_SWC1, value | 0x06); /* set MPE and PME */
- } else if (dev->mc_count > 9 || (dev->flags & IFF_ALLMULTI)) {
+ } else if (netdev_mc_count(dev) > 9 || (dev->flags & IFF_ALLMULTI)) {
PutByte(XIRCREG42_SWC1, value | 0x02); /* set MPE */
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
/* the chip can filter 9 addresses perfectly */
PutByte(XIRCREG42_SWC1, value | 0x01);
SelectPage(0x40);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index e154677ff70..084d78dd163 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -21,6 +21,8 @@
*
*************************************************************************/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "pcnet32"
#define DRV_VERSION "1.35"
#define DRV_RELDATE "21.Apr.2008"
@@ -50,16 +52,16 @@ static const char *const version =
#include <linux/spinlock.h>
#include <linux/moduleparam.h>
#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
#include <asm/irq.h>
/*
* PCI device identifiers for "new style" Linux PCI Device Drivers
*/
-static struct pci_device_id pcnet32_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(pcnet32_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE), },
@@ -83,7 +85,7 @@ static int cards_found;
static unsigned int pcnet32_portlist[] __initdata =
{ 0x300, 0x320, 0x340, 0x360, 0 };
-static int pcnet32_debug = 0;
+static int pcnet32_debug;
static int tx_start = 1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
static int pcnet32vlb; /* check for VLB cards ? */
@@ -390,7 +392,7 @@ static struct pcnet32_access pcnet32_wio = {
static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
{
outl(index, addr + PCNET32_DWIO_RAP);
- return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
+ return inl(addr + PCNET32_DWIO_RDP) & 0xffff;
}
static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
@@ -402,7 +404,7 @@ static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
{
outl(index, addr + PCNET32_DWIO_RAP);
- return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
+ return inl(addr + PCNET32_DWIO_BDP) & 0xffff;
}
static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
@@ -413,7 +415,7 @@ static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
static u16 pcnet32_dwio_read_rap(unsigned long addr)
{
- return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
+ return inl(addr + PCNET32_DWIO_RAP) & 0xffff;
}
static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
@@ -487,10 +489,7 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
(1 << size),
&new_ring_dma_addr);
if (new_tx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Consistent memory allocation failed.\n",
- dev->name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
@@ -498,18 +497,14 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
GFP_ATOMIC);
if (!new_dma_addr_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_tx_ring;
}
new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!new_skb_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_lists;
}
@@ -529,15 +524,14 @@ static void pcnet32_realloc_tx_ring(struct net_device *dev,
lp->tx_skbuff = new_skb_list;
return;
- free_new_lists:
+free_new_lists:
kfree(new_dma_addr_list);
- free_new_tx_ring:
+free_new_tx_ring:
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_tx_head) *
(1 << size),
new_tx_ring,
new_ring_dma_addr);
- return;
}
/*
@@ -565,10 +559,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
(1 << size),
&new_ring_dma_addr);
if (new_rx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Consistent memory allocation failed.\n",
- dev->name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
@@ -576,18 +567,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
GFP_ATOMIC);
if (!new_dma_addr_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_rx_ring;
}
new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!new_skb_list) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory allocation failed.\n", dev->name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
goto free_new_lists;
}
@@ -599,15 +586,14 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_skb_list[new] = lp->rx_skbuff[new];
}
/* now allocate any new buffers needed */
- for (; new < size; new++ ) {
+ for (; new < size; new++) {
struct sk_buff *rx_skbuff;
new_skb_list[new] = dev_alloc_skb(PKT_BUF_SKB);
- if (!(rx_skbuff = new_skb_list[new])) {
+ rx_skbuff = new_skb_list[new];
+ if (!rx_skbuff) {
/* keep the original lists and buffers */
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
- dev->name);
+ netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ __func__);
goto free_all_new;
}
skb_reserve(rx_skbuff, NET_IP_ALIGN);
@@ -644,8 +630,8 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
lp->rx_skbuff = new_skb_list;
return;
- free_all_new:
- for (; --new >= lp->rx_ring_size; ) {
+free_all_new:
+ while (--new >= lp->rx_ring_size) {
if (new_skb_list[new]) {
pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
@@ -653,9 +639,9 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
}
}
kfree(new_skb_list);
- free_new_lists:
+free_new_lists:
kfree(new_dma_addr_list);
- free_new_rx_ring:
+free_new_rx_ring:
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
(1 << size),
@@ -838,16 +824,14 @@ static int pcnet32_set_ringparam(struct net_device *dev,
spin_unlock_irqrestore(&lp->lock, flags);
- if (netif_msg_drv(lp))
- printk(KERN_INFO
- "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
- lp->rx_ring_size, lp->tx_ring_size);
+ netif_info(lp, drv, dev, "Ring Param Settings: RX: %d, TX: %d\n",
+ lp->rx_ring_size, lp->tx_ring_size);
return 0;
}
static void pcnet32_get_strings(struct net_device *dev, u32 stringset,
- u8 * data)
+ u8 *data)
{
memcpy(data, pcnet32_gstrings_test, sizeof(pcnet32_gstrings_test));
}
@@ -871,17 +855,15 @@ static void pcnet32_ethtool_test(struct net_device *dev,
if (test->flags == ETH_TEST_FL_OFFLINE) {
rc = pcnet32_loopback_test(dev, data);
if (rc) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test failed.\n",
- dev->name);
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test failed\n");
test->flags |= ETH_TEST_FL_FAILED;
- } else if (netif_msg_hw(lp))
- printk(KERN_DEBUG "%s: Loopback test passed.\n",
- dev->name);
- } else if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: No tests to run (specify 'Offline' on ethtool).",
- dev->name);
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Loopback test passed\n");
+ } else
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "No tests to run (specify 'Offline' on ethtool)\n");
} /* end pcnet32_ethtool_test */
static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
@@ -926,40 +908,39 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
/* Initialize Transmit buffers. */
size = data_len + 15;
for (x = 0; x < numbuffs; x++) {
- if (!(skb = dev_alloc_skb(size))) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Cannot allocate skb at line: %d!\n",
- dev->name, __LINE__);
+ skb = dev_alloc_skb(size);
+ if (!skb) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Cannot allocate skb at line: %d!\n",
+ __LINE__);
goto clean_up;
- } else {
- packet = skb->data;
- skb_put(skb, size); /* create space for data */
- lp->tx_skbuff[x] = skb;
- lp->tx_ring[x].length = cpu_to_le16(-skb->len);
- lp->tx_ring[x].misc = 0;
-
- /* put DA and SA into the skb */
- for (i = 0; i < 6; i++)
- *packet++ = dev->dev_addr[i];
- for (i = 0; i < 6; i++)
- *packet++ = dev->dev_addr[i];
- /* type */
- *packet++ = 0x08;
- *packet++ = 0x06;
- /* packet number */
- *packet++ = x;
- /* fill packet with data */
- for (i = 0; i < data_len; i++)
- *packet++ = i;
-
- lp->tx_dma_addr[x] =
- pci_map_single(lp->pci_dev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
- wmb(); /* Make sure owner changes after all others are visible */
- lp->tx_ring[x].status = cpu_to_le16(status);
}
+ packet = skb->data;
+ skb_put(skb, size); /* create space for data */
+ lp->tx_skbuff[x] = skb;
+ lp->tx_ring[x].length = cpu_to_le16(-skb->len);
+ lp->tx_ring[x].misc = 0;
+
+ /* put DA and SA into the skb */
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ for (i = 0; i < 6; i++)
+ *packet++ = dev->dev_addr[i];
+ /* type */
+ *packet++ = 0x08;
+ *packet++ = 0x06;
+ /* packet number */
+ *packet++ = x;
+ /* fill packet with data */
+ for (i = 0; i < data_len; i++)
+ *packet++ = i;
+
+ lp->tx_dma_addr[x] =
+ pci_map_single(lp->pci_dev, skb->data, skb->len,
+ PCI_DMA_TODEVICE);
+ lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
+ wmb(); /* Make sure owner changes after all others are visible */
+ lp->tx_ring[x].status = cpu_to_le16(status);
}
x = a->read_bcr(ioaddr, 32); /* set internal loopback in BCR32 */
@@ -984,9 +965,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
ticks++;
}
if (ticks == 200) {
- if (netif_msg_hw(lp))
- printk("%s: Desc %d failed to reset!\n",
- dev->name, x);
+ netif_err(lp, hw, dev, "Desc %d failed to reset!\n", x);
break;
}
}
@@ -994,15 +973,14 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
wmb();
if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
- printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
+ netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
for (x = 0; x < numbuffs; x++) {
- printk(KERN_DEBUG "%s: Packet %d:\n", dev->name, x);
+ netdev_printk(KERN_DEBUG, dev, "Packet %d: ", x);
skb = lp->rx_skbuff[x];
- for (i = 0; i < size; i++) {
- printk("%02x ", *(skb->data + i));
- }
- printk("\n");
+ for (i = 0; i < size; i++)
+ pr_cont(" %02x", *(skb->data + i));
+ pr_cont("\n");
}
}
@@ -1013,11 +991,9 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
packet = lp->tx_skbuff[x]->data;
for (i = 0; i < size; i++) {
if (*(skb->data + i) != packet[i]) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Error in compare! %2x - %02x %02x\n",
- dev->name, i, *(skb->data + i),
- packet[i]);
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error in compare! %2x - %02x %02x\n",
+ i, *(skb->data + i), packet[i]);
rc = 1;
break;
}
@@ -1025,7 +1001,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
x++;
}
- clean_up:
+clean_up:
*data1 = rc;
pcnet32_purge_tx_ring(dev);
@@ -1044,7 +1020,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
}
spin_unlock_irqrestore(&lp->lock, flags);
- return (rc);
+ return rc;
} /* end pcnet32_loopback_test */
static void pcnet32_led_blink_callback(struct net_device *dev)
@@ -1056,9 +1032,8 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
int i;
spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++) {
+ for (i = 4; i < 8; i++)
a->write_bcr(ioaddr, i, a->read_bcr(ioaddr, i) ^ 0x4000);
- }
spin_unlock_irqrestore(&lp->lock, flags);
mod_timer(&lp->blink_timer, PCNET32_BLINK_TIMEOUT);
@@ -1080,9 +1055,8 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
/* Save the current value of the bcrs */
spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++) {
+ for (i = 4; i < 8; i++)
regs[i - 4] = a->read_bcr(ioaddr, i);
- }
spin_unlock_irqrestore(&lp->lock, flags);
mod_timer(&lp->blink_timer, jiffies);
@@ -1097,9 +1071,8 @@ static int pcnet32_phys_id(struct net_device *dev, u32 data)
/* Restore the original value of the bcrs */
spin_lock_irqsave(&lp->lock, flags);
- for (i = 4; i < 8; i++) {
+ for (i = 4; i < 8; i++)
a->write_bcr(ioaddr, i, regs[i - 4]);
- }
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
@@ -1136,10 +1109,8 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
spin_lock_irqsave(&lp->lock, *flags);
ticks++;
if (ticks > 200) {
- if (netif_msg_hw(lp))
- printk(KERN_DEBUG
- "%s: Error getting into suspend!\n",
- dev->name);
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "Error getting into suspend!\n");
return 0;
}
}
@@ -1184,15 +1155,13 @@ static void pcnet32_rx_entry(struct net_device *dev,
/* Discard oversize frames. */
if (unlikely(pkt_len > PKT_BUF_SIZE)) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR "%s: Impossible packet size %d!\n",
- dev->name, pkt_len);
+ netif_err(lp, drv, dev, "Impossible packet size %d!\n",
+ pkt_len);
dev->stats.rx_errors++;
return;
}
if (pkt_len < 60) {
- if (netif_msg_rx_err(lp))
- printk(KERN_ERR "%s: Runt packet!\n", dev->name);
+ netif_err(lp, rx_err, dev, "Runt packet!\n");
dev->stats.rx_errors++;
return;
}
@@ -1200,7 +1169,8 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
- if ((newskb = dev_alloc_skb(PKT_BUF_SKB))) {
+ newskb = dev_alloc_skb(PKT_BUF_SKB);
+ if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN);
skb = lp->rx_skbuff[entry];
pci_unmap_single(lp->pci_dev,
@@ -1218,15 +1188,11 @@ static void pcnet32_rx_entry(struct net_device *dev,
rx_in_place = 1;
} else
skb = NULL;
- } else {
+ } else
skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
- }
if (skb == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Memory squeeze, dropping packet.\n",
- dev->name);
+ netif_err(lp, drv, dev, "Memory squeeze, dropping packet\n");
dev->stats.rx_dropped++;
return;
}
@@ -1297,11 +1263,9 @@ static int pcnet32_tx(struct net_device *dev)
/* There was a major error, log it. */
int err_status = le32_to_cpu(lp->tx_ring[entry].misc);
dev->stats.tx_errors++;
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR
- "%s: Tx error status=%04x err_status=%08x\n",
- dev->name, status,
- err_status);
+ netif_err(lp, tx_err, dev,
+ "Tx error status=%04x err_status=%08x\n",
+ status, err_status);
if (err_status & 0x04000000)
dev->stats.tx_aborted_errors++;
if (err_status & 0x08000000)
@@ -1313,10 +1277,7 @@ static int pcnet32_tx(struct net_device *dev)
dev->stats.tx_fifo_errors++;
/* Ackk! On FIFO errors the Tx unit is turned off! */
/* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR
- "%s: Tx FIFO error!\n",
- dev->name);
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
must_restart = 1;
}
#else
@@ -1325,10 +1286,7 @@ static int pcnet32_tx(struct net_device *dev)
if (!lp->dxsuflo) { /* If controller doesn't recover ... */
/* Ackk! On FIFO errors the Tx unit is turned off! */
/* Remove this verbosity later! */
- if (netif_msg_tx_err(lp))
- printk(KERN_ERR
- "%s: Tx FIFO error!\n",
- dev->name);
+ netif_err(lp, tx_err, dev, "Tx FIFO error!\n");
must_restart = 1;
}
}
@@ -1354,11 +1312,8 @@ static int pcnet32_tx(struct net_device *dev)
delta = (lp->cur_tx - dirty_tx) & (lp->tx_mod_mask + lp->tx_ring_size);
if (delta > lp->tx_ring_size) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, lp->cur_tx,
- lp->tx_full);
+ netif_err(lp, drv, dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, lp->cur_tx, lp->tx_full);
dirty_tx += lp->tx_ring_size;
delta -= lp->tx_ring_size;
}
@@ -1421,7 +1376,7 @@ static int pcnet32_get_regs_len(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
int j = lp->phycount * PCNET32_REGS_PER_PHY;
- return ((PCNET32_NUM_REGS + j) * sizeof(u16));
+ return (PCNET32_NUM_REGS + j) * sizeof(u16);
}
static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -1445,21 +1400,20 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
*buff++ = inw(ioaddr + i);
/* read control and status registers */
- for (i = 0; i < 90; i++) {
+ for (i = 0; i < 90; i++)
*buff++ = a->read_csr(ioaddr, i);
- }
*buff++ = a->read_csr(ioaddr, 112);
*buff++ = a->read_csr(ioaddr, 114);
/* read bus configuration registers */
- for (i = 0; i < 30; i++) {
+ for (i = 0; i < 30; i++)
*buff++ = a->read_bcr(ioaddr, i);
- }
+
*buff++ = 0; /* skip bcr30 so as not to hang 79C976 */
- for (i = 31; i < 36; i++) {
+
+ for (i = 31; i < 36; i++)
*buff++ = a->read_bcr(ioaddr, i);
- }
/* read mii phy registers */
if (lp->mii) {
@@ -1535,8 +1489,7 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if (err < 0) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "failed to enable device -- err=%d\n", err);
+ pr_err("failed to enable device -- err=%d\n", err);
return err;
}
pci_set_master(pdev);
@@ -1544,29 +1497,25 @@ pcnet32_probe_pci(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = pci_resource_start(pdev, 0);
if (!ioaddr) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "card has no PCI IO resources, aborting\n");
+ pr_err("card has no PCI IO resources, aborting\n");
return -ENODEV;
}
if (!pci_dma_supported(pdev, PCNET32_DMA_MASK)) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "architecture does not support 32bit PCI busmaster DMA\n");
+ pr_err("architecture does not support 32bit PCI busmaster DMA\n");
return -ENODEV;
}
- if (request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci") ==
- NULL) {
+ if (!request_region(ioaddr, PCNET32_TOTAL_SIZE, "pcnet32_probe_pci")) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "io address range already allocated\n");
+ pr_err("io address range already allocated\n");
return -EBUSY;
}
err = pcnet32_probe1(ioaddr, 1, pdev);
- if (err < 0) {
+ if (err < 0)
pci_disable_device(pdev);
- }
+
return err;
}
@@ -1616,7 +1565,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
a = &pcnet32_dwio;
} else {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "No access methods\n");
+ pr_err("No access methods\n");
goto err_release_region;
}
}
@@ -1624,11 +1573,10 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
chip_version =
a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
if ((pcnet32_debug & NETIF_MSG_PROBE) && (pcnet32_debug & NETIF_MSG_HW))
- printk(KERN_INFO " PCnet chip version is %#x.\n",
- chip_version);
+ pr_info(" PCnet chip version is %#x\n", chip_version);
if ((chip_version & 0xfff) != 0x003) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "Unsupported chip version.\n");
+ pr_info("Unsupported chip version\n");
goto err_release_region;
}
@@ -1681,7 +1629,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
if (cards_found < MAX_UNITS && homepna[cards_found])
media |= 1; /* switch to home wiring mode */
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_DEBUG PFX "media set to %sMbit mode.\n",
+ printk(KERN_DEBUG PFX "media set to %sMbit mode\n",
(media & 1) ? "1" : "10");
a->write_bcr(ioaddr, 49, media);
break;
@@ -1697,9 +1645,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
break;
default:
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX
- "PCnet version %#x, no PCnet32 chip.\n",
- chip_version);
+ pr_info("PCnet version %#x, no PCnet32 chip\n",
+ chip_version);
goto err_release_region;
}
@@ -1721,7 +1668,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dev = alloc_etherdev(sizeof(*lp));
if (!dev) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX "Memory allocation failed.\n");
+ pr_err("Memory allocation failed\n");
ret = -ENOMEM;
goto err_release_region;
}
@@ -1730,7 +1677,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
SET_NETDEV_DEV(dev, &pdev->dev);
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
+ pr_info("%s at %#3lx,", chipname, ioaddr);
/* In most chips, after a chip reset, the ethernet address is read from the
* station address PROM at the base address and programmed into the
@@ -1755,9 +1702,8 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
!is_valid_ether_addr(dev->dev_addr)) {
if (is_valid_ether_addr(promaddr)) {
if (pcnet32_debug & NETIF_MSG_PROBE) {
- printk(" warning: CSR address invalid,\n");
- printk(KERN_INFO
- " using instead PROM address of");
+ pr_cont(" warning: CSR address invalid,\n");
+ pr_info(" using instead PROM address of");
}
memcpy(dev->dev_addr, promaddr, 6);
}
@@ -1769,54 +1715,54 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
memset(dev->dev_addr, 0, ETH_ALEN);
if (pcnet32_debug & NETIF_MSG_PROBE) {
- printk(" %pM", dev->dev_addr);
+ pr_cont(" %pM", dev->dev_addr);
/* Version 0x2623 and 0x2624 */
if (((chip_version + 1) & 0xfffe) == 0x2624) {
i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
- printk(KERN_INFO " tx_start_pt(0x%04x):", i);
+ pr_info(" tx_start_pt(0x%04x):", i);
switch (i >> 10) {
case 0:
- printk(KERN_CONT " 20 bytes,");
+ pr_cont(" 20 bytes,");
break;
case 1:
- printk(KERN_CONT " 64 bytes,");
+ pr_cont(" 64 bytes,");
break;
case 2:
- printk(KERN_CONT " 128 bytes,");
+ pr_cont(" 128 bytes,");
break;
case 3:
- printk(KERN_CONT "~220 bytes,");
+ pr_cont("~220 bytes,");
break;
}
i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
- printk(KERN_CONT " BCR18(%x):", i & 0xffff);
+ pr_cont(" BCR18(%x):", i & 0xffff);
if (i & (1 << 5))
- printk(KERN_CONT "BurstWrEn ");
+ pr_cont("BurstWrEn ");
if (i & (1 << 6))
- printk(KERN_CONT "BurstRdEn ");
+ pr_cont("BurstRdEn ");
if (i & (1 << 7))
- printk(KERN_CONT "DWordIO ");
+ pr_cont("DWordIO ");
if (i & (1 << 11))
- printk(KERN_CONT "NoUFlow ");
+ pr_cont("NoUFlow ");
i = a->read_bcr(ioaddr, 25);
- printk(KERN_INFO " SRAMSIZE=0x%04x,", i << 8);
+ pr_info(" SRAMSIZE=0x%04x,", i << 8);
i = a->read_bcr(ioaddr, 26);
- printk(KERN_CONT " SRAM_BND=0x%04x,", i << 8);
+ pr_cont(" SRAM_BND=0x%04x,", i << 8);
i = a->read_bcr(ioaddr, 27);
if (i & (1 << 14))
- printk(KERN_CONT "LowLatRx");
+ pr_cont("LowLatRx");
}
}
dev->base_addr = ioaddr;
lp = netdev_priv(dev);
/* pci_alloc_consistent returns page-aligned memory, so we do not have to check the alignment */
- if ((lp->init_block =
- pci_alloc_consistent(pdev, sizeof(*lp->init_block), &lp->init_dma_addr)) == NULL) {
+ lp->init_block = pci_alloc_consistent(pdev, sizeof(*lp->init_block),
+ &lp->init_dma_addr);
+ if (!lp->init_block) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_ERR PFX
- "Consistent memory allocation failed.\n");
+ pr_err("Consistent memory allocation failed\n");
ret = -ENOMEM;
goto err_free_netdev;
}
@@ -1890,7 +1836,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
if (pdev) { /* use the IRQ provided by PCI */
dev->irq = pdev->irq;
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(" assigned IRQ %d.\n", dev->irq);
+ pr_cont(" assigned IRQ %d\n", dev->irq);
} else {
unsigned long irq_mask = probe_irq_on();
@@ -1906,12 +1852,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
dev->irq = probe_irq_off(irq_mask);
if (!dev->irq) {
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", failed to detect IRQ line.\n");
+ pr_cont(", failed to detect IRQ line\n");
ret = -ENODEV;
goto err_free_ring;
}
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(", probed IRQ %d.\n", dev->irq);
+ pr_cont(", probed IRQ %d\n", dev->irq);
}
/* Set the mii phy_id so that we can query the link state */
@@ -1935,14 +1881,12 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
lp->phymask |= (1 << i);
lp->mii_if.phy_id = i;
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO PFX
- "Found PHY %04x:%04x at address %d.\n",
- id1, id2, i);
+ pr_info("Found PHY %04x:%04x at address %d\n",
+ id1, id2, i);
}
lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
- if (lp->phycount > 1) {
+ if (lp->phycount > 1)
lp->options |= PCNET32_PORT_MII;
- }
}
init_timer(&lp->watchdog_timer);
@@ -1966,7 +1910,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
}
if (pcnet32_debug & NETIF_MSG_PROBE)
- printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
+ pr_info("%s: registered as %s\n", dev->name, lp->name);
cards_found++;
/* enable LED writes */
@@ -1995,10 +1939,7 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->tx_ring_size,
&lp->tx_ring_dma_addr);
if (lp->tx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Consistent memory allocation failed.\n",
- name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return -ENOMEM;
}
@@ -2007,46 +1948,35 @@ static int pcnet32_alloc_ring(struct net_device *dev, const char *name)
lp->rx_ring_size,
&lp->rx_ring_dma_addr);
if (lp->rx_ring == NULL) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Consistent memory allocation failed.\n",
- name);
+ netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return -ENOMEM;
}
lp->tx_dma_addr = kcalloc(lp->tx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
if (!lp->tx_dma_addr) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
lp->rx_dma_addr = kcalloc(lp->rx_ring_size, sizeof(dma_addr_t),
GFP_ATOMIC);
if (!lp->rx_dma_addr) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
lp->tx_skbuff = kcalloc(lp->tx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!lp->tx_skbuff) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
lp->rx_skbuff = kcalloc(lp->rx_ring_size, sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!lp->rx_skbuff) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR PFX
- "%s: Memory allocation failed.\n", name);
+ netif_err(lp, drv, dev, "Memory allocation failed\n");
return -ENOMEM;
}
@@ -2115,12 +2045,11 @@ static int pcnet32_open(struct net_device *dev)
/* switch pcnet32 to 32bit mode */
lp->a.write_bcr(ioaddr, 20, 2);
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG
- "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
- dev->name, dev->irq, (u32) (lp->tx_ring_dma_addr),
- (u32) (lp->rx_ring_dma_addr),
- (u32) (lp->init_dma_addr));
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
+ __func__, dev->irq, (u32) (lp->tx_ring_dma_addr),
+ (u32) (lp->rx_ring_dma_addr),
+ (u32) (lp->init_dma_addr));
/* set/reset autoselect bit */
val = lp->a.read_bcr(ioaddr, 2) & ~2;
@@ -2155,10 +2084,8 @@ static int pcnet32_open(struct net_device *dev)
pdev->subsystem_device == PCI_SUBDEVICE_ID_AT_2701FX)) {
if (lp->options & PCNET32_PORT_ASEL) {
lp->options = PCNET32_PORT_FD | PCNET32_PORT_100;
- if (netif_msg_link(lp))
- printk(KERN_DEBUG
- "%s: Setting 100Mb-Full Duplex.\n",
- dev->name);
+ netif_printk(lp, link, KERN_DEBUG, dev,
+ "Setting 100Mb-Full Duplex\n");
}
}
if (lp->phycount < 2) {
@@ -2246,9 +2173,7 @@ static int pcnet32_open(struct net_device *dev)
}
}
lp->mii_if.phy_id = first_phy;
- if (netif_msg_link(lp))
- printk(KERN_INFO "%s: Using PHY number %d.\n",
- dev->name, first_phy);
+ netif_info(lp, link, dev, "Using PHY number %d\n", first_phy);
}
#ifdef DO_DXSUFLO
@@ -2295,18 +2220,17 @@ static int pcnet32_open(struct net_device *dev)
*/
lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
- if (netif_msg_ifup(lp))
- printk(KERN_DEBUG
- "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
- dev->name, i,
- (u32) (lp->init_dma_addr),
- lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, ifup, KERN_DEBUG, dev,
+ "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
+ i,
+ (u32) (lp->init_dma_addr),
+ lp->a.read_csr(ioaddr, CSR0));
spin_unlock_irqrestore(&lp->lock, flags);
return 0; /* Always succeed */
- err_free_ring:
+err_free_ring:
/* free any allocated skbuffs */
pcnet32_purge_rx_ring(dev);
@@ -2316,7 +2240,7 @@ static int pcnet32_open(struct net_device *dev)
*/
lp->a.write_bcr(ioaddr, 20, 4);
- err_free_irq:
+err_free_irq:
spin_unlock_irqrestore(&lp->lock, flags);
free_irq(dev->irq, dev);
return rc;
@@ -2367,14 +2291,12 @@ static int pcnet32_init_ring(struct net_device *dev)
for (i = 0; i < lp->rx_ring_size; i++) {
struct sk_buff *rx_skbuff = lp->rx_skbuff[i];
if (rx_skbuff == NULL) {
- if (!
- (rx_skbuff = lp->rx_skbuff[i] =
- dev_alloc_skb(PKT_BUF_SKB))) {
- /* there is not much, we can do at this point */
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: pcnet32_init_ring dev_alloc_skb failed.\n",
- dev->name);
+ lp->rx_skbuff[i] = dev_alloc_skb(PKT_BUF_SKB);
+ rx_skbuff = lp->rx_skbuff[i];
+ if (!rx_skbuff) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev, "%s dev_alloc_skb failed\n",
+ __func__);
return -1;
}
skb_reserve(rx_skbuff, NET_IP_ALIGN);
@@ -2424,10 +2346,9 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
break;
- if (i >= 100 && netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: pcnet32_restart timed out waiting for stop.\n",
- dev->name);
+ if (i >= 100)
+ netif_err(lp, drv, dev, "%s timed out waiting for stop\n",
+ __func__);
pcnet32_purge_tx_ring(dev);
if (pcnet32_init_ring(dev))
@@ -2451,8 +2372,7 @@ static void pcnet32_tx_timeout(struct net_device *dev)
spin_lock_irqsave(&lp->lock, flags);
/* Transmitter timeout, serious problems. */
if (pcnet32_debug & NETIF_MSG_DRV)
- printk(KERN_ERR
- "%s: transmit timed out, status %4.4x, resetting.\n",
+ pr_err("%s: transmit timed out, status %4.4x, resetting\n",
dev->name, lp->a.read_csr(ioaddr, CSR0));
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
dev->stats.tx_errors++;
@@ -2495,11 +2415,9 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
spin_lock_irqsave(&lp->lock, flags);
- if (netif_msg_tx_queued(lp)) {
- printk(KERN_DEBUG
- "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
- }
+ netif_printk(lp, tx_queued, KERN_DEBUG, dev,
+ "%s() called, csr0 %4.4x\n",
+ __func__, lp->a.read_csr(ioaddr, CSR0));
/* Default status -- will not enable Successful-TxDone
* interrupt when that option is available to us.
@@ -2558,16 +2476,14 @@ pcnet32_interrupt(int irq, void *dev_id)
csr0 = lp->a.read_csr(ioaddr, CSR0);
while ((csr0 & 0x8f00) && --boguscnt >= 0) {
- if (csr0 == 0xffff) {
+ if (csr0 == 0xffff)
break; /* PCMCIA remove happened */
- }
/* Acknowledge all of the current interrupt sources ASAP. */
lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG
- "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "interrupt csr0=%#2.2x new csr=%#2.2x\n",
+ csr0, lp->a.read_csr(ioaddr, CSR0));
/* Log misc errors. */
if (csr0 & 0x4000)
@@ -2587,10 +2503,8 @@ pcnet32_interrupt(int irq, void *dev_id)
dev->stats.rx_errors++; /* Missed a Rx frame. */
}
if (csr0 & 0x0800) {
- if (netif_msg_drv(lp))
- printk(KERN_ERR
- "%s: Bus master arbitration failure, status %4.4x.\n",
- dev->name, csr0);
+ netif_err(lp, drv, dev, "Bus master arbitration failure, status %4.4x\n",
+ csr0);
/* unlike for the lance, there is no restart needed */
}
if (napi_schedule_prep(&lp->napi)) {
@@ -2606,9 +2520,9 @@ pcnet32_interrupt(int irq, void *dev_id)
csr0 = lp->a.read_csr(ioaddr, CSR0);
}
- if (netif_msg_intr(lp))
- printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, intr, KERN_DEBUG, dev,
+ "exiting interrupt, csr0=%#4.4x\n",
+ lp->a.read_csr(ioaddr, CSR0));
spin_unlock(&lp->lock);
@@ -2630,10 +2544,9 @@ static int pcnet32_close(struct net_device *dev)
dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
- if (netif_msg_ifdown(lp))
- printk(KERN_DEBUG
- "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, lp->a.read_csr(ioaddr, CSR0));
+ netif_printk(lp, ifdown, KERN_DEBUG, dev,
+ "Shutting down ethercard, status was %2.2x\n",
+ lp->a.read_csr(ioaddr, CSR0));
/* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
@@ -2677,7 +2590,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
struct pcnet32_private *lp = netdev_priv(dev);
volatile struct pcnet32_init_block *ib = lp->init_block;
volatile __le16 *mcast_table = (__le16 *)ib->filter;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
unsigned long ioaddr = dev->base_addr;
char *addrs;
int i;
@@ -2698,9 +2611,8 @@ static void pcnet32_load_multicast(struct net_device *dev)
ib->filter[1] = 0;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
@@ -2730,9 +2642,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
csr15 = lp->a.read_csr(ioaddr, CSR15);
if (dev->flags & IFF_PROMISC) {
/* Log any net taps. */
- if (netif_msg_hw(lp))
- printk(KERN_INFO "%s: Promiscuous mode enabled.\n",
- dev->name);
+ netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
lp->init_block->mode =
cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
7);
@@ -2819,10 +2729,8 @@ static int pcnet32_check_otherphy(struct net_device *dev)
mii.phy_id = i;
if (mii_link_ok(&mii)) {
/* found PHY with active link */
- if (netif_msg_link(lp))
- printk(KERN_INFO
- "%s: Using PHY number %d.\n",
- dev->name, i);
+ netif_info(lp, link, dev, "Using PHY number %d\n",
+ i);
/* isolate inactive phy */
bmcr =
@@ -2868,8 +2776,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
if (!curr_link) {
if (prev_link || verbose) {
netif_carrier_off(dev);
- if (netif_msg_link(lp))
- printk(KERN_INFO "%s: link down\n", dev->name);
+ netif_info(lp, link, dev, "link down\n");
}
if (lp->phycount > 1) {
curr_link = pcnet32_check_otherphy(dev);
@@ -2881,12 +2788,11 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
if (netif_msg_link(lp)) {
struct ethtool_cmd ecmd;
mii_ethtool_gset(&lp->mii_if, &ecmd);
- printk(KERN_INFO
- "%s: link up, %sMbps, %s-duplex\n",
- dev->name,
- (ecmd.speed == SPEED_100) ? "100" : "10",
- (ecmd.duplex ==
- DUPLEX_FULL) ? "full" : "half");
+ netdev_info(dev, "link up, %sMbps, %s-duplex\n",
+ (ecmd.speed == SPEED_100)
+ ? "100" : "10",
+ (ecmd.duplex == DUPLEX_FULL)
+ ? "full" : "half");
}
bcr9 = lp->a.read_bcr(dev->base_addr, 9);
if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
@@ -2897,8 +2803,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
lp->a.write_bcr(dev->base_addr, 9, bcr9);
}
} else {
- if (netif_msg_link(lp))
- printk(KERN_INFO "%s: link up\n", dev->name);
+ netif_info(lp, link, dev, "link up\n");
}
}
}
@@ -3010,7 +2915,7 @@ MODULE_LICENSE("GPL");
static int __init pcnet32_init_module(void)
{
- printk(KERN_INFO "%s", version);
+ pr_info("%s", version);
pcnet32_debug = netif_msg_init(debug, PCNET32_MSG_DEFAULT);
@@ -3026,7 +2931,7 @@ static int __init pcnet32_init_module(void)
pcnet32_probe_vlbus(pcnet32_portlist);
if (cards_found && (pcnet32_debug & NETIF_MSG_PROBE))
- printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
+ pr_info("%d cards_found\n", cards_found);
return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
}
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 33c4b12a63b..f482fc4f8cf 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -18,9 +18,6 @@
#include <linux/phy.h>
#include <linux/brcmphy.h>
-#define PHY_ID_BCM50610 0x0143bd60
-#define PHY_ID_BCM50610M 0x0143bd70
-#define PHY_ID_BCM57780 0x03625d90
#define BRCM_PHY_MODEL(phydev) \
((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
@@ -823,7 +820,7 @@ static struct phy_driver bcm57780_driver = {
};
static struct phy_driver bcmac131_driver = {
- .phy_id = 0x0143bc70,
+ .phy_id = PHY_ID_BCMAC131,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCMAC131",
.features = PHY_BASIC_FEATURES |
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 6f69b9ba0df..65ed385c2ce 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -63,6 +63,7 @@
#define MII_M1111_HWCFG_MODE_COPPER_RGMII 0xb
#define MII_M1111_HWCFG_MODE_FIBER_RGMII 0x3
#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
+#define MII_M1111_HWCFG_MODE_COPPER_RTBI 0x9
#define MII_M1111_HWCFG_FIBER_COPPER_AUTO 0x8000
#define MII_M1111_HWCFG_FIBER_COPPER_RES 0x2000
@@ -269,6 +270,43 @@ static int m88e1111_config_init(struct phy_device *phydev)
return err;
}
+ if (phydev->interface == PHY_INTERFACE_MODE_RTBI) {
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
+ if (temp < 0)
+ return temp;
+ temp |= (MII_M1111_RX_DELAY | MII_M1111_TX_DELAY);
+ err = phy_write(phydev, MII_M1111_PHY_EXT_CR, temp);
+ if (err < 0)
+ return err;
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= 0x7 | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+
+ /* soft reset */
+ err = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+ do
+ temp = phy_read(phydev, MII_BMCR);
+ while (temp & BMCR_RESET);
+
+ temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
+ if (temp < 0)
+ return temp;
+ temp &= ~(MII_M1111_HWCFG_MODE_MASK | MII_M1111_HWCFG_FIBER_COPPER_RES);
+ temp |= MII_M1111_HWCFG_MODE_COPPER_RTBI | MII_M1111_HWCFG_FIBER_COPPER_AUTO;
+ err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
+ if (err < 0)
+ return err;
+ }
+
+
err = phy_write(phydev, MII_BMCR, BMCR_RESET);
if (err < 0)
return err;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index adbc0fded13..db1794546c5 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -277,6 +277,22 @@ int phy_device_register(struct phy_device *phydev)
EXPORT_SYMBOL(phy_device_register);
/**
+ * phy_find_first - finds the first PHY device on the bus
+ * @bus: the target MII bus
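+ *
+ * Returns a pointer to the first PHY found on @bus, or NULL if the bus has
+ * no PHY devices.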
+ */
+struct phy_device *phy_find_first(struct mii_bus *bus)
+{
+ int addr;
+
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ if (bus->phy_map[addr])
+ return bus->phy_map[addr];
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(phy_find_first);
+
+/**
* phy_prepare_link - prepares the PHY layer to monitor link status
* @phydev: target phy_device struct
* @handler: callback function for link status change notifications
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c
index 5123bb954dd..ed2644a5750 100644
--- a/drivers/net/phy/smsc.c
+++ b/drivers/net/phy/smsc.c
@@ -25,6 +25,7 @@
#define MII_LAN83C185_ISF 29 /* Interrupt Source Flags */
#define MII_LAN83C185_IM 30 /* Interrupt Mask */
+#define MII_LAN83C185_CTRL_STATUS 17 /* Mode/Status Register */
#define MII_LAN83C185_ISF_INT1 (1<<1) /* Auto-Negotiation Page Received */
#define MII_LAN83C185_ISF_INT2 (1<<2) /* Parallel Detection Fault */
@@ -37,8 +38,10 @@
#define MII_LAN83C185_ISF_INT_ALL (0x0e)
#define MII_LAN83C185_ISF_INT_PHYLIB_EVENTS \
- (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4)
+ (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4 | \
+ MII_LAN83C185_ISF_INT7)
+#define MII_LAN83C185_EDPWRDOWN (1 << 13) /* EDPWRDOWN */
static int smsc_phy_config_intr(struct phy_device *phydev)
{
@@ -59,9 +62,23 @@ static int smsc_phy_ack_interrupt(struct phy_device *phydev)
static int smsc_phy_config_init(struct phy_device *phydev)
{
+ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+ if (rc < 0)
+ return rc;
+
+ /* Enable energy detect mode for these SMSC transceivers */
+ rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS,
+ rc | MII_LAN83C185_EDPWRDOWN);
+ if (rc < 0)
+ return rc;
+
return smsc_phy_ack_interrupt (phydev);
}
+static int lan911x_config_init(struct phy_device *phydev)
+{
+ return smsc_phy_ack_interrupt(phydev);
+}
static struct phy_driver lan83c185_driver = {
.phy_id = 0x0007c0a0, /* OUI=0x00800f, Model#=0x0a */
@@ -147,7 +164,7 @@ static struct phy_driver lan911x_int_driver = {
/* basic functions */
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
- .config_init = smsc_phy_config_init,
+ .config_init = lan911x_config_init,
/* IRQ related */
.ack_interrupt = smsc_phy_ack_interrupt,
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 2282e729edb..6d61602208c 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -167,7 +167,7 @@ struct channel {
u8 avail; /* flag used in multilink stuff */
u8 had_frag; /* >= 1 fragments have been sent */
u32 lastseq; /* MP: last sequence # received */
- int speed; /* speed of the corresponding ppp channel*/
+ int speed; /* speed of the corresponding ppp channel*/
#endif /* CONFIG_PPP_MULTILINK */
};
@@ -1293,13 +1293,13 @@ ppp_push(struct ppp *ppp)
*/
static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
{
- int len, totlen;
- int i, bits, hdrlen, mtu;
- int flen;
- int navail, nfree, nzero;
- int nbigger;
- int totspeed;
- int totfree;
+ int len, totlen;
+ int i, bits, hdrlen, mtu;
+ int flen;
+ int navail, nfree, nzero;
+ int nbigger;
+ int totspeed;
+ int totfree;
unsigned char *p, *q;
struct list_head *list;
struct channel *pch;
@@ -1307,21 +1307,21 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
struct ppp_channel *chan;
totspeed = 0; /*total bitrate of the bundle*/
- nfree = 0; /* # channels which have no packet already queued */
- navail = 0; /* total # of usable channels (not deregistered) */
- nzero = 0; /* number of channels with zero speed associated*/
- totfree = 0; /*total # of channels available and
+ nfree = 0; /* # channels which have no packet already queued */
+ navail = 0; /* total # of usable channels (not deregistered) */
+ nzero = 0; /* number of channels with zero speed associated*/
+ totfree = 0; /*total # of channels available and
*having no queued packets before
*starting the fragmentation*/
hdrlen = (ppp->flags & SC_MP_XSHORTSEQ)? MPHDRLEN_SSN: MPHDRLEN;
- i = 0;
- list_for_each_entry(pch, &ppp->channels, clist) {
+ i = 0;
+ list_for_each_entry(pch, &ppp->channels, clist) {
navail += pch->avail = (pch->chan != NULL);
pch->speed = pch->chan->speed;
- if (pch->avail) {
+ if (pch->avail) {
if (skb_queue_empty(&pch->file.xq) ||
- !pch->had_frag) {
+ !pch->had_frag) {
if (pch->speed == 0)
nzero++;
else
@@ -1331,60 +1331,60 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
++nfree;
++totfree;
}
- if (!pch->had_frag && i < ppp->nxchan)
- ppp->nxchan = i;
+ if (!pch->had_frag && i < ppp->nxchan)
+ ppp->nxchan = i;
}
++i;
}
/*
- * Don't start sending this packet unless at least half of
- * the channels are free. This gives much better TCP
- * performance if we have a lot of channels.
+ * Don't start sending this packet unless at least half of
+ * the channels are free. This gives much better TCP
+ * performance if we have a lot of channels.
*/
- if (nfree == 0 || nfree < navail / 2)
- return 0; /* can't take now, leave it in xmit_pending */
+ if (nfree == 0 || nfree < navail / 2)
+ return 0; /* can't take now, leave it in xmit_pending */
/* Do protocol field compression (XXX this should be optional) */
- p = skb->data;
- len = skb->len;
+ p = skb->data;
+ len = skb->len;
if (*p == 0) {
++p;
--len;
}
totlen = len;
- nbigger = len % nfree;
+ nbigger = len % nfree;
- /* skip to the channel after the one we last used
- and start at that one */
+ /* skip to the channel after the one we last used
+ and start at that one */
list = &ppp->channels;
- for (i = 0; i < ppp->nxchan; ++i) {
+ for (i = 0; i < ppp->nxchan; ++i) {
list = list->next;
- if (list == &ppp->channels) {
- i = 0;
+ if (list == &ppp->channels) {
+ i = 0;
break;
}
}
- /* create a fragment for each channel */
+ /* create a fragment for each channel */
bits = B;
- while (len > 0) {
+ while (len > 0) {
list = list->next;
- if (list == &ppp->channels) {
- i = 0;
+ if (list == &ppp->channels) {
+ i = 0;
continue;
}
- pch = list_entry(list, struct channel, clist);
+ pch = list_entry(list, struct channel, clist);
++i;
if (!pch->avail)
continue;
/*
- * Skip this channel if it has a fragment pending already and
- * we haven't given a fragment to all of the free channels.
+ * Skip this channel if it has a fragment pending already and
+ * we haven't given a fragment to all of the free channels.
*/
if (pch->avail == 1) {
- if (nfree > 0)
+ if (nfree > 0)
continue;
} else {
pch->avail = 1;
@@ -1393,32 +1393,32 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* check the channel's mtu and whether it is still attached. */
spin_lock_bh(&pch->downl);
if (pch->chan == NULL) {
- /* can't use this channel, it's being deregistered */
+ /* can't use this channel, it's being deregistered */
if (pch->speed == 0)
nzero--;
else
- totspeed -= pch->speed;
+ totspeed -= pch->speed;
spin_unlock_bh(&pch->downl);
pch->avail = 0;
totlen = len;
totfree--;
nfree--;
- if (--navail == 0)
+ if (--navail == 0)
break;
continue;
}
/*
*if the channel speed is not set divide
- *the packet evenly among the free channels;
+ *the packet evenly among the free channels;
*otherwise divide it according to the speed
*of the channel we are going to transmit on
*/
flen = len;
if (nfree > 0) {
if (pch->speed == 0) {
- flen = totlen/nfree ;
+ flen = totlen/nfree;
if (nbigger > 0) {
flen++;
nbigger--;
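/*
 * Worked example of the even split above (illustrative only, ignoring the
 * speed-weighted path): with len = 100 bytes left and nfree = 3 free
 * channels, nbigger = 100 % 3 = 1, so the first fragment carries
 * totlen/nfree + 1 = 34 bytes and the remaining two carry 33 bytes each.
 */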
@@ -1436,8 +1436,8 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
}
/*
- *check if we are on the last channel or
- *we exceded the lenght of the data to
+ *check if we are on the last channel or
+ *we exceeded the length of the data to
*fragment
*/
if ((nfree <= 0) || (flen > len))
@@ -1448,29 +1448,29 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
*above formula will be equal or less than zero.
*Skip the channel in this case
*/
- if (flen <= 0) {
+ if (flen <= 0) {
pch->avail = 2;
spin_unlock_bh(&pch->downl);
continue;
}
- mtu = pch->chan->mtu - hdrlen;
- if (mtu < 4)
- mtu = 4;
+ mtu = pch->chan->mtu - hdrlen;
+ if (mtu < 4)
+ mtu = 4;
if (flen > mtu)
flen = mtu;
- if (flen == len)
- bits |= E;
- frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
+ if (flen == len)
+ bits |= E;
+ frag = alloc_skb(flen + hdrlen + (flen == 0), GFP_ATOMIC);
if (!frag)
goto noskb;
- q = skb_put(frag, flen + hdrlen);
+ q = skb_put(frag, flen + hdrlen);
- /* make the MP header */
+ /* make the MP header */
q[0] = PPP_MP >> 8;
q[1] = PPP_MP;
if (ppp->flags & SC_MP_XSHORTSEQ) {
- q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
+ q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
q[3] = ppp->nxseq;
} else {
q[2] = bits;
@@ -1483,24 +1483,24 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
/* try to send it down the channel */
chan = pch->chan;
- if (!skb_queue_empty(&pch->file.xq) ||
+ if (!skb_queue_empty(&pch->file.xq) ||
!chan->ops->start_xmit(chan, frag))
skb_queue_tail(&pch->file.xq, frag);
- pch->had_frag = 1;
+ pch->had_frag = 1;
p += flen;
- len -= flen;
+ len -= flen;
++ppp->nxseq;
bits = 0;
spin_unlock_bh(&pch->downl);
}
- ppp->nxchan = i;
+ ppp->nxchan = i;
return 1;
noskb:
spin_unlock_bh(&pch->downl);
if (ppp->debug & 1)
- printk(KERN_ERR "PPP: no memory (fragment)\n");
+ printk(KERN_ERR "PPP: no memory (fragment)\n");
++ppp->dev->stats.tx_errors;
++ppp->nxseq;
return 1; /* abandon the frame */
diff --git a/drivers/net/ps3_gelic_net.c b/drivers/net/ps3_gelic_net.c
index 0c768593aad..a849f6f23a1 100644
--- a/drivers/net/ps3_gelic_net.c
+++ b/drivers/net/ps3_gelic_net.c
@@ -568,7 +568,7 @@ void gelic_net_set_multi(struct net_device *netdev)
status);
if ((netdev->flags & IFF_ALLMULTI) ||
- (netdev->mc_count > GELIC_NET_MC_COUNT_MAX)) {
+ (netdev_mc_count(netdev) > GELIC_NET_MC_COUNT_MAX)) {
status = lv1_net_add_multicast_address(bus_id(card),
dev_id(card),
0, 1);
@@ -580,7 +580,7 @@ void gelic_net_set_multi(struct net_device *netdev)
}
/* set multicast addresses */
- for (mc = netdev->mc_list; mc; mc = mc->next) {
+ netdev_for_each_mc_addr(mc, netdev) {
addr = 0;
p = mc->dmi_addr;
for (i = 0; i < ETH_ALEN; i++) {
diff --git a/drivers/net/ps3_gelic_wireless.c b/drivers/net/ps3_gelic_wireless.c
index 227b141c4fb..2663b2fdc0b 100644
--- a/drivers/net/ps3_gelic_wireless.c
+++ b/drivers/net/ps3_gelic_wireless.c
@@ -1389,113 +1389,6 @@ static int gelic_wl_get_mode(struct net_device *netdev,
return 0;
}
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
-/* SIOCIWFIRSTPRIV */
-static int hex2bin(u8 *str, u8 *bin, unsigned int len)
-{
- unsigned int i;
- static unsigned char *hex = "0123456789ABCDEF";
- unsigned char *p, *q;
- u8 tmp;
-
- if (len != WPA_PSK_LEN * 2)
- return -EINVAL;
-
- for (i = 0; i < WPA_PSK_LEN * 2; i += 2) {
- p = strchr(hex, toupper(str[i]));
- q = strchr(hex, toupper(str[i + 1]));
- if (!p || !q) {
- pr_info("%s: unconvertible PSK digit=%d\n",
- __func__, i);
- return -EINVAL;
- }
- tmp = ((p - hex) << 4) + (q - hex);
- *bin++ = tmp;
- }
- return 0;
-};
-
-static int gelic_wl_priv_set_psk(struct net_device *net_dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
- unsigned int len;
- unsigned long irqflag;
- int ret = 0;
-
- pr_debug("%s:<- len=%d\n", __func__, data->data.length);
- len = data->data.length - 1;
- if (len <= 2)
- return -EINVAL;
-
- spin_lock_irqsave(&wl->lock, irqflag);
- if (extra[0] == '"' && extra[len - 1] == '"') {
- pr_debug("%s: passphrase mode\n", __func__);
- /* pass phrase */
- if (GELIC_WL_EURUS_PSK_MAX_LEN < (len - 2)) {
- pr_info("%s: passphrase too long\n", __func__);
- ret = -E2BIG;
- goto out;
- }
- memset(wl->psk, 0, sizeof(wl->psk));
- wl->psk_len = len - 2;
- memcpy(wl->psk, &(extra[1]), wl->psk_len);
- wl->psk_type = GELIC_EURUS_WPA_PSK_PASSPHRASE;
- } else {
- ret = hex2bin(extra, wl->psk, len);
- if (ret)
- goto out;
- wl->psk_len = WPA_PSK_LEN;
- wl->psk_type = GELIC_EURUS_WPA_PSK_BIN;
- }
- set_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat);
-out:
- spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s:->\n", __func__);
- return ret;
-}
-
-static int gelic_wl_priv_get_psk(struct net_device *net_dev,
- struct iw_request_info *info,
- union iwreq_data *data, char *extra)
-{
- struct gelic_wl_info *wl = port_wl(netdev_priv(net_dev));
- char *p;
- unsigned long irqflag;
- unsigned int i;
-
- pr_debug("%s:<-\n", __func__);
- if (!capable(CAP_NET_ADMIN))
- return -EPERM;
-
- spin_lock_irqsave(&wl->lock, irqflag);
- p = extra;
- if (test_bit(GELIC_WL_STAT_WPA_PSK_SET, &wl->stat)) {
- if (wl->psk_type == GELIC_EURUS_WPA_PSK_BIN) {
- for (i = 0; i < wl->psk_len; i++) {
- sprintf(p, "%02xu", wl->psk[i]);
- p += 2;
- }
- *p = '\0';
- data->data.length = wl->psk_len * 2;
- } else {
- *p++ = '"';
- memcpy(p, wl->psk, wl->psk_len);
- p += wl->psk_len;
- *p++ = '"';
- *p = '\0';
- data->data.length = wl->psk_len + 2;
- }
- } else
- /* no psk set */
- data->data.length = 0;
- spin_unlock_irqrestore(&wl->lock, irqflag);
- pr_debug("%s:-> %d\n", __func__, data->data.length);
- return 0;
-}
-#endif
-
/* SIOCGIWNICKN */
static int gelic_wl_get_nick(struct net_device *net_dev,
struct iw_request_info *info,
@@ -1571,8 +1464,10 @@ static int gelic_wl_start_scan(struct gelic_wl_info *wl, int always_scan,
init_completion(&wl->scan_done);
/*
 * If we already have a bss list, don't try to get a new one
+ * unless we are doing an ESSID scan
*/
- if (!always_scan && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
+ if ((!essid_len && !always_scan)
+ && wl->scan_stat == GELIC_WL_SCAN_STAT_GOT_LIST) {
pr_debug("%s: already has the list\n", __func__);
complete(&wl->scan_done);
goto out;
@@ -1673,7 +1568,7 @@ static void gelic_wl_scan_complete_event(struct gelic_wl_info *wl)
}
}
- /* put them in the newtork_list */
+ /* put them in the network_list */
for (i = 0, scan_info_size = 0, scan_info = buf;
scan_info_size < data_len;
i++, scan_info_size += be16_to_cpu(scan_info->size),
@@ -2009,7 +1904,7 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
/* PSK type */
wpa->psk_type = cpu_to_be16(wl->psk_type);
#ifdef DEBUG
- pr_debug("%s: sec=%s psktype=%s\nn", __func__,
+ pr_debug("%s: sec=%s psktype=%s\n", __func__,
wpasecstr(wpa->security),
(wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
"BIN" : "passphrase");
@@ -2019,9 +1914,9 @@ static int gelic_wl_do_wpa_setup(struct gelic_wl_info *wl)
* the debug log because this dumps your precious
* passphrase/key.
*/
- pr_debug("%s: psk=%s\n",
+ pr_debug("%s: psk=%s\n", __func__,
(wpa->psk_type == GELIC_EURUS_WPA_PSK_BIN) ?
- (char *)"N/A" : (char *)wpa->psk);
+ "N/A" : wpa->psk);
#endif
#endif
/* issue wpa setup */
@@ -2406,40 +2301,10 @@ static const iw_handler gelic_wl_wext_handler[] =
IW_IOCTL(SIOCGIWNICKN) = gelic_wl_get_nick,
};
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
-static struct iw_priv_args gelic_wl_private_args[] =
-{
- {
- .cmd = GELIC_WL_PRIV_SET_PSK,
- .set_args = IW_PRIV_TYPE_CHAR |
- (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
- .name = "set_psk"
- },
- {
- .cmd = GELIC_WL_PRIV_GET_PSK,
- .get_args = IW_PRIV_TYPE_CHAR |
- (GELIC_WL_EURUS_PSK_MAX_LEN + 2),
- .name = "get_psk"
- }
-};
-
-static const iw_handler gelic_wl_private_handler[] =
-{
- gelic_wl_priv_set_psk,
- gelic_wl_priv_get_psk,
-};
-#endif
-
static const struct iw_handler_def gelic_wl_wext_handler_def = {
.num_standard = ARRAY_SIZE(gelic_wl_wext_handler),
.standard = gelic_wl_wext_handler,
.get_wireless_stats = gelic_wl_get_wireless_stats,
-#ifdef CONFIG_GELIC_WIRELESS_OLD_PSK_INTERFACE
- .num_private = ARRAY_SIZE(gelic_wl_private_handler),
- .num_private_args = ARRAY_SIZE(gelic_wl_private_args),
- .private = gelic_wl_private_handler,
- .private_args = gelic_wl_private_args,
-#endif
};
static struct net_device * __devinit gelic_wl_alloc(struct gelic_card *card)
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index dd35066a7f8..4ef0afbcbe1 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -61,7 +61,7 @@ static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Turn on Message Signaled Interrupts.");
-static struct pci_device_id ql3xxx_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ql3xxx_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3022_DEVICE_ID)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QL3032_DEVICE_ID)},
/* required last entry */
@@ -4087,7 +4087,6 @@ static void __devexit ql3xxx_remove(struct pci_dev *pdev)
struct ql3_adapter *qdev = netdev_priv(ndev);
unregister_netdev(ndev);
- qdev = netdev_priv(ndev);
ql_disable_interrupts(qdev);
diff --git a/drivers/net/qlcnic/Makefile b/drivers/net/qlcnic/Makefile
new file mode 100644
index 00000000000..ddba83ef3f4
--- /dev/null
+++ b/drivers/net/qlcnic/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for Qlogic 1G/10G Ethernet Driver for CNA devices
+#
+
+obj-$(CONFIG_QLCNIC) := qlcnic.o
+
+qlcnic-y := qlcnic_hw.o qlcnic_main.o qlcnic_init.o \
+ qlcnic_ethtool.o qlcnic_ctx.o
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
new file mode 100644
index 00000000000..b40a851ec7d
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -0,0 +1,1126 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef _QLCNIC_H_
+#define _QLCNIC_H_
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/firmware.h>
+
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/timer.h>
+
+#include <linux/vmalloc.h>
+
+#include <linux/io.h>
+#include <asm/byteorder.h>
+
+#include "qlcnic_hdr.h"
+
+#define _QLCNIC_LINUX_MAJOR 5
+#define _QLCNIC_LINUX_MINOR 0
+#define _QLCNIC_LINUX_SUBVERSION 0
+#define QLCNIC_LINUX_VERSIONID "5.0.0"
+
+#define QLCNIC_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
+#define _major(v) (((v) >> 24) & 0xff)
+#define _minor(v) (((v) >> 16) & 0xff)
+#define _build(v) ((v) & 0xffff)
+
+/* version in image has weird encoding:
+ * 7:0 - major
+ * 15:8 - minor
+ * 31:16 - build (little endian)
+ */
+#define QLCNIC_DECODE_VERSION(v) \
+ QLCNIC_VERSION_CODE(((v) & 0xff), (((v) >> 8) & 0xff), ((v) >> 16))
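+/*
+ * Example (illustrative): an image version word of 0x00030205 decodes as
+ * major 5, minor 2, build 3, i.e. QLCNIC_DECODE_VERSION(0x00030205) ==
+ * QLCNIC_VERSION_CODE(5, 2, 3).
+ */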
+
+#define QLCNIC_NUM_FLASH_SECTORS (64)
+#define QLCNIC_FLASH_SECTOR_SIZE (64 * 1024)
+#define QLCNIC_FLASH_TOTAL_SIZE (QLCNIC_NUM_FLASH_SECTORS \
+ * QLCNIC_FLASH_SECTOR_SIZE)
+
+#define RCV_DESC_RINGSIZE(rds_ring) \
+ (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
+#define RCV_BUFF_RINGSIZE(rds_ring) \
+ (sizeof(struct qlcnic_rx_buffer) * rds_ring->num_desc)
+#define STATUS_DESC_RINGSIZE(sds_ring) \
+ (sizeof(struct status_desc) * (sds_ring)->num_desc)
+#define TX_BUFF_RINGSIZE(tx_ring) \
+ (sizeof(struct qlcnic_cmd_buffer) * tx_ring->num_desc)
+#define TX_DESC_RINGSIZE(tx_ring) \
+ (sizeof(struct cmd_desc_type0) * tx_ring->num_desc)
+
+#define QLCNIC_P3P_A0 0x50
+
+#define QLCNIC_IS_REVISION_P3P(REVISION) (REVISION >= QLCNIC_P3P_A0)
+
+#define FIRST_PAGE_GROUP_START 0
+#define FIRST_PAGE_GROUP_END 0x100000
+
+#define P3_MAX_MTU (9600)
+#define QLCNIC_MAX_ETHERHDR 32 /* This contains some padding */
+
+#define QLCNIC_P3_RX_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + ETH_DATA_LEN)
+#define QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN (QLCNIC_MAX_ETHERHDR + P3_MAX_MTU)
+#define QLCNIC_CT_DEFAULT_RX_BUF_LEN 2048
+#define QLCNIC_LRO_BUFFER_EXTRA 2048
+
+#define QLCNIC_RX_LRO_BUFFER_LENGTH (8060)
+
+/* Opcodes to be used with the commands */
+#define TX_ETHER_PKT 0x01
+#define TX_TCP_PKT 0x02
+#define TX_UDP_PKT 0x03
+#define TX_IP_PKT 0x04
+#define TX_TCP_LSO 0x05
+#define TX_TCP_LSO6 0x06
+#define TX_IPSEC 0x07
+#define TX_IPSEC_CMD 0x0a
+#define TX_TCPV6_PKT 0x0b
+#define TX_UDPV6_PKT 0x0c
+
+/* Tx defines */
+#define MAX_BUFFERS_PER_CMD 32
+#define TX_STOP_THRESH ((MAX_SKB_FRAGS >> 2) + 4)
+#define QLCNIC_MAX_TX_TIMEOUTS 2
+
+/*
+ * Following are the states of the Phantom. The Phantom will set them and
+ * the host will read them to check if the fields are correct.
+ */
+#define PHAN_INITIALIZE_FAILED 0xffff
+#define PHAN_INITIALIZE_COMPLETE 0xff01
+
+/* Host writes the following to notify that it has done the init-handshake */
+#define PHAN_INITIALIZE_ACK 0xf00f
+#define PHAN_PEG_RCV_INITIALIZED 0xff01
+
+#define NUM_RCV_DESC_RINGS 3
+#define NUM_STS_DESC_RINGS 4
+
+#define RCV_RING_NORMAL 0
+#define RCV_RING_JUMBO 1
+#define RCV_RING_LRO 2
+
+#define MIN_CMD_DESCRIPTORS 64
+#define MIN_RCV_DESCRIPTORS 64
+#define MIN_JUMBO_DESCRIPTORS 32
+
+#define MAX_CMD_DESCRIPTORS 1024
+#define MAX_RCV_DESCRIPTORS_1G 4096
+#define MAX_RCV_DESCRIPTORS_10G 8192
+#define MAX_JUMBO_RCV_DESCRIPTORS_1G 512
+#define MAX_JUMBO_RCV_DESCRIPTORS_10G 1024
+#define MAX_LRO_RCV_DESCRIPTORS 8
+
+#define DEFAULT_RCV_DESCRIPTORS_1G 2048
+#define DEFAULT_RCV_DESCRIPTORS_10G 4096
+
+#define get_next_index(index, length) \
+ (((index) + 1) & ((length) - 1))
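+/* wraps the ring index; assumes the ring length is a power of two */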
+
+#define MPORT_MULTI_FUNCTION_MODE 0x2222
+
+/*
+ * Following data structures describe the descriptors that will be used.
+ * The added fields tcpHdrSize and ipHdrSize only need to be filled in when
+ * we are doing LSO (packets larger than 1500 bytes).
+ */
+
+#define FLAGS_VLAN_TAGGED 0x10
+#define FLAGS_VLAN_OOB 0x40
+
+#define qlcnic_set_tx_vlan_tci(cmd_desc, v) \
+ (cmd_desc)->vlan_TCI = cpu_to_le16(v);
+#define qlcnic_set_cmd_desc_port(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) & 0x0F))
+#define qlcnic_set_cmd_desc_ctxid(cmd_desc, var) \
+ ((cmd_desc)->port_ctxid |= ((var) << 4 & 0xF0))
+
+#define qlcnic_set_tx_port(_desc, _port) \
+ ((_desc)->port_ctxid = ((_port) & 0xf) | (((_port) << 4) & 0xf0))
+
+#define qlcnic_set_tx_flags_opcode(_desc, _flags, _opcode) \
+ ((_desc)->flags_opcode = \
+ cpu_to_le16(((_flags) & 0x7f) | (((_opcode) & 0x3f) << 7)))
+
+#define qlcnic_set_tx_frags_len(_desc, _frags, _len) \
+ ((_desc)->nfrags__length = \
+ cpu_to_le32(((_frags) & 0xff) | (((_len) & 0xffffff) << 8)))
+
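+/*
+ * For example, qlcnic_set_tx_flags_opcode(desc, FLAGS_VLAN_TAGGED, TX_TCP_LSO)
+ * stores cpu_to_le16((0x10 & 0x7f) | ((0x05 & 0x3f) << 7)) == cpu_to_le16(0x290)
+ * in desc->flags_opcode, matching the "12:7 opcode, 6:0 flags" layout below.
+ */
+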
+struct cmd_desc_type0 {
+ u8 tcp_hdr_offset; /* For LSO only */
+ u8 ip_hdr_offset; /* For LSO only */
+ __le16 flags_opcode; /* 15:13 unused, 12:7 opcode, 6:0 flags */
+ __le32 nfrags__length; /* 31:8 total len, 7:0 frag count */
+
+ __le64 addr_buffer2;
+
+ __le16 reference_handle;
+ __le16 mss;
+ u8 port_ctxid; /* 7:4 ctxid 3:0 port */
+ u8 total_hdr_length; /* LSO only : MAC+IP+TCP Hdr size */
+ __le16 conn_id; /* IPSec offload only */
+
+ __le64 addr_buffer3;
+ __le64 addr_buffer1;
+
+ __le16 buffer_length[4];
+
+ __le64 addr_buffer4;
+
+ __le32 reserved2;
+ __le16 reserved;
+ __le16 vlan_TCI;
+
+} __attribute__ ((aligned(64)));
+
+/* Note: sizeof(rcv_desc) should always be a multiple of 2 */
+struct rcv_desc {
+ __le16 reference_handle;
+ __le16 reserved;
+ __le32 buffer_length; /* allocated buffer length (usually 2K) */
+ __le64 addr_buffer;
+};
+
+/* opcode field in status_desc */
+#define QLCNIC_SYN_OFFLOAD 0x03
+#define QLCNIC_RXPKT_DESC 0x04
+#define QLCNIC_OLD_RXPKT_DESC 0x3f
+#define QLCNIC_RESPONSE_DESC 0x05
+#define QLCNIC_LRO_DESC 0x12
+
+/* for status field in status_desc */
+#define STATUS_CKSUM_OK (2)
+
+/* owner bits of status_desc */
+#define STATUS_OWNER_HOST (0x1ULL << 56)
+#define STATUS_OWNER_PHANTOM (0x2ULL << 56)
+
+/* Status descriptor:
+ 0-3 port, 4-7 status, 8-11 type, 12-27 total_length
+ 28-43 reference_handle, 44-47 protocol, 48-52 pkt_offset
+ 53-55 desc_cnt, 56-57 owner, 58-63 opcode
+ */
+#define qlcnic_get_sts_port(sts_data) \
+ ((sts_data) & 0x0F)
+#define qlcnic_get_sts_status(sts_data) \
+ (((sts_data) >> 4) & 0x0F)
+#define qlcnic_get_sts_type(sts_data) \
+ (((sts_data) >> 8) & 0x0F)
+#define qlcnic_get_sts_totallength(sts_data) \
+ (((sts_data) >> 12) & 0xFFFF)
+#define qlcnic_get_sts_refhandle(sts_data) \
+ (((sts_data) >> 28) & 0xFFFF)
+#define qlcnic_get_sts_prot(sts_data) \
+ (((sts_data) >> 44) & 0x0F)
+#define qlcnic_get_sts_pkt_offset(sts_data) \
+ (((sts_data) >> 48) & 0x1F)
+#define qlcnic_get_sts_desc_cnt(sts_data) \
+ (((sts_data) >> 53) & 0x7)
+#define qlcnic_get_sts_opcode(sts_data) \
+ (((sts_data) >> 58) & 0x03F)
+
+#define qlcnic_get_lro_sts_refhandle(sts_data) \
+ ((sts_data) & 0x0FFFF)
+#define qlcnic_get_lro_sts_length(sts_data) \
+ (((sts_data) >> 16) & 0x0FFFF)
+#define qlcnic_get_lro_sts_l2_hdr_offset(sts_data) \
+ (((sts_data) >> 32) & 0x0FF)
+#define qlcnic_get_lro_sts_l4_hdr_offset(sts_data) \
+ (((sts_data) >> 40) & 0x0FF)
+#define qlcnic_get_lro_sts_timestamp(sts_data) \
+ (((sts_data) >> 48) & 0x1)
+#define qlcnic_get_lro_sts_type(sts_data) \
+ (((sts_data) >> 49) & 0x7)
+#define qlcnic_get_lro_sts_push_flag(sts_data) \
+ (((sts_data) >> 52) & 0x1)
+#define qlcnic_get_lro_sts_seq_number(sts_data) \
+ ((sts_data) & 0x0FFFFFFFF)
+
+
+struct status_desc {
+ __le64 status_desc_data[2];
+} __attribute__ ((aligned(16)));
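+
+/*
+ * Sketch of how a receive path might pull fields out of a status descriptor
+ * with the accessors above (illustrative only, not the driver's actual code):
+ *
+ *	u64 sts_data = le64_to_cpu(desc->status_desc_data[0]);
+ *	int opcode = qlcnic_get_sts_opcode(sts_data);
+ *	int length = qlcnic_get_sts_totallength(sts_data);
+ *	int handle = qlcnic_get_sts_refhandle(sts_data);
+ *
+ * The owner bits (56-57) tell whether the descriptor is back in host hands.
+ */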
+
+/* UNIFIED ROMIMAGE */
+#define QLCNIC_UNI_FW_MIN_SIZE 0xc8000
+#define QLCNIC_UNI_DIR_SECT_PRODUCT_TBL 0x0
+#define QLCNIC_UNI_DIR_SECT_BOOTLD 0x6
+#define QLCNIC_UNI_DIR_SECT_FW 0x7
+
+/*Offsets */
+#define QLCNIC_UNI_CHIP_REV_OFF 10
+#define QLCNIC_UNI_FLAGS_OFF 11
+#define QLCNIC_UNI_BIOS_VERSION_OFF 12
+#define QLCNIC_UNI_BOOTLD_IDX_OFF 27
+#define QLCNIC_UNI_FIRMWARE_IDX_OFF 29
+
+struct uni_table_desc{
+ u32 findex;
+ u32 num_entries;
+ u32 entry_size;
+ u32 reserved[5];
+};
+
+struct uni_data_desc{
+ u32 findex;
+ u32 size;
+ u32 reserved[5];
+};
+
+/* Magic number to let user know flash is programmed */
+#define QLCNIC_BDINFO_MAGIC 0x12345678
+
+#define QLCNIC_BRDTYPE_P3_REF_QG 0x0021
+#define QLCNIC_BRDTYPE_P3_HMEZ 0x0022
+#define QLCNIC_BRDTYPE_P3_10G_CX4_LP 0x0023
+#define QLCNIC_BRDTYPE_P3_4_GB 0x0024
+#define QLCNIC_BRDTYPE_P3_IMEZ 0x0025
+#define QLCNIC_BRDTYPE_P3_10G_SFP_PLUS 0x0026
+#define QLCNIC_BRDTYPE_P3_10000_BASE_T 0x0027
+#define QLCNIC_BRDTYPE_P3_XG_LOM 0x0028
+#define QLCNIC_BRDTYPE_P3_4_GB_MM 0x0029
+#define QLCNIC_BRDTYPE_P3_10G_SFP_CT 0x002a
+#define QLCNIC_BRDTYPE_P3_10G_SFP_QT 0x002b
+#define QLCNIC_BRDTYPE_P3_10G_CX4 0x0031
+#define QLCNIC_BRDTYPE_P3_10G_XFP 0x0032
+#define QLCNIC_BRDTYPE_P3_10G_TP 0x0080
+
+/* Flash memory map */
+#define QLCNIC_BRDCFG_START 0x4000 /* board config */
+#define QLCNIC_BOOTLD_START 0x10000 /* bootld */
+#define QLCNIC_IMAGE_START 0x43000 /* compressed image */
+#define QLCNIC_USER_START 0x3E8000 /* Firmware info */
+
+#define QLCNIC_FW_VERSION_OFFSET (QLCNIC_USER_START+0x408)
+#define QLCNIC_FW_SIZE_OFFSET (QLCNIC_USER_START+0x40c)
+#define QLCNIC_FW_SERIAL_NUM_OFFSET (QLCNIC_USER_START+0x81c)
+#define QLCNIC_BIOS_VERSION_OFFSET (QLCNIC_USER_START+0x83c)
+
+#define QLCNIC_BRDTYPE_OFFSET (QLCNIC_BRDCFG_START+0x8)
+#define QLCNIC_FW_MAGIC_OFFSET (QLCNIC_BRDCFG_START+0x128)
+
+#define QLCNIC_FW_MIN_SIZE (0x3fffff)
+#define QLCNIC_UNIFIED_ROMIMAGE 0
+#define QLCNIC_FLASH_ROMIMAGE 1
+#define QLCNIC_UNKNOWN_ROMIMAGE 0xff
+
+#define QLCNIC_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
+#define QLCNIC_FLASH_ROMIMAGE_NAME "flash"
+
+extern char qlcnic_driver_name[];
+
+/* Number of status descriptors to handle per interrupt */
+#define MAX_STATUS_HANDLE (64)
+
+/*
+ * qlcnic_skb_frag{} contains the DMA mapping info for each SG list entry. The
+ * mapping has to be freed when DMA is complete. This is part of qlcnic_cmd_buffer{}.
+ */
+struct qlcnic_skb_frag {
+ u64 dma;
+ u64 length;
+};
+
+struct qlcnic_recv_crb {
+ u32 crb_rcv_producer[NUM_RCV_DESC_RINGS];
+ u32 crb_sts_consumer[NUM_STS_DESC_RINGS];
+ u32 sw_int_mask[NUM_STS_DESC_RINGS];
+};
+
+/* Following defines are for the state of the buffers */
+#define QLCNIC_BUFFER_FREE 0
+#define QLCNIC_BUFFER_BUSY 1
+
+/*
+ * There will be one qlcnic_buffer per skb packet. These will be
+ * used to save the dma info for pci_unmap_page()
+ */
+struct qlcnic_cmd_buffer {
+ struct sk_buff *skb;
+ struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+ u32 frag_count;
+};
+
+/* In rx_buffer, we do not need multiple fragments since it is a single buffer */
+struct qlcnic_rx_buffer {
+ struct list_head list;
+ struct sk_buff *skb;
+ u64 dma;
+ u16 ref_handle;
+ u16 state;
+};
+
+/* Board types */
+#define QLCNIC_GBE 0x01
+#define QLCNIC_XGBE 0x02
+
+/*
+ * One hardware_context{} per adapter;
+ * contains interrupt info as well as shared hardware info.
+ */
+struct qlcnic_hardware_context {
+ void __iomem *pci_base0;
+ void __iomem *ocm_win_crb;
+
+ unsigned long pci_len0;
+
+ u32 ocm_win;
+ u32 crb_win;
+
+ rwlock_t crb_lock;
+ struct mutex mem_lock;
+
+ u8 cut_through;
+ u8 revision_id;
+ u8 pci_func;
+ u8 linkup;
+ u16 port_type;
+ u16 board_type;
+};
+
+struct qlcnic_adapter_stats {
+ u64 xmitcalled;
+ u64 xmitfinished;
+ u64 rxdropped;
+ u64 txdropped;
+ u64 csummed;
+ u64 rx_pkts;
+ u64 lro_pkts;
+ u64 rxbytes;
+ u64 txbytes;
+};
+
+/*
+ * Rcv Descriptor Context. One such per Rcv Descriptor ring. There may
+ * be one Rcv Descriptor ring for normal packets, one for jumbo packets,
+ * and maybe others.
+ */
+struct qlcnic_host_rds_ring {
+ u32 producer;
+ u32 num_desc;
+ u32 dma_size;
+ u32 skb_size;
+ u32 flags;
+ void __iomem *crb_rcv_producer;
+ struct rcv_desc *desc_head;
+ struct qlcnic_rx_buffer *rx_buf_arr;
+ struct list_head free_list;
+ spinlock_t lock;
+ dma_addr_t phys_addr;
+};
+
+struct qlcnic_host_sds_ring {
+ u32 consumer;
+ u32 num_desc;
+ void __iomem *crb_sts_consumer;
+ void __iomem *crb_intr_mask;
+
+ struct status_desc *desc_head;
+ struct qlcnic_adapter *adapter;
+ struct napi_struct napi;
+ struct list_head free_list[NUM_RCV_DESC_RINGS];
+
+ int irq;
+
+ dma_addr_t phys_addr;
+ char name[IFNAMSIZ+4];
+};
+
+struct qlcnic_host_tx_ring {
+ u32 producer;
+ __le32 *hw_consumer;
+ u32 sw_consumer;
+ void __iomem *crb_cmd_producer;
+ u32 num_desc;
+
+ struct netdev_queue *txq;
+
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ struct cmd_desc_type0 *desc_head;
+ dma_addr_t phys_addr;
+ dma_addr_t hw_cons_phys_addr;
+};
+
+/*
+ * Receive context. There is one such structure per instance of the
+ * receive processing. Any state information that is relevant to
+ * the receive processing must be in this structure. Global data may be
+ * present elsewhere.
+ */
+struct qlcnic_recv_context {
+ u32 state;
+ u16 context_id;
+ u16 virt_port;
+
+ struct qlcnic_host_rds_ring *rds_rings;
+ struct qlcnic_host_sds_ring *sds_rings;
+};
+
+/* HW context creation */
+
+#define QLCNIC_OS_CRB_RETRY_COUNT 4000
+#define QLCNIC_CDRP_SIGNATURE_MAKE(pcifn, version) \
+ (((pcifn) & 0xff) | (((version) & 0xff) << 8) | (0xcafe << 16))
+
+#define QLCNIC_CDRP_CMD_BIT 0x80000000
+
+/*
+ * All responses must have the QLCNIC_CDRP_CMD_BIT cleared
+ * in the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_RSP(rsp) (rsp)
+#define QLCNIC_CDRP_IS_RSP(rsp) (((rsp) & QLCNIC_CDRP_CMD_BIT) == 0)
+
+#define QLCNIC_CDRP_RSP_OK 0x00000001
+#define QLCNIC_CDRP_RSP_FAIL 0x00000002
+#define QLCNIC_CDRP_RSP_TIMEOUT 0x00000003
+
+/*
+ * All commands must have the QLCNIC_CDRP_CMD_BIT set in
+ * the crb QLCNIC_CDRP_CRB_OFFSET.
+ */
+#define QLCNIC_CDRP_FORM_CMD(cmd) (QLCNIC_CDRP_CMD_BIT | (cmd))
+#define QLCNIC_CDRP_IS_CMD(cmd) (((cmd) & QLCNIC_CDRP_CMD_BIT) != 0)
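+
+/*
+ * Usage sketch (this mirrors qlcnic_issue_cmd() in qlcnic_ctx.c; adapter,
+ * pci_fn, cmd and rsp are illustrative locals): a command is posted by
+ * writing the signature and the command word to the CRB, then the response
+ * is read back and classified with the helpers above:
+ *
+ * QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET,
+ * QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, QLCHAL_VERSION));
+ * QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
+ * rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+ * if (QLCNIC_CDRP_IS_RSP(rsp) && rsp == QLCNIC_CDRP_RSP_OK)
+ * the command completed successfully;
+ */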
+
+#define QLCNIC_CDRP_CMD_SUBMIT_CAPABILITIES 0x00000001
+#define QLCNIC_CDRP_CMD_READ_MAX_RDS_PER_CTX 0x00000002
+#define QLCNIC_CDRP_CMD_READ_MAX_SDS_PER_CTX 0x00000003
+#define QLCNIC_CDRP_CMD_READ_MAX_RULES_PER_CTX 0x00000004
+#define QLCNIC_CDRP_CMD_READ_MAX_RX_CTX 0x00000005
+#define QLCNIC_CDRP_CMD_READ_MAX_TX_CTX 0x00000006
+#define QLCNIC_CDRP_CMD_CREATE_RX_CTX 0x00000007
+#define QLCNIC_CDRP_CMD_DESTROY_RX_CTX 0x00000008
+#define QLCNIC_CDRP_CMD_CREATE_TX_CTX 0x00000009
+#define QLCNIC_CDRP_CMD_DESTROY_TX_CTX 0x0000000a
+#define QLCNIC_CDRP_CMD_SETUP_STATISTICS 0x0000000e
+#define QLCNIC_CDRP_CMD_GET_STATISTICS 0x0000000f
+#define QLCNIC_CDRP_CMD_DELETE_STATISTICS 0x00000010
+#define QLCNIC_CDRP_CMD_SET_MTU 0x00000012
+#define QLCNIC_CDRP_CMD_READ_PHY 0x00000013
+#define QLCNIC_CDRP_CMD_WRITE_PHY 0x00000014
+#define QLCNIC_CDRP_CMD_READ_HW_REG 0x00000015
+#define QLCNIC_CDRP_CMD_GET_FLOW_CTL 0x00000016
+#define QLCNIC_CDRP_CMD_SET_FLOW_CTL 0x00000017
+#define QLCNIC_CDRP_CMD_READ_MAX_MTU 0x00000018
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO 0x00000019
+#define QLCNIC_CDRP_CMD_CONFIGURE_TOE 0x0000001a
+#define QLCNIC_CDRP_CMD_FUNC_ATTRIB 0x0000001b
+#define QLCNIC_CDRP_CMD_READ_PEXQ_PARAMETERS 0x0000001c
+#define QLCNIC_CDRP_CMD_GET_LIC_CAPABILITIES 0x0000001d
+#define QLCNIC_CDRP_CMD_READ_MAX_LRO_PER_BOARD 0x0000001e
+#define QLCNIC_CDRP_CMD_MAX 0x0000001f
+
+#define QLCNIC_RCODE_SUCCESS 0
+#define QLCNIC_RCODE_TIMEOUT 17
+#define QLCNIC_DESTROY_CTX_RESET 0
+
+/*
+ * Capabilities Announced
+ */
+#define QLCNIC_CAP0_LEGACY_CONTEXT (1)
+#define QLCNIC_CAP0_LEGACY_MN (1 << 2)
+#define QLCNIC_CAP0_LSO (1 << 6)
+#define QLCNIC_CAP0_JUMBO_CONTIGUOUS (1 << 7)
+#define QLCNIC_CAP0_LRO_CONTIGUOUS (1 << 8)
+
+/*
+ * Context state
+ */
+#define QLCHAL_VERSION 1
+
+#define QLCNIC_HOST_CTX_STATE_ACTIVE 2
+
+/*
+ * Rx context
+ */
+
+struct qlcnic_hostrq_sds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le16 msi_index;
+ __le16 rsvd; /* Padding */
+};
+
+struct qlcnic_hostrq_rds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le64 buff_size; /* Packet buffer size */
+ __le32 ring_size; /* Ring entries */
+ __le32 ring_kind; /* Class of ring */
+};
+
+struct qlcnic_hostrq_rx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 host_rds_crb_mode; /* RDS crb usage */
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ u8 reserved[128]; /* reserve space for future expansion*/
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N hostrq_rds_rings
+ - N hostrq_sds_rings */
+ char data[0];
+};
+
+struct qlcnic_cardrsp_rds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 rsvd1; /* Padding */
+};
+
+struct qlcnic_cardrsp_sds_ring {
+ __le32 host_consumer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+};
+
+struct qlcnic_cardrsp_rx_ctx {
+ /* These ring offsets are relative to data[0] below */
+ __le32 rds_ring_offset; /* Offset to RDS config */
+ __le32 sds_ring_offset; /* Offset to SDS config */
+ __le32 host_ctx_state; /* Starting State */
+ __le32 num_fn_per_port; /* How many PCI fn share the port */
+ __le16 num_rds_rings; /* Count of RDS rings */
+ __le16 num_sds_rings; /* Count of SDS rings */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ u8 reserved[128]; /* save space for future expansion */
+ /* MUST BE 64-bit aligned.
+ The following is packed:
+ - N cardrsp_rds_rings
+ - N cardrsp_sds_rings */
+ char data[0];
+};
+
+#define SIZEOF_HOSTRQ_RX(HOSTRQ_RX, rds_rings, sds_rings) \
+ (sizeof(HOSTRQ_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_hostrq_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_hostrq_sds_ring)))
+
+#define SIZEOF_CARDRSP_RX(CARDRSP_RX, rds_rings, sds_rings) \
+ (sizeof(CARDRSP_RX) + \
+ (rds_rings)*(sizeof(struct qlcnic_cardrsp_rds_ring)) + \
+ (sds_rings)*(sizeof(struct qlcnic_cardrsp_sds_ring)))
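+
+/*
+ * Layout sketch for the rx context request above, with nrds_rings RDS
+ * rings and nsds_rings SDS rings as illustrative counts: the buffer is
+ * sized with SIZEOF_HOSTRQ_RX() and the per-ring descriptors are packed
+ * after data[0] at the recorded offsets, as done in
+ * qlcnic_fw_cmd_create_rx_ctx():
+ *
+ * rq_size = SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx,
+ * nrds_rings, nsds_rings);
+ * prq->rds_ring_offset = cpu_to_le32(0);
+ * prq->sds_ring_offset = cpu_to_le32(nrds_rings *
+ * sizeof(struct qlcnic_hostrq_rds_ring));
+ */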
+
+/*
+ * Tx context
+ */
+
+struct qlcnic_hostrq_cds_ring {
+ __le64 host_phys_addr; /* Ring base addr */
+ __le32 ring_size; /* Ring entries */
+ __le32 rsvd; /* Padding */
+};
+
+struct qlcnic_hostrq_tx_ctx {
+ __le64 host_rsp_dma_addr; /* Response dma'd here */
+ __le64 cmd_cons_dma_addr; /* */
+ __le64 dummy_dma_addr; /* */
+ __le32 capabilities[4]; /* Flag bit vector */
+ __le32 host_int_crb_mode; /* Interrupt crb usage */
+ __le32 rsvd1; /* Padding */
+ __le16 rsvd2; /* Padding */
+ __le16 interrupt_ctl;
+ __le16 msi_index;
+ __le16 rsvd3; /* Padding */
+ struct qlcnic_hostrq_cds_ring cds_ring; /* Desc of cds ring */
+ u8 reserved[128]; /* future expansion */
+};
+
+struct qlcnic_cardrsp_cds_ring {
+ __le32 host_producer_crb; /* Crb to use */
+ __le32 interrupt_crb; /* Crb to use */
+};
+
+struct qlcnic_cardrsp_tx_ctx {
+ __le32 host_ctx_state; /* Starting state */
+ __le16 context_id; /* Handle for context */
+ u8 phys_port; /* Physical id of port */
+ u8 virt_port; /* Virtual/Logical id of port */
+ struct qlcnic_cardrsp_cds_ring cds_ring; /* Card cds settings */
+ u8 reserved[128]; /* future expansion */
+};
+
+#define SIZEOF_HOSTRQ_TX(HOSTRQ_TX) (sizeof(HOSTRQ_TX))
+#define SIZEOF_CARDRSP_TX(CARDRSP_TX) (sizeof(CARDRSP_TX))
+
+/* CRB */
+
+#define QLCNIC_HOST_RDS_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_RDS_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_RDS_CRB_MODE_CUSTOM 2
+#define QLCNIC_HOST_RDS_CRB_MODE_MAX 3
+
+#define QLCNIC_HOST_INT_CRB_MODE_UNIQUE 0
+#define QLCNIC_HOST_INT_CRB_MODE_SHARED 1
+#define QLCNIC_HOST_INT_CRB_MODE_NORX 2
+#define QLCNIC_HOST_INT_CRB_MODE_NOTX 3
+#define QLCNIC_HOST_INT_CRB_MODE_NORXTX 4
+
+
+/* MAC */
+
+#define MC_COUNT_P3 38
+
+#define QLCNIC_MAC_NOOP 0
+#define QLCNIC_MAC_ADD 1
+#define QLCNIC_MAC_DEL 2
+
+struct qlcnic_mac_list_s {
+ struct list_head list;
+ uint8_t mac_addr[ETH_ALEN+2];
+};
+
+/*
+ * Interrupt coalescing defaults. The defaults are for 1500 MTU. They are
+ * adjusted based on the configured MTU.
+ */
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US 3
+#define QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS 256
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS 64
+#define QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US 4
+
+#define QLCNIC_INTR_DEFAULT 0x04
+
+union qlcnic_nic_intr_coalesce_data {
+ struct {
+ u16 rx_packets;
+ u16 rx_time_us;
+ u16 tx_packets;
+ u16 tx_time_us;
+ } data;
+ u64 word;
+};
+
+struct qlcnic_nic_intr_coalesce {
+ u16 stats_time_us;
+ u16 rate_sample_time;
+ u16 flags;
+ u16 rsvd_1;
+ u32 low_threshold;
+ u32 high_threshold;
+ union qlcnic_nic_intr_coalesce_data normal;
+ union qlcnic_nic_intr_coalesce_data low;
+ union qlcnic_nic_intr_coalesce_data high;
+ union qlcnic_nic_intr_coalesce_data irq;
+};
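+
+/*
+ * Initialization sketch for the "normal" coalescing profile, using the
+ * QLCNIC_DEFAULT_INTR_COALESCE_* values above; coal is an illustrative
+ * pointer to a struct qlcnic_nic_intr_coalesce, and the other profiles
+ * follow the same pattern:
+ *
+ * coal->normal.data.rx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ * coal->normal.data.rx_packets = QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ * coal->normal.data.tx_time_us = QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
+ * coal->normal.data.tx_packets = QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
+ */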
+
+#define QLCNIC_HOST_REQUEST 0x13
+#define QLCNIC_REQUEST 0x14
+
+#define QLCNIC_MAC_EVENT 0x1
+
+#define QLCNIC_IP_UP 2
+#define QLCNIC_IP_DOWN 3
+
+/*
+ * Driver --> Firmware
+ */
+#define QLCNIC_H2C_OPCODE_START 0
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS 1
+#define QLCNIC_H2C_OPCODE_CONFIG_RSS_TBL 2
+#define QLCNIC_H2C_OPCODE_CONFIG_INTR_COALESCE 3
+#define QLCNIC_H2C_OPCODE_CONFIG_LED 4
+#define QLCNIC_H2C_OPCODE_CONFIG_PROMISCUOUS 5
+#define QLCNIC_H2C_OPCODE_CONFIG_L2_MAC 6
+#define QLCNIC_H2C_OPCODE_LRO_REQUEST 7
+#define QLCNIC_H2C_OPCODE_GET_SNMP_STATS 8
+#define QLCNIC_H2C_OPCODE_PROXY_START_REQUEST 9
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_REQUEST 10
+#define QLCNIC_H2C_OPCODE_PROXY_SET_MTU 11
+#define QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE 12
+#define QLCNIC_H2C_OPCODE_GET_FINGER_PRINT_REQUEST 13
+#define QLCNIC_H2C_OPCODE_INSTALL_LICENSE_REQUEST 14
+#define QLCNIC_H2C_OPCODE_GET_LICENSE_CAPABILITY_REQUEST 15
+#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
+#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
+#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
+#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
+#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
+#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
+#define QLCNIC_C2C_OPCODE 22
+#define QLCNIC_H2C_OPCODE_CONFIG_BRIDGING 23
+#define QLCNIC_H2C_OPCODE_CONFIG_HW_LRO 24
+#define QLCNIC_H2C_OPCODE_LAST 25
+/*
+ * Firmware --> Driver
+ */
+
+#define QLCNIC_C2H_OPCODE_START 128
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_RESPONSE 129
+#define QLCNIC_C2H_OPCODE_CONFIG_RSS_TBL_RESPONSE 130
+#define QLCNIC_C2H_OPCODE_CONFIG_MAC_RESPONSE 131
+#define QLCNIC_C2H_OPCODE_CONFIG_PROMISCUOUS_RESPONSE 132
+#define QLCNIC_C2H_OPCODE_CONFIG_L2_MAC_RESPONSE 133
+#define QLCNIC_C2H_OPCODE_LRO_DELETE_RESPONSE 134
+#define QLCNIC_C2H_OPCODE_LRO_ADD_FAILURE_RESPONSE 135
+#define QLCNIC_C2H_OPCODE_GET_SNMP_STATS 136
+#define QLCNIC_C2H_OPCODE_GET_FINGER_PRINT_REPLY 137
+#define QLCNIC_C2H_OPCODE_INSTALL_LICENSE_REPLY 138
+#define QLCNIC_C2H_OPCODE_GET_LICENSE_CAPABILITIES_REPLY 139
+#define QLCNIC_C2H_OPCODE_GET_NET_STATS_RESPONSE 140
+#define QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE 141
+#define QLCNIC_C2H_OPCODE_LAST 142
+
+#define VPORT_MISS_MODE_DROP 0 /* drop all unmatched */
+#define VPORT_MISS_MODE_ACCEPT_ALL 1 /* accept all packets */
+#define VPORT_MISS_MODE_ACCEPT_MULTI 2 /* accept unmatched multicast */
+
+#define QLCNIC_LRO_REQUEST_CLEANUP 4
+
+/* Capabilities received */
+#define QLCNIC_FW_CAPABILITY_BDG (1 << 8)
+#define QLCNIC_FW_CAPABILITY_FVLANTX (1 << 9)
+#define QLCNIC_FW_CAPABILITY_HW_LRO (1 << 10)
+
+/* module types */
+#define LINKEVENT_MODULE_NOT_PRESENT 1
+#define LINKEVENT_MODULE_OPTICAL_UNKNOWN 2
+#define LINKEVENT_MODULE_OPTICAL_SRLR 3
+#define LINKEVENT_MODULE_OPTICAL_LRM 4
+#define LINKEVENT_MODULE_OPTICAL_SFP_1G 5
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE 6
+#define LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN 7
+#define LINKEVENT_MODULE_TWINAX 8
+
+#define LINKSPEED_10GBPS 10000
+#define LINKSPEED_1GBPS 1000
+#define LINKSPEED_100MBPS 100
+#define LINKSPEED_10MBPS 10
+
+#define LINKSPEED_ENCODED_10MBPS 0
+#define LINKSPEED_ENCODED_100MBPS 1
+#define LINKSPEED_ENCODED_1GBPS 2
+
+#define LINKEVENT_AUTONEG_DISABLED 0
+#define LINKEVENT_AUTONEG_ENABLED 1
+
+#define LINKEVENT_HALF_DUPLEX 0
+#define LINKEVENT_FULL_DUPLEX 1
+
+#define LINKEVENT_LINKSPEED_MBPS 0
+#define LINKEVENT_LINKSPEED_ENCODED 1
+
+#define AUTO_FW_RESET_ENABLED 0x01
+/* firmware response header:
+ * 63:58 - message type
+ * 57:56 - owner
+ * 55:53 - desc count
+ * 52:48 - reserved
+ * 47:40 - completion id
+ * 39:32 - opcode
+ * 31:16 - error code
+ * 15:00 - reserved
+ */
+#define qlcnic_get_nic_msg_opcode(msg_hdr) \
+ ((msg_hdr >> 32) & 0xFF)
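+
+/*
+ * Example, following the header layout documented above; hdr, opcode and
+ * desc_cnt are illustrative locals and hdr is the first 64-bit word of a
+ * firmware message:
+ *
+ * opcode = qlcnic_get_nic_msg_opcode(hdr); (bits 39:32)
+ * desc_cnt = (hdr >> 53) & 0x7; (bits 55:53)
+ */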
+
+struct qlcnic_fw_msg {
+ union {
+ struct {
+ u64 hdr;
+ u64 body[7];
+ };
+ u64 words[8];
+ };
+};
+
+struct qlcnic_nic_req {
+ __le64 qhdr;
+ __le64 req_hdr;
+ __le64 words[6];
+};
+
+struct qlcnic_mac_req {
+ u8 op;
+ u8 tag;
+ u8 mac_addr[6];
+};
+
+#define QLCNIC_MSI_ENABLED 0x02
+#define QLCNIC_MSIX_ENABLED 0x04
+#define QLCNIC_LRO_ENABLED 0x08
+#define QLCNIC_BRIDGE_ENABLED 0x10
+#define QLCNIC_DIAG_ENABLED 0x20
+#define QLCNIC_IS_MSI_FAMILY(adapter) \
+ ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
+
+#define MSIX_ENTRIES_PER_ADAPTER NUM_STS_DESC_RINGS
+#define QLCNIC_MSIX_TBL_SPACE 8192
+#define QLCNIC_PCI_REG_MSIX_TBL 0x44
+
+#define QLCNIC_NETDEV_WEIGHT 128
+#define QLCNIC_ADAPTER_UP_MAGIC 777
+
+#define __QLCNIC_FW_ATTACHED 0
+#define __QLCNIC_DEV_UP 1
+#define __QLCNIC_RESETTING 2
+#define __QLCNIC_START_FW 4
+
+#define QLCNIC_INTERRUPT_TEST 1
+#define QLCNIC_LOOPBACK_TEST 2
+
+struct qlcnic_adapter {
+ struct qlcnic_hardware_context ahw;
+
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ struct list_head mac_list;
+
+ spinlock_t tx_clean_lock;
+
+ u16 num_txd;
+ u16 num_rxd;
+ u16 num_jumbo_rxd;
+ u16 num_lro_rxd;
+
+ u8 max_rds_rings;
+ u8 max_sds_rings;
+ u8 driver_mismatch;
+ u8 msix_supported;
+ u8 rx_csum;
+ u8 pci_using_dac;
+ u8 portnum;
+ u8 physical_port;
+
+ u8 mc_enabled;
+ u8 max_mc_count;
+ u8 rss_supported;
+ u8 rsrvd1;
+ u8 fw_wait_cnt;
+ u8 fw_fail_cnt;
+ u8 tx_timeo_cnt;
+ u8 need_fw_reset;
+
+ u8 has_link_events;
+ u8 fw_type;
+ u16 tx_context_id;
+ u16 mtu;
+ u16 is_up;
+
+ u16 link_speed;
+ u16 link_duplex;
+ u16 link_autoneg;
+ u16 module_type;
+
+ u32 capabilities;
+ u32 flags;
+ u32 irq;
+ u32 temp;
+
+ u32 int_vec_bit;
+ u32 heartbit;
+
+ u8 dev_state;
+ u8 diag_test;
+ u8 diag_cnt;
+ u8 rsrd1;
+ u16 rsrd2;
+
+ u8 mac_addr[ETH_ALEN];
+
+ struct qlcnic_adapter_stats stats;
+
+ struct qlcnic_recv_context recv_ctx;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ void __iomem *tgt_mask_reg;
+ void __iomem *tgt_status_reg;
+ void __iomem *crb_int_state_reg;
+ void __iomem *isr_int_vec;
+
+ struct msix_entry msix_entries[MSIX_ENTRIES_PER_ADAPTER];
+
+ struct delayed_work fw_work;
+
+ struct work_struct tx_timeout_task;
+
+ struct qlcnic_nic_intr_coalesce coal;
+
+ unsigned long state;
+ __le32 file_prd_off; /* File fw product offset */
+ u32 fw_version;
+ const struct firmware *fw;
+};
+
+int qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val);
+int qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val);
+
+u32 qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off);
+int qlcnic_hw_write_wx_2M(struct qlcnic_adapter *, ulong off, u32 data);
+int qlcnic_pci_mem_write_2M(struct qlcnic_adapter *, u64 off, u64 data);
+int qlcnic_pci_mem_read_2M(struct qlcnic_adapter *, u64 off, u64 *data);
+
+#define QLCRD32(adapter, off) \
+ (qlcnic_hw_read_wx_2M(adapter, off))
+#define QLCWR32(adapter, off, val) \
+ (qlcnic_hw_write_wx_2M(adapter, off, val))
+
+int qlcnic_pcie_sem_lock(struct qlcnic_adapter *, int, u32);
+void qlcnic_pcie_sem_unlock(struct qlcnic_adapter *, int);
+
+#define qlcnic_rom_lock(a) \
+ qlcnic_pcie_sem_lock((a), 2, QLCNIC_ROM_LOCK_ID)
+#define qlcnic_rom_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 2)
+#define qlcnic_phy_lock(a) \
+ qlcnic_pcie_sem_lock((a), 3, QLCNIC_PHY_LOCK_ID)
+#define qlcnic_phy_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 3)
+#define qlcnic_api_lock(a) \
+ qlcnic_pcie_sem_lock((a), 5, 0)
+#define qlcnic_api_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 5)
+#define qlcnic_sw_lock(a) \
+ qlcnic_pcie_sem_lock((a), 6, 0)
+#define qlcnic_sw_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 6)
+#define crb_win_lock(a) \
+ qlcnic_pcie_sem_lock((a), 7, QLCNIC_CRB_WIN_LOCK_ID)
+#define crb_win_unlock(a) \
+ qlcnic_pcie_sem_unlock((a), 7)
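+
+/*
+ * Typical usage sketch for the PCIe semaphore helpers above, as done in
+ * qlcnic_issue_cmd() in qlcnic_ctx.c: take the lock, access the shared
+ * resource, then release it:
+ *
+ * if (qlcnic_api_lock(adapter))
+ * return QLCNIC_RCODE_TIMEOUT;
+ * QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
+ * qlcnic_api_unlock(adapter);
+ */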
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter);
+int qlcnic_wol_supported(struct qlcnic_adapter *adapter);
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate);
+
+/* Functions from qlcnic_init.c */
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter);
+int qlcnic_load_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_need_fw_reset(struct qlcnic_adapter *adapter);
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_release_firmware(struct qlcnic_adapter *adapter);
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter);
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp);
+int qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size);
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter);
+
+void __iomem *qlcnic_get_ioaddr(struct qlcnic_adapter *, u32);
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter);
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter);
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter);
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter);
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter);
+void qlcnic_watchdog_task(struct work_struct *work);
+void qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+ struct qlcnic_host_rds_ring *rds_ring);
+int qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max);
+void qlcnic_set_multi(struct net_device *netdev);
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter);
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32);
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter);
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd);
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable);
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup);
+
+int qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu);
+int qlcnic_change_mtu(struct net_device *netdev, int new_mtu);
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable);
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
+void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring);
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac);
+void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
+int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
+
+/* Functions from qlcnic_main.c */
+int qlcnic_reset_context(struct qlcnic_adapter *);
+u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
+int qlcnic_check_loopback_buff(unsigned char *data);
+netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
+
+/*
+ * QLOGIC Board information
+ */
+
+#define QLCNIC_MAX_BOARD_NAME_LEN 100
+struct qlcnic_brdinfo {
+ unsigned short vendor;
+ unsigned short device;
+ unsigned short sub_vendor;
+ unsigned short sub_device;
+ char short_name[QLCNIC_MAX_BOARD_NAME_LEN];
+};
+
+static const struct qlcnic_brdinfo qlcnic_boards[] = {
+ {0x1077, 0x8020, 0x1077, 0x203,
+ "8200 Series Single Port 10GbE Converged Network Adapter \
+ (TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x207,
+ "8200 Series Dual Port 10GbE Converged Network Adapter \
+ (TCP/IP Networking)"},
+ {0x1077, 0x8020, 0x1077, 0x20b,
+ "3200 Series Dual Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20c,
+ "3200 Series Quad Port 1Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x1077, 0x20f,
+ "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
+ {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
+};
+
+#define NUM_SUPPORTED_BOARDS ARRAY_SIZE(qlcnic_boards)
+
+static inline u32 qlcnic_tx_avail(struct qlcnic_host_tx_ring *tx_ring)
+{
+ smp_mb();
+ if (tx_ring->producer < tx_ring->sw_consumer)
+ return tx_ring->sw_consumer - tx_ring->producer;
+ else
+ return tx_ring->sw_consumer + tx_ring->num_desc -
+ tx_ring->producer;
+}
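+
+/*
+ * Worked example for qlcnic_tx_avail(): with num_desc = 1024,
+ * sw_consumer = 5 and producer = 10, the producer is ahead of the
+ * consumer, so the available space is 5 + 1024 - 10 = 1019 descriptors.
+ */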
+
+extern const struct ethtool_ops qlcnic_ethtool_ops;
+
+#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
new file mode 100644
index 00000000000..0a6a39914ae
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "qlcnic.h"
+
+static u32
+qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
+{
+ u32 rsp;
+ int timeout = 0;
+
+ do {
+ /* give at least 1ms for firmware to respond */
+ msleep(1);
+
+ if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
+ return QLCNIC_CDRP_RSP_TIMEOUT;
+
+ rsp = QLCRD32(adapter, QLCNIC_CDRP_CRB_OFFSET);
+ } while (!QLCNIC_CDRP_IS_RSP(rsp));
+
+ return rsp;
+}
+
+u32
+qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
+ u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd)
+{
+ u32 rsp;
+ u32 signature;
+ u32 rcode = QLCNIC_RCODE_SUCCESS;
+ struct pci_dev *pdev = adapter->pdev;
+
+ signature = QLCNIC_CDRP_SIGNATURE_MAKE(pci_fn, version);
+
+ /* Acquire semaphore before accessing CRB */
+ if (qlcnic_api_lock(adapter))
+ return QLCNIC_RCODE_TIMEOUT;
+
+ QLCWR32(adapter, QLCNIC_SIGN_CRB_OFFSET, signature);
+ QLCWR32(adapter, QLCNIC_ARG1_CRB_OFFSET, arg1);
+ QLCWR32(adapter, QLCNIC_ARG2_CRB_OFFSET, arg2);
+ QLCWR32(adapter, QLCNIC_ARG3_CRB_OFFSET, arg3);
+ QLCWR32(adapter, QLCNIC_CDRP_CRB_OFFSET, QLCNIC_CDRP_FORM_CMD(cmd));
+
+ rsp = qlcnic_poll_rsp(adapter);
+
+ if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
+ dev_err(&pdev->dev, "card response timeout.\n");
+ rcode = QLCNIC_RCODE_TIMEOUT;
+ } else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
+ rcode = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+ dev_err(&pdev->dev, "failed card response code:0x%x\n",
+ rcode);
+ }
+
+ /* Release semaphore */
+ qlcnic_api_unlock(adapter);
+
+ return rcode;
+}
+
+int
+qlcnic_fw_cmd_set_mtu(struct qlcnic_adapter *adapter, int mtu)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->state == QLCNIC_HOST_CTX_STATE_ACTIVE) {
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ recv_ctx->context_id,
+ mtu,
+ 0,
+ QLCNIC_CDRP_CMD_SET_MTU)) {
+
+ dev_err(&adapter->pdev->dev, "Failed to set mtu\n");
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_fw_cmd_create_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ struct qlcnic_hostrq_rx_ctx *prq;
+ struct qlcnic_cardrsp_rx_ctx *prsp;
+ struct qlcnic_hostrq_rds_ring *prq_rds;
+ struct qlcnic_hostrq_sds_ring *prq_sds;
+ struct qlcnic_cardrsp_rds_ring *prsp_rds;
+ struct qlcnic_cardrsp_sds_ring *prsp_sds;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+
+ dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
+ u64 phys_addr;
+
+ int i, nrds_rings, nsds_rings;
+ size_t rq_size, rsp_size;
+ u32 cap, reg, val;
+ int err;
+
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ nrds_rings = adapter->max_rds_rings;
+ nsds_rings = adapter->max_sds_rings;
+
+ rq_size =
+ SIZEOF_HOSTRQ_RX(struct qlcnic_hostrq_rx_ctx, nrds_rings,
+ nsds_rings);
+ rsp_size =
+ SIZEOF_CARDRSP_RX(struct qlcnic_cardrsp_rx_ctx, nrds_rings,
+ nsds_rings);
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &hostrq_phys_addr);
+ if (addr == NULL)
+ return -ENOMEM;
+ prq = (struct qlcnic_hostrq_rx_ctx *)addr;
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &cardrsp_phys_addr);
+ if (addr == NULL) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+ prsp = (struct qlcnic_cardrsp_rx_ctx *)addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);
+
+ cap = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN);
+ cap |= (QLCNIC_CAP0_JUMBO_CONTIGUOUS | QLCNIC_CAP0_LRO_CONTIGUOUS);
+
+ prq->capabilities[0] = cpu_to_le32(cap);
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+ prq->host_rds_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_RDS_CRB_MODE_UNIQUE);
+
+ prq->num_rds_rings = cpu_to_le16(nrds_rings);
+ prq->num_sds_rings = cpu_to_le16(nsds_rings);
+ prq->rds_ring_offset = cpu_to_le32(0);
+
+ val = le32_to_cpu(prq->rds_ring_offset) +
+ (sizeof(struct qlcnic_hostrq_rds_ring) * nrds_rings);
+ prq->sds_ring_offset = cpu_to_le32(val);
+
+ prq_rds = (struct qlcnic_hostrq_rds_ring *)(prq->data +
+ le32_to_cpu(prq->rds_ring_offset));
+
+ for (i = 0; i < nrds_rings; i++) {
+
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
+ prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
+ prq_rds[i].ring_kind = cpu_to_le32(i);
+ prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
+ }
+
+ prq_sds = (struct qlcnic_hostrq_sds_ring *)(prq->data +
+ le32_to_cpu(prq->sds_ring_offset));
+
+ for (i = 0; i < nsds_rings; i++) {
+
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
+ prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
+ prq_sds[i].msi_index = cpu_to_le16(i);
+ }
+
+ phys_addr = hostrq_phys_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ (u32)(phys_addr & 0xffffffff),
+ rq_size,
+ QLCNIC_CDRP_CMD_CREATE_RX_CTX);
+ if (err) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create rx ctx in firmware%d\n", err);
+ goto out_free_rsp;
+ }
+
+
+ prsp_rds = ((struct qlcnic_cardrsp_rds_ring *)
+ &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
+ rds_ring = &recv_ctx->rds_rings[i];
+
+ reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
+ rds_ring->crb_rcv_producer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+ }
+
+ prsp_sds = ((struct qlcnic_cardrsp_sds_ring *)
+ &prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);
+
+ for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
+ sds_ring = &recv_ctx->sds_rings[i];
+
+ reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
+ sds_ring->crb_sts_consumer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+
+ reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
+ sds_ring->crb_intr_mask = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(reg - 0x200));
+ }
+
+ recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
+ recv_ctx->context_id = le16_to_cpu(prsp->context_id);
+ recv_ctx->virt_port = prsp->virt_port;
+
+out_free_rsp:
+ pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_rx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ recv_ctx->context_id,
+ QLCNIC_DESTROY_CTX_RESET,
+ 0,
+ QLCNIC_CDRP_CMD_DESTROY_RX_CTX)) {
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy rx ctx in firmware\n");
+ }
+}
+
+static int
+qlcnic_fw_cmd_create_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hostrq_tx_ctx *prq;
+ struct qlcnic_hostrq_cds_ring *prq_cds;
+ struct qlcnic_cardrsp_tx_ctx *prsp;
+ void *rq_addr, *rsp_addr;
+ size_t rq_size, rsp_size;
+ u32 temp;
+ int err;
+ u64 phys_addr;
+ dma_addr_t rq_phys_addr, rsp_phys_addr;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ rq_size = SIZEOF_HOSTRQ_TX(struct qlcnic_hostrq_tx_ctx);
+ rq_addr = pci_alloc_consistent(adapter->pdev,
+ rq_size, &rq_phys_addr);
+ if (!rq_addr)
+ return -ENOMEM;
+
+ rsp_size = SIZEOF_CARDRSP_TX(struct qlcnic_cardrsp_tx_ctx);
+ rsp_addr = pci_alloc_consistent(adapter->pdev,
+ rsp_size, &rsp_phys_addr);
+ if (!rsp_addr) {
+ err = -ENOMEM;
+ goto out_free_rq;
+ }
+
+ memset(rq_addr, 0, rq_size);
+ prq = (struct qlcnic_hostrq_tx_ctx *)rq_addr;
+
+ memset(rsp_addr, 0, rsp_size);
+ prsp = (struct qlcnic_cardrsp_tx_ctx *)rsp_addr;
+
+ prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);
+
+ temp = (QLCNIC_CAP0_LEGACY_CONTEXT | QLCNIC_CAP0_LEGACY_MN |
+ QLCNIC_CAP0_LSO);
+ prq->capabilities[0] = cpu_to_le32(temp);
+
+ prq->host_int_crb_mode =
+ cpu_to_le32(QLCNIC_HOST_INT_CRB_MODE_SHARED);
+
+ prq->interrupt_ctl = 0;
+ prq->msi_index = 0;
+ prq->cmd_cons_dma_addr = cpu_to_le64(tx_ring->hw_cons_phys_addr);
+
+ prq_cds = &prq->cds_ring;
+
+ prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
+ prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);
+
+ phys_addr = rq_phys_addr;
+ err = qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ (u32)(phys_addr >> 32),
+ ((u32)phys_addr & 0xffffffff),
+ rq_size,
+ QLCNIC_CDRP_CMD_CREATE_TX_CTX);
+
+ if (err == QLCNIC_RCODE_SUCCESS) {
+ temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
+ tx_ring->crb_cmd_producer = qlcnic_get_ioaddr(adapter,
+ QLCNIC_REG(temp - 0x200));
+
+ adapter->tx_context_id =
+ le16_to_cpu(prsp->context_id);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to create tx ctx in firmware%d\n", err);
+ err = -EIO;
+ }
+
+ pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);
+
+out_free_rq:
+ pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);
+
+ return err;
+}
+
+static void
+qlcnic_fw_cmd_destroy_tx_ctx(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ adapter->tx_context_id,
+ QLCNIC_DESTROY_CTX_RESET,
+ 0,
+ QLCNIC_CDRP_CMD_DESTROY_TX_CTX)) {
+
+ dev_err(&adapter->pdev->dev,
+ "Failed to destroy tx ctx in firmware\n");
+ }
+}
+
+int
+qlcnic_fw_cmd_query_phy(struct qlcnic_adapter *adapter, u32 reg, u32 *val)
+{
+
+ if (qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ reg,
+ 0,
+ 0,
+ QLCNIC_CDRP_CMD_READ_PHY)) {
+
+ return -EIO;
+ }
+
+ *val = QLCRD32(adapter, QLCNIC_ARG1_CRB_OFFSET);
+
+ return 0;
+}
+
+int
+qlcnic_fw_cmd_set_phy(struct qlcnic_adapter *adapter, u32 reg, u32 val)
+{
+ return qlcnic_issue_cmd(adapter,
+ adapter->ahw.pci_func,
+ QLCHAL_VERSION,
+ reg,
+ val,
+ 0,
+ QLCNIC_CDRP_CMD_WRITE_PHY);
+}
+
+int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
+{
+ void *addr;
+ int err;
+ int ring;
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ struct pci_dev *pdev = adapter->pdev;
+
+ recv_ctx = &adapter->recv_ctx;
+ tx_ring = adapter->tx_ring;
+
+ tx_ring->hw_consumer = (__le32 *)pci_alloc_consistent(pdev, sizeof(u32),
+ &tx_ring->hw_cons_phys_addr);
+ if (tx_ring->hw_consumer == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx consumer\n");
+ return -ENOMEM;
+ }
+ *(tx_ring->hw_consumer) = 0;
+
+ /* cmd desc ring */
+ addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
+ &tx_ring->phys_addr);
+
+ if (addr == NULL) {
+ dev_err(&pdev->dev, "failed to allocate tx desc ring\n");
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+
+ tx_ring->desc_head = (struct cmd_desc_type0 *)addr;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ addr = pci_alloc_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ &rds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate rds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ rds_ring->desc_head = (struct rcv_desc *)addr;
+
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ addr = pci_alloc_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ &sds_ring->phys_addr);
+ if (addr == NULL) {
+ dev_err(&pdev->dev,
+ "failed to allocate sds ring [%d]\n", ring);
+ err = -ENOMEM;
+ goto err_out_free;
+ }
+ sds_ring->desc_head = (struct status_desc *)addr;
+ }
+
+
+ err = qlcnic_fw_cmd_create_rx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+ err = qlcnic_fw_cmd_create_tx_ctx(adapter);
+ if (err)
+ goto err_out_free;
+
+ set_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+ return 0;
+
+err_out_free:
+ qlcnic_free_hw_resources(adapter);
+ return err;
+}
+
+void qlcnic_free_hw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+
+ if (test_and_clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) {
+ qlcnic_fw_cmd_destroy_rx_ctx(adapter);
+ qlcnic_fw_cmd_destroy_tx_ctx(adapter);
+
+ /* Allow dma queues to drain after context reset */
+ msleep(20);
+ }
+
+ recv_ctx = &adapter->recv_ctx;
+
+ tx_ring = adapter->tx_ring;
+ if (tx_ring->hw_consumer != NULL) {
+ pci_free_consistent(adapter->pdev,
+ sizeof(u32),
+ tx_ring->hw_consumer,
+ tx_ring->hw_cons_phys_addr);
+ tx_ring->hw_consumer = NULL;
+ }
+
+ if (tx_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ TX_DESC_RINGSIZE(tx_ring),
+ tx_ring->desc_head, tx_ring->phys_addr);
+ tx_ring->desc_head = NULL;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ if (rds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ RCV_DESC_RINGSIZE(rds_ring),
+ rds_ring->desc_head,
+ rds_ring->phys_addr);
+ rds_ring->desc_head = NULL;
+ }
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+
+ if (sds_ring->desc_head != NULL) {
+ pci_free_consistent(adapter->pdev,
+ STATUS_DESC_RINGSIZE(sds_ring),
+ sds_ring->desc_head,
+ sds_ring->phys_addr);
+ sds_ring->desc_head = NULL;
+ }
+ }
+}
+
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
new file mode 100644
index 00000000000..8da6ec8c13b
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -0,0 +1,1015 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/ethtool.h>
+
+#include "qlcnic.h"
+
+struct qlcnic_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define QLC_SIZEOF(m) FIELD_SIZEOF(struct qlcnic_adapter, m)
+#define QLC_OFF(m) offsetof(struct qlcnic_adapter, m)
+
+static const struct qlcnic_stats qlcnic_gstrings_stats[] = {
+ {"xmit_called",
+ QLC_SIZEOF(stats.xmitcalled), QLC_OFF(stats.xmitcalled)},
+ {"xmit_finished",
+ QLC_SIZEOF(stats.xmitfinished), QLC_OFF(stats.xmitfinished)},
+ {"rx_dropped",
+ QLC_SIZEOF(stats.rxdropped), QLC_OFF(stats.rxdropped)},
+ {"tx_dropped",
+ QLC_SIZEOF(stats.txdropped), QLC_OFF(stats.txdropped)},
+ {"csummed",
+ QLC_SIZEOF(stats.csummed), QLC_OFF(stats.csummed)},
+ {"rx_pkts",
+ QLC_SIZEOF(stats.rx_pkts), QLC_OFF(stats.rx_pkts)},
+ {"lro_pkts",
+ QLC_SIZEOF(stats.lro_pkts), QLC_OFF(stats.lro_pkts)},
+ {"rx_bytes",
+ QLC_SIZEOF(stats.rxbytes), QLC_OFF(stats.rxbytes)},
+ {"tx_bytes",
+ QLC_SIZEOF(stats.txbytes), QLC_OFF(stats.txbytes)},
+};
+
+#define QLCNIC_STATS_LEN ARRAY_SIZE(qlcnic_gstrings_stats)
+
+static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
+ "Register_Test_on_offline",
+ "Link_Test_on_offline",
+ "Interrupt_Test_offline",
+ "Loopback_Test_offline"
+};
+
+#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
+
+#define QLCNIC_RING_REGS_COUNT 20
+#define QLCNIC_RING_REGS_LEN (QLCNIC_RING_REGS_COUNT * sizeof(u32))
+#define QLCNIC_MAX_EEPROM_LEN 1024
+
+static const u32 diag_registers[] = {
+ CRB_CMDPEG_STATE,
+ CRB_RCVPEG_STATE,
+ CRB_XG_STATE_P3,
+ CRB_FW_CAPABILITIES_1,
+ ISR_INT_STATE_REG,
+ QLCNIC_CRB_DEV_REF_COUNT,
+ QLCNIC_CRB_DEV_STATE,
+ QLCNIC_CRB_DRV_STATE,
+ QLCNIC_CRB_DRV_SCRATCH,
+ QLCNIC_CRB_DEV_PARTITION_INFO,
+ QLCNIC_CRB_DRV_IDC_VER,
+ QLCNIC_PEG_ALIVE_COUNTER,
+ QLCNIC_PEG_HALT_STATUS1,
+ QLCNIC_PEG_HALT_STATUS2,
+ QLCNIC_CRB_PEG_NET_0+0x3c,
+ QLCNIC_CRB_PEG_NET_1+0x3c,
+ QLCNIC_CRB_PEG_NET_2+0x3c,
+ QLCNIC_CRB_PEG_NET_4+0x3c,
+ -1
+};
+
+static int qlcnic_get_regs_len(struct net_device *dev)
+{
+ return sizeof(diag_registers) + QLCNIC_RING_REGS_LEN;
+}
+
+static int qlcnic_get_eeprom_len(struct net_device *dev)
+{
+ return QLCNIC_FLASH_TOTAL_SIZE;
+}
+
+static void
+qlcnic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 fw_major, fw_minor, fw_build;
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+ sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
+
+ strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+ strlcpy(drvinfo->driver, qlcnic_driver_name, 32);
+ strlcpy(drvinfo->version, QLCNIC_LINUX_VERSIONID, 32);
+}
+
+static int
+qlcnic_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int check_sfp_module = 0;
+ u16 pcifn = adapter->ahw.pci_func;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ ecmd->supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full);
+
+ ecmd->advertising = (ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full);
+
+ ecmd->speed = adapter->link_speed;
+ ecmd->duplex = adapter->link_duplex;
+ ecmd->autoneg = adapter->link_autoneg;
+
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ u32 val;
+
+ val = QLCRD32(adapter, QLCNIC_PORT_MODE_ADDR);
+ if (val == QLCNIC_PORT_MODE_802_3_AP) {
+ ecmd->supported = SUPPORTED_1000baseT_Full;
+ ecmd->advertising = ADVERTISED_1000baseT_Full;
+ } else {
+ ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->advertising = ADVERTISED_10000baseT_Full;
+ }
+
+ if (netif_running(dev) && adapter->has_link_events) {
+ ecmd->speed = adapter->link_speed;
+ ecmd->autoneg = adapter->link_autoneg;
+ ecmd->duplex = adapter->link_duplex;
+ goto skip;
+ }
+
+ val = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
+ ecmd->speed = P3_LINK_SPEED_MHZ *
+ P3_LINK_SPEED_VAL(pcifn, val);
+ ecmd->duplex = DUPLEX_FULL;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ } else
+ return -EIO;
+
+skip:
+ ecmd->phy_address = adapter->physical_port;
+ ecmd->transceiver = XCVR_EXTERNAL;
+
+ switch (adapter->ahw.board_type) {
+ case QLCNIC_BRDTYPE_P3_REF_QG:
+ case QLCNIC_BRDTYPE_P3_4_GB:
+ case QLCNIC_BRDTYPE_P3_4_GB_MM:
+
+ ecmd->supported |= SUPPORTED_Autoneg;
+ ecmd->advertising |= ADVERTISED_Autoneg;
+ case QLCNIC_BRDTYPE_P3_10G_CX4:
+ case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ ecmd->supported |= SUPPORTED_TP;
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->port = PORT_TP;
+ ecmd->autoneg = adapter->link_autoneg;
+ break;
+ case QLCNIC_BRDTYPE_P3_IMEZ:
+ case QLCNIC_BRDTYPE_P3_XG_LOM:
+ case QLCNIC_BRDTYPE_P3_HMEZ:
+ ecmd->supported |= SUPPORTED_MII;
+ ecmd->advertising |= ADVERTISED_MII;
+ ecmd->port = PORT_MII;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+ ecmd->advertising |= ADVERTISED_TP;
+ ecmd->supported |= SUPPORTED_TP;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ case QLCNIC_BRDTYPE_P3_10G_XFP:
+ ecmd->supported |= SUPPORTED_FIBRE;
+ ecmd->advertising |= ADVERTISED_FIBRE;
+ ecmd->port = PORT_FIBRE;
+ ecmd->autoneg = AUTONEG_DISABLE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_TP:
+ if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ ecmd->autoneg = AUTONEG_DISABLE;
+ ecmd->supported |= (SUPPORTED_FIBRE | SUPPORTED_TP);
+ ecmd->advertising |=
+ (ADVERTISED_FIBRE | ADVERTISED_TP);
+ ecmd->port = PORT_FIBRE;
+ check_sfp_module = netif_running(dev) &&
+ adapter->has_link_events;
+ } else {
+ ecmd->autoneg = AUTONEG_ENABLE;
+ ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
+ ecmd->advertising |=
+ (ADVERTISED_TP | ADVERTISED_Autoneg);
+ ecmd->port = PORT_TP;
+ }
+ break;
+ default:
+ dev_err(&adapter->pdev->dev, "Unsupported board model %d\n",
+ adapter->ahw.board_type);
+ return -EIO;
+ }
+
+ if (check_sfp_module) {
+ switch (adapter->module_type) {
+ case LINKEVENT_MODULE_OPTICAL_UNKNOWN:
+ case LINKEVENT_MODULE_OPTICAL_SRLR:
+ case LINKEVENT_MODULE_OPTICAL_LRM:
+ case LINKEVENT_MODULE_OPTICAL_SFP_1G:
+ ecmd->port = PORT_FIBRE;
+ break;
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE:
+ case LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN:
+ case LINKEVENT_MODULE_TWINAX:
+ ecmd->port = PORT_TP;
+ break;
+ default:
+ ecmd->port = PORT_OTHER;
+ }
+ }
+
+ return 0;
+}
+
+static int
+qlcnic_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ __u32 status;
+
+ /* read which mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ /* autonegotiation */
+ if (qlcnic_fw_cmd_set_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG,
+ ecmd->autoneg) != 0)
+ return -EIO;
+ else
+ adapter->link_autoneg = ecmd->autoneg;
+
+ if (qlcnic_fw_cmd_query_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ &status) != 0)
+ return -EIO;
+
+ switch (ecmd->speed) {
+ case SPEED_10:
+ qlcnic_set_phy_speed(status, 0);
+ break;
+ case SPEED_100:
+ qlcnic_set_phy_speed(status, 1);
+ break;
+ case SPEED_1000:
+ qlcnic_set_phy_speed(status, 2);
+ break;
+ }
+
+ if (ecmd->duplex == DUPLEX_HALF)
+ qlcnic_clear_phy_duplex(status);
+ if (ecmd->duplex == DUPLEX_FULL)
+ qlcnic_set_phy_duplex(status);
+ if (qlcnic_fw_cmd_set_phy(adapter,
+ QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS,
+ status) != 0)
+ return -EIO;
+ else {
+ adapter->link_speed = ecmd->speed;
+ adapter->link_duplex = ecmd->duplex;
+ }
+ } else
+ return -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return 0;
+
+ dev->netdev_ops->ndo_stop(dev);
+ return dev->netdev_ops->ndo_open(dev);
+}
+
+static void
+qlcnic_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring;
+ u32 *regs_buff = p;
+ int ring, i = 0;
+
+ memset(p, 0, qlcnic_get_regs_len(dev));
+ regs->version = (1 << 24) | (adapter->ahw.revision_id << 16) |
+ (adapter->pdev)->device;
+
+ for (i = 0; diag_registers[i] != -1; i++)
+ regs_buff[i] = QLCRD32(adapter, diag_registers[i]);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ regs_buff[i++] = 0xFFEFCDAB; /* Marker between regs and ring count */
+
+ regs_buff[i++] = 1; /* No. of tx ring */
+ regs_buff[i++] = le32_to_cpu(*(adapter->tx_ring->hw_consumer));
+ regs_buff[i++] = readl(adapter->tx_ring->crb_cmd_producer);
+
+ regs_buff[i++] = 2; /* No. of rx ring */
+ regs_buff[i++] = readl(recv_ctx->rds_rings[0].crb_rcv_producer);
+ regs_buff[i++] = readl(recv_ctx->rds_rings[1].crb_rcv_producer);
+
+ regs_buff[i++] = adapter->max_sds_rings;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &(recv_ctx->sds_rings[ring]);
+ regs_buff[i++] = readl(sds_ring->crb_sts_consumer);
+ }
+}
+
+static u32 qlcnic_test_link(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 val;
+
+ val = QLCRD32(adapter, CRB_XG_STATE_P3);
+ val = XG_LINK_STATE_P3(adapter->ahw.pci_func, val);
+ return (val == XG_LINK_UP_P3) ? 0 : 1;
+}
+
+static int
+qlcnic_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+ u8 *bytes)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int offset;
+ int ret;
+
+ if (eeprom->len == 0)
+ return -EINVAL;
+
+ eeprom->magic = (adapter->pdev)->vendor |
+ ((adapter->pdev)->device << 16);
+ offset = eeprom->offset;
+
+ ret = qlcnic_rom_fast_read_words(adapter, offset, bytes,
+ eeprom->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void
+qlcnic_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ ring->rx_pending = adapter->num_rxd;
+ ring->rx_jumbo_pending = adapter->num_jumbo_rxd;
+ ring->rx_jumbo_pending += adapter->num_lro_rxd;
+ ring->tx_pending = adapter->num_txd;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_1G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ } else {
+ ring->rx_max_pending = MAX_RCV_DESCRIPTORS_10G;
+ ring->rx_jumbo_max_pending = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ }
+
+ ring->tx_max_pending = MAX_CMD_DESCRIPTORS;
+
+ ring->rx_mini_max_pending = 0;
+ ring->rx_mini_pending = 0;
+}
+
+static u32
+qlcnic_validate_ringparam(u32 val, u32 min, u32 max, char *r_name)
+{
+ u32 num_desc;
+ num_desc = max(val, min);
+ num_desc = min(num_desc, max);
+ num_desc = roundup_pow_of_two(num_desc);
+
+ if (val != num_desc) {
+ printk(KERN_INFO "%s: setting %s ring size %d instead of %d\n",
+ qlcnic_driver_name, r_name, num_desc, val);
+ }
+
+ return num_desc;
+}
+
+static int
+qlcnic_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u16 max_rcv_desc = MAX_RCV_DESCRIPTORS_10G;
+ u16 max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ u16 num_rxd, num_jumbo_rxd, num_txd;
+
+
+ if (ring->rx_mini_pending)
+ return -EOPNOTSUPP;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ max_rcv_desc = MAX_RCV_DESCRIPTORS_1G;
+ max_jumbo_desc = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ num_rxd = qlcnic_validate_ringparam(ring->rx_pending,
+ MIN_RCV_DESCRIPTORS, max_rcv_desc, "rx");
+
+ num_jumbo_rxd = qlcnic_validate_ringparam(ring->rx_jumbo_pending,
+ MIN_JUMBO_DESCRIPTORS, max_jumbo_desc, "rx jumbo");
+
+ num_txd = qlcnic_validate_ringparam(ring->tx_pending,
+ MIN_CMD_DESCRIPTORS, MAX_CMD_DESCRIPTORS, "tx");
+
+ if (num_rxd == adapter->num_rxd && num_txd == adapter->num_txd &&
+ num_jumbo_rxd == adapter->num_jumbo_rxd)
+ return 0;
+
+ adapter->num_rxd = num_rxd;
+ adapter->num_jumbo_rxd = num_jumbo_rxd;
+ adapter->num_txd = num_txd;
+
+ return qlcnic_reset_context(adapter);
+}
+
+static void
+qlcnic_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+ return;
+ /* get flow control settings */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+ pause->rx_pause = qlcnic_gb_get_rx_flowctl(val);
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ pause->tx_pause = !(qlcnic_gb_get_gb0_mask(val));
+ break;
+ case 1:
+ pause->tx_pause = !(qlcnic_gb_get_gb1_mask(val));
+ break;
+ case 2:
+ pause->tx_pause = !(qlcnic_gb_get_gb2_mask(val));
+ break;
+ case 3:
+ default:
+ pause->tx_pause = !(qlcnic_gb_get_gb3_mask(val));
+ break;
+ }
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+ return;
+ pause->rx_pause = 1;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0)
+ pause->tx_pause = !(qlcnic_xg_get_xg0_mask(val));
+ else
+ pause->tx_pause = !(qlcnic_xg_get_xg1_mask(val));
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw.port_type);
+ }
+}
+
+static int
+qlcnic_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int port = adapter->physical_port;
+ __u32 val;
+
+ /* read mode */
+ if (adapter->ahw.port_type == QLCNIC_GBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_GBE_PORTS))
+ return -EIO;
+ /* set flow control */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
+
+ if (pause->rx_pause)
+ qlcnic_gb_rx_flowctl(val);
+ else
+ qlcnic_gb_unset_rx_flowctl(val);
+
+ QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port),
+ val);
+ /* set autoneg */
+ val = QLCRD32(adapter, QLCNIC_NIU_GB_PAUSE_CTL);
+ switch (port) {
+ case 0:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb0_mask(val);
+ else
+ qlcnic_gb_set_gb0_mask(val);
+ break;
+ case 1:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb1_mask(val);
+ else
+ qlcnic_gb_set_gb1_mask(val);
+ break;
+ case 2:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb2_mask(val);
+ else
+ qlcnic_gb_set_gb2_mask(val);
+ break;
+ case 3:
+ default:
+ if (pause->tx_pause)
+ qlcnic_gb_unset_gb3_mask(val);
+ else
+ qlcnic_gb_set_gb3_mask(val);
+ break;
+ }
+ QLCWR32(adapter, QLCNIC_NIU_GB_PAUSE_CTL, val);
+ } else if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ if ((port < 0) || (port > QLCNIC_NIU_MAX_XG_PORTS))
+ return -EIO;
+ val = QLCRD32(adapter, QLCNIC_NIU_XG_PAUSE_CTL);
+ if (port == 0) {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg0_mask(val);
+ else
+ qlcnic_xg_set_xg0_mask(val);
+ } else {
+ if (pause->tx_pause)
+ qlcnic_xg_unset_xg1_mask(val);
+ else
+ qlcnic_xg_set_xg1_mask(val);
+ }
+ QLCWR32(adapter, QLCNIC_NIU_XG_PAUSE_CTL, val);
+ } else {
+ dev_err(&netdev->dev, "Unknown board type: %x\n",
+ adapter->ahw.port_type);
+ }
+ return 0;
+}
+
+static int qlcnic_reg_test(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 data_read, data_written;
+
+ data_read = QLCRD32(adapter, QLCNIC_PCIX_PH_REG(0));
+ if ((data_read & 0xffff) != adapter->pdev->vendor)
+ return 1;
+
+ data_written = (u32)0xa5a5a5a5;
+
+ QLCWR32(adapter, CRB_SCRATCHPAD_TEST, data_written);
+ data_read = QLCRD32(adapter, CRB_SCRATCHPAD_TEST);
+ if (data_written != data_read)
+ return 1;
+
+ return 0;
+}
+
+static int qlcnic_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_TEST:
+ return QLCNIC_TEST_LEN;
+ case ETH_SS_STATS:
+ return QLCNIC_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+#define QLC_ILB_PKT_SIZE 64
+
+static void qlcnic_create_loopback_buff(unsigned char *data)
+{
+ unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
+ memset(data, 0x4e, QLC_ILB_PKT_SIZE);
+ memset(data, 0xff, 12);
+ memcpy(data + 12, random_data, sizeof(random_data));
+}
+
+int qlcnic_check_loopback_buff(unsigned char *data)
+{
+ unsigned char buff[QLC_ILB_PKT_SIZE];
+ qlcnic_create_loopback_buff(buff);
+ return memcmp(data, buff, QLC_ILB_PKT_SIZE);
+}
+
+static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
+ struct sk_buff *skb;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
+ if (!skb)
+ return -ENOMEM;
+ qlcnic_create_loopback_buff(skb->data);
+ skb_put(skb, QLC_ILB_PKT_SIZE);
+
+ adapter->diag_cnt = 0;
+
+ qlcnic_xmit_frame(skb, adapter->netdev);
+
+ msleep(5);
+
+ qlcnic_process_rcv_ring_diag(sds_ring);
+
+ dev_kfree_skb_any(skb);
+ if (!adapter->diag_cnt)
+ return -1;
+ }
+ return 0;
+}
+
+static int qlcnic_loopback_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int ret;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
+ if (ret)
+ goto clear_it;
+
+ ret = qlcnic_set_ilb_mode(adapter);
+ if (ret)
+ goto done;
+
+ ret = qlcnic_do_ilb_test(adapter);
+
+ qlcnic_clear_ilb_mode(adapter);
+
+done:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static int qlcnic_irq_test(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int max_sds_rings = adapter->max_sds_rings;
+ int ret;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EIO;
+
+ ret = qlcnic_diag_alloc_res(netdev, QLCNIC_INTERRUPT_TEST);
+ if (ret)
+ goto clear_it;
+
+ adapter->diag_cnt = 0;
+ ret = qlcnic_issue_cmd(adapter, adapter->ahw.pci_func,
+ QLCHAL_VERSION, adapter->portnum, 0, 0, 0x00000011);
+ if (ret)
+ goto done;
+
+ msleep(10);
+
+ ret = !adapter->diag_cnt;
+
+done:
+ qlcnic_diag_free_res(netdev, max_sds_rings);
+
+clear_it:
+ adapter->max_sds_rings = max_sds_rings;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return ret;
+}
+
+static void
+qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
+ u64 *data)
+{
+ memset(data, 0, sizeof(u64) * QLCNIC_TEST_LEN);
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ data[2] = qlcnic_irq_test(dev);
+ if (data[2])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ data[3] = qlcnic_loopback_test(dev);
+ if (data[3])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ }
+
+ data[0] = qlcnic_reg_test(dev);
+ if (data[0])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ /* link test */
+ data[1] = (u64) qlcnic_test_link(dev);
+ if (data[1])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+}
+
+static void
+qlcnic_get_strings(struct net_device *dev, u32 stringset, u8 * data)
+{
+ int index;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ memcpy(data, *qlcnic_gstrings_test,
+ QLCNIC_TEST_LEN * ETH_GSTRING_LEN);
+ break;
+ case ETH_SS_STATS:
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ memcpy(data + index * ETH_GSTRING_LEN,
+ qlcnic_gstrings_stats[index].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void
+qlcnic_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 * data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int index;
+
+ for (index = 0; index < QLCNIC_STATS_LEN; index++) {
+ char *p =
+ (char *)adapter +
+ qlcnic_gstrings_stats[index].stat_offset;
+ data[index] =
+ (qlcnic_gstrings_stats[index].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p:(*(u32 *)p);
+ }
+}
+
+static u32 qlcnic_get_rx_csum(struct net_device *dev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ return adapter->rx_csum;
+}
+
+static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ adapter->rx_csum = !!data;
+ return 0;
+}
+
+static u32 qlcnic_get_tso(struct net_device *dev)
+{
+ return (dev->features & (NETIF_F_TSO | NETIF_F_TSO6)) != 0;
+}
+
+static int qlcnic_set_tso(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= (NETIF_F_TSO | NETIF_F_TSO6);
+ else
+ dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+
+ return 0;
+}
+
+static int qlcnic_blink_led(struct net_device *dev, u32 val)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ int ret;
+
+ ret = qlcnic_config_led(adapter, 1, 0xf);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to set LED blink state.\n");
+ return ret;
+ }
+
+ msleep_interruptible(val * 1000);
+
+ ret = qlcnic_config_led(adapter, 0, 0xf);
+ if (ret) {
+ dev_err(&adapter->pdev->dev,
+ "Failed to reset LED blink state.\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->supported |= WAKE_MAGIC;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1UL << adapter->portnum))
+ wol->wolopts |= WAKE_MAGIC;
+}
+
+static int
+qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+ u32 wol_cfg;
+
+ if (wol->wolopts & ~WAKE_MAGIC)
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (!(wol_cfg & (1 << adapter->portnum)))
+ return -EOPNOTSUPP;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol->wolopts & WAKE_MAGIC)
+ wol_cfg |= 1UL << adapter->portnum;
+ else
+ wol_cfg &= ~(1UL << adapter->portnum);
+
+ QLCWR32(adapter, QLCNIC_WOL_CONFIG, wol_cfg);
+
+ return 0;
+}
+
+/*
+ * Set the coalescing parameters. Currently only normal is supported.
+ * If rx_coalesce_usecs == 0 or rx_max_coalesced_frames == 0 then set the
+ * firmware coalescing to default.
+ */
+static int qlcnic_set_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ /*
+ * Return Error if unsupported values or
+ * unsupported parameters are set.
+ */
+ if (ethcoal->rx_coalesce_usecs > 0xffff ||
+ ethcoal->rx_max_coalesced_frames > 0xffff ||
+ ethcoal->tx_coalesce_usecs > 0xffff ||
+ ethcoal->tx_max_coalesced_frames > 0xffff ||
+ ethcoal->rx_coalesce_usecs_irq ||
+ ethcoal->rx_max_coalesced_frames_irq ||
+ ethcoal->tx_coalesce_usecs_irq ||
+ ethcoal->tx_max_coalesced_frames_irq ||
+ ethcoal->stats_block_coalesce_usecs ||
+ ethcoal->use_adaptive_rx_coalesce ||
+ ethcoal->use_adaptive_tx_coalesce ||
+ ethcoal->pkt_rate_low ||
+ ethcoal->rx_coalesce_usecs_low ||
+ ethcoal->rx_max_coalesced_frames_low ||
+ ethcoal->tx_coalesce_usecs_low ||
+ ethcoal->tx_max_coalesced_frames_low ||
+ ethcoal->pkt_rate_high ||
+ ethcoal->rx_coalesce_usecs_high ||
+ ethcoal->rx_max_coalesced_frames_high ||
+ ethcoal->tx_coalesce_usecs_high ||
+ ethcoal->tx_max_coalesced_frames_high)
+ return -EINVAL;
+
+ if (!ethcoal->rx_coalesce_usecs ||
+ !ethcoal->rx_max_coalesced_frames) {
+ adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ } else {
+ adapter->coal.flags = 0;
+ adapter->coal.normal.data.rx_time_us =
+ ethcoal->rx_coalesce_usecs;
+ adapter->coal.normal.data.rx_packets =
+ ethcoal->rx_max_coalesced_frames;
+ }
+ adapter->coal.normal.data.tx_time_us = ethcoal->tx_coalesce_usecs;
+ adapter->coal.normal.data.tx_packets =
+ ethcoal->tx_max_coalesced_frames;
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ return 0;
+}
+
+static int qlcnic_get_intr_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ethcoal)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EINVAL;
+
+ ethcoal->rx_coalesce_usecs = adapter->coal.normal.data.rx_time_us;
+ ethcoal->tx_coalesce_usecs = adapter->coal.normal.data.tx_time_us;
+ ethcoal->rx_max_coalesced_frames =
+ adapter->coal.normal.data.rx_packets;
+ ethcoal->tx_max_coalesced_frames =
+ adapter->coal.normal.data.tx_packets;
+
+ return 0;
+}
+
+static int qlcnic_set_flags(struct net_device *netdev, u32 data)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int hw_lro;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO))
+ return -EINVAL;
+
+ ethtool_op_set_flags(netdev, data);
+
+ hw_lro = (data & ETH_FLAG_LRO) ? QLCNIC_LRO_ENABLED : 0;
+
+ if (qlcnic_config_hw_lro(adapter, hw_lro))
+ return -EIO;
+
+ if ((hw_lro == 0) && qlcnic_send_lro_cleanup(adapter))
+ return -EIO;
+
+ return 0;
+}
+
+const struct ethtool_ops qlcnic_ethtool_ops = {
+ .get_settings = qlcnic_get_settings,
+ .set_settings = qlcnic_set_settings,
+ .get_drvinfo = qlcnic_get_drvinfo,
+ .get_regs_len = qlcnic_get_regs_len,
+ .get_regs = qlcnic_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = qlcnic_get_eeprom_len,
+ .get_eeprom = qlcnic_get_eeprom,
+ .get_ringparam = qlcnic_get_ringparam,
+ .set_ringparam = qlcnic_set_ringparam,
+ .get_pauseparam = qlcnic_get_pauseparam,
+ .set_pauseparam = qlcnic_set_pauseparam,
+ .set_tx_csum = ethtool_op_set_tx_csum,
+ .set_sg = ethtool_op_set_sg,
+ .get_tso = qlcnic_get_tso,
+ .set_tso = qlcnic_set_tso,
+ .get_wol = qlcnic_get_wol,
+ .set_wol = qlcnic_set_wol,
+ .self_test = qlcnic_diag_test,
+ .get_strings = qlcnic_get_strings,
+ .get_ethtool_stats = qlcnic_get_ethtool_stats,
+ .get_sset_count = qlcnic_get_sset_count,
+ .get_rx_csum = qlcnic_get_rx_csum,
+ .set_rx_csum = qlcnic_set_rx_csum,
+ .get_coalesce = qlcnic_get_intr_coalesce,
+ .set_coalesce = qlcnic_set_intr_coalesce,
+ .get_flags = ethtool_op_get_flags,
+ .set_flags = qlcnic_set_flags,
+ .phys_id = qlcnic_blink_led,
+};
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
new file mode 100644
index 00000000000..0469f84360a
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -0,0 +1,937 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#ifndef __QLCNIC_HDR_H_
+#define __QLCNIC_HDR_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+/*
+ * The basic unit of access when reading/writing control registers.
+ */
+
+enum {
+ QLCNIC_HW_H0_CH_HUB_ADR = 0x05,
+ QLCNIC_HW_H1_CH_HUB_ADR = 0x0E,
+ QLCNIC_HW_H2_CH_HUB_ADR = 0x03,
+ QLCNIC_HW_H3_CH_HUB_ADR = 0x01,
+ QLCNIC_HW_H4_CH_HUB_ADR = 0x06,
+ QLCNIC_HW_H5_CH_HUB_ADR = 0x07,
+ QLCNIC_HW_H6_CH_HUB_ADR = 0x08
+};
+
+/* Hub 0 */
+enum {
+ QLCNIC_HW_MN_CRB_AGT_ADR = 0x15,
+ QLCNIC_HW_MS_CRB_AGT_ADR = 0x25
+};
+
+/* Hub 1 */
+enum {
+ QLCNIC_HW_PS_CRB_AGT_ADR = 0x73,
+ QLCNIC_HW_SS_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_RPMX3_CRB_AGT_ADR = 0x0b,
+ QLCNIC_HW_QMS_CRB_AGT_ADR = 0x00,
+ QLCNIC_HW_SQGS0_CRB_AGT_ADR = 0x01,
+ QLCNIC_HW_SQGS1_CRB_AGT_ADR = 0x02,
+ QLCNIC_HW_SQGS2_CRB_AGT_ADR = 0x03,
+ QLCNIC_HW_SQGS3_CRB_AGT_ADR = 0x04,
+ QLCNIC_HW_C2C0_CRB_AGT_ADR = 0x58,
+ QLCNIC_HW_C2C1_CRB_AGT_ADR = 0x59,
+ QLCNIC_HW_C2C2_CRB_AGT_ADR = 0x5a,
+ QLCNIC_HW_RPMX2_CRB_AGT_ADR = 0x0a,
+ QLCNIC_HW_RPMX4_CRB_AGT_ADR = 0x0c,
+ QLCNIC_HW_RPMX7_CRB_AGT_ADR = 0x0f,
+ QLCNIC_HW_RPMX9_CRB_AGT_ADR = 0x12,
+ QLCNIC_HW_SMB_CRB_AGT_ADR = 0x18
+};
+
+/* Hub 2 */
+enum {
+ QLCNIC_HW_NIU_CRB_AGT_ADR = 0x31,
+ QLCNIC_HW_I2C0_CRB_AGT_ADR = 0x19,
+ QLCNIC_HW_I2C1_CRB_AGT_ADR = 0x29,
+
+ QLCNIC_HW_SN_CRB_AGT_ADR = 0x10,
+ QLCNIC_HW_I2Q_CRB_AGT_ADR = 0x20,
+ QLCNIC_HW_LPC_CRB_AGT_ADR = 0x22,
+ QLCNIC_HW_ROMUSB_CRB_AGT_ADR = 0x21,
+ QLCNIC_HW_QM_CRB_AGT_ADR = 0x66,
+ QLCNIC_HW_SQG0_CRB_AGT_ADR = 0x60,
+ QLCNIC_HW_SQG1_CRB_AGT_ADR = 0x61,
+ QLCNIC_HW_SQG2_CRB_AGT_ADR = 0x62,
+ QLCNIC_HW_SQG3_CRB_AGT_ADR = 0x63,
+ QLCNIC_HW_RPMX1_CRB_AGT_ADR = 0x09,
+ QLCNIC_HW_RPMX5_CRB_AGT_ADR = 0x0d,
+ QLCNIC_HW_RPMX6_CRB_AGT_ADR = 0x0e,
+ QLCNIC_HW_RPMX8_CRB_AGT_ADR = 0x11
+};
+
+/* Hub 3 */
+enum {
+ QLCNIC_HW_PH_CRB_AGT_ADR = 0x1A,
+ QLCNIC_HW_SRE_CRB_AGT_ADR = 0x50,
+ QLCNIC_HW_EG_CRB_AGT_ADR = 0x51,
+ QLCNIC_HW_RPMX0_CRB_AGT_ADR = 0x08
+};
+
+/* Hub 4 */
+enum {
+ QLCNIC_HW_PEGN0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGN1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGND_CRB_AGT_ADR,
+ QLCNIC_HW_PEGNC_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR0_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGR3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGN4_CRB_AGT_ADR
+};
+
+/* Hub 5 */
+enum {
+ QLCNIC_HW_PEGS0_CRB_AGT_ADR = 0x40,
+ QLCNIC_HW_PEGS1_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS2_CRB_AGT_ADR,
+ QLCNIC_HW_PEGS3_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSI_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSD_CRB_AGT_ADR,
+ QLCNIC_HW_PEGSC_CRB_AGT_ADR
+};
+
+/* Hub 6 */
+enum {
+ QLCNIC_HW_CAS0_CRB_AGT_ADR = 0x46,
+ QLCNIC_HW_CAS1_CRB_AGT_ADR = 0x47,
+ QLCNIC_HW_CAS2_CRB_AGT_ADR = 0x48,
+ QLCNIC_HW_CAS3_CRB_AGT_ADR = 0x49,
+ QLCNIC_HW_NCM_CRB_AGT_ADR = 0x16,
+ QLCNIC_HW_TMR_CRB_AGT_ADR = 0x17,
+ QLCNIC_HW_XDMA_CRB_AGT_ADR = 0x05,
+ QLCNIC_HW_OCM0_CRB_AGT_ADR = 0x06,
+ QLCNIC_HW_OCM1_CRB_AGT_ADR = 0x07
+};
+
+/* Floaters - non-existent modules */
+#define QLCNIC_HW_EFC_RPMX0_CRB_AGT_ADR 0x67
+
+/* This field defines PCI/X adr [25:20] of agents on the CRB */
+enum {
+ QLCNIC_HW_PX_MAP_CRB_PH = 0,
+ QLCNIC_HW_PX_MAP_CRB_PS,
+ QLCNIC_HW_PX_MAP_CRB_MN,
+ QLCNIC_HW_PX_MAP_CRB_MS,
+ QLCNIC_HW_PX_MAP_CRB_PGR1,
+ QLCNIC_HW_PX_MAP_CRB_SRE,
+ QLCNIC_HW_PX_MAP_CRB_NIU,
+ QLCNIC_HW_PX_MAP_CRB_QMN,
+ QLCNIC_HW_PX_MAP_CRB_SQN0,
+ QLCNIC_HW_PX_MAP_CRB_SQN1,
+ QLCNIC_HW_PX_MAP_CRB_SQN2,
+ QLCNIC_HW_PX_MAP_CRB_SQN3,
+ QLCNIC_HW_PX_MAP_CRB_QMS,
+ QLCNIC_HW_PX_MAP_CRB_SQS0,
+ QLCNIC_HW_PX_MAP_CRB_SQS1,
+ QLCNIC_HW_PX_MAP_CRB_SQS2,
+ QLCNIC_HW_PX_MAP_CRB_SQS3,
+ QLCNIC_HW_PX_MAP_CRB_PGN0,
+ QLCNIC_HW_PX_MAP_CRB_PGN1,
+ QLCNIC_HW_PX_MAP_CRB_PGN2,
+ QLCNIC_HW_PX_MAP_CRB_PGN3,
+ QLCNIC_HW_PX_MAP_CRB_PGND,
+ QLCNIC_HW_PX_MAP_CRB_PGNI,
+ QLCNIC_HW_PX_MAP_CRB_PGS0,
+ QLCNIC_HW_PX_MAP_CRB_PGS1,
+ QLCNIC_HW_PX_MAP_CRB_PGS2,
+ QLCNIC_HW_PX_MAP_CRB_PGS3,
+ QLCNIC_HW_PX_MAP_CRB_PGSD,
+ QLCNIC_HW_PX_MAP_CRB_PGSI,
+ QLCNIC_HW_PX_MAP_CRB_SN,
+ QLCNIC_HW_PX_MAP_CRB_PGR2,
+ QLCNIC_HW_PX_MAP_CRB_EG,
+ QLCNIC_HW_PX_MAP_CRB_PH2,
+ QLCNIC_HW_PX_MAP_CRB_PS2,
+ QLCNIC_HW_PX_MAP_CRB_CAM,
+ QLCNIC_HW_PX_MAP_CRB_CAS0,
+ QLCNIC_HW_PX_MAP_CRB_CAS1,
+ QLCNIC_HW_PX_MAP_CRB_CAS2,
+ QLCNIC_HW_PX_MAP_CRB_C2C0,
+ QLCNIC_HW_PX_MAP_CRB_C2C1,
+ QLCNIC_HW_PX_MAP_CRB_TIMR,
+ QLCNIC_HW_PX_MAP_CRB_PGR3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX1,
+ QLCNIC_HW_PX_MAP_CRB_RPMX2,
+ QLCNIC_HW_PX_MAP_CRB_RPMX3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX4,
+ QLCNIC_HW_PX_MAP_CRB_RPMX5,
+ QLCNIC_HW_PX_MAP_CRB_RPMX6,
+ QLCNIC_HW_PX_MAP_CRB_RPMX7,
+ QLCNIC_HW_PX_MAP_CRB_XDMA,
+ QLCNIC_HW_PX_MAP_CRB_I2Q,
+ QLCNIC_HW_PX_MAP_CRB_ROMUSB,
+ QLCNIC_HW_PX_MAP_CRB_CAS3,
+ QLCNIC_HW_PX_MAP_CRB_RPMX0,
+ QLCNIC_HW_PX_MAP_CRB_RPMX8,
+ QLCNIC_HW_PX_MAP_CRB_RPMX9,
+ QLCNIC_HW_PX_MAP_CRB_OCM0,
+ QLCNIC_HW_PX_MAP_CRB_OCM1,
+ QLCNIC_HW_PX_MAP_CRB_SMB,
+ QLCNIC_HW_PX_MAP_CRB_I2C0,
+ QLCNIC_HW_PX_MAP_CRB_I2C1,
+ QLCNIC_HW_PX_MAP_CRB_LPC,
+ QLCNIC_HW_PX_MAP_CRB_PGNC,
+ QLCNIC_HW_PX_MAP_CRB_PGR0
+};
+
+/* This field defines CRB adr [31:20] of the agents */
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MN \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PH \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_PH_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_MS \
+ ((QLCNIC_HW_H0_CH_HUB_ADR << 7) | QLCNIC_HW_MS_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_PS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMS \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_QMS_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQS3 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SQGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C0 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_C2C1 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_C2C1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX7_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9 \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX9_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SMB \
+ ((QLCNIC_HW_H1_CH_HUB_ADR << 7) | QLCNIC_HW_SMB_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_NIU \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_NIU_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1 \
+ ((QLCNIC_HW_H2_CH_HUB_ADR << 7) | QLCNIC_HW_I2C1_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SRE \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SRE_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_EG \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_EG_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_QMN \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_QM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_SQG3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX5_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX6_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_RPMX8_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS0 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS1 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS2 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAS3 \
+ ((QLCNIC_HW_H3_CH_HUB_ADR << 7) | QLCNIC_HW_CAS3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGND \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGND_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGN4_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGNC_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR0 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR1 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR2 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGR3 \
+ ((QLCNIC_HW_H4_CH_HUB_ADR << 7) | QLCNIC_HW_PEGR3_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSI_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSD \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSD_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS2_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3 \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGS3_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_PGSC \
+ ((QLCNIC_HW_H5_CH_HUB_ADR << 7) | QLCNIC_HW_PEGSC_CRB_AGT_ADR)
+
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_CAM \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_NCM_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_TMR_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_XDMA_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_SN \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_SN_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_I2Q_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_ROMUSB_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM0_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_OCM1 \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_OCM1_CRB_AGT_ADR)
+#define QLCNIC_HW_CRB_HUB_AGT_ADR_LPC \
+ ((QLCNIC_HW_H6_CH_HUB_ADR << 7) | QLCNIC_HW_LPC_CRB_AGT_ADR)
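As a worked example of the (hub << 7) | agent encoding above:

	/*
	 * MN sits on hub 0 (QLCNIC_HW_H0_CH_HUB_ADR = 0x05, agent 0x15), so
	 * QLCNIC_HW_CRB_HUB_AGT_ADR_MN = (0x05 << 7) | 0x15 = 0x295;
	 * this value later forms bits [31:20] of the translated CRB address
	 * (see the CRB_HI() macro in qlcnic_hw.c below).
	 */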
+
+#define QLCNIC_SRE_MISC (QLCNIC_CRB_SRE + 0x0002c)
+
+#define QLCNIC_I2Q_CLR_PCI_HI (QLCNIC_CRB_I2Q + 0x00034)
+
+#define ROMUSB_GLB (QLCNIC_CRB_ROMUSB + 0x00000)
+#define ROMUSB_ROM (QLCNIC_CRB_ROMUSB + 0x10000)
+
+#define QLCNIC_ROMUSB_GLB_STATUS (ROMUSB_GLB + 0x0004)
+#define QLCNIC_ROMUSB_GLB_SW_RESET (ROMUSB_GLB + 0x0008)
+#define QLCNIC_ROMUSB_GLB_PAD_GPIO_I (ROMUSB_GLB + 0x000c)
+#define QLCNIC_ROMUSB_GLB_CAS_RST (ROMUSB_GLB + 0x0038)
+#define QLCNIC_ROMUSB_GLB_TEST_MUX_SEL (ROMUSB_GLB + 0x0044)
+#define QLCNIC_ROMUSB_GLB_PEGTUNE_DONE (ROMUSB_GLB + 0x005c)
+#define QLCNIC_ROMUSB_GLB_CHIP_CLK_CTRL (ROMUSB_GLB + 0x00A8)
+
+#define QLCNIC_ROMUSB_GPIO(n) (ROMUSB_GLB + 0x60 + (4 * (n)))
+
+#define QLCNIC_ROMUSB_ROM_INSTR_OPCODE (ROMUSB_ROM + 0x0004)
+#define QLCNIC_ROMUSB_ROM_ADDRESS (ROMUSB_ROM + 0x0008)
+#define QLCNIC_ROMUSB_ROM_WDATA (ROMUSB_ROM + 0x000c)
+#define QLCNIC_ROMUSB_ROM_ABYTE_CNT (ROMUSB_ROM + 0x0010)
+#define QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT (ROMUSB_ROM + 0x0014)
+#define QLCNIC_ROMUSB_ROM_RDATA (ROMUSB_ROM + 0x0018)
+
+/* Lock IDs for ROM lock */
+#define ROM_LOCK_DRIVER 0x0d417340
+
+/******************************************************************************
+*
+* Definitions specific to M25P flash
+*
+*******************************************************************************
+*/
+
+/* all are 1MB windows */
+
+#define QLCNIC_PCI_CRB_WINDOWSIZE 0x00100000
+#define QLCNIC_PCI_CRB_WINDOW(A) \
+ (QLCNIC_PCI_CRBSPACE + (A)*QLCNIC_PCI_CRB_WINDOWSIZE)
+
+#define QLCNIC_CRB_NIU QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_NIU)
+#define QLCNIC_CRB_SRE QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SRE)
+#define QLCNIC_CRB_ROMUSB \
+ QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_ROMUSB)
+#define QLCNIC_CRB_I2Q QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2Q)
+#define QLCNIC_CRB_I2C0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_I2C0)
+#define QLCNIC_CRB_SMB QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SMB)
+#define QLCNIC_CRB_MAX QLCNIC_PCI_CRB_WINDOW(64)
+
+#define QLCNIC_CRB_PCIX_HOST QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH)
+#define QLCNIC_CRB_PCIX_HOST2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PH2)
+#define QLCNIC_CRB_PEG_NET_0 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN0)
+#define QLCNIC_CRB_PEG_NET_1 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN1)
+#define QLCNIC_CRB_PEG_NET_2 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN2)
+#define QLCNIC_CRB_PEG_NET_3 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGN3)
+#define QLCNIC_CRB_PEG_NET_4 QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SQS2)
+#define QLCNIC_CRB_PEG_NET_D QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGND)
+#define QLCNIC_CRB_PEG_NET_I QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PGNI)
+#define QLCNIC_CRB_DDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_MN)
+#define QLCNIC_CRB_QDR_NET QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_SN)
+
+#define QLCNIC_CRB_PCIX_MD QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_PS)
+#define QLCNIC_CRB_PCIE QLCNIC_CRB_PCIX_MD
+
+#define ISR_INT_VECTOR (QLCNIC_PCIX_PS_REG(PCIX_INT_VECTOR))
+#define ISR_INT_MASK (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_MASK_SLOW (QLCNIC_PCIX_PS_REG(PCIX_INT_MASK))
+#define ISR_INT_TARGET_STATUS (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS))
+#define ISR_INT_TARGET_MASK (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK))
+#define ISR_INT_TARGET_STATUS_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F1))
+#define ISR_INT_TARGET_MASK_F1 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F1))
+#define ISR_INT_TARGET_STATUS_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F2))
+#define ISR_INT_TARGET_MASK_F2 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F2))
+#define ISR_INT_TARGET_STATUS_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F3))
+#define ISR_INT_TARGET_MASK_F3 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F3))
+#define ISR_INT_TARGET_STATUS_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F4))
+#define ISR_INT_TARGET_MASK_F4 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F4))
+#define ISR_INT_TARGET_STATUS_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F5))
+#define ISR_INT_TARGET_MASK_F5 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F5))
+#define ISR_INT_TARGET_STATUS_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F6))
+#define ISR_INT_TARGET_MASK_F6 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F6))
+#define ISR_INT_TARGET_STATUS_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_STATUS_F7))
+#define ISR_INT_TARGET_MASK_F7 (QLCNIC_PCIX_PS_REG(PCIX_TARGET_MASK_F7))
+
+#define QLCNIC_PCI_MN_2M (0)
+#define QLCNIC_PCI_MS_2M (0x80000)
+#define QLCNIC_PCI_OCM0_2M (0x000c0000UL)
+#define QLCNIC_PCI_CRBSPACE (0x06000000UL)
+#define QLCNIC_PCI_2MB_SIZE (0x00200000UL)
+#define QLCNIC_PCI_CAMQM_2M_BASE (0x000ff800UL)
+#define QLCNIC_PCI_CAMQM_2M_END (0x04800800UL)
+
+#define QLCNIC_CRB_CAM QLCNIC_PCI_CRB_WINDOW(QLCNIC_HW_PX_MAP_CRB_CAM)
+
+#define QLCNIC_ADDR_DDR_NET (0x0000000000000000ULL)
+#define QLCNIC_ADDR_DDR_NET_MAX (0x000000000fffffffULL)
+#define QLCNIC_ADDR_OCM0 (0x0000000200000000ULL)
+#define QLCNIC_ADDR_OCM0_MAX (0x00000002000fffffULL)
+#define QLCNIC_ADDR_OCM1 (0x0000000200400000ULL)
+#define QLCNIC_ADDR_OCM1_MAX (0x00000002004fffffULL)
+#define QLCNIC_ADDR_QDR_NET (0x0000000300000000ULL)
+#define QLCNIC_ADDR_QDR_NET_MAX_P3 (0x0000000303ffffffULL)
+
+/*
+ * Register offsets for MN
+ */
+#define QLCNIC_MIU_CONTROL (0x000)
+#define QLCNIC_MIU_MN_CONTROL (QLCNIC_CRB_DDR_NET+QLCNIC_MIU_CONTROL)
+
+/* 200ms delay in each loop */
+#define QLCNIC_NIU_PHY_WAITLEN 200000
+/* 10 seconds before we give up */
+#define QLCNIC_NIU_PHY_WAITMAX 50
+#define QLCNIC_NIU_MAX_GBE_PORTS 4
+#define QLCNIC_NIU_MAX_XG_PORTS 2
+
+#define QLCNIC_NIU_MODE (QLCNIC_CRB_NIU + 0x00000)
+#define QLCNIC_NIU_GB_PAUSE_CTL (QLCNIC_CRB_NIU + 0x0030c)
+#define QLCNIC_NIU_XG_PAUSE_CTL (QLCNIC_CRB_NIU + 0x00098)
+
+#define QLCNIC_NIU_GB_MAC_CONFIG_0(I) \
+ (QLCNIC_CRB_NIU + 0x30000 + (I)*0x10000)
+#define QLCNIC_NIU_GB_MAC_CONFIG_1(I) \
+ (QLCNIC_CRB_NIU + 0x30004 + (I)*0x10000)
+
+#define TEST_AGT_CTRL (0x00)
+
+#define TA_CTL_START 1
+#define TA_CTL_ENABLE 2
+#define TA_CTL_WRITE 4
+#define TA_CTL_BUSY 8
+
+/*
+ * Register offsets for MN
+ */
+#define MIU_TEST_AGT_BASE (0x90)
+
+#define MIU_TEST_AGT_ADDR_LO (0x04)
+#define MIU_TEST_AGT_ADDR_HI (0x08)
+#define MIU_TEST_AGT_WRDATA_LO (0x10)
+#define MIU_TEST_AGT_WRDATA_HI (0x14)
+#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
+#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
+#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
+#define MIU_TEST_AGT_RDDATA_LO (0x18)
+#define MIU_TEST_AGT_RDDATA_HI (0x1c)
+#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
+#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
+#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
+
+#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
+#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
+
+/*
+ * Register offsets for MS
+ */
+#define SIU_TEST_AGT_BASE (0x60)
+
+#define SIU_TEST_AGT_ADDR_LO (0x04)
+#define SIU_TEST_AGT_ADDR_HI (0x18)
+#define SIU_TEST_AGT_WRDATA_LO (0x08)
+#define SIU_TEST_AGT_WRDATA_HI (0x0c)
+#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
+#define SIU_TEST_AGT_RDDATA_LO (0x10)
+#define SIU_TEST_AGT_RDDATA_HI (0x14)
+#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
+
+#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
+#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
+
+/* XG Link status */
+#define XG_LINK_UP 0x10
+#define XG_LINK_DOWN 0x20
+
+#define XG_LINK_UP_P3 0x01
+#define XG_LINK_DOWN_P3 0x02
+#define XG_LINK_STATE_P3_MASK 0xf
+#define XG_LINK_STATE_P3(pcifn, val) \
+ (((val) >> ((pcifn) * 4)) & XG_LINK_STATE_P3_MASK)
+
+#define P3_LINK_SPEED_MHZ 100
+#define P3_LINK_SPEED_MASK 0xff
+#define P3_LINK_SPEED_REG(pcifn) \
+ (CRB_PF_LINK_SPEED_1 + (((pcifn) / 4) * 4))
+#define P3_LINK_SPEED_VAL(pcifn, reg) \
+ (((reg) >> (8 * ((pcifn) & 0x3))) & P3_LINK_SPEED_MASK)
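A minimal sketch of how these link-speed helpers compose (QLCRD32() is the register accessor used throughout this driver; variable names are illustrative):

	u32 reg = QLCRD32(adapter, P3_LINK_SPEED_REG(pcifn));
	/* one byte per PCI function; the result is the speed for this function */
	u32 link_speed = P3_LINK_SPEED_VAL(pcifn, reg) * P3_LINK_SPEED_MHZ;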
+
+#define QLCNIC_CAM_RAM_BASE (QLCNIC_CRB_CAM + 0x02000)
+#define QLCNIC_CAM_RAM(reg) (QLCNIC_CAM_RAM_BASE + (reg))
+#define QLCNIC_FW_VERSION_MAJOR (QLCNIC_CAM_RAM(0x150))
+#define QLCNIC_FW_VERSION_MINOR (QLCNIC_CAM_RAM(0x154))
+#define QLCNIC_FW_VERSION_SUB (QLCNIC_CAM_RAM(0x158))
+#define QLCNIC_ROM_LOCK_ID (QLCNIC_CAM_RAM(0x100))
+#define QLCNIC_PHY_LOCK_ID (QLCNIC_CAM_RAM(0x120))
+#define QLCNIC_CRB_WIN_LOCK_ID (QLCNIC_CAM_RAM(0x124))
+
+#define NIC_CRB_BASE (QLCNIC_CAM_RAM(0x200))
+#define NIC_CRB_BASE_2 (QLCNIC_CAM_RAM(0x700))
+#define QLCNIC_REG(X) (NIC_CRB_BASE+(X))
+#define QLCNIC_REG_2(X) (NIC_CRB_BASE_2+(X))
+
+#define QLCNIC_CDRP_CRB_OFFSET (QLCNIC_REG(0x18))
+#define QLCNIC_ARG1_CRB_OFFSET (QLCNIC_REG(0x1c))
+#define QLCNIC_ARG2_CRB_OFFSET (QLCNIC_REG(0x20))
+#define QLCNIC_ARG3_CRB_OFFSET (QLCNIC_REG(0x24))
+#define QLCNIC_SIGN_CRB_OFFSET (QLCNIC_REG(0x28))
+
+#define CRB_CMDPEG_STATE (QLCNIC_REG(0x50))
+#define CRB_RCVPEG_STATE (QLCNIC_REG(0x13c))
+
+#define CRB_XG_STATE_P3 (QLCNIC_REG(0x98))
+#define CRB_PF_LINK_SPEED_1 (QLCNIC_REG(0xe8))
+#define CRB_PF_LINK_SPEED_2 (QLCNIC_REG(0xec))
+
+#define CRB_MPORT_MODE (QLCNIC_REG(0xc4))
+#define CRB_DMA_SHIFT (QLCNIC_REG(0xcc))
+
+#define CRB_TEMP_STATE (QLCNIC_REG(0x1b4))
+
+#define CRB_V2P_0 (QLCNIC_REG(0x290))
+#define CRB_V2P(port) (CRB_V2P_0+((port)*4))
+#define CRB_DRIVER_VERSION (QLCNIC_REG(0x2a0))
+
+#define CRB_SW_INT_MASK_0 (QLCNIC_REG(0x1d8))
+#define CRB_SW_INT_MASK_1 (QLCNIC_REG(0x1e0))
+#define CRB_SW_INT_MASK_2 (QLCNIC_REG(0x1e4))
+#define CRB_SW_INT_MASK_3 (QLCNIC_REG(0x1e8))
+
+#define CRB_FW_CAPABILITIES_1 (QLCNIC_CAM_RAM(0x128))
+#define CRB_MAC_BLOCK_START (QLCNIC_CAM_RAM(0x1c0))
+
+/*
+ * capabilities register, can be used to selectively enable/disable features
+ * for backward compatibility
+ */
+#define CRB_NIC_CAPABILITIES_HOST QLCNIC_REG(0x1a8)
+#define CRB_NIC_CAPABILITIES_FW QLCNIC_REG(0x1dc)
+#define CRB_NIC_MSI_MODE_HOST QLCNIC_REG(0x270)
+#define CRB_NIC_MSI_MODE_FW QLCNIC_REG(0x274)
+
+#define INTR_SCHEME_PERPORT 0x1
+#define MSI_MODE_MULTIFUNC 0x1
+
+/* used for ethtool tests */
+#define CRB_SCRATCHPAD_TEST QLCNIC_REG(0x280)
+
+/*
+ * CrbPortPhanCntrHi/Lo is used to pass the address of the HostPhantomIndex,
+ * which can be read by the Phantom host to get producer/consumer indexes from
+ * Phantom/Casper. If it is not HOST_SHARED_MEMORY, then the following
+ * registers will be used for the addresses of the ring's shared memory
+ * on the Phantom.
+ */
+
+#define qlcnic_get_temp_val(x) ((x) >> 16)
+#define qlcnic_get_temp_state(x) ((x) & 0xffff)
+#define qlcnic_encode_temp(val, state) (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+ QLCNIC_TEMP_NORMAL = 0x1, /* Normal operating range */
+ QLCNIC_TEMP_WARN, /* Sound alert, temperature getting high */
+ QLCNIC_TEMP_PANIC /* Fatal error, hardware has shut down. */
+};
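A minimal sketch of decoding a temperature word read from CRB_TEMP_STATE with the helpers above (again assuming the QLCRD32() accessor used elsewhere in this patch):

	u32 temp = QLCRD32(adapter, CRB_TEMP_STATE);
	u32 temp_state = qlcnic_get_temp_state(temp); /* QLCNIC_TEMP_NORMAL/WARN/PANIC */
	u32 temp_val   = qlcnic_get_temp_val(temp);   /* reading carried in the upper 16 bits */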
+
+/* Lock IDs for PHY lock */
+#define PHY_LOCK_DRIVER 0x44524956
+
+/* Used for PS PCI Memory access */
+#define PCIX_PS_OP_ADDR_LO (0x10000)
+/* via CRB (PS side only) */
+#define PCIX_PS_OP_ADDR_HI (0x10004)
+
+#define PCIX_INT_VECTOR (0x10100)
+#define PCIX_INT_MASK (0x10104)
+
+#define PCIX_OCM_WINDOW (0x10800)
+#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
+
+#define PCIX_TARGET_STATUS (0x10118)
+#define PCIX_TARGET_STATUS_F1 (0x10160)
+#define PCIX_TARGET_STATUS_F2 (0x10164)
+#define PCIX_TARGET_STATUS_F3 (0x10168)
+#define PCIX_TARGET_STATUS_F4 (0x10360)
+#define PCIX_TARGET_STATUS_F5 (0x10364)
+#define PCIX_TARGET_STATUS_F6 (0x10368)
+#define PCIX_TARGET_STATUS_F7 (0x1036c)
+
+#define PCIX_TARGET_MASK (0x10128)
+#define PCIX_TARGET_MASK_F1 (0x10170)
+#define PCIX_TARGET_MASK_F2 (0x10174)
+#define PCIX_TARGET_MASK_F3 (0x10178)
+#define PCIX_TARGET_MASK_F4 (0x10370)
+#define PCIX_TARGET_MASK_F5 (0x10374)
+#define PCIX_TARGET_MASK_F6 (0x10378)
+#define PCIX_TARGET_MASK_F7 (0x1037c)
+
+#define PCIX_MSI_F(i) (0x13000+((i)*4))
+
+#define QLCNIC_PCIX_PH_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+#define QLCNIC_PCIX_PS_REG(reg) (QLCNIC_CRB_PCIX_MD + (reg))
+#define QLCNIC_PCIE_REG(reg) (QLCNIC_CRB_PCIE + (reg))
+
+#define PCIE_SEM0_LOCK (0x1c000)
+#define PCIE_SEM0_UNLOCK (0x1c004)
+#define PCIE_SEM_LOCK(N) (PCIE_SEM0_LOCK + 8*(N))
+#define PCIE_SEM_UNLOCK(N) (PCIE_SEM0_UNLOCK + 8*(N))
+
+#define PCIE_SETUP_FUNCTION (0x12040)
+#define PCIE_SETUP_FUNCTION2 (0x12048)
+#define PCIE_MISCCFG_RC (0x1206c)
+#define PCIE_TGT_SPLIT_CHICKEN (0x12080)
+#define PCIE_CHICKEN3 (0x120c8)
+
+#define ISR_INT_STATE_REG (QLCNIC_PCIX_PS_REG(PCIE_MISCCFG_RC))
+#define PCIE_MAX_MASTER_SPLIT (0x14048)
+
+#define QLCNIC_PORT_MODE_NONE 0
+#define QLCNIC_PORT_MODE_XG 1
+#define QLCNIC_PORT_MODE_GB 2
+#define QLCNIC_PORT_MODE_802_3_AP 3
+#define QLCNIC_PORT_MODE_AUTO_NEG 4
+#define QLCNIC_PORT_MODE_AUTO_NEG_1G 5
+#define QLCNIC_PORT_MODE_AUTO_NEG_XG 6
+#define QLCNIC_PORT_MODE_ADDR (QLCNIC_CAM_RAM(0x24))
+#define QLCNIC_WOL_PORT_MODE (QLCNIC_CAM_RAM(0x198))
+
+#define QLCNIC_WOL_CONFIG_NV (QLCNIC_CAM_RAM(0x184))
+#define QLCNIC_WOL_CONFIG (QLCNIC_CAM_RAM(0x188))
+
+#define QLCNIC_PEG_TUNE_MN_PRESENT 0x1
+#define QLCNIC_PEG_TUNE_CAPABILITY (QLCNIC_CAM_RAM(0x02c))
+
+#define QLCNIC_DMA_WATCHDOG_CTRL (QLCNIC_CAM_RAM(0x14))
+#define QLCNIC_PEG_ALIVE_COUNTER (QLCNIC_CAM_RAM(0xb0))
+#define QLCNIC_PEG_HALT_STATUS1 (QLCNIC_CAM_RAM(0xa8))
+#define QLCNIC_PEG_HALT_STATUS2 (QLCNIC_CAM_RAM(0xac))
+#define QLCNIC_CRB_DEV_REF_COUNT (QLCNIC_CAM_RAM(0x138))
+#define QLCNIC_CRB_DEV_STATE (QLCNIC_CAM_RAM(0x140))
+
+#define QLCNIC_CRB_DRV_STATE (QLCNIC_CAM_RAM(0x144))
+#define QLCNIC_CRB_DRV_SCRATCH (QLCNIC_CAM_RAM(0x148))
+#define QLCNIC_CRB_DEV_PARTITION_INFO (QLCNIC_CAM_RAM(0x14c))
+#define QLCNIC_CRB_DRV_IDC_VER (QLCNIC_CAM_RAM(0x14c))
+
+ /* Device State */
+#define QLCNIC_DEV_COLD 1
+#define QLCNIC_DEV_INITALIZING 2
+#define QLCNIC_DEV_READY 3
+#define QLCNIC_DEV_NEED_RESET 4
+#define QLCNIC_DEV_NEED_QUISCENT 5
+#define QLCNIC_DEV_FAILED 6
+
+#define QLCNIC_RCODE_DRIVER_INFO 0x20000000
+#define QLCNIC_RCODE_DRIVER_CAN_RELOAD 0x40000000
+#define QLCNIC_RCODE_FATAL_ERROR 0x80000000
+#define QLCNIC_FWERROR_PEGNUM(code) ((code) & 0xff)
+#define QLCNIC_FWERROR_CODE(code) ((code >> 8) & 0xfffff)
+
+#define FW_POLL_DELAY (2 * HZ)
+#define FW_FAIL_THRESH 3
+#define FW_POLL_THRESH 10
+
+#define ISR_MSI_INT_TRIGGER(FUNC) (QLCNIC_PCIX_PS_REG(PCIX_MSI_F(FUNC)))
+#define ISR_LEGACY_INT_TRIGGERED(VAL) (((VAL) & 0x300) == 0x200)
+
+/*
+ * PCI Interrupt Vector Values.
+ */
+#define PCIX_INT_VECTOR_BIT_F0 0x0080
+#define PCIX_INT_VECTOR_BIT_F1 0x0100
+#define PCIX_INT_VECTOR_BIT_F2 0x0200
+#define PCIX_INT_VECTOR_BIT_F3 0x0400
+#define PCIX_INT_VECTOR_BIT_F4 0x0800
+#define PCIX_INT_VECTOR_BIT_F5 0x1000
+#define PCIX_INT_VECTOR_BIT_F6 0x2000
+#define PCIX_INT_VECTOR_BIT_F7 0x4000
+
+struct qlcnic_legacy_intr_set {
+ u32 int_vec_bit;
+ u32 tgt_status_reg;
+ u32 tgt_mask_reg;
+ u32 pci_int_reg;
+};
+
+#define QLCNIC_LEGACY_INTR_CONFIG \
+{ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F0, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(0) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F1, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F1, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F1, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(1) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F2, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F2, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F2, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(2) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F3, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F3, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F3, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(3) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F4, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F4, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F4, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(4) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F5, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F5, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F5, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(5) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F6, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F6, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F6, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(6) }, \
+ \
+ { \
+ .int_vec_bit = PCIX_INT_VECTOR_BIT_F7, \
+ .tgt_status_reg = ISR_INT_TARGET_STATUS_F7, \
+ .tgt_mask_reg = ISR_INT_TARGET_MASK_F7, \
+ .pci_int_reg = ISR_MSI_INT_TRIGGER(7) }, \
+}
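A sketch of how this initializer is meant to be consumed (the array name is illustrative; the companion .c files build a table like this and index it by PCI function):

	static const struct qlcnic_legacy_intr_set legacy_intr[] =
		QLCNIC_LEGACY_INTR_CONFIG;

	/* e.g. PCI function 3: */
	u32 vec_bit    = legacy_intr[3].int_vec_bit;    /* PCIX_INT_VECTOR_BIT_F3 */
	u32 status_reg = legacy_intr[3].tgt_status_reg; /* ISR_INT_TARGET_STATUS_F3 */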
+
+/* NIU REGS */
+
+#define _qlcnic_crb_get_bit(var, bit) ((var >> bit) & 0x1)
+
+/*
+ * NIU GB MAC Config Register 0 (applies to GB0, GB1, GB2, GB3)
+ *
+ * Bit 0 : enable_tx => 1:enable frame xmit, 0:disable
+ * Bit 1 : tx_synced => R/O: xmit enable synched to xmit stream
+ * Bit 2 : enable_rx => 1:enable frame recv, 0:disable
+ * Bit 3 : rx_synced => R/O: recv enable synched to recv stream
+ * Bit 4 : tx_flowctl => 1:enable pause frame generation, 0:disable
+ * Bit 5 : rx_flowctl => 1:act on recv'd pause frames, 0:ignore
+ * Bit 8 : loopback => 1:loop MAC xmits to MAC recvs, 0:normal
+ * Bit 16: tx_reset_pb => 1:reset frame xmit protocol blk, 0:no-op
+ * Bit 17: rx_reset_pb => 1:reset frame recv protocol blk, 0:no-op
+ * Bit 18: tx_reset_mac => 1:reset data/ctl multiplexer blk, 0:no-op
+ * Bit 19: rx_reset_mac => 1:reset ctl frames & timers blk, 0:no-op
+ * Bit 31: soft_reset => 1:reset the MAC and the SERDES, 0:no-op
+ */
+#define qlcnic_gb_rx_flowctl(config_word) \
+ ((config_word) |= 1 << 5)
+#define qlcnic_gb_get_rx_flowctl(config_word) \
+ _qlcnic_crb_get_bit((config_word), 5)
+#define qlcnic_gb_unset_rx_flowctl(config_word) \
+ ((config_word) &= ~(1 << 5))
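A small read-modify-write sketch using the flow-control helpers above (the macros only manipulate the in-memory word; QLCRD32()/QLCWR32() are the accessors used elsewhere in the driver):

	u32 mac_cfg = QLCRD32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port));
	if (!qlcnic_gb_get_rx_flowctl(mac_cfg))
		qlcnic_gb_rx_flowctl(mac_cfg);  /* set bit 5: act on received pause frames */
	QLCWR32(adapter, QLCNIC_NIU_GB_MAC_CONFIG_0(port), mac_cfg);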
+
+/*
+ * NIU GB Pause Ctl Register
+ */
+
+#define qlcnic_gb_set_gb0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_gb_set_gb1_mask(config_word) \
+ ((config_word) |= 1 << 2)
+#define qlcnic_gb_set_gb2_mask(config_word) \
+ ((config_word) |= 1 << 4)
+#define qlcnic_gb_set_gb3_mask(config_word) \
+ ((config_word) |= 1 << 6)
+
+#define qlcnic_gb_get_gb0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_gb_get_gb1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 2)
+#define qlcnic_gb_get_gb2_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 4)
+#define qlcnic_gb_get_gb3_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 6)
+
+#define qlcnic_gb_unset_gb0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_gb_unset_gb1_mask(config_word) \
+ ((config_word) &= ~(1 << 2))
+#define qlcnic_gb_unset_gb2_mask(config_word) \
+ ((config_word) &= ~(1 << 4))
+#define qlcnic_gb_unset_gb3_mask(config_word) \
+ ((config_word) &= ~(1 << 6))
+
+/*
+ * NIU XG Pause Ctl Register
+ *
+ * Bit 0 : xg0_mask => 1:disable tx pause frames
+ * Bit 1 : xg0_request => 1:request single pause frame
+ * Bit 2 : xg0_on_off => 1:request is pause on, 0:off
+ * Bit 3 : xg1_mask => 1:disable tx pause frames
+ * Bit 4 : xg1_request => 1:request single pause frame
+ * Bit 5 : xg1_on_off => 1:request is pause on, 0:off
+ */
+
+#define qlcnic_xg_set_xg0_mask(config_word) \
+ ((config_word) |= 1 << 0)
+#define qlcnic_xg_set_xg1_mask(config_word) \
+ ((config_word) |= 1 << 3)
+
+#define qlcnic_xg_get_xg0_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 0)
+#define qlcnic_xg_get_xg1_mask(config_word) \
+ _qlcnic_crb_get_bit((config_word), 3)
+
+#define qlcnic_xg_unset_xg0_mask(config_word) \
+ ((config_word) &= ~(1 << 0))
+#define qlcnic_xg_unset_xg1_mask(config_word) \
+ ((config_word) &= ~(1 << 3))
+
+/*
+ * PHY-Specific MII control/status registers.
+ */
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_AUTONEG 4
+#define QLCNIC_NIU_GB_MII_MGMT_ADDR_PHY_STATUS 17
+
+/*
+ * PHY-Specific Status Register (reg 17).
+ *
+ * Bit 0 : jabber => 1:jabber detected, 0:not
+ * Bit 1 : polarity => 1:polarity reversed, 0:normal
+ * Bit 2 : recvpause => 1:receive pause enabled, 0:disabled
+ * Bit 3 : xmitpause => 1:transmit pause enabled, 0:disabled
+ * Bit 4 : energydetect => 1:sleep, 0:active
+ * Bit 5 : downshift => 1:downshift, 0:no downshift
+ * Bit 6 : crossover => 1:MDIX (crossover), 0:MDI (no crossover)
+ * Bits 7-9 : cablelen => not valid in 10Mb/s mode
+ * 0:<50m, 1:50-80m, 2:80-110m, 3:110-140m, 4:>140m
+ * Bit 10 : link => 1:link up, 0:link down
+ * Bit 11 : resolved => 1:speed and duplex resolved, 0:not yet
+ * Bit 12 : pagercvd => 1:page received, 0:page not received
+ * Bit 13 : duplex => 1:full duplex, 0:half duplex
+ * Bits 14-15 : speed => 0:10Mb/s, 1:100Mb/s, 2:1000Mb/s, 3:rsvd
+ */
+
+#define qlcnic_get_phy_speed(config_word) (((config_word) >> 14) & 0x03)
+
+#define qlcnic_set_phy_speed(config_word, val) \
+ ((config_word) |= ((val & 0x03) << 14))
+#define qlcnic_set_phy_duplex(config_word) \
+ ((config_word) |= 1 << 13)
+#define qlcnic_clear_phy_duplex(config_word) \
+ ((config_word) &= ~(1 << 13))
+
+#define qlcnic_get_phy_link(config_word) \
+ _qlcnic_crb_get_bit(config_word, 10)
+#define qlcnic_get_phy_duplex(config_word) \
+ _qlcnic_crb_get_bit(config_word, 13)
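For instance, a raw PHY status word (register 17) would be decoded with the helpers above as follows (how the word is fetched over the MII management interface is outside this header):

	int link_up     = qlcnic_get_phy_link(status);   /* bit 10 */
	int full_duplex = qlcnic_get_phy_duplex(status); /* bit 13 */
	int speed       = qlcnic_get_phy_speed(status);  /* 0:10, 1:100, 2:1000 Mb/s */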
+
+#define QLCNIC_NIU_NON_PROMISC_MODE 0
+#define QLCNIC_NIU_PROMISC_MODE 1
+#define QLCNIC_NIU_ALLMULTI_MODE 2
+
+struct crb_128M_2M_sub_block_map {
+ unsigned valid;
+ unsigned start_128M;
+ unsigned end_128M;
+ unsigned start_2M;
+};
+
+struct crb_128M_2M_block_map{
+ struct crb_128M_2M_sub_block_map sub_block[16];
+};
+#endif /* __QLCNIC_HDR_H_ */
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
new file mode 100644
index 00000000000..99a4d1379d0
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -0,0 +1,1274 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include "qlcnic.h"
+
+#include <net/ip.h>
+
+#define MASK(n) ((1ULL<<(n))-1)
+#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
+
+#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
+
+#define CRB_BLK(off) ((off >> 20) & 0x3f)
+#define CRB_SUBBLK(off) ((off >> 16) & 0xf)
+#define CRB_WINDOW_2M (0x130060)
+#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
+#define CRB_INDIRECT_2M (0x1e0000UL)
+
+
+#ifndef readq
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) | (((u64) readl(addr + 4)) << 32LL);
+}
+#endif
+
+#ifndef writeq
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(((u32) (val)), (addr));
+ writel(((u32) (val >> 32)), (addr + 4));
+}
+#endif
+
+#define ADDR_IN_RANGE(addr, low, high) \
+ (((addr) < (high)) && ((addr) >= (low)))
+
+#define PCI_OFFSET_FIRST_RANGE(adapter, off) \
+ ((adapter)->ahw.pci_base0 + (off))
+
+static void __iomem *pci_base_offset(struct qlcnic_adapter *adapter,
+ unsigned long off)
+{
+ if (ADDR_IN_RANGE(off, FIRST_PAGE_GROUP_START, FIRST_PAGE_GROUP_END))
+ return PCI_OFFSET_FIRST_RANGE(adapter, off);
+
+ return NULL;
+}
+
+static const struct crb_128M_2M_block_map
+crb_128M_2M_map[64] __cacheline_aligned_in_smp = {
+ {{{0, 0, 0, 0} } }, /* 0: PCI */
+ {{{1, 0x0100000, 0x0102000, 0x120000}, /* 1: PCIE */
+ {1, 0x0110000, 0x0120000, 0x130000},
+ {1, 0x0120000, 0x0122000, 0x124000},
+ {1, 0x0130000, 0x0132000, 0x126000},
+ {1, 0x0140000, 0x0142000, 0x128000},
+ {1, 0x0150000, 0x0152000, 0x12a000},
+ {1, 0x0160000, 0x0170000, 0x110000},
+ {1, 0x0170000, 0x0172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x01e0000, 0x01e0800, 0x122000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x0200000, 0x0210000, 0x180000} } },/* 2: MN */
+ {{{0, 0, 0, 0} } }, /* 3: */
+ {{{1, 0x0400000, 0x0401000, 0x169000} } },/* 4: P2NR1 */
+ {{{1, 0x0500000, 0x0510000, 0x140000} } },/* 5: SRE */
+ {{{1, 0x0600000, 0x0610000, 0x1c0000} } },/* 6: NIU */
+ {{{1, 0x0700000, 0x0704000, 0x1b8000} } },/* 7: QM */
+ {{{1, 0x0800000, 0x0802000, 0x170000}, /* 8: SQM0 */
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x08f0000, 0x08f2000, 0x172000} } },
+ {{{1, 0x0900000, 0x0902000, 0x174000}, /* 9: SQM1*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x09f0000, 0x09f2000, 0x176000} } },
+ {{{0, 0x0a00000, 0x0a02000, 0x178000}, /* 10: SQM2*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0af0000, 0x0af2000, 0x17a000} } },
+ {{{0, 0x0b00000, 0x0b02000, 0x17c000}, /* 11: SQM3*/
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {1, 0x0bf0000, 0x0bf2000, 0x17e000} } },
+ {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } },/* 12: I2Q */
+ {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } },/* 13: TMR */
+ {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } },/* 14: ROMUSB */
+ {{{1, 0x0f00000, 0x0f01000, 0x164000} } },/* 15: PEG4 */
+ {{{0, 0x1000000, 0x1004000, 0x1a8000} } },/* 16: XDMA */
+ {{{1, 0x1100000, 0x1101000, 0x160000} } },/* 17: PEG0 */
+ {{{1, 0x1200000, 0x1201000, 0x161000} } },/* 18: PEG1 */
+ {{{1, 0x1300000, 0x1301000, 0x162000} } },/* 19: PEG2 */
+ {{{1, 0x1400000, 0x1401000, 0x163000} } },/* 20: PEG3 */
+ {{{1, 0x1500000, 0x1501000, 0x165000} } },/* 21: P2ND */
+ {{{1, 0x1600000, 0x1601000, 0x166000} } },/* 22: P2NI */
+ {{{0, 0, 0, 0} } }, /* 23: */
+ {{{0, 0, 0, 0} } }, /* 24: */
+ {{{0, 0, 0, 0} } }, /* 25: */
+ {{{0, 0, 0, 0} } }, /* 26: */
+ {{{0, 0, 0, 0} } }, /* 27: */
+ {{{0, 0, 0, 0} } }, /* 28: */
+ {{{1, 0x1d00000, 0x1d10000, 0x190000} } },/* 29: MS */
+ {{{1, 0x1e00000, 0x1e01000, 0x16a000} } },/* 30: P2NR2 */
+ {{{1, 0x1f00000, 0x1f10000, 0x150000} } },/* 31: EPG */
+ {{{0} } }, /* 32: PCI */
+ {{{1, 0x2100000, 0x2102000, 0x120000}, /* 33: PCIE */
+ {1, 0x2110000, 0x2120000, 0x130000},
+ {1, 0x2120000, 0x2122000, 0x124000},
+ {1, 0x2130000, 0x2132000, 0x126000},
+ {1, 0x2140000, 0x2142000, 0x128000},
+ {1, 0x2150000, 0x2152000, 0x12a000},
+ {1, 0x2160000, 0x2170000, 0x110000},
+ {1, 0x2170000, 0x2172000, 0x12e000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000},
+ {0, 0x0000000, 0x0000000, 0x000000} } },
+ {{{1, 0x2200000, 0x2204000, 0x1b0000} } },/* 34: CAM */
+ {{{0} } }, /* 35: */
+ {{{0} } }, /* 36: */
+ {{{0} } }, /* 37: */
+ {{{0} } }, /* 38: */
+ {{{0} } }, /* 39: */
+ {{{1, 0x2800000, 0x2804000, 0x1a4000} } },/* 40: TMR */
+ {{{1, 0x2900000, 0x2901000, 0x16b000} } },/* 41: P2NR3 */
+ {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } },/* 42: RPMX1 */
+ {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } },/* 43: RPMX2 */
+ {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } },/* 44: RPMX3 */
+ {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } },/* 45: RPMX4 */
+ {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } },/* 46: RPMX5 */
+ {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } },/* 47: RPMX6 */
+ {{{1, 0x3000000, 0x3000400, 0x1adc00} } },/* 48: RPMX7 */
+ {{{0, 0x3100000, 0x3104000, 0x1a8000} } },/* 49: XDMA */
+ {{{1, 0x3200000, 0x3204000, 0x1d4000} } },/* 50: I2Q */
+ {{{1, 0x3300000, 0x3304000, 0x1a0000} } },/* 51: ROMUSB */
+ {{{0} } }, /* 52: */
+ {{{1, 0x3500000, 0x3500400, 0x1ac000} } },/* 53: RPMX0 */
+ {{{1, 0x3600000, 0x3600400, 0x1ae000} } },/* 54: RPMX8 */
+ {{{1, 0x3700000, 0x3700400, 0x1ae400} } },/* 55: RPMX9 */
+ {{{1, 0x3800000, 0x3804000, 0x1d0000} } },/* 56: OCM0 */
+ {{{1, 0x3900000, 0x3904000, 0x1b4000} } },/* 57: CRYPTO */
+ {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } },/* 58: SMB */
+ {{{0} } }, /* 59: I2C0 */
+ {{{0} } }, /* 60: I2C1 */
+ {{{1, 0x3d00000, 0x3d04000, 0x1d8000} } },/* 61: LPC */
+ {{{1, 0x3e00000, 0x3e01000, 0x167000} } },/* 62: P2NC */
+ {{{1, 0x3f00000, 0x3f01000, 0x168000} } } /* 63: P2NR0 */
+};
+
+/*
+ * top 12 bits of crb internal address (hub, agent)
+ */
+static const unsigned crb_hub_agt[64] = {
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_MS,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SRE,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_NIU,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_QMN,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SQN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGN3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGND,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGS3,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGSI,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SN,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_EG,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PS,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_CAM,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_TIMR,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX1,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX2,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX3,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX4,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX5,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX6,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX7,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_XDMA,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2Q,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_ROMUSB,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX8,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_RPMX9,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_OCM0,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_SMB,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_I2C1,
+ 0,
+ QLCNIC_HW_CRB_HUB_AGT_ADR_PGNC,
+ 0,
+};
+
+/* PCI Windowing for DDR regions. */
+
+#define QLCNIC_PCIE_SEM_TIMEOUT 10000
+
+int
+qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
+{
+ int done = 0, timeout = 0;
+
+ while (!done) {
+ done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)));
+ if (done == 1)
+ break;
+ if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT)
+ return -EIO;
+ msleep(1);
+ }
+
+ if (id_reg)
+ QLCWR32(adapter, id_reg, adapter->portnum);
+
+ return 0;
+}
+
+void
+qlcnic_pcie_sem_unlock(struct qlcnic_adapter *adapter, int sem)
+{
+ QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_UNLOCK(sem)));
+}
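These two helpers are normally used as a pair around accesses that must be serialized across PCI functions; a sketch (the semaphore number and ID register here are illustrative, not a statement of which resource uses which semaphore):

	if (qlcnic_pcie_sem_lock(adapter, 2, QLCNIC_ROM_LOCK_ID))
		return -EIO;    /* timed out waiting for the hardware semaphore */
	/* ... protected flash/register access ... */
	qlcnic_pcie_sem_unlock(adapter, 2);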
+
+static int
+qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
+ struct cmd_desc_type0 *cmd_desc_arr, int nr_desc)
+{
+ u32 i, producer, consumer;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct cmd_desc_type0 *cmd_desc;
+ struct qlcnic_host_tx_ring *tx_ring;
+
+ i = 0;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ tx_ring = adapter->tx_ring;
+ __netif_tx_lock_bh(tx_ring->txq);
+
+ producer = tx_ring->producer;
+ consumer = tx_ring->sw_consumer;
+
+ if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
+ netif_tx_stop_queue(tx_ring->txq);
+ __netif_tx_unlock_bh(tx_ring->txq);
+ return -EBUSY;
+ }
+
+ do {
+ cmd_desc = &cmd_desc_arr[i];
+
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+ pbuf->skb = NULL;
+ pbuf->frag_count = 0;
+
+ memcpy(&tx_ring->desc_head[producer],
+ &cmd_desc_arr[i], sizeof(struct cmd_desc_type0));
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ i++;
+
+ } while (i != nr_desc);
+
+ tx_ring->producer = producer;
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ __netif_tx_unlock_bh(tx_ring->txq);
+
+ return 0;
+}
+
+static int
+qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
+ unsigned op)
+{
+ struct qlcnic_nic_req req;
+ struct qlcnic_mac_req *mac_req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_REQUEST << 23);
+
+ word = QLCNIC_MAC_EVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ mac_req = (struct qlcnic_mac_req *)&req.words[0];
+ mac_req->op = op;
+ memcpy(mac_req->mac_addr, addr, 6);
+
+ return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+}
+
+static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter,
+ u8 *addr, struct list_head *del_list)
+{
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+
+ /* look up whether the address already exists */
+ list_for_each(head, del_list) {
+ cur = list_entry(head, struct qlcnic_mac_list_s, list);
+
+ if (memcmp(addr, cur->mac_addr, ETH_ALEN) == 0) {
+ list_move_tail(head, &adapter->mac_list);
+ return 0;
+ }
+ }
+
+ cur = kzalloc(sizeof(struct qlcnic_mac_list_s), GFP_ATOMIC);
+ if (cur == NULL) {
+ dev_err(&adapter->netdev->dev,
+ "failed to add mac address filter\n");
+ return -ENOMEM;
+ }
+ memcpy(cur->mac_addr, addr, ETH_ALEN);
+ list_add_tail(&cur->list, &adapter->mac_list);
+
+ return qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_ADD);
+}
+
+void qlcnic_set_multi(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct dev_mc_list *mc_ptr;
+ u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+ u32 mode = VPORT_MISS_MODE_DROP;
+ LIST_HEAD(del_list);
+ struct list_head *head;
+ struct qlcnic_mac_list_s *cur;
+
+ list_splice_tail_init(&adapter->mac_list, &del_list);
+
+ qlcnic_nic_add_mac(adapter, adapter->mac_addr, &del_list);
+ qlcnic_nic_add_mac(adapter, bcast_addr, &del_list);
+
+ if (netdev->flags & IFF_PROMISC) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ goto send_fw_cmd;
+ }
+
+ if ((netdev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(netdev) > adapter->max_mc_count)) {
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ goto send_fw_cmd;
+ }
+
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(mc_ptr, netdev) {
+ qlcnic_nic_add_mac(adapter, mc_ptr->dmi_addr,
+ &del_list);
+ }
+ }
+
+send_fw_cmd:
+ qlcnic_nic_set_promisc(adapter, mode);
+ head = &del_list;
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+int qlcnic_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_PROXY_SET_VPORT_MISS_MODE |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(mode);
+
+ return qlcnic_send_cmd_descs(adapter,
+ (struct cmd_desc_type0 *)&req, 1);
+}
+
+void qlcnic_free_mac_list(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_mac_list_s *cur;
+ struct list_head *head = &adapter->mac_list;
+
+ while (!list_empty(head)) {
+ cur = list_entry(head->next, struct qlcnic_mac_list_s, list);
+ qlcnic_sre_macaddr_change(adapter,
+ cur->mac_addr, QLCNIC_MAC_DEL);
+ list_del(&cur->list);
+ kfree(cur);
+ }
+}
+
+#define QLCNIC_CONFIG_INTR_COALESCE 3
+
+/*
+ * Send the interrupt coalescing parameters set via ethtool to the card.
+ */
+int qlcnic_config_intr_coalesce(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word[6];
+ int rv, i;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word[0] = QLCNIC_CONFIG_INTR_COALESCE | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word[0]);
+
+ memcpy(&word[0], &adapter->coal, sizeof(adapter->coal));
+ for (i = 0; i < 6; i++)
+ req.words[i] = cpu_to_le64(word[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send interrupt coalescing parameters\n");
+
+ return rv;
+}
+
+int qlcnic_config_hw_lro(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!!(adapter->flags & QLCNIC_LRO_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_HW_LRO | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure hw lro request\n");
+
+ adapter->flags ^= QLCNIC_LRO_ENABLED;
+
+ return rv;
+}
+
+int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ if (!!(adapter->flags & QLCNIC_BRIDGE_ENABLED) == enable)
+ return 0;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_BRIDGING |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(enable);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "Could not send configure bridge mode request\n");
+
+ adapter->flags ^= QLCNIC_BRIDGE_ENABLED;
+
+ return rv;
+}
+
+
+#define RSS_HASHTYPE_IP_TCP 0x3
+
+int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int i, rv;
+
+ const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
+ 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
+ 0x255b0ec26d5a56daULL };
+
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_RSS | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ /*
+ * RSS request:
+ * bits 3-0: hash_method
+ * 5-4: hash_type_ipv4
+ * 7-6: hash_type_ipv6
+ * 8: enable
+ * 9: use indirection table
+ * 47-10: reserved
+ * 63-48: indirection table mask
+ */
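+ /*
+ * Worked example: with enable == 1 this builds
+ * word = 0x00070000000001f0 (hash types 0x3, enable bit set,
+ * indirection table mask 0x7) before the cpu_to_le64() below.
+ */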
+ word = ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 4) |
+ ((u64)(RSS_HASHTYPE_IP_TCP & 0x3) << 6) |
+ ((u64)(enable & 0x1) << 8) |
+ ((0x7ULL) << 48);
+ req.words[0] = cpu_to_le64(word);
+ for (i = 0; i < 5; i++)
+ req.words[i+1] = cpu_to_le64(key[i]);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev, "could not configure RSS\n");
+
+ return rv;
+}
+
+int qlcnic_config_ipaddr(struct qlcnic_adapter *adapter, u32 ip, int cmd)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_IPADDR | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64(cmd);
+ req.words[1] = cpu_to_le64(ip);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not notify %s IP 0x%x reuqest\n",
+ (cmd == QLCNIC_IP_UP) ? "Add" : "Remove", ip);
+
+ return rv;
+}
+
+int qlcnic_linkevent_request(struct qlcnic_adapter *adapter, int enable)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_GET_LINKEVENT | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(enable | (enable << 8));
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not configure link notification\n");
+
+ return rv;
+}
+
+int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_nic_req req;
+ u64 word;
+ int rv;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_LRO_REQUEST |
+ ((u64)adapter->portnum << 16) |
+ ((u64)QLCNIC_LRO_REQUEST_CLEANUP << 56);
+
+ req.req_hdr = cpu_to_le64(word);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv != 0)
+ dev_err(&adapter->netdev->dev,
+ "could not cleanup lro flows\n");
+
+ return rv;
+}
+
+/*
+ * qlcnic_change_mtu - Change the Maximum Transmission Unit (MTU)
+ * @returns 0 on success, negative on failure
+ */
+
+int qlcnic_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int rc = 0;
+
+ if (mtu > P3_MAX_MTU) {
+ dev_err(&adapter->netdev->dev, "mtu > %d bytes unsupported\n",
+ P3_MAX_MTU);
+ return -EINVAL;
+ }
+
+ rc = qlcnic_fw_cmd_set_mtu(adapter, mtu);
+
+ if (!rc)
+ netdev->mtu = mtu;
+
+ return rc;
+}
+
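+/*
+ * Read the port MAC address from the MAC block in CRB space. Each pair
+ * of PCI functions shares a 3-dword (12-byte) slot: an even function
+ * reads its address from dwords 0-1 of the slot, an odd function from
+ * the upper half of dword 1 and dword 2 (hence the 16-bit shift below).
+ */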
+int qlcnic_get_mac_addr(struct qlcnic_adapter *adapter, u64 *mac)
+{
+ u32 crbaddr, mac_hi, mac_lo;
+ int pci_func = adapter->ahw.pci_func;
+
+ crbaddr = CRB_MAC_BLOCK_START +
+ (4 * ((pci_func/2) * 3)) + (4 * (pci_func & 1));
+
+ mac_lo = QLCRD32(adapter, crbaddr);
+ mac_hi = QLCRD32(adapter, crbaddr+4);
+
+ if (pci_func & 1)
+ *mac = le64_to_cpu((mac_lo >> 16) | ((u64)mac_hi << 16));
+ else
+ *mac = le64_to_cpu((u64)mac_lo | ((u64)mac_hi << 32));
+
+ return 0;
+}
+
+/*
+ * qlcnic_pci_get_crb_addr_2M - translate a CRB offset in the 128M pci
+ * map into an ioremapped address in the 2M map.
+ * Returns < 0 if 'off' is not a valid CRB offset,
+ *  0 if the offset is directly mapped ('addr' points at it),
+ *  1 if indirect access through the CRB window is needed ('addr'
+ *    points into the window region).
+ */
+static int
+qlcnic_pci_get_crb_addr_2M(struct qlcnic_adapter *adapter,
+ ulong off, void __iomem **addr)
+{
+ const struct crb_128M_2M_sub_block_map *m;
+
+ if ((off >= QLCNIC_CRB_MAX) || (off < QLCNIC_PCI_CRBSPACE))
+ return -EINVAL;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ /*
+ * Try direct map
+ */
+ m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
+
+ if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
+ *addr = adapter->ahw.pci_base0 + m->start_2M +
+ (off - m->start_128M);
+ return 0;
+ }
+
+ /*
+ * Not in direct map, use crb window
+ */
+ *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M + (off & MASK(16));
+ return 1;
+}
+
+/*
+ * Move the CRB window so that indirect accesses hit the 64K block
+ * containing 'off' (an offset in the 128M CRB map). The caller must
+ * hold the CRB window lock.
+ */
+static void
+qlcnic_pci_set_crbwindow_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ u32 window;
+ void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
+
+ off -= QLCNIC_PCI_CRBSPACE;
+
+ window = CRB_HI(off);
+
+ if (adapter->ahw.crb_win == window)
+ return;
+
+ writel(window, addr);
+ if (readl(addr) != window) {
+ if (printk_ratelimit())
+ dev_warn(&adapter->pdev->dev,
+ "failed to set CRB window to %d off 0x%lx\n",
+ window, off);
+ }
+ adapter->ahw.crb_win = window;
+}
+
+int
+qlcnic_hw_write_wx_2M(struct qlcnic_adapter *adapter, ulong off, u32 data)
+{
+ unsigned long flags;
+ int rv;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0) {
+ writel(data, addr);
+ return 0;
+ }
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ qlcnic_pci_set_crbwindow_2M(adapter, off);
+ writel(data, addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return 0;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -EIO;
+}
+
+u32
+qlcnic_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong off)
+{
+ unsigned long flags;
+ int rv;
+ u32 data;
+ void __iomem *addr = NULL;
+
+ rv = qlcnic_pci_get_crb_addr_2M(adapter, off, &addr);
+
+ if (rv == 0)
+ return readl(addr);
+
+ if (rv > 0) {
+ /* indirect access */
+ write_lock_irqsave(&adapter->ahw.crb_lock, flags);
+ crb_win_lock(adapter);
+ qlcnic_pci_set_crbwindow_2M(adapter, off);
+ data = readl(addr);
+ crb_win_unlock(adapter);
+ write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
+ return data;
+ }
+
+ dev_err(&adapter->pdev->dev,
+ "%s: invalid offset: 0x%016lx\n", __func__, off);
+ dump_stack();
+ return -1;
+}
+
+
+void __iomem *
+qlcnic_get_ioaddr(struct qlcnic_adapter *adapter, u32 offset)
+{
+ void __iomem *addr = NULL;
+
+ WARN_ON(qlcnic_pci_get_crb_addr_2M(adapter, offset, &addr));
+
+ return addr;
+}
+
+
+static int
+qlcnic_pci_set_window_2M(struct qlcnic_adapter *adapter,
+ u64 addr, u32 *start)
+{
+ u32 window;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if ((addr & 0x00ff800) == 0xff800) {
+ if (printk_ratelimit())
+ dev_warn(&pdev->dev, "QM access not handled\n");
+ return -EIO;
+ }
+
+ window = OCM_WIN_P3P(addr);
+
+ writel(window, adapter->ahw.ocm_win_crb);
+ /* read back to flush */
+ readl(adapter->ahw.ocm_win_crb);
+
+ adapter->ahw.ocm_win = window;
+ *start = QLCNIC_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
+ return 0;
+}
+
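+/*
+ * Direct access to on-chip memory (OCM0): move the OCM window to the
+ * target address, then ioremap one page (unless the region is already
+ * mapped) and do a single 64-bit read (op == 0) or write (op != 0).
+ */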
+static int
+qlcnic_pci_mem_access_direct(struct qlcnic_adapter *adapter, u64 off,
+ u64 *data, int op)
+{
+ void __iomem *addr, *mem_ptr = NULL;
+ resource_size_t mem_base;
+ int ret;
+ u32 start;
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ ret = qlcnic_pci_set_window_2M(adapter, off, &start);
+ if (ret != 0)
+ goto unlock;
+
+ addr = pci_base_offset(adapter, start);
+ if (addr)
+ goto noremap;
+
+ mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
+
+ mem_ptr = ioremap(mem_base, PAGE_SIZE);
+ if (mem_ptr == NULL) {
+ ret = -EIO;
+ goto unlock;
+ }
+
+ addr = mem_ptr + (start & (PAGE_SIZE - 1));
+
+noremap:
+ if (op == 0) /* read */
+ *data = readq(addr);
+ else /* write */
+ writeq(*data, addr);
+
+unlock:
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ if (mem_ptr)
+ iounmap(mem_ptr);
+ return ret;
+}
+
+#define MAX_CTL_CHECK 1000
+
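+/*
+ * Write 64 bits of card memory through the MIU test agent. On P3P
+ * revisions the agent operates on 16-byte lines, so the untouched half
+ * of the line is read back and rewritten unchanged before the new
+ * value is written. QDR/DDR ranges go through the agent; OCM0 is
+ * written directly via qlcnic_pci_mem_access_direct().
+ */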
+int
+qlcnic_pci_mem_write_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 data)
+{
+ int i, j, ret;
+ u32 temp, off8;
+ u64 stride;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* From P3 onward, the test agent base for MIU and SIU is the same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX))
+ return qlcnic_pci_mem_access_direct(adapter, off, &data, 1);
+
+ return -EIO;
+
+correct:
+ stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+ off8 = off & ~(stride-1);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+
+ i = 0;
+ if (stride == 16) {
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ ret = -EIO;
+ goto done;
+ }
+
+ i = (off & 0xf) ? 0 : 2;
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+ i = (off & 0xf) ? 2 : 0;
+ }
+
+ writel(data & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i));
+ writel((data >> 32) & 0xffffffff,
+ mem_crb + MIU_TEST_AGT_WRDATA(i+1));
+
+ writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
+ (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to write through agent\n");
+ ret = -EIO;
+ } else
+ ret = 0;
+
+done:
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+int
+qlcnic_pci_mem_read_2M(struct qlcnic_adapter *adapter,
+ u64 off, u64 *data)
+{
+ int j, ret;
+ u32 temp, off8;
+ u64 val, stride;
+ void __iomem *mem_crb;
+
+ /* Only 64-bit aligned access */
+ if (off & 7)
+ return -EIO;
+
+ /* From P3 onward, the test agent base for MIU and SIU is the same */
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_QDR_NET,
+ QLCNIC_ADDR_QDR_NET_MAX_P3)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_QDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_DDR_NET, QLCNIC_ADDR_DDR_NET_MAX)) {
+ mem_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_CRB_DDR_NET+MIU_TEST_AGT_BASE);
+ goto correct;
+ }
+
+ if (ADDR_IN_RANGE(off, QLCNIC_ADDR_OCM0, QLCNIC_ADDR_OCM0_MAX)) {
+ return qlcnic_pci_mem_access_direct(adapter,
+ off, data, 0);
+ }
+
+ return -EIO;
+
+correct:
+ stride = QLCNIC_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
+
+ off8 = off & ~(stride-1);
+
+ mutex_lock(&adapter->ahw.mem_lock);
+
+ writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
+ writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
+ writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
+ writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
+
+ for (j = 0; j < MAX_CTL_CHECK; j++) {
+ temp = readl(mem_crb + TEST_AGT_CTRL);
+ if ((temp & TA_CTL_BUSY) == 0)
+ break;
+ }
+
+ if (j >= MAX_CTL_CHECK) {
+ if (printk_ratelimit())
+ dev_err(&adapter->pdev->dev,
+ "failed to read through agent\n");
+ ret = -EIO;
+ } else {
+ off8 = MIU_TEST_AGT_RDDATA_LO;
+ if ((stride == 16) && (off & 0xf))
+ off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
+
+ temp = readl(mem_crb + off8 + 4);
+ val = (u64)temp << 32;
+ val |= readl(mem_crb + off8);
+ *data = val;
+ ret = 0;
+ }
+
+ mutex_unlock(&adapter->ahw.mem_lock);
+
+ return ret;
+}
+
+int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
+{
+ int offset, board_type, magic;
+ struct pci_dev *pdev = adapter->pdev;
+
+ offset = QLCNIC_FW_MAGIC_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &magic))
+ return -EIO;
+
+ if (magic != QLCNIC_BDINFO_MAGIC) {
+ dev_err(&pdev->dev, "invalid board config, magic=%08x\n",
+ magic);
+ return -EIO;
+ }
+
+ offset = QLCNIC_BRDTYPE_OFFSET;
+ if (qlcnic_rom_fast_read(adapter, offset, &board_type))
+ return -EIO;
+
+ adapter->ahw.board_type = board_type;
+
+ if (board_type == QLCNIC_BRDTYPE_P3_4_GB_MM) {
+ u32 gpio = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_PAD_GPIO_I);
+ if ((gpio & 0x8000) == 0)
+ board_type = QLCNIC_BRDTYPE_P3_10G_TP;
+ }
+
+ switch (board_type) {
+ case QLCNIC_BRDTYPE_P3_HMEZ:
+ case QLCNIC_BRDTYPE_P3_XG_LOM:
+ case QLCNIC_BRDTYPE_P3_10G_CX4:
+ case QLCNIC_BRDTYPE_P3_10G_CX4_LP:
+ case QLCNIC_BRDTYPE_P3_IMEZ:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_PLUS:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_CT:
+ case QLCNIC_BRDTYPE_P3_10G_SFP_QT:
+ case QLCNIC_BRDTYPE_P3_10G_XFP:
+ case QLCNIC_BRDTYPE_P3_10000_BASE_T:
+ adapter->ahw.port_type = QLCNIC_XGBE;
+ break;
+ case QLCNIC_BRDTYPE_P3_REF_QG:
+ case QLCNIC_BRDTYPE_P3_4_GB:
+ case QLCNIC_BRDTYPE_P3_4_GB_MM:
+ adapter->ahw.port_type = QLCNIC_GBE;
+ break;
+ case QLCNIC_BRDTYPE_P3_10G_TP:
+ adapter->ahw.port_type = (adapter->portnum < 2) ?
+ QLCNIC_XGBE : QLCNIC_GBE;
+ break;
+ default:
+ dev_err(&pdev->dev, "unknown board type %x\n", board_type);
+ adapter->ahw.port_type = QLCNIC_XGBE;
+ break;
+ }
+
+ return 0;
+}
+
+int
+qlcnic_wol_supported(struct qlcnic_adapter *adapter)
+{
+ u32 wol_cfg;
+
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG_NV);
+ if (wol_cfg & (1UL << adapter->portnum)) {
+ wol_cfg = QLCRD32(adapter, QLCNIC_WOL_CONFIG);
+ if (wol_cfg & (1 << adapter->portnum))
+ return 1;
+ }
+
+ return 0;
+}
+
+int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LED | ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+
+ req.words[0] = cpu_to_le64((u64)rate << 32);
+ req.words[1] = cpu_to_le64(state);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev, "LED configuration failed.\n");
+
+ return rv;
+}
+
+static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
+{
+ struct qlcnic_nic_req req;
+ int rv;
+ u64 word;
+
+ memset(&req, 0, sizeof(struct qlcnic_nic_req));
+ req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
+
+ word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
+ ((u64)adapter->portnum << 16);
+ req.req_hdr = cpu_to_le64(word);
+ req.words[0] = cpu_to_le64(flag);
+
+ rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
+ if (rv)
+ dev_err(&adapter->pdev->dev,
+ "%sting loopback mode failed.\n",
+ flag ? "Set" : "Reset");
+ return rv;
+}
+
+int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ if (qlcnic_set_fw_loopback(adapter, 1))
+ return -EIO;
+
+ if (qlcnic_nic_set_promisc(adapter,
+ VPORT_MISS_MODE_ACCEPT_ALL)) {
+ qlcnic_set_fw_loopback(adapter, 0);
+ return -EIO;
+ }
+
+ msleep(1000);
+ return 0;
+}
+
+void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
+{
+ int mode = VPORT_MISS_MODE_DROP;
+ struct net_device *netdev = adapter->netdev;
+
+ qlcnic_set_fw_loopback(adapter, 0);
+
+ if (netdev->flags & IFF_PROMISC)
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ else if (netdev->flags & IFF_ALLMULTI)
+ mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+
+ qlcnic_nic_set_promisc(adapter, mode);
+}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
new file mode 100644
index 00000000000..ea00ab4d4fe
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -0,0 +1,1541 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/delay.h>
+#include "qlcnic.h"
+
+struct crb_addr_pair {
+ u32 addr;
+ u32 data;
+};
+
+#define QLCNIC_MAX_CRB_XFORM 60
+static unsigned int crb_addr_xform[QLCNIC_MAX_CRB_XFORM];
+
+#define crb_addr_transform(name) \
+ (crb_addr_xform[QLCNIC_HW_PX_MAP_CRB_##name] = \
+ QLCNIC_HW_CRB_HUB_AGT_ADR_##name << 20)
+
+#define QLCNIC_ADDR_ERROR (0xffffffff)
+
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring);
+
+static void crb_addr_transform_setup(void)
+{
+ crb_addr_transform(XDMA);
+ crb_addr_transform(TIMR);
+ crb_addr_transform(SRE);
+ crb_addr_transform(SQN3);
+ crb_addr_transform(SQN2);
+ crb_addr_transform(SQN1);
+ crb_addr_transform(SQN0);
+ crb_addr_transform(SQS3);
+ crb_addr_transform(SQS2);
+ crb_addr_transform(SQS1);
+ crb_addr_transform(SQS0);
+ crb_addr_transform(RPMX7);
+ crb_addr_transform(RPMX6);
+ crb_addr_transform(RPMX5);
+ crb_addr_transform(RPMX4);
+ crb_addr_transform(RPMX3);
+ crb_addr_transform(RPMX2);
+ crb_addr_transform(RPMX1);
+ crb_addr_transform(RPMX0);
+ crb_addr_transform(ROMUSB);
+ crb_addr_transform(SN);
+ crb_addr_transform(QMN);
+ crb_addr_transform(QMS);
+ crb_addr_transform(PGNI);
+ crb_addr_transform(PGND);
+ crb_addr_transform(PGN3);
+ crb_addr_transform(PGN2);
+ crb_addr_transform(PGN1);
+ crb_addr_transform(PGN0);
+ crb_addr_transform(PGSI);
+ crb_addr_transform(PGSD);
+ crb_addr_transform(PGS3);
+ crb_addr_transform(PGS2);
+ crb_addr_transform(PGS1);
+ crb_addr_transform(PGS0);
+ crb_addr_transform(PS);
+ crb_addr_transform(PH);
+ crb_addr_transform(NIU);
+ crb_addr_transform(I2Q);
+ crb_addr_transform(EG);
+ crb_addr_transform(MN);
+ crb_addr_transform(MS);
+ crb_addr_transform(CAS2);
+ crb_addr_transform(CAS1);
+ crb_addr_transform(CAS0);
+ crb_addr_transform(CAM);
+ crb_addr_transform(C2C1);
+ crb_addr_transform(C2C0);
+ crb_addr_transform(SMB);
+ crb_addr_transform(OCM0);
+ crb_addr_transform(I2C0);
+}
+
+void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int i, ring;
+
+ recv_ctx = &adapter->recv_ctx;
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ for (i = 0; i < rds_ring->num_desc; ++i) {
+ rx_buf = &(rds_ring->rx_buf_arr[i]);
+ if (rx_buf->state == QLCNIC_BUFFER_FREE)
+ continue;
+ pci_unmap_single(adapter->pdev,
+ rx_buf->dma,
+ rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+ if (rx_buf->skb != NULL)
+ dev_kfree_skb_any(rx_buf->skb);
+ }
+ }
+}
+
+void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_cmd_buffer *cmd_buf;
+ struct qlcnic_skb_frag *buffrag;
+ int i, j;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ cmd_buf = tx_ring->cmd_buf_arr;
+ for (i = 0; i < tx_ring->num_desc; i++) {
+ buffrag = cmd_buf->frag_array;
+ if (buffrag->dma) {
+ pci_unmap_single(adapter->pdev, buffrag->dma,
+ buffrag->length, PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ for (j = 0; j < cmd_buf->frag_count; j++) {
+ buffrag++;
+ if (buffrag->dma) {
+ pci_unmap_page(adapter->pdev, buffrag->dma,
+ buffrag->length,
+ PCI_DMA_TODEVICE);
+ buffrag->dma = 0ULL;
+ }
+ }
+ if (cmd_buf->skb) {
+ dev_kfree_skb_any(cmd_buf->skb);
+ cmd_buf->skb = NULL;
+ }
+ cmd_buf++;
+ }
+}
+
+void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ int ring;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ if (recv_ctx->rds_rings == NULL)
+ goto skip_rds;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ vfree(rds_ring->rx_buf_arr);
+ rds_ring->rx_buf_arr = NULL;
+ }
+ kfree(recv_ctx->rds_rings);
+
+skip_rds:
+ if (adapter->tx_ring == NULL)
+ return;
+
+ tx_ring = adapter->tx_ring;
+ vfree(tx_ring->cmd_buf_arr);
+ kfree(adapter->tx_ring);
+}
+
+int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_recv_context *recv_ctx;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_host_tx_ring *tx_ring;
+ struct qlcnic_rx_buffer *rx_buf;
+ int ring, i, size;
+
+ struct qlcnic_cmd_buffer *cmd_buf_arr;
+ struct net_device *netdev = adapter->netdev;
+
+ size = sizeof(struct qlcnic_host_tx_ring);
+ tx_ring = kzalloc(size, GFP_KERNEL);
+ if (tx_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate tx ring struct\n");
+ return -ENOMEM;
+ }
+ adapter->tx_ring = tx_ring;
+
+ tx_ring->num_desc = adapter->num_txd;
+ tx_ring->txq = netdev_get_tx_queue(netdev, 0);
+
+ cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring));
+ if (cmd_buf_arr == NULL) {
+ dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
+ return -ENOMEM;
+ }
+ memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
+ tx_ring->cmd_buf_arr = cmd_buf_arr;
+
+ recv_ctx = &adapter->recv_ctx;
+
+ size = adapter->max_rds_rings * sizeof(struct qlcnic_host_rds_ring);
+ rds_ring = kzalloc(size, GFP_KERNEL);
+ if (rds_ring == NULL) {
+ dev_err(&netdev->dev, "failed to allocate rds ring struct\n");
+ return -ENOMEM;
+ }
+ recv_ctx->rds_rings = rds_ring;
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &recv_ctx->rds_rings[ring];
+ switch (ring) {
+ case RCV_RING_NORMAL:
+ rds_ring->num_desc = adapter->num_rxd;
+ if (adapter->ahw.cut_through) {
+ rds_ring->dma_size =
+ QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+ rds_ring->skb_size =
+ QLCNIC_CT_DEFAULT_RX_BUF_LEN;
+ } else {
+ rds_ring->dma_size =
+ QLCNIC_P3_RX_BUF_MAX_LEN;
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ }
+ break;
+
+ case RCV_RING_JUMBO:
+ rds_ring->num_desc = adapter->num_jumbo_rxd;
+ rds_ring->dma_size =
+ QLCNIC_P3_RX_JUMBO_BUF_MAX_LEN;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ rds_ring->dma_size += QLCNIC_LRO_BUFFER_EXTRA;
+
+ rds_ring->skb_size =
+ rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ case RCV_RING_LRO:
+ rds_ring->num_desc = adapter->num_lro_rxd;
+ rds_ring->dma_size = QLCNIC_RX_LRO_BUFFER_LENGTH;
+ rds_ring->skb_size = rds_ring->dma_size + NET_IP_ALIGN;
+ break;
+
+ }
+ rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *)
+ vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
+ if (rds_ring->rx_buf_arr == NULL) {
+ dev_err(&netdev->dev, "Failed to allocate "
+ "rx buffer ring %d\n", ring);
+ goto err_out;
+ }
+ memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
+ INIT_LIST_HEAD(&rds_ring->free_list);
+ /*
+ * Now go through all of them, set reference handles
+ * and put them in the queues.
+ */
+ rx_buf = rds_ring->rx_buf_arr;
+ for (i = 0; i < rds_ring->num_desc; i++) {
+ list_add_tail(&rx_buf->list,
+ &rds_ring->free_list);
+ rx_buf->ref_handle = i;
+ rx_buf->state = QLCNIC_BUFFER_FREE;
+ rx_buf++;
+ }
+ spin_lock_init(&rds_ring->lock);
+ }
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sds_ring->irq = adapter->msix_entries[ring].vector;
+ sds_ring->adapter = adapter;
+ sds_ring->num_desc = adapter->num_rxd;
+
+ for (i = 0; i < NUM_RCV_DESC_RINGS; i++)
+ INIT_LIST_HEAD(&sds_ring->free_list[i]);
+ }
+
+ return 0;
+
+err_out:
+ qlcnic_free_sw_resources(adapter);
+ return -ENOMEM;
+}
+
+/*
+ * Utility to translate from internal Phantom CRB address
+ * to external PCI CRB address.
+ */
+static u32 qlcnic_decode_crb_addr(u32 addr)
+{
+ int i;
+ u32 base_addr, offset, pci_base;
+
+ crb_addr_transform_setup();
+
+ pci_base = QLCNIC_ADDR_ERROR;
+ base_addr = addr & 0xfff00000;
+ offset = addr & 0x000fffff;
+
+ for (i = 0; i < QLCNIC_MAX_CRB_XFORM; i++) {
+ if (crb_addr_xform[i] == base_addr) {
+ pci_base = i << 20;
+ break;
+ }
+ }
+ if (pci_base == QLCNIC_ADDR_ERROR)
+ return pci_base;
+ else
+ return pci_base + offset;
+}
+
+#define QLCNIC_MAX_ROM_WAIT_USEC 100
+
+static int qlcnic_wait_rom_done(struct qlcnic_adapter *adapter)
+{
+ long timeout = 0;
+ long done = 0;
+
+ cond_resched();
+
+ while (done == 0) {
+ done = QLCRD32(adapter, QLCNIC_ROMUSB_GLB_STATUS);
+ done &= 2;
+ if (++timeout >= QLCNIC_MAX_ROM_WAIT_USEC) {
+ dev_err(&adapter->pdev->dev,
+ "Timeout reached waiting for rom done");
+ return -EIO;
+ }
+ udelay(1);
+ }
+ return 0;
+}
+
+static int do_rom_fast_read(struct qlcnic_adapter *adapter,
+ int addr, int *valp)
+{
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ADDRESS, addr);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 3);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_INSTR_OPCODE, 0xb);
+ if (qlcnic_wait_rom_done(adapter)) {
+ dev_err(&adapter->pdev->dev, "Error waiting for rom done\n");
+ return -EIO;
+ }
+ /* reset abyte_cnt and dummy_byte_cnt */
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_ABYTE_CNT, 0);
+ udelay(10);
+ QLCWR32(adapter, QLCNIC_ROMUSB_ROM_DUMMY_BYTE_CNT, 0);
+
+ *valp = QLCRD32(adapter, QLCNIC_ROMUSB_ROM_RDATA);
+ return 0;
+}
+
+static int do_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int addridx;
+ int ret = 0;
+
+ for (addridx = addr; addridx < (addr + size); addridx += 4) {
+ int v;
+ ret = do_rom_fast_read(adapter, addridx, &v);
+ if (ret != 0)
+ break;
+ *(__le32 *)bytes = cpu_to_le32(v);
+ bytes += 4;
+ }
+
+ return ret;
+}
+
+int
+qlcnic_rom_fast_read_words(struct qlcnic_adapter *adapter, int addr,
+ u8 *bytes, size_t size)
+{
+ int ret;
+
+ ret = qlcnic_rom_lock(adapter);
+ if (ret < 0)
+ return ret;
+
+ ret = do_rom_fast_read_words(adapter, addr, bytes, size);
+
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
+int qlcnic_rom_fast_read(struct qlcnic_adapter *adapter, int addr, int *valp)
+{
+ int ret;
+
+ if (qlcnic_rom_lock(adapter) != 0)
+ return -EIO;
+
+ ret = do_rom_fast_read(adapter, addr, valp);
+ qlcnic_rom_unlock(adapter);
+ return ret;
+}
+
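+/*
+ * Reset the chip, then replay the (address, value) pairs stored in the
+ * flash crb_init area, skipping registers that must not be rewritten
+ * (PCI setup, clocks, cold-reboot magic), and finally clear the peg
+ * state registers.
+ */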
+int qlcnic_pinit_from_rom(struct qlcnic_adapter *adapter)
+{
+ int addr, val;
+ int i, n, init_delay;
+ struct crb_addr_pair *buf;
+ unsigned offset;
+ u32 off;
+ struct pci_dev *pdev = adapter->pdev;
+
+ /* resetall */
+ qlcnic_rom_lock(adapter);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0xffffffff);
+ qlcnic_rom_unlock(adapter);
+
+ if (qlcnic_rom_fast_read(adapter, 0, &n) != 0 || (n != 0xcafecafe) ||
+ qlcnic_rom_fast_read(adapter, 4, &n) != 0) {
+ dev_err(&pdev->dev, "ERROR Reading crb_init area: val:%x\n", n);
+ return -EIO;
+ }
+ offset = n & 0xffffU;
+ n = (n >> 16) & 0xffffU;
+
+ if (n >= 1024) {
+ dev_err(&pdev->dev, "QLOGIC card flash not initialized.\n");
+ return -EIO;
+ }
+
+ buf = kcalloc(n, sizeof(struct crb_addr_pair), GFP_KERNEL);
+ if (buf == NULL) {
+ dev_err(&pdev->dev, "Unable to calloc memory for rom read.\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < n; i++) {
+ if (qlcnic_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
+ qlcnic_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
+ kfree(buf);
+ return -EIO;
+ }
+
+ buf[i].addr = addr;
+ buf[i].data = val;
+ }
+
+ for (i = 0; i < n; i++) {
+
+ off = qlcnic_decode_crb_addr(buf[i].addr);
+ if (off == QLCNIC_ADDR_ERROR) {
+ dev_err(&pdev->dev, "CRB init value out of range %x\n",
+ buf[i].addr);
+ continue;
+ }
+ off += QLCNIC_PCI_CRBSPACE;
+
+ if (off & 1)
+ continue;
+
+ /* skipping cold reboot MAGIC */
+ if (off == QLCNIC_CAM_RAM(0x1fc))
+ continue;
+ if (off == (QLCNIC_CRB_I2C0 + 0x1c))
+ continue;
+ if (off == (ROMUSB_GLB + 0xbc)) /* do not reset PCI */
+ continue;
+ if (off == (ROMUSB_GLB + 0xa8))
+ continue;
+ if (off == (ROMUSB_GLB + 0xc8)) /* core clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x24)) /* MN clock */
+ continue;
+ if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_DDR_NET)
+ continue;
+ /* skip the function enable register */
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION))
+ continue;
+ if (off == QLCNIC_PCIE_REG(PCIE_SETUP_FUNCTION2))
+ continue;
+ if ((off & 0x0ff00000) == QLCNIC_CRB_SMB)
+ continue;
+
+ init_delay = 1;
+ /* After writing this register, HW needs time for CRB */
+ /* to quiet down (else crb_window returns 0xffffffff) */
+ if (off == QLCNIC_ROMUSB_GLB_SW_RESET)
+ init_delay = 1000;
+
+ QLCWR32(adapter, off, buf[i].data);
+
+ msleep(init_delay);
+ }
+ kfree(buf);
+
+ /* p2dn replyCount */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0xec, 0x1e);
+ /* disable_peg_cache 0 & 1*/
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_D + 0x4c, 8);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_I + 0x4c, 8);
+
+ /* peg_clr_all */
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_1 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_2 + 0xc, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0x8, 0);
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_3 + 0xc, 0);
+ return 0;
+}
+
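+/*
+ * Check whether the flashed firmware (4.0.220 or newer) advertises the
+ * MN-present bit in the peg tune capability register.
+ */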
+static int
+qlcnic_has_mn(struct qlcnic_adapter *adapter)
+{
+ u32 capability, flashed_ver;
+ capability = 0;
+
+ qlcnic_rom_fast_read(adapter,
+ QLCNIC_FW_VERSION_OFFSET, (int *)&flashed_ver);
+ flashed_ver = QLCNIC_DECODE_VERSION(flashed_ver);
+
+ if (flashed_ver >= QLCNIC_VERSION_CODE(4, 0, 220)) {
+
+ capability = QLCRD32(adapter, QLCNIC_PEG_TUNE_CAPABILITY);
+ if (capability & QLCNIC_PEG_TUNE_MN_PRESENT)
+ return 1;
+ }
+ return 0;
+}
+
+static
+struct uni_table_desc *qlcnic_get_table_desc(const u8 *unirom, int section)
+{
+ u32 i;
+ struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
+ __le32 entries = cpu_to_le32(directory->num_entries);
+
+ for (i = 0; i < entries; i++) {
+
+ __le32 offs = cpu_to_le32(directory->findex) +
+ (i * cpu_to_le32(directory->entry_size));
+ __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
+
+ if (tab_type == section)
+ return (struct uni_table_desc *) &unirom[offs];
+ }
+
+ return NULL;
+}
+
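+/*
+ * Locate the product table entry of a unified ROM image that matches
+ * this chip revision, preferring entries flagged for MN-present parts
+ * and falling back to non-MN entries if none match.
+ */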
+static int
+qlcnic_set_product_offs(struct qlcnic_adapter *adapter)
+{
+ struct uni_table_desc *ptab_descr;
+ const u8 *unirom = adapter->fw->data;
+ u32 i;
+ __le32 entries;
+ int mn_present = qlcnic_has_mn(adapter);
+
+ ptab_descr = qlcnic_get_table_desc(unirom,
+ QLCNIC_UNI_DIR_SECT_PRODUCT_TBL);
+ if (ptab_descr == NULL)
+ return -1;
+
+ entries = cpu_to_le32(ptab_descr->num_entries);
+nomn:
+ for (i = 0; i < entries; i++) {
+
+ __le32 flags, file_chiprev, offs;
+ u8 chiprev = adapter->ahw.revision_id;
+ u32 flagbit;
+
+ offs = cpu_to_le32(ptab_descr->findex) +
+ (i * cpu_to_le32(ptab_descr->entry_size));
+ flags = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_FLAGS_OFF));
+ file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
+ QLCNIC_UNI_CHIP_REV_OFF));
+
+ flagbit = mn_present ? 1 : 2;
+
+ if ((chiprev == file_chiprev) &&
+ ((1ULL << flagbit) & flags)) {
+ adapter->file_prd_off = offs;
+ return 0;
+ }
+ }
+ if (mn_present) {
+ mn_present = 0;
+ goto nomn;
+ }
+ return -1;
+}
+
+static
+struct uni_data_desc *qlcnic_get_data_desc(struct qlcnic_adapter *adapter,
+ u32 section, u32 idx_offset)
+{
+ const u8 *unirom = adapter->fw->data;
+ int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
+ idx_offset));
+ struct uni_table_desc *tab_desc;
+ __le32 offs;
+
+ tab_desc = qlcnic_get_table_desc(unirom, section);
+
+ if (tab_desc == NULL)
+ return NULL;
+
+ offs = cpu_to_le32(tab_desc->findex) +
+ (cpu_to_le32(tab_desc->entry_size) * idx);
+
+ return (struct uni_data_desc *)&unirom[offs];
+}
+
+static u8 *
+qlcnic_get_bootld_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_BOOTLD_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_BOOTLD,
+ QLCNIC_UNI_BOOTLD_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static u8 *
+qlcnic_get_fw_offs(struct qlcnic_adapter *adapter)
+{
+ u32 offs = QLCNIC_IMAGE_START;
+
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ offs = cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->findex);
+
+ return (u8 *)&adapter->fw->data[offs];
+}
+
+static __le32
+qlcnic_get_fw_size(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw_type == QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32((qlcnic_get_data_desc(adapter,
+ QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF))->size);
+ else
+ return cpu_to_le32(
+ *(u32 *)&adapter->fw->data[QLCNIC_FW_SIZE_OFFSET]);
+}
+
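+/*
+ * For a unified image the version is parsed from the "REV=x.y.z"
+ * string near the end of the firmware section; otherwise it is read
+ * from the fixed QLCNIC_FW_VERSION_OFFSET in the image.
+ */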
+static __le32
+qlcnic_get_fw_version(struct qlcnic_adapter *adapter)
+{
+ struct uni_data_desc *fw_data_desc;
+ const struct firmware *fw = adapter->fw;
+ __le32 major, minor, sub;
+ const u8 *ver_str;
+ int i, ret;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_VERSION_OFFSET]);
+
+ fw_data_desc = qlcnic_get_data_desc(adapter, QLCNIC_UNI_DIR_SECT_FW,
+ QLCNIC_UNI_FIRMWARE_IDX_OFF);
+ ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
+ cpu_to_le32(fw_data_desc->size) - 17;
+
+ for (i = 0; i < 12; i++) {
+ if (!strncmp(&ver_str[i], "REV=", 4)) {
+ ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
+ &major, &minor, &sub);
+ if (ret != 3)
+ return 0;
+ else
+ return major + (minor << 8) + (sub << 16);
+ }
+ }
+
+ return 0;
+}
+
+static __le32
+qlcnic_get_bios_version(struct qlcnic_adapter *adapter)
+{
+ const struct firmware *fw = adapter->fw;
+ __le32 bios_ver, prd_off = adapter->file_prd_off;
+
+ if (adapter->fw_type != QLCNIC_UNIFIED_ROMIMAGE)
+ return cpu_to_le32(
+ *(u32 *)&fw->data[QLCNIC_BIOS_VERSION_OFFSET]);
+
+ bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ + QLCNIC_UNI_BIOS_VERSION_OFF));
+
+ return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) + (bios_ver >> 24);
+}
+
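+/*
+ * Decide whether firmware needs to be (re)loaded: sample the peg alive
+ * counter for up to two seconds and treat a stuck counter as dead
+ * firmware; also request a reset when the firmware file is newer than
+ * the version currently running.
+ */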
+int
+qlcnic_need_fw_reset(struct qlcnic_adapter *adapter)
+{
+ u32 count, old_count;
+ u32 val, version, major, minor, build;
+ int i, timeout;
+
+ if (adapter->need_fw_reset)
+ return 1;
+
+ /* last attempt had failed */
+ if (QLCRD32(adapter, CRB_CMDPEG_STATE) == PHAN_INITIALIZE_FAILED)
+ return 1;
+
+ old_count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+
+ for (i = 0; i < 10; i++) {
+
+ timeout = msleep_interruptible(200);
+ if (timeout) {
+ QLCWR32(adapter, CRB_CMDPEG_STATE,
+ PHAN_INITIALIZE_FAILED);
+ return -EINTR;
+ }
+
+ count = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (count != old_count)
+ break;
+ }
+
+ /* firmware is dead */
+ if (count == old_count)
+ return 1;
+
+ /* check if we have got newer or different file firmware */
+ if (adapter->fw) {
+
+ val = qlcnic_get_fw_version(adapter);
+
+ version = QLCNIC_DECODE_VERSION(val);
+
+ major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ if (version > QLCNIC_VERSION_CODE(major, minor, build))
+ return 1;
+ }
+
+ return 0;
+}
+
+static const char *fw_name[] = {
+ QLCNIC_UNIFIED_ROMIMAGE_NAME,
+ QLCNIC_FLASH_ROMIMAGE_NAME,
+};
+
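+/*
+ * Copy the bootloader and firmware images into card memory eight bytes
+ * at a time, either from the firmware file obtained by
+ * qlcnic_request_firmware() or, failing that, straight from flash.
+ */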
+int
+qlcnic_load_firmware(struct qlcnic_adapter *adapter)
+{
+ u64 *ptr64;
+ u32 i, flashaddr, size;
+ const struct firmware *fw = adapter->fw;
+ struct pci_dev *pdev = adapter->pdev;
+
+ dev_info(&pdev->dev, "loading firmware from %s\n",
+ fw_name[adapter->fw_type]);
+
+ if (fw) {
+ __le64 data;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_bootld_offs(adapter);
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter, flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+
+ size = (__force u32)qlcnic_get_fw_size(adapter) / 8;
+
+ ptr64 = (u64 *)qlcnic_get_fw_offs(adapter);
+ flashaddr = QLCNIC_IMAGE_START;
+
+ for (i = 0; i < size; i++) {
+ data = cpu_to_le64(ptr64[i]);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ } else {
+ u64 data;
+ u32 hi, lo;
+
+ size = (QLCNIC_IMAGE_START - QLCNIC_BOOTLD_START) / 8;
+ flashaddr = QLCNIC_BOOTLD_START;
+
+ for (i = 0; i < size; i++) {
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr, (int *)&lo) != 0)
+ return -EIO;
+ if (qlcnic_rom_fast_read(adapter,
+ flashaddr + 4, (int *)&hi) != 0)
+ return -EIO;
+
+ data = (((u64)hi << 32) | lo);
+
+ if (qlcnic_pci_mem_write_2M(adapter,
+ flashaddr, data))
+ return -EIO;
+
+ flashaddr += 8;
+ }
+ }
+ msleep(1);
+
+ QLCWR32(adapter, QLCNIC_CRB_PEG_NET_0 + 0x18, 0x1020);
+ QLCWR32(adapter, QLCNIC_ROMUSB_GLB_SW_RESET, 0x80001e);
+ return 0;
+}
+
+static int
+qlcnic_validate_firmware(struct qlcnic_adapter *adapter)
+{
+ __le32 val;
+ u32 ver, min_ver, bios, min_size;
+ struct pci_dev *pdev = adapter->pdev;
+ const struct firmware *fw = adapter->fw;
+ u8 fw_type = adapter->fw_type;
+
+ if (fw_type == QLCNIC_UNIFIED_ROMIMAGE) {
+ if (qlcnic_set_product_offs(adapter))
+ return -EINVAL;
+
+ min_size = QLCNIC_UNI_FW_MIN_SIZE;
+ } else {
+ val = cpu_to_le32(*(u32 *)&fw->data[QLCNIC_FW_MAGIC_OFFSET]);
+ if ((__force u32)val != QLCNIC_BDINFO_MAGIC)
+ return -EINVAL;
+
+ min_size = QLCNIC_FW_MIN_SIZE;
+ }
+
+ if (fw->size < min_size)
+ return -EINVAL;
+
+ val = qlcnic_get_fw_version(adapter);
+
+ min_ver = QLCNIC_VERSION_CODE(4, 0, 216);
+
+ ver = QLCNIC_DECODE_VERSION(val);
+
+ if ((_major(ver) > _QLCNIC_LINUX_MAJOR) || (ver < min_ver)) {
+ dev_err(&pdev->dev,
+ "%s: firmware version %d.%d.%d unsupported\n",
+ fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
+ return -EINVAL;
+ }
+
+ val = qlcnic_get_bios_version(adapter);
+ qlcnic_rom_fast_read(adapter, QLCNIC_BIOS_VERSION_OFFSET, (int *)&bios);
+ if ((__force u32)val != bios) {
+ dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ /* check if flashed firmware is newer */
+ if (qlcnic_rom_fast_read(adapter,
+ QLCNIC_FW_VERSION_OFFSET, (int *)&val))
+ return -EIO;
+
+ val = QLCNIC_DECODE_VERSION(val);
+ if (val > ver) {
+ dev_info(&pdev->dev, "%s: firmware is older than flash\n",
+ fw_name[fw_type]);
+ return -EINVAL;
+ }
+
+ QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+ return 0;
+}
+
+static void
+qlcnic_get_next_fwtype(struct qlcnic_adapter *adapter)
+{
+ u8 fw_type;
+
+ switch (adapter->fw_type) {
+ case QLCNIC_UNKNOWN_ROMIMAGE:
+ fw_type = QLCNIC_UNIFIED_ROMIMAGE;
+ break;
+
+ case QLCNIC_UNIFIED_ROMIMAGE:
+ default:
+ fw_type = QLCNIC_FLASH_ROMIMAGE;
+ break;
+ }
+
+ adapter->fw_type = fw_type;
+}
+
+
+
+void qlcnic_request_firmware(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int rc;
+
+ adapter->fw_type = QLCNIC_UNKNOWN_ROMIMAGE;
+
+next:
+ qlcnic_get_next_fwtype(adapter);
+
+ if (adapter->fw_type == QLCNIC_FLASH_ROMIMAGE) {
+ adapter->fw = NULL;
+ } else {
+ rc = request_firmware(&adapter->fw,
+ fw_name[adapter->fw_type], &pdev->dev);
+ if (rc != 0)
+ goto next;
+
+ rc = qlcnic_validate_firmware(adapter);
+ if (rc != 0) {
+ release_firmware(adapter->fw);
+ msleep(1);
+ goto next;
+ }
+ }
+}
+
+
+void
+qlcnic_release_firmware(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fw)
+ release_firmware(adapter->fw);
+ adapter->fw = NULL;
+}
+
+int qlcnic_phantom_init(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = 60;
+
+ do {
+ val = QLCRD32(adapter, CRB_CMDPEG_STATE);
+
+ switch (val) {
+ case PHAN_INITIALIZE_COMPLETE:
+ case PHAN_INITIALIZE_ACK:
+ return 0;
+ case PHAN_INITIALIZE_FAILED:
+ goto out_err;
+ default:
+ break;
+ }
+
+ msleep(500);
+
+ } while (--retries);
+
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED);
+
+out_err:
+ dev_err(&adapter->pdev->dev, "firmware init failed\n");
+ return -EIO;
+}
+
+static int
+qlcnic_receive_peg_ready(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+ int retries = 2000;
+
+ do {
+ val = QLCRD32(adapter, CRB_RCVPEG_STATE);
+
+ if (val == PHAN_PEG_RCV_INITIALIZED)
+ return 0;
+
+ msleep(10);
+
+ } while (--retries);
+
+ if (!retries) {
+ dev_err(&adapter->pdev->dev, "Receive Peg initialization not "
+ "complete, state: 0x%x.\n", val);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int qlcnic_init_firmware(struct qlcnic_adapter *adapter)
+{
+ int err;
+
+ err = qlcnic_receive_peg_ready(adapter);
+ if (err)
+ return err;
+
+ QLCWR32(adapter, CRB_NIC_CAPABILITIES_HOST, INTR_SCHEME_PERPORT);
+ QLCWR32(adapter, CRB_NIC_MSI_MODE_HOST, MSI_MODE_MULTIFUNC);
+ QLCWR32(adapter, CRB_MPORT_MODE, MPORT_MULTI_FUNCTION_MODE);
+ QLCWR32(adapter, CRB_CMDPEG_STATE, PHAN_INITIALIZE_ACK);
+
+ return err;
+}
+
+static void
+qlcnic_handle_linkevent(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_msg *msg)
+{
+ u32 cable_OUI;
+ u16 cable_len;
+ u16 link_speed;
+ u8 link_status, module, duplex, autoneg;
+ struct net_device *netdev = adapter->netdev;
+
+ adapter->has_link_events = 1;
+
+ cable_OUI = msg->body[1] & 0xffffffff;
+ cable_len = (msg->body[1] >> 32) & 0xffff;
+ link_speed = (msg->body[1] >> 48) & 0xffff;
+
+ link_status = msg->body[2] & 0xff;
+ duplex = (msg->body[2] >> 16) & 0xff;
+ autoneg = (msg->body[2] >> 24) & 0xff;
+
+ module = (msg->body[2] >> 8) & 0xff;
+ if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLE)
+ dev_info(&netdev->dev, "unsupported cable: OUI 0x%x, "
+ "length %d\n", cable_OUI, cable_len);
+ else if (module == LINKEVENT_MODULE_TWINAX_UNSUPPORTED_CABLELEN)
+ dev_info(&netdev->dev, "unsupported cable length %d\n",
+ cable_len);
+
+ qlcnic_advert_link_change(adapter, link_status);
+
+ if (duplex == LINKEVENT_FULL_DUPLEX)
+ adapter->link_duplex = DUPLEX_FULL;
+ else
+ adapter->link_duplex = DUPLEX_HALF;
+
+ adapter->module_type = module;
+ adapter->link_autoneg = autoneg;
+ adapter->link_speed = link_speed;
+}
+
+static void
+qlcnic_handle_fw_message(int desc_cnt, int index,
+ struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_fw_msg msg;
+ struct status_desc *desc;
+ int i = 0, opcode;
+
+ while (desc_cnt > 0 && i < 8) {
+ desc = &sds_ring->desc_head[index];
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[0]);
+ msg.words[i++] = le64_to_cpu(desc->status_desc_data[1]);
+
+ index = get_next_index(index, sds_ring->num_desc);
+ desc_cnt--;
+ }
+
+ opcode = qlcnic_get_nic_msg_opcode(msg.body[0]);
+ switch (opcode) {
+ case QLCNIC_C2H_OPCODE_GET_LINKEVENT_RESPONSE:
+ qlcnic_handle_linkevent(sds_ring->adapter, &msg);
+ break;
+ default:
+ break;
+ }
+}
+
+static int
+qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring,
+ struct qlcnic_rx_buffer *buffer)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma;
+ struct pci_dev *pdev = adapter->pdev;
+
+ buffer->skb = dev_alloc_skb(rds_ring->skb_size);
+ if (!buffer->skb)
+ return -ENOMEM;
+
+ skb = buffer->skb;
+
+ if (!adapter->ahw.cut_through)
+ skb_reserve(skb, 2);
+
+ dma = pci_map_single(pdev, skb->data,
+ rds_ring->dma_size, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, dma)) {
+ dev_kfree_skb_any(skb);
+ buffer->skb = NULL;
+ return -ENOMEM;
+ }
+
+ buffer->skb = skb;
+ buffer->dma = dma;
+ buffer->state = QLCNIC_BUFFER_BUSY;
+
+ return 0;
+}
+
+static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring, u16 index, u16 cksum)
+{
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ pci_unmap_single(adapter->pdev, buffer->dma, rds_ring->dma_size,
+ PCI_DMA_FROMDEVICE);
+
+ skb = buffer->skb;
+ if (!skb)
+ goto no_skb;
+
+ if (likely(adapter->rx_csum && cksum == STATUS_CKSUM_OK)) {
+ adapter->stats.csummed++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ skb->dev = adapter->netdev;
+
+ buffer->skb = NULL;
+no_skb:
+ buffer->state = QLCNIC_BUFFER_FREE;
+ return skb;
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ if (length > rds_ring->skb_size)
+ skb_put(skb, rds_ring->skb_size);
+ else
+ skb_put(skb, length);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ napi_gro_receive(&sds_ring->napi, skb);
+
+ adapter->stats.rx_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
+#define QLC_TCP_HDR_SIZE 20
+#define QLC_TCP_TS_OPTION_SIZE 12
+#define QLC_TCP_TS_HDR_SIZE (QLC_TCP_HDR_SIZE + QLC_TCP_TS_OPTION_SIZE)
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_lro(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0, u64 sts_data1)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ struct iphdr *iph;
+ struct tcphdr *th;
+ bool push, timestamp;
+ int l2_hdr_offset, l4_hdr_offset;
+ int index;
+ u16 lro_length, length, data_offset;
+ u32 seq_number;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_lro_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ timestamp = qlcnic_get_lro_sts_timestamp(sts_data0);
+ lro_length = qlcnic_get_lro_sts_length(sts_data0);
+ l2_hdr_offset = qlcnic_get_lro_sts_l2_hdr_offset(sts_data0);
+ l4_hdr_offset = qlcnic_get_lro_sts_l4_hdr_offset(sts_data0);
+ push = qlcnic_get_lro_sts_push_flag(sts_data0);
+ seq_number = qlcnic_get_lro_sts_seq_number(sts_data1);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, STATUS_CKSUM_OK);
+ if (!skb)
+ return buffer;
+
+ if (timestamp)
+ data_offset = l4_hdr_offset + QLC_TCP_TS_HDR_SIZE;
+ else
+ data_offset = l4_hdr_offset + QLC_TCP_HDR_SIZE;
+
+ skb_put(skb, lro_length + data_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff) + skb_headroom(skb);
+
+ skb_pull(skb, l2_hdr_offset);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ iph = (struct iphdr *)skb->data;
+ th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
+
+ length = (iph->ihl << 2) + (th->doff << 2) + lro_length;
+ iph->tot_len = htons(length);
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ th->psh = push;
+ th->seq = htonl(seq_number);
+
+ length = skb->len;
+
+ netif_receive_skb(skb);
+
+ adapter->stats.lro_pkts++;
+ adapter->stats.rxbytes += length;
+
+ return buffer;
+}
+
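+/*
+ * Main receive processing: consume status descriptors owned by the
+ * host, dispatch by opcode (regular receive, LRO, firmware message),
+ * give the descriptors back to the card, refill the rds rings and
+ * advance the status ring consumer index.
+ */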
+int
+qlcnic_process_rcv_ring(struct qlcnic_host_sds_ring *sds_ring, int max)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct list_head *cur;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0, sts_data1;
+
+ int count = 0;
+ int opcode, ring, desc_cnt;
+ u32 consumer = sds_ring->consumer;
+
+ while (count < max) {
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ break;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ switch (opcode) {
+ case QLCNIC_RXPKT_DESC:
+ case QLCNIC_OLD_RXPKT_DESC:
+ case QLCNIC_SYN_OFFLOAD:
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv(adapter, sds_ring,
+ ring, sts_data0);
+ break;
+ case QLCNIC_LRO_DESC:
+ ring = qlcnic_get_lro_sts_type(sts_data0);
+ sts_data1 = le64_to_cpu(desc->status_desc_data[1]);
+ rxbuf = qlcnic_process_lro(adapter, sds_ring,
+ ring, sts_data0, sts_data1);
+ break;
+ case QLCNIC_RESPONSE_DESC:
+ qlcnic_handle_fw_message(desc_cnt, consumer, sds_ring);
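+ /* fall through */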
+ default:
+ goto skip;
+ }
+
+ WARN_ON(desc_cnt > 1);
+
+ if (rxbuf)
+ list_add_tail(&rxbuf->list, &sds_ring->free_list[ring]);
+
+skip:
+ for (; desc_cnt > 0; desc_cnt--) {
+ desc = &sds_ring->desc_head[consumer];
+ desc->status_desc_data[0] =
+ cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+ }
+ count++;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ struct qlcnic_host_rds_ring *rds_ring =
+ &adapter->recv_ctx.rds_rings[ring];
+
+ if (!list_empty(&sds_ring->free_list[ring])) {
+ list_for_each(cur, &sds_ring->free_list[ring]) {
+ rxbuf = list_entry(cur,
+ struct qlcnic_rx_buffer, list);
+ qlcnic_alloc_rx_skb(adapter, rds_ring, rxbuf);
+ }
+ spin_lock(&rds_ring->lock);
+ list_splice_tail_init(&sds_ring->free_list[ring],
+ &rds_ring->free_list);
+ spin_unlock(&rds_ring->lock);
+ }
+
+ qlcnic_post_rx_buffers_nodb(adapter, rds_ring);
+ }
+
+ if (count) {
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+ }
+
+ return count;
+}
+
+void
+qlcnic_post_rx_buffers(struct qlcnic_adapter *adapter, u32 ringid,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+
+ spin_lock(&rds_ring->lock);
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+ spin_unlock(&rds_ring->lock);
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer-1) & (rds_ring->num_desc-1),
+ rds_ring->crb_rcv_producer);
+ }
+}
+
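+/*
+ * Same as qlcnic_post_rx_buffers() but uses spin_trylock() so the
+ * receive path can skip a refill rather than spin on the rds ring lock.
+ */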
+static void
+qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_rds_ring *rds_ring)
+{
+ struct rcv_desc *pdesc;
+ struct qlcnic_rx_buffer *buffer;
+ int producer, count = 0;
+ struct list_head *head;
+
+ producer = rds_ring->producer;
+ if (!spin_trylock(&rds_ring->lock))
+ return;
+
+ head = &rds_ring->free_list;
+ while (!list_empty(head)) {
+
+ buffer = list_entry(head->next, struct qlcnic_rx_buffer, list);
+
+ if (!buffer->skb) {
+ if (qlcnic_alloc_rx_skb(adapter, rds_ring, buffer))
+ break;
+ }
+
+ count++;
+ list_del(&buffer->list);
+
+ /* make a rcv descriptor */
+ pdesc = &rds_ring->desc_head[producer];
+ pdesc->reference_handle = cpu_to_le16(buffer->ref_handle);
+ pdesc->buffer_length = cpu_to_le32(rds_ring->dma_size);
+ pdesc->addr_buffer = cpu_to_le64(buffer->dma);
+
+ producer = get_next_index(producer, rds_ring->num_desc);
+ }
+
+ if (count) {
+ rds_ring->producer = producer;
+ writel((producer - 1) & (rds_ring->num_desc - 1),
+ rds_ring->crb_rcv_producer);
+ }
+ spin_unlock(&rds_ring->lock);
+}
+
+static struct qlcnic_rx_buffer *
+qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_sds_ring *sds_ring,
+ int ring, u64 sts_data0)
+{
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+ struct qlcnic_rx_buffer *buffer;
+ struct sk_buff *skb;
+ struct qlcnic_host_rds_ring *rds_ring;
+ int index, length, cksum, pkt_offset;
+
+ if (unlikely(ring >= adapter->max_rds_rings))
+ return NULL;
+
+ rds_ring = &recv_ctx->rds_rings[ring];
+
+ index = qlcnic_get_sts_refhandle(sts_data0);
+ if (unlikely(index >= rds_ring->num_desc))
+ return NULL;
+
+ buffer = &rds_ring->rx_buf_arr[index];
+
+ length = qlcnic_get_sts_totallength(sts_data0);
+ cksum = qlcnic_get_sts_status(sts_data0);
+ pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
+
+ skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
+ if (!skb)
+ return buffer;
+
+ skb_put(skb, rds_ring->skb_size);
+
+ if (pkt_offset)
+ skb_pull(skb, pkt_offset);
+
+ skb->truesize = skb->len + sizeof(struct sk_buff);
+
+ if (!qlcnic_check_loopback_buff(skb->data))
+ adapter->diag_cnt++;
+
+ dev_kfree_skb_any(skb);
+
+ return buffer;
+}
+
+void
+qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+ struct status_desc *desc;
+ struct qlcnic_rx_buffer *rxbuf;
+ u64 sts_data0;
+
+ int opcode, ring, desc_cnt;
+ u32 consumer = sds_ring->consumer;
+
+ desc = &sds_ring->desc_head[consumer];
+ sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
+
+ if (!(sts_data0 & STATUS_OWNER_HOST))
+ return;
+
+ desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
+ opcode = qlcnic_get_sts_opcode(sts_data0);
+
+ ring = qlcnic_get_sts_type(sts_data0);
+ rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
+ ring, sts_data0);
+
+ desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
+ consumer = get_next_index(consumer, sds_ring->num_desc);
+
+ sds_ring->consumer = consumer;
+ writel(consumer, sds_ring->crb_sts_consumer);
+}
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
new file mode 100644
index 00000000000..665e8e56b6a
--- /dev/null
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -0,0 +1,2720 @@
+/*
+ * Copyright (C) 2009 - QLogic Corporation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
+ * MA 02111-1307, USA.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called "COPYING".
+ *
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+
+#include "qlcnic.h"
+
+#include <linux/dma-mapping.h>
+#include <linux/if_vlan.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/inetdevice.h>
+#include <linux/sysfs.h>
+
+MODULE_DESCRIPTION("QLogic 10 GbE Converged Ethernet Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(QLCNIC_LINUX_VERSIONID);
+MODULE_FIRMWARE(QLCNIC_UNIFIED_ROMIMAGE_NAME);
+
+char qlcnic_driver_name[] = "qlcnic";
+static const char qlcnic_driver_string[] = "QLogic Converged Ethernet Driver v"
+ QLCNIC_LINUX_VERSIONID;
+
+static int port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+
+/* Default to restricted 1G auto-neg mode */
+static int wol_port_mode = 5;
+
+static int use_msi = 1;
+module_param(use_msi, int, 0644);
+MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled)");
+
+static int use_msi_x = 1;
+module_param(use_msi_x, int, 0644);
+MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled)");
+
+static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
+module_param(auto_fw_reset, int, 0644);
+MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled)");
+
+static int __devinit qlcnic_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+static void __devexit qlcnic_remove(struct pci_dev *pdev);
+static int qlcnic_open(struct net_device *netdev);
+static int qlcnic_close(struct net_device *netdev);
+static void qlcnic_tx_timeout(struct net_device *netdev);
+static void qlcnic_tx_timeout_task(struct work_struct *work);
+static void qlcnic_attach_work(struct work_struct *work);
+static void qlcnic_fwinit_work(struct work_struct *work);
+static void qlcnic_fw_poll_work(struct work_struct *work);
+static void qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay);
+static void qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter);
+static int qlcnic_poll(struct napi_struct *napi, int budget);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev);
+#endif
+
+static void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_create_diag_entries(struct qlcnic_adapter *adapter);
+static void qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter);
+
+static void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter);
+static int qlcnic_can_start_firmware(struct qlcnic_adapter *adapter);
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data);
+static irqreturn_t qlcnic_intr(int irq, void *data);
+static irqreturn_t qlcnic_msi_intr(int irq, void *data);
+static irqreturn_t qlcnic_msix_intr(int irq, void *data);
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev);
+static void qlcnic_config_indev_addr(struct net_device *dev, unsigned long);
+
+/* PCI Device ID Table */
+#define ENTRY(device) \
+ {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
+ .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
+
+#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
+
+static DEFINE_PCI_DEVICE_TABLE(qlcnic_pci_tbl) = {
+ ENTRY(PCI_DEVICE_ID_QLOGIC_QLE824X),
+ {0,}
+};
+
+MODULE_DEVICE_TABLE(pci, qlcnic_pci_tbl);
+
+
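+/*
+ * Ring the TX doorbell with the new producer index and stop the queue
+ * when fewer than TX_STOP_THRESH descriptors remain.
+ */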
+void
+qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
+ struct qlcnic_host_tx_ring *tx_ring)
+{
+ writel(tx_ring->producer, tx_ring->crb_cmd_producer);
+
+ if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
+ netif_stop_queue(adapter->netdev);
+ smp_mb();
+ }
+}
+
+static const u32 msi_tgt_status[8] = {
+ ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
+ ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
+ ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
+ ISR_INT_TARGET_STATUS_F6, ISR_INT_TARGET_STATUS_F7
+};
+
+static const
+struct qlcnic_legacy_intr_set legacy_intr[] = QLCNIC_LEGACY_INTR_CONFIG;
+
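+/*
+ * Per-ring interrupt masking: writing 0 to the ring's CRB mask register
+ * disables its interrupt, writing 1 re-enables it.  For legacy (INTx)
+ * interrupts the global target mask register must be unmasked as well.
+ */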
+static inline void qlcnic_disable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ writel(0, sds_ring->crb_intr_mask);
+}
+
+static inline void qlcnic_enable_int(struct qlcnic_host_sds_ring *sds_ring)
+{
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ writel(0x1, sds_ring->crb_intr_mask);
+
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ writel(0xfbff, adapter->tgt_mask_reg);
+}
+
+static int
+qlcnic_alloc_sds_rings(struct qlcnic_recv_context *recv_ctx, int count)
+{
+ int size = sizeof(struct qlcnic_host_sds_ring) * count;
+
+ recv_ctx->sds_rings = kzalloc(size, GFP_KERNEL);
+
+ return (recv_ctx->sds_rings == NULL);
+}
+
+static void
+qlcnic_free_sds_rings(struct qlcnic_recv_context *recv_ctx)
+{
+ kfree(recv_ctx->sds_rings);
+ recv_ctx->sds_rings = NULL;
+}
+
+static int
+qlcnic_napi_add(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (qlcnic_alloc_sds_rings(recv_ctx, adapter->max_sds_rings))
+ return -ENOMEM;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_add(netdev, &sds_ring->napi,
+ qlcnic_poll, QLCNIC_NETDEV_WEIGHT);
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_napi_del(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ netif_napi_del(&sds_ring->napi);
+ }
+
+ qlcnic_free_sds_rings(&adapter->recv_ctx);
+}
+
+static void
+qlcnic_napi_enable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ napi_enable(&sds_ring->napi);
+ qlcnic_enable_int(sds_ring);
+ }
+}
+
+static void
+qlcnic_napi_disable(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ napi_synchronize(&sds_ring->napi);
+ napi_disable(&sds_ring->napi);
+ }
+}
+
+static void qlcnic_clear_stats(struct qlcnic_adapter *adapter)
+{
+ memset(&adapter->stats, 0, sizeof(adapter->stats));
+}
+
+static int qlcnic_set_dma_mask(struct qlcnic_adapter *adapter)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ u64 mask, cmask;
+
+ adapter->pci_using_dac = 0;
+
+ mask = DMA_BIT_MASK(39);
+ cmask = mask;
+
+ if (pci_set_dma_mask(pdev, mask) == 0 &&
+ pci_set_consistent_dma_mask(pdev, cmask) == 0) {
+ adapter->pci_using_dac = 1;
+ return 0;
+ }
+
+ return -EIO;
+}
+
+/* Update addressable range if firmware supports it */
+static int
+qlcnic_update_dma_mask(struct qlcnic_adapter *adapter)
+{
+ int change, shift, err;
+ u64 mask, old_mask, old_cmask;
+ struct pci_dev *pdev = adapter->pdev;
+
+ change = 0;
+
+ shift = QLCRD32(adapter, CRB_DMA_SHIFT);
+ if (shift > 32)
+ return 0;
+
+ if (shift > 9)
+ change = 1;
+
+ if (change) {
+ old_mask = pdev->dma_mask;
+ old_cmask = pdev->dev.coherent_dma_mask;
+
+ mask = DMA_BIT_MASK(32+shift);
+
+ err = pci_set_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+
+ err = pci_set_consistent_dma_mask(pdev, mask);
+ if (err)
+ goto err_out;
+ dev_info(&pdev->dev, "using %d-bit dma mask\n", 32+shift);
+ }
+
+ return 0;
+
+err_out:
+ pci_set_dma_mask(pdev, old_mask);
+ pci_set_consistent_dma_mask(pdev, old_cmask);
+ return err;
+}
+
+static void qlcnic_set_port_mode(struct qlcnic_adapter *adapter)
+{
+ u32 val, data;
+
+ val = adapter->ahw.board_type;
+ if ((val == QLCNIC_BRDTYPE_P3_HMEZ) ||
+ (val == QLCNIC_BRDTYPE_P3_XG_LOM)) {
+ if (port_mode == QLCNIC_PORT_MODE_802_3_AP) {
+ data = QLCNIC_PORT_MODE_802_3_AP;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_XG) {
+ data = QLCNIC_PORT_MODE_XG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_1G) {
+ data = QLCNIC_PORT_MODE_AUTO_NEG_1G;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else if (port_mode == QLCNIC_PORT_MODE_AUTO_NEG_XG) {
+ data = QLCNIC_PORT_MODE_AUTO_NEG_XG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ } else {
+ data = QLCNIC_PORT_MODE_AUTO_NEG;
+ QLCWR32(adapter, QLCNIC_PORT_MODE_ADDR, data);
+ }
+
+ if ((wol_port_mode != QLCNIC_PORT_MODE_802_3_AP) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_XG) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_1G) &&
+ (wol_port_mode != QLCNIC_PORT_MODE_AUTO_NEG_XG)) {
+ wol_port_mode = QLCNIC_PORT_MODE_AUTO_NEG;
+ }
+ QLCWR32(adapter, QLCNIC_WOL_PORT_MODE, wol_port_mode);
+ }
+}
+
+static void qlcnic_set_msix_bit(struct pci_dev *pdev, int enable)
+{
+ u32 control;
+ int pos;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_dword(pdev, pos, &control);
+ if (enable)
+ control |= PCI_MSIX_FLAGS_ENABLE;
+ else
+ control = 0;
+ pci_write_config_dword(pdev, pos, control);
+ }
+}
+
+static void qlcnic_init_msix_entries(struct qlcnic_adapter *adapter, int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ adapter->msix_entries[i].entry = i;
+}
+
+static int
+qlcnic_read_mac_addr(struct qlcnic_adapter *adapter)
+{
+ int i;
+ unsigned char *p;
+ u64 mac_addr;
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+
+ if (qlcnic_get_mac_addr(adapter, &mac_addr) != 0)
+ return -EIO;
+
+ p = (unsigned char *)&mac_addr;
+ for (i = 0; i < 6; i++)
+ netdev->dev_addr[i] = *(p + 5 - i);
+
+ memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+ memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
+
+ /* set station address */
+
+ if (!is_valid_ether_addr(netdev->perm_addr))
+ dev_warn(&pdev->dev, "Bad MAC address %pM.\n",
+ netdev->dev_addr);
+
+ return 0;
+}
+
+static int qlcnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ if (netif_running(netdev)) {
+ netif_device_detach(netdev);
+ qlcnic_napi_disable(adapter);
+ }
+
+ memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ qlcnic_set_multi(adapter->netdev);
+
+ if (netif_running(netdev)) {
+ netif_device_attach(netdev);
+ qlcnic_napi_enable(adapter);
+ }
+ return 0;
+}
+
+static const struct net_device_ops qlcnic_netdev_ops = {
+ .ndo_open = qlcnic_open,
+ .ndo_stop = qlcnic_close,
+ .ndo_start_xmit = qlcnic_xmit_frame,
+ .ndo_get_stats = qlcnic_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_multicast_list = qlcnic_set_multi,
+ .ndo_set_mac_address = qlcnic_set_mac,
+ .ndo_change_mtu = qlcnic_change_mtu,
+ .ndo_tx_timeout = qlcnic_tx_timeout,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = qlcnic_poll_controller,
+#endif
+};
+
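+/*
+ * Interrupt setup: try MSI-X first (multiple SDS rings when RSS is
+ * supported), fall back to MSI, and finally to legacy INTx using the
+ * per-function target/status registers.
+ */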
+static void
+qlcnic_setup_intr(struct qlcnic_adapter *adapter)
+{
+ const struct qlcnic_legacy_intr_set *legacy_intrp;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, num_msix;
+
+ if (adapter->rss_supported) {
+ num_msix = (num_online_cpus() >= MSIX_ENTRIES_PER_ADAPTER) ?
+ MSIX_ENTRIES_PER_ADAPTER : 2;
+ } else
+ num_msix = 1;
+
+ adapter->max_sds_rings = 1;
+
+ adapter->flags &= ~(QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED);
+
+ legacy_intrp = &legacy_intr[adapter->ahw.pci_func];
+
+ adapter->int_vec_bit = legacy_intrp->int_vec_bit;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_status_reg);
+ adapter->tgt_mask_reg = qlcnic_get_ioaddr(adapter,
+ legacy_intrp->tgt_mask_reg);
+ adapter->isr_int_vec = qlcnic_get_ioaddr(adapter, ISR_INT_VECTOR);
+
+ adapter->crb_int_state_reg = qlcnic_get_ioaddr(adapter,
+ ISR_INT_STATE_REG);
+
+ qlcnic_set_msix_bit(pdev, 0);
+
+ if (adapter->msix_supported) {
+
+ qlcnic_init_msix_entries(adapter, num_msix);
+ err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
+ if (err == 0) {
+ adapter->flags |= QLCNIC_MSIX_ENABLED;
+ qlcnic_set_msix_bit(pdev, 1);
+
+ if (adapter->rss_supported)
+ adapter->max_sds_rings = num_msix;
+
+ dev_info(&pdev->dev, "using msi-x interrupts\n");
+ return;
+ }
+
+ if (err > 0)
+ pci_disable_msix(pdev);
+
+ /* fall through for msi */
+ }
+
+ if (use_msi && !pci_enable_msi(pdev)) {
+ adapter->flags |= QLCNIC_MSI_ENABLED;
+ adapter->tgt_status_reg = qlcnic_get_ioaddr(adapter,
+ msi_tgt_status[adapter->ahw.pci_func]);
+ dev_info(&pdev->dev, "using msi interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+ return;
+ }
+
+ dev_info(&pdev->dev, "using legacy interrupts\n");
+ adapter->msix_entries[0].vector = pdev->irq;
+}
+
+static void
+qlcnic_teardown_intr(struct qlcnic_adapter *adapter)
+{
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ pci_disable_msix(adapter->pdev);
+ if (adapter->flags & QLCNIC_MSI_ENABLED)
+ pci_disable_msi(adapter->pdev);
+}
+
+static void
+qlcnic_cleanup_pci_map(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw.pci_base0 != NULL)
+ iounmap(adapter->ahw.pci_base0);
+}
+
+static int
+qlcnic_setup_pci_map(struct qlcnic_adapter *adapter)
+{
+ void __iomem *mem_ptr0 = NULL;
+ resource_size_t mem_base;
+ unsigned long mem_len, pci_len0 = 0;
+
+ struct pci_dev *pdev = adapter->pdev;
+ int pci_func = adapter->ahw.pci_func;
+
+ /*
+ * Set the CRB window to invalid. If any register in window 0 is
+ * accessed it should set the window to 0 and then reset it to 1.
+ */
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ /* remap phys address */
+ mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
+ mem_len = pci_resource_len(pdev, 0);
+
+ if (mem_len == QLCNIC_PCI_2MB_SIZE) {
+
+ mem_ptr0 = pci_ioremap_bar(pdev, 0);
+ if (mem_ptr0 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ return -EIO;
+ }
+ pci_len0 = mem_len;
+ } else {
+ return -EIO;
+ }
+
+ dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
+
+ adapter->ahw.pci_base0 = mem_ptr0;
+ adapter->ahw.pci_len0 = pci_len0;
+
+ adapter->ahw.ocm_win_crb = qlcnic_get_ioaddr(adapter,
+ QLCNIC_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
+
+ return 0;
+}
+
+static void get_brd_name(struct qlcnic_adapter *adapter, char *name)
+{
+ struct pci_dev *pdev = adapter->pdev;
+ int i, found = 0;
+
+ for (i = 0; i < NUM_SUPPORTED_BOARDS; ++i) {
+ if (qlcnic_boards[i].vendor == pdev->vendor &&
+ qlcnic_boards[i].device == pdev->device &&
+ qlcnic_boards[i].sub_vendor == pdev->subsystem_vendor &&
+ qlcnic_boards[i].sub_device == pdev->subsystem_device) {
+ strcpy(name, qlcnic_boards[i].short_name);
+ found = 1;
+ break;
+ }
+
+ }
+
+ if (!found)
+ strcpy(name, "Unknown");
+}
+
+static void
+qlcnic_check_options(struct qlcnic_adapter *adapter)
+{
+ u32 fw_major, fw_minor, fw_build;
+ char brd_name[QLCNIC_MAX_BOARD_NAME_LEN];
+ char serial_num[32];
+ int i, offset, val;
+ int *ptr32;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->driver_mismatch = 0;
+
+ ptr32 = (int *)&serial_num;
+ offset = QLCNIC_FW_SERIAL_NUM_OFFSET;
+ for (i = 0; i < 8; i++) {
+ if (qlcnic_rom_fast_read(adapter, offset, &val) == -1) {
+ dev_err(&pdev->dev, "error reading board info\n");
+ adapter->driver_mismatch = 1;
+ return;
+ }
+ ptr32[i] = cpu_to_le32(val);
+ offset += sizeof(u32);
+ }
+
+ fw_major = QLCRD32(adapter, QLCNIC_FW_VERSION_MAJOR);
+ fw_minor = QLCRD32(adapter, QLCNIC_FW_VERSION_MINOR);
+ fw_build = QLCRD32(adapter, QLCNIC_FW_VERSION_SUB);
+
+ adapter->fw_version = QLCNIC_VERSION_CODE(fw_major, fw_minor, fw_build);
+
+ if (adapter->portnum == 0) {
+ get_brd_name(adapter, brd_name);
+
+ pr_info("%s: %s Board Chip rev 0x%x\n",
+ module_name(THIS_MODULE),
+ brd_name, adapter->ahw.revision_id);
+ }
+
+ if (adapter->fw_version < QLCNIC_VERSION_CODE(3, 4, 216)) {
+ adapter->driver_mismatch = 1;
+ dev_warn(&pdev->dev, "firmware version %d.%d.%d unsupported\n",
+ fw_major, fw_minor, fw_build);
+ return;
+ }
+
+ i = QLCRD32(adapter, QLCNIC_SRE_MISC);
+ adapter->ahw.cut_through = (i & 0x8000) ? 1 : 0;
+
+ dev_info(&pdev->dev, "firmware v%d.%d.%d [%s]\n",
+ fw_major, fw_minor, fw_build,
+ adapter->ahw.cut_through ? "cut-through" : "legacy");
+
+ if (adapter->fw_version >= QLCNIC_VERSION_CODE(4, 0, 222))
+ adapter->capabilities = QLCRD32(adapter, CRB_FW_CAPABILITIES_1);
+
+ adapter->flags &= ~QLCNIC_LRO_ENABLED;
+
+ if (adapter->ahw.port_type == QLCNIC_XGBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
+ } else if (adapter->ahw.port_type == QLCNIC_GBE) {
+ adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_1G;
+ adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_1G;
+ }
+
+ adapter->msix_supported = !!use_msi_x;
+ adapter->rss_supported = !!use_msi_x;
+
+ adapter->num_txd = MAX_CMD_DESCRIPTORS;
+
+ adapter->num_lro_rxd = 0;
+ adapter->max_rds_rings = 2;
+}
+
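+/*
+ * Firmware bring-up: qlcnic_can_start_firmware() arbitrates between PCI
+ * functions, so only one function loads and starts the firmware; the
+ * others simply wait for the handshake (qlcnic_phantom_init) to complete.
+ */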
+static int
+qlcnic_start_firmware(struct qlcnic_adapter *adapter)
+{
+ int val, err, first_boot;
+
+ err = qlcnic_set_dma_mask(adapter);
+ if (err)
+ return err;
+
+ if (!qlcnic_can_start_firmware(adapter))
+ goto wait_init;
+
+ first_boot = QLCRD32(adapter, QLCNIC_CAM_RAM(0x1fc));
+ if (first_boot == 0x55555555)
+ /* This is the first boot after power up */
+ QLCWR32(adapter, QLCNIC_CAM_RAM(0x1fc), QLCNIC_BDINFO_MAGIC);
+
+ qlcnic_request_firmware(adapter);
+
+ err = qlcnic_need_fw_reset(adapter);
+ if (err < 0)
+ goto err_out;
+ if (err == 0)
+ goto wait_init;
+
+ if (first_boot != 0x55555555) {
+ QLCWR32(adapter, CRB_CMDPEG_STATE, 0);
+ qlcnic_pinit_from_rom(adapter);
+ msleep(1);
+ }
+
+ QLCWR32(adapter, CRB_DMA_SHIFT, 0x55555555);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS1, 0);
+ QLCWR32(adapter, QLCNIC_PEG_HALT_STATUS2, 0);
+
+ qlcnic_set_port_mode(adapter);
+
+ err = qlcnic_load_firmware(adapter);
+ if (err)
+ goto err_out;
+
+ qlcnic_release_firmware(adapter);
+
+ val = (_QLCNIC_LINUX_MAJOR << 16)
+ | (_QLCNIC_LINUX_MINOR << 8)
+ | (_QLCNIC_LINUX_SUBVERSION);
+ QLCWR32(adapter, CRB_DRIVER_VERSION, val);
+
+wait_init:
+ /* Handshake with the card before we register the devices. */
+ err = qlcnic_phantom_init(adapter);
+ if (err)
+ goto err_out;
+
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
+
+ qlcnic_update_dma_mask(adapter);
+
+ qlcnic_check_options(adapter);
+
+ adapter->need_fw_reset = 0;
+
+ /* fall through and release firmware */
+
+err_out:
+ qlcnic_release_firmware(adapter);
+ return err;
+}
+
+static int
+qlcnic_request_irq(struct qlcnic_adapter *adapter)
+{
+ irq_handler_t handler;
+ struct qlcnic_host_sds_ring *sds_ring;
+ int err, ring;
+
+ unsigned long flags = 0;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ handler = qlcnic_tmp_intr;
+ if (!QLCNIC_IS_MSI_FAMILY(adapter))
+ flags |= IRQF_SHARED;
+
+ } else {
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ handler = qlcnic_msix_intr;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED)
+ handler = qlcnic_msi_intr;
+ else {
+ flags |= IRQF_SHARED;
+ handler = qlcnic_intr;
+ }
+ }
+ adapter->irq = netdev->irq;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ sprintf(sds_ring->name, "%s[%d]", netdev->name, ring);
+ err = request_irq(sds_ring->irq, handler,
+ flags, sds_ring->name, sds_ring);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static void
+qlcnic_free_irq(struct qlcnic_adapter *adapter)
+{
+ int ring;
+ struct qlcnic_host_sds_ring *sds_ring;
+
+ struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
+
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &recv_ctx->sds_rings[ring];
+ free_irq(sds_ring->irq, sds_ring);
+ }
+}
+
+static void
+qlcnic_init_coalesce_defaults(struct qlcnic_adapter *adapter)
+{
+ adapter->coal.flags = QLCNIC_INTR_DEFAULT;
+ adapter->coal.normal.data.rx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_TIME_US;
+ adapter->coal.normal.data.rx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_RX_PACKETS;
+ adapter->coal.normal.data.tx_time_us =
+ QLCNIC_DEFAULT_INTR_COALESCE_TX_TIME_US;
+ adapter->coal.normal.data.tx_packets =
+ QLCNIC_DEFAULT_INTR_COALESCE_TX_PACKETS;
+}
+
+static int
+__qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return -EIO;
+
+ qlcnic_set_multi(netdev);
+ qlcnic_fw_cmd_set_mtu(adapter, netdev->mtu);
+
+ adapter->ahw.linkup = 0;
+
+ if (adapter->max_sds_rings > 1)
+ qlcnic_config_rss(adapter, 1);
+
+ qlcnic_config_intr_coalesce(adapter);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ qlcnic_config_hw_lro(adapter, QLCNIC_LRO_ENABLED);
+
+ qlcnic_napi_enable(adapter);
+
+ qlcnic_linkevent_request(adapter, 1);
+
+ set_bit(__QLCNIC_DEV_UP, &adapter->state);
+ return 0;
+}
+
+/* Used during resume and by the firmware recovery path. */
+
+static int
+qlcnic_up(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ int err = 0;
+
+ rtnl_lock();
+ if (netif_running(netdev))
+ err = __qlcnic_up(adapter, netdev);
+ rtnl_unlock();
+
+ return err;
+}
+
+static void
+__qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
+ return;
+
+ smp_mb();
+ spin_lock(&adapter->tx_clean_lock);
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ qlcnic_free_mac_list(adapter);
+
+ qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+
+ qlcnic_napi_disable(adapter);
+
+ qlcnic_release_tx_buffers(adapter);
+ spin_unlock(&adapter->tx_clean_lock);
+}
+
+/* Used during suspend and by the firmware recovery path. */
+
+static void
+qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
+{
+ rtnl_lock();
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+ rtnl_unlock();
+
+}
+
+static int
+qlcnic_attach(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ struct pci_dev *pdev = adapter->pdev;
+ int err, ring;
+ struct qlcnic_host_rds_ring *rds_ring;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC)
+ return 0;
+
+ err = qlcnic_init_firmware(adapter);
+ if (err)
+ return err;
+
+ err = qlcnic_napi_add(adapter, netdev);
+ if (err)
+ return err;
+
+ err = qlcnic_alloc_sw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting sw resources\n");
+ return err;
+ }
+
+ err = qlcnic_alloc_hw_resources(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error in setting hw resources\n");
+ goto err_out_free_sw;
+ }
+
+ for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+ rds_ring = &adapter->recv_ctx.rds_rings[ring];
+ qlcnic_post_rx_buffers(adapter, ring, rds_ring);
+ }
+
+ err = qlcnic_request_irq(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to setup interrupt\n");
+ goto err_out_free_rxbuf;
+ }
+
+ qlcnic_init_coalesce_defaults(adapter);
+
+ qlcnic_create_sysfs_entries(adapter);
+
+ adapter->is_up = QLCNIC_ADAPTER_UP_MAGIC;
+ return 0;
+
+err_out_free_rxbuf:
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_hw_resources(adapter);
+err_out_free_sw:
+ qlcnic_free_sw_resources(adapter);
+ return err;
+}
+
+static void
+qlcnic_detach(struct qlcnic_adapter *adapter)
+{
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ return;
+
+ qlcnic_remove_sysfs_entries(adapter);
+
+ qlcnic_free_hw_resources(adapter);
+ qlcnic_release_rx_buffers(adapter);
+ qlcnic_free_irq(adapter);
+ qlcnic_napi_del(adapter);
+ qlcnic_free_sw_resources(adapter);
+
+ adapter->is_up = 0;
+}
+
+void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ qlcnic_disable_int(sds_ring);
+ }
+ }
+
+ qlcnic_detach(adapter);
+
+ adapter->diag_test = 0;
+ adapter->max_sds_rings = max_sds_rings;
+
+ if (qlcnic_attach(adapter))
+ return;
+
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ netif_device_attach(netdev);
+}
+
+int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_sds_ring *sds_ring;
+ int ring;
+ int ret;
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ adapter->max_sds_rings = 1;
+ adapter->diag_test = test;
+
+ ret = qlcnic_attach(adapter);
+ if (ret)
+ return ret;
+
+ if (adapter->diag_test == QLCNIC_INTERRUPT_TEST) {
+ for (ring = 0; ring < adapter->max_sds_rings; ring++) {
+ sds_ring = &adapter->recv_ctx.sds_rings[ring];
+ qlcnic_enable_int(sds_ring);
+ }
+ }
+
+ return 0;
+}
+
+int
+qlcnic_reset_context(struct qlcnic_adapter *adapter)
+{
+ int err = 0;
+ struct net_device *netdev = adapter->netdev;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return -EBUSY;
+
+ if (adapter->is_up == QLCNIC_ADAPTER_UP_MAGIC) {
+
+ netif_device_detach(netdev);
+
+ if (netif_running(netdev))
+ __qlcnic_down(adapter, netdev);
+
+ qlcnic_detach(adapter);
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (!err)
+ err = __qlcnic_up(adapter, netdev);
+
+ if (err)
+ goto done;
+ }
+
+ netif_device_attach(netdev);
+ }
+
+done:
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ return err;
+}
+
+static int
+qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
+ struct net_device *netdev)
+{
+ int err;
+ struct pci_dev *pdev = adapter->pdev;
+
+ adapter->rx_csum = 1;
+ adapter->mc_enabled = 0;
+ adapter->max_mc_count = 38;
+
+ netdev->netdev_ops = &qlcnic_netdev_ops;
+ netdev->watchdog_timeo = 2*HZ;
+
+ qlcnic_change_mtu(netdev, netdev->mtu);
+
+ SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+
+ netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+ netdev->features |= (NETIF_F_GRO);
+ netdev->vlan_features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO);
+
+ netdev->features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+ netdev->vlan_features |= (NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+
+ if (adapter->pci_using_dac) {
+ netdev->features |= NETIF_F_HIGHDMA;
+ netdev->vlan_features |= NETIF_F_HIGHDMA;
+ }
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_FVLANTX)
+ netdev->features |= (NETIF_F_HW_VLAN_TX);
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_HW_LRO)
+ netdev->features |= NETIF_F_LRO;
+
+ netdev->irq = adapter->msix_entries[0].vector;
+
+ INIT_WORK(&adapter->tx_timeout_task, qlcnic_tx_timeout_task);
+
+ if (qlcnic_read_mac_addr(adapter))
+ dev_warn(&pdev->dev, "failed to read mac addr\n");
+
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to register net device\n");
+ return err;
+ }
+
+ return 0;
+}
+
+static int __devinit
+qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct net_device *netdev = NULL;
+ struct qlcnic_adapter *adapter = NULL;
+ int err;
+ int pci_func_id = PCI_FUNC(pdev->devfn);
+ uint8_t revision_id;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
+ err = -ENODEV;
+ goto err_out_disable_pdev;
+ }
+
+ err = pci_request_regions(pdev, qlcnic_driver_name);
+ if (err)
+ goto err_out_disable_pdev;
+
+ pci_set_master(pdev);
+
+ netdev = alloc_etherdev(sizeof(struct qlcnic_adapter));
+ if (!netdev) {
+ dev_err(&pdev->dev, "failed to allocate net_device\n");
+ err = -ENOMEM;
+ goto err_out_free_res;
+ }
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ adapter = netdev_priv(netdev);
+ adapter->netdev = netdev;
+ adapter->pdev = pdev;
+ adapter->ahw.pci_func = pci_func_id;
+
+ revision_id = pdev->revision;
+ adapter->ahw.revision_id = revision_id;
+
+ rwlock_init(&adapter->ahw.crb_lock);
+ mutex_init(&adapter->ahw.mem_lock);
+
+ spin_lock_init(&adapter->tx_clean_lock);
+ INIT_LIST_HEAD(&adapter->mac_list);
+
+ err = qlcnic_setup_pci_map(adapter);
+ if (err)
+ goto err_out_free_netdev;
+
+ /* This will be reset for mezz cards */
+ adapter->portnum = pci_func_id;
+
+ err = qlcnic_get_board_info(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "Error getting board config info.\n");
+ goto err_out_iounmap;
+ }
+
+ err = qlcnic_start_firmware(adapter);
+ if (err)
+ goto err_out_decr_ref;
+
+ /* Default the physical port to this function's port number. */
+ adapter->physical_port = adapter->portnum;
+
+ qlcnic_clear_stats(adapter);
+
+ qlcnic_setup_intr(adapter);
+
+ err = qlcnic_setup_netdev(adapter, netdev);
+ if (err)
+ goto err_out_disable_msi;
+
+ pci_set_drvdata(pdev, adapter);
+
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+
+ switch (adapter->ahw.port_type) {
+ case QLCNIC_GBE:
+ dev_info(&adapter->pdev->dev, "%s: GbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ case QLCNIC_XGBE:
+ dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
+ adapter->netdev->name);
+ break;
+ }
+
+ qlcnic_create_diag_entries(adapter);
+
+ return 0;
+
+err_out_disable_msi:
+ qlcnic_teardown_intr(adapter);
+
+err_out_decr_ref:
+ qlcnic_clr_all_drv_state(adapter);
+
+err_out_iounmap:
+ qlcnic_cleanup_pci_map(adapter);
+
+err_out_free_netdev:
+ free_netdev(netdev);
+
+err_out_free_res:
+ pci_release_regions(pdev);
+
+err_out_disable_pdev:
+ pci_set_drvdata(pdev, NULL);
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void __devexit qlcnic_remove(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *netdev;
+
+ adapter = pci_get_drvdata(pdev);
+ if (adapter == NULL)
+ return;
+
+ netdev = adapter->netdev;
+
+ qlcnic_cancel_fw_work(adapter);
+
+ unregister_netdev(netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ qlcnic_detach(adapter);
+
+ qlcnic_clr_all_drv_state(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ qlcnic_teardown_intr(adapter);
+
+ qlcnic_remove_diag_entries(adapter);
+
+ qlcnic_cleanup_pci_map(adapter);
+
+ qlcnic_release_firmware(adapter);
+
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+
+ free_netdev(netdev);
+}
+
+static int __qlcnic_shutdown(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int retval;
+
+ netif_device_detach(netdev);
+
+ qlcnic_cancel_fw_work(adapter);
+
+ if (netif_running(netdev))
+ qlcnic_down(adapter, netdev);
+
+ cancel_work_sync(&adapter->tx_timeout_task);
+
+ qlcnic_detach(adapter);
+
+ qlcnic_clr_all_drv_state(adapter);
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ retval = pci_save_state(pdev);
+ if (retval)
+ return retval;
+
+ if (qlcnic_wol_supported(adapter)) {
+ pci_enable_wake(pdev, PCI_D3cold, 1);
+ pci_enable_wake(pdev, PCI_D3hot, 1);
+ }
+
+ return 0;
+}
+
+static void qlcnic_shutdown(struct pci_dev *pdev)
+{
+ if (__qlcnic_shutdown(pdev))
+ return;
+
+ pci_disable_device(pdev);
+}
+
+#ifdef CONFIG_PM
+static int
+qlcnic_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+
+ retval = __qlcnic_shutdown(pdev);
+ if (retval)
+ return retval;
+
+ pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ return 0;
+}
+
+static int
+qlcnic_resume(struct pci_dev *pdev)
+{
+ struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err)
+ return err;
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ adapter->ahw.crb_win = -1;
+ adapter->ahw.ocm_win = -1;
+
+ err = qlcnic_start_firmware(adapter);
+ if (err) {
+ dev_err(&pdev->dev, "failed to start firmware\n");
+ return err;
+ }
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto err_out;
+
+ err = qlcnic_up(adapter, netdev);
+ if (err)
+ goto err_out_detach;
+
+ qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+ return 0;
+
+err_out_detach:
+ qlcnic_detach(adapter);
+err_out:
+ qlcnic_clr_all_drv_state(adapter);
+ return err;
+}
+#endif
+
+static int qlcnic_open(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ int err;
+
+ if (adapter->driver_mismatch)
+ return -EIO;
+
+ err = qlcnic_attach(adapter);
+ if (err)
+ return err;
+
+ err = __qlcnic_up(adapter, netdev);
+ if (err)
+ goto err_out;
+
+ netif_start_queue(netdev);
+
+ return 0;
+
+err_out:
+ qlcnic_detach(adapter);
+ return err;
+}
+
+/*
+ * qlcnic_close - Disables a network interface entry point
+ */
+static int qlcnic_close(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ __qlcnic_down(adapter, netdev);
+ return 0;
+}
+
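+/*
+ * Examine the skb and fill in the per-packet opcode and flags in the
+ * first TX descriptor (VLAN tagging, TSO, checksum offload).  For TSO
+ * the MAC/IP/TCP headers are also copied into extra command descriptors
+ * so the firmware can replicate them for each segment.
+ */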
+static void
+qlcnic_tso_check(struct net_device *netdev,
+ struct qlcnic_host_tx_ring *tx_ring,
+ struct cmd_desc_type0 *first_desc,
+ struct sk_buff *skb)
+{
+ u8 opcode = TX_ETHER_PKT;
+ __be16 protocol = skb->protocol;
+ u16 flags = 0, vid = 0;
+ u32 producer;
+ int copied, offset, copy_len, hdr_len = 0, tso = 0, vlan_oob = 0;
+ struct cmd_desc_type0 *hwdesc;
+ struct vlan_ethhdr *vh;
+
+ if (protocol == cpu_to_be16(ETH_P_8021Q)) {
+
+ vh = (struct vlan_ethhdr *)skb->data;
+ protocol = vh->h_vlan_encapsulated_proto;
+ flags = FLAGS_VLAN_TAGGED;
+
+ } else if (vlan_tx_tag_present(skb)) {
+
+ flags = FLAGS_VLAN_OOB;
+ vid = vlan_tx_tag_get(skb);
+ qlcnic_set_tx_vlan_tci(first_desc, vid);
+ vlan_oob = 1;
+ }
+
+ if ((netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
+
+ hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
+ first_desc->total_hdr_length = hdr_len;
+ if (vlan_oob) {
+ first_desc->total_hdr_length += VLAN_HLEN;
+ first_desc->tcp_hdr_offset = VLAN_HLEN;
+ first_desc->ip_hdr_offset = VLAN_HLEN;
+ /* Only in case of TSO on vlan device */
+ flags |= FLAGS_VLAN_TAGGED;
+ }
+
+ opcode = (protocol == cpu_to_be16(ETH_P_IPV6)) ?
+ TX_TCP_LSO6 : TX_TCP_LSO;
+ tso = 1;
+
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 l4proto;
+
+ if (protocol == cpu_to_be16(ETH_P_IP)) {
+ l4proto = ip_hdr(skb)->protocol;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCP_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDP_PKT;
+ } else if (protocol == cpu_to_be16(ETH_P_IPV6)) {
+ l4proto = ipv6_hdr(skb)->nexthdr;
+
+ if (l4proto == IPPROTO_TCP)
+ opcode = TX_TCPV6_PKT;
+ else if (l4proto == IPPROTO_UDP)
+ opcode = TX_UDPV6_PKT;
+ }
+ }
+
+ first_desc->tcp_hdr_offset += skb_transport_offset(skb);
+ first_desc->ip_hdr_offset += skb_network_offset(skb);
+ qlcnic_set_tx_flags_opcode(first_desc, flags, opcode);
+
+ if (!tso)
+ return;
+
+ /* For LSO, we need to copy the MAC/IP/TCP headers into
+ * the descriptor ring
+ */
+ producer = tx_ring->producer;
+ copied = 0;
+ offset = 2;
+
+ if (vlan_oob) {
+ /* Create a TSO vlan header template for firmware */
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ hdr_len + VLAN_HLEN);
+
+ vh = (struct vlan_ethhdr *)((char *)hwdesc + 2);
+ skb_copy_from_linear_data(skb, vh, 12);
+ vh->h_vlan_proto = htons(ETH_P_8021Q);
+ vh->h_vlan_TCI = htons(vid);
+ skb_copy_from_linear_data_offset(skb, 12,
+ (char *)vh + 16, copy_len - 16);
+
+ copied = copy_len - VLAN_HLEN;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ while (copied < hdr_len) {
+
+ copy_len = min((int)sizeof(struct cmd_desc_type0) - offset,
+ (hdr_len - copied));
+
+ hwdesc = &tx_ring->desc_head[producer];
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+
+ skb_copy_from_linear_data_offset(skb, copied,
+ (char *)hwdesc + offset, copy_len);
+
+ copied += copy_len;
+ offset = 0;
+
+ producer = get_next_index(producer, tx_ring->num_desc);
+ }
+
+ tx_ring->producer = producer;
+ barrier();
+}
+
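+/*
+ * DMA-map the skb head and all page fragments into pbuf->frag_array.
+ * On a mapping failure everything mapped so far is unwound and -ENOMEM
+ * is returned so the caller can drop the packet.
+ */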
+static int
+qlcnic_map_tx_skb(struct pci_dev *pdev,
+ struct sk_buff *skb, struct qlcnic_cmd_buffer *pbuf)
+{
+ struct qlcnic_skb_frag *nf;
+ struct skb_frag_struct *frag;
+ int i, nr_frags;
+ dma_addr_t map;
+
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ nf = &pbuf->frag_array[0];
+
+ map = pci_map_single(pdev, skb->data,
+ skb_headlen(skb), PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto out_err;
+
+ nf->dma = map;
+ nf->length = skb_headlen(skb);
+
+ for (i = 0; i < nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nf = &pbuf->frag_array[i+1];
+
+ map = pci_map_page(pdev, frag->page, frag->page_offset,
+ frag->size, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(pdev, map))
+ goto unwind;
+
+ nf->dma = map;
+ nf->length = frag->size;
+ }
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ nf = &pbuf->frag_array[i+1];
+ pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE);
+ }
+
+ nf = &pbuf->frag_array[0];
+ pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE);
+
+out_err:
+ return -ENOMEM;
+}
+
+static inline void
+qlcnic_clear_cmddesc(u64 *desc)
+{
+ desc[0] = 0ULL;
+ desc[2] = 0ULL;
+}
+
+netdev_tx_t
+qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+ struct qlcnic_cmd_buffer *pbuf;
+ struct qlcnic_skb_frag *buffrag;
+ struct cmd_desc_type0 *hwdesc, *first_desc;
+ struct pci_dev *pdev;
+ int i, k;
+
+ u32 producer;
+ int frag_count, no_of_desc;
+ u32 num_txd = tx_ring->num_desc;
+
+ frag_count = skb_shinfo(skb)->nr_frags + 1;
+
+ /* 4 fragments per cmd des */
+ no_of_desc = (frag_count + 3) >> 2;
+
+ if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+ netif_stop_queue(netdev);
+ return NETDEV_TX_BUSY;
+ }
+
+ producer = tx_ring->producer;
+ pbuf = &tx_ring->cmd_buf_arr[producer];
+
+ pdev = adapter->pdev;
+
+ if (qlcnic_map_tx_skb(pdev, skb, pbuf))
+ goto drop_packet;
+
+ pbuf->skb = skb;
+ pbuf->frag_count = frag_count;
+
+ first_desc = hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+
+ qlcnic_set_tx_frags_len(first_desc, frag_count, skb->len);
+ qlcnic_set_tx_port(first_desc, adapter->portnum);
+
+ for (i = 0; i < frag_count; i++) {
+
+ k = i % 4;
+
+ if ((k == 0) && (i > 0)) {
+ /* move to next desc.*/
+ producer = get_next_index(producer, num_txd);
+ hwdesc = &tx_ring->desc_head[producer];
+ qlcnic_clear_cmddesc((u64 *)hwdesc);
+ tx_ring->cmd_buf_arr[producer].skb = NULL;
+ }
+
+ buffrag = &pbuf->frag_array[i];
+
+ hwdesc->buffer_length[k] = cpu_to_le16(buffrag->length);
+ switch (k) {
+ case 0:
+ hwdesc->addr_buffer1 = cpu_to_le64(buffrag->dma);
+ break;
+ case 1:
+ hwdesc->addr_buffer2 = cpu_to_le64(buffrag->dma);
+ break;
+ case 2:
+ hwdesc->addr_buffer3 = cpu_to_le64(buffrag->dma);
+ break;
+ case 3:
+ hwdesc->addr_buffer4 = cpu_to_le64(buffrag->dma);
+ break;
+ }
+ }
+
+ tx_ring->producer = get_next_index(producer, num_txd);
+
+ qlcnic_tso_check(netdev, tx_ring, first_desc, skb);
+
+ qlcnic_update_cmd_producer(adapter, tx_ring);
+
+ adapter->stats.txbytes += skb->len;
+ adapter->stats.xmitcalled++;
+
+ return NETDEV_TX_OK;
+
+drop_packet:
+ adapter->stats.txdropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static int qlcnic_check_temp(struct qlcnic_adapter *adapter)
+{
+ struct net_device *netdev = adapter->netdev;
+ u32 temp, temp_state, temp_val;
+ int rv = 0;
+
+ temp = QLCRD32(adapter, CRB_TEMP_STATE);
+
+ temp_state = qlcnic_get_temp_state(temp);
+ temp_val = qlcnic_get_temp_val(temp);
+
+ if (temp_state == QLCNIC_TEMP_PANIC) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C exceeds"
+ " maximum allowed. Hardware has been shut down.\n",
+ temp_val);
+ rv = 1;
+ } else if (temp_state == QLCNIC_TEMP_WARN) {
+ if (adapter->temp == QLCNIC_TEMP_NORMAL) {
+ dev_err(&netdev->dev,
+ "Device temperature %d degrees C "
+ "exceeds operating range."
+ " Immediate action needed.\n",
+ temp_val);
+ }
+ } else {
+ if (adapter->temp == QLCNIC_TEMP_WARN) {
+ dev_info(&netdev->dev,
+ "Device temperature is now %d degrees C"
+ " in normal range.\n", temp_val);
+ }
+ }
+ adapter->temp = temp_state;
+ return rv;
+}
+
+void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
+{
+ struct net_device *netdev = adapter->netdev;
+
+ if (adapter->ahw.linkup && !linkup) {
+ dev_info(&netdev->dev, "NIC Link is down\n");
+ adapter->ahw.linkup = 0;
+ if (netif_running(netdev)) {
+ netif_carrier_off(netdev);
+ netif_stop_queue(netdev);
+ }
+ } else if (!adapter->ahw.linkup && linkup) {
+ dev_info(&netdev->dev, "NIC Link is up\n");
+ adapter->ahw.linkup = 1;
+ if (netif_running(netdev)) {
+ netif_carrier_on(netdev);
+ netif_wake_queue(netdev);
+ }
+ }
+}
+
+static void qlcnic_tx_timeout(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ dev_err(&netdev->dev, "transmit timeout, resetting.\n");
+ schedule_work(&adapter->tx_timeout_task);
+}
+
+static void qlcnic_tx_timeout_task(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter =
+ container_of(work, struct qlcnic_adapter, tx_timeout_task);
+
+ if (!netif_running(adapter->netdev))
+ return;
+
+ if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ return;
+
+ if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS)
+ goto request_reset;
+
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+ if (!qlcnic_reset_context(adapter)) {
+ adapter->netdev->trans_start = jiffies;
+ return;
+ }
+
+ /* context reset failed, fall through for fw reset */
+
+request_reset:
+ adapter->need_fw_reset = 1;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
+static struct net_device_stats *qlcnic_get_stats(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ struct net_device_stats *stats = &netdev->stats;
+
+ memset(stats, 0, sizeof(*stats));
+
+ stats->rx_packets = adapter->stats.rx_pkts + adapter->stats.lro_pkts;
+ stats->tx_packets = adapter->stats.xmitfinished;
+ stats->rx_bytes = adapter->stats.rxbytes;
+ stats->tx_bytes = adapter->stats.txbytes;
+ stats->rx_dropped = adapter->stats.rxdropped;
+ stats->tx_dropped = adapter->stats.txdropped;
+
+ return stats;
+}
+
+static irqreturn_t qlcnic_clear_legacy_intr(struct qlcnic_adapter *adapter)
+{
+ u32 status;
+
+ status = readl(adapter->isr_int_vec);
+
+ if (!(status & adapter->int_vec_bit))
+ return IRQ_NONE;
+
+ /* check interrupt state machine, to be sure */
+ status = readl(adapter->crb_int_state_reg);
+ if (!ISR_LEGACY_INT_TRIGGERED(status))
+ return IRQ_NONE;
+
+ writel(0xffffffff, adapter->tgt_status_reg);
+ /* read twice to ensure write is flushed */
+ readl(adapter->isr_int_vec);
+ readl(adapter->isr_int_vec);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_tmp_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ goto done;
+ else if (adapter->flags & QLCNIC_MSI_ENABLED) {
+ writel(0xffffffff, adapter->tgt_status_reg);
+ goto done;
+ }
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+done:
+ adapter->diag_cnt++;
+ qlcnic_enable_int(sds_ring);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ if (qlcnic_clear_legacy_intr(adapter) == IRQ_NONE)
+ return IRQ_NONE;
+
+ napi_schedule(&sds_ring->napi);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msi_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ /* clear interrupt */
+ writel(0xffffffff, adapter->tgt_status_reg);
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t qlcnic_msix_intr(int irq, void *data)
+{
+ struct qlcnic_host_sds_ring *sds_ring = data;
+
+ napi_schedule(&sds_ring->napi);
+ return IRQ_HANDLED;
+}
+
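+/*
+ * Reclaim completed TX buffers up to the hardware consumer index, at
+ * most MAX_STATUS_HANDLE entries per call, and wake the queue once
+ * enough descriptors are free again.  Returns non-zero when no further
+ * TX cleaning is pending (NAPI uses this to decide whether to complete).
+ */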
+static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
+{
+ u32 sw_consumer, hw_consumer;
+ int count = 0, i;
+ struct qlcnic_cmd_buffer *buffer;
+ struct pci_dev *pdev = adapter->pdev;
+ struct net_device *netdev = adapter->netdev;
+ struct qlcnic_skb_frag *frag;
+ int done;
+ struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
+
+ if (!spin_trylock(&adapter->tx_clean_lock))
+ return 1;
+
+ sw_consumer = tx_ring->sw_consumer;
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+
+ while (sw_consumer != hw_consumer) {
+ buffer = &tx_ring->cmd_buf_arr[sw_consumer];
+ if (buffer->skb) {
+ frag = &buffer->frag_array[0];
+ pci_unmap_single(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ for (i = 1; i < buffer->frag_count; i++) {
+ frag++;
+ pci_unmap_page(pdev, frag->dma, frag->length,
+ PCI_DMA_TODEVICE);
+ frag->dma = 0ULL;
+ }
+
+ adapter->stats.xmitfinished++;
+ dev_kfree_skb_any(buffer->skb);
+ buffer->skb = NULL;
+ }
+
+ sw_consumer = get_next_index(sw_consumer, tx_ring->num_desc);
+ if (++count >= MAX_STATUS_HANDLE)
+ break;
+ }
+
+ if (count && netif_running(netdev)) {
+ tx_ring->sw_consumer = sw_consumer;
+
+ smp_mb();
+
+ if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
+ __netif_tx_lock(tx_ring->txq, smp_processor_id());
+ if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
+ netif_wake_queue(netdev);
+ adapter->tx_timeo_cnt = 0;
+ }
+ __netif_tx_unlock(tx_ring->txq);
+ }
+ }
+ /*
+ * If everything up to the consumer has been freed, check whether the
+ * ring is full. If it is, check whether more needs to be freed and
+ * schedule the callback again.
+ *
+ * This matters when there are two CPUs: one could be freeing entries
+ * while the other fills the ring. If the ring is full when we leave
+ * here and the card has already interrupted the host, the host can
+ * miss that interrupt.
+ *
+ * There is still a possible race in which the host misses an
+ * interrupt; the card has to take care of that case.
+ */
+ hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
+ done = (sw_consumer == hw_consumer);
+ spin_unlock(&adapter->tx_clean_lock);
+
+ return done;
+}
+
+static int qlcnic_poll(struct napi_struct *napi, int budget)
+{
+ struct qlcnic_host_sds_ring *sds_ring =
+ container_of(napi, struct qlcnic_host_sds_ring, napi);
+
+ struct qlcnic_adapter *adapter = sds_ring->adapter;
+
+ int tx_complete;
+ int work_done;
+
+ tx_complete = qlcnic_process_cmd_ring(adapter);
+
+ work_done = qlcnic_process_rcv_ring(sds_ring, budget);
+
+ if ((work_done < budget) && tx_complete) {
+ napi_complete(&sds_ring->napi);
+ if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
+ qlcnic_enable_int(sds_ring);
+ }
+
+ return work_done;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void qlcnic_poll_controller(struct net_device *netdev)
+{
+ struct qlcnic_adapter *adapter = netdev_priv(netdev);
+ disable_irq(adapter->irq);
+ qlcnic_intr(adapter->irq, adapter);
+ enable_irq(adapter->irq);
+}
+#endif
+
+static void
+qlcnic_set_drv_state(struct qlcnic_adapter *adapter, int state)
+{
+ u32 val;
+
+ WARN_ON(state != QLCNIC_DEV_NEED_RESET &&
+ state != QLCNIC_DEV_NEED_QUISCENT);
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+
+ if (state == QLCNIC_DEV_NEED_RESET)
+ val |= ((u32)0x1 << (adapter->portnum * 4));
+ else if (state == QLCNIC_DEV_NEED_QUISCENT)
+ val |= ((u32)0x1 << ((adapter->portnum * 4) + 1));
+
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+}
+
+static int
+qlcnic_clr_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ return -EBUSY;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter)
+{
+ u32 val;
+
+ if (qlcnic_api_lock(adapter))
+ goto err;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ val &= ~((u32)0x1 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+
+ if (!(val & 0x11111111))
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_COLD);
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (adapter->portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+err:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__QLCNIC_START_FW, &adapter->state);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+}
+
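+/*
+ * Each PCI function owns one nibble in the shared CRB registers: the low
+ * bit of its nibble in DEV_REF_COUNT marks an active driver, and the two
+ * low bits of its nibble in DRV_STATE acknowledge a pending reset or
+ * quiescent request.  Returns 0 once every active driver has acknowledged.
+ */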
+static int
+qlcnic_check_drv_state(struct qlcnic_adapter *adapter)
+{
+ int act, state;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ act = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+
+ if (((state & 0x11111111) == (act & 0x11111111)) ||
+ ((act & 0x11111111) == ((state >> 1) & 0x11111111)))
+ return 0;
+ else
+ return 1;
+}
+
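+/*
+ * Decide whether this function should initialize the firmware.  The
+ * device reference count register tracks active functions and the shared
+ * device state register arbitrates the reset: returns 1 when this
+ * function must load the firmware, 0 when it should just wait for
+ * DEV_READY, and -1 on failure or timeout.
+ */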
+static int
+qlcnic_can_start_firmware(struct qlcnic_adapter *adapter)
+{
+ u32 val, prev_state;
+ int cnt = 0;
+ int portnum = adapter->portnum;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DEV_REF_COUNT);
+ if (!(val & ((int)0x1 << (portnum * 4)))) {
+ val |= ((u32)0x1 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DEV_REF_COUNT, val);
+ } else if (test_and_clear_bit(__QLCNIC_START_FW, &adapter->state)) {
+ goto start_fw;
+ }
+
+ prev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ switch (prev_state) {
+ case QLCNIC_DEV_COLD:
+start_fw:
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_INITALIZING);
+ qlcnic_api_unlock(adapter);
+ return 1;
+
+ case QLCNIC_DEV_READY:
+ qlcnic_api_unlock(adapter);
+ return 0;
+
+ case QLCNIC_DEV_NEED_RESET:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val |= ((u32)0x1 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_NEED_QUISCENT:
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val |= ((u32)0x1 << ((portnum * 4) + 1));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+ break;
+
+ case QLCNIC_DEV_FAILED:
+ qlcnic_api_unlock(adapter);
+ return -1;
+ }
+
+ qlcnic_api_unlock(adapter);
+ msleep(1000);
+ while ((QLCRD32(adapter, QLCNIC_CRB_DEV_STATE) != QLCNIC_DEV_READY) &&
+ ++cnt < 20)
+ msleep(1000);
+
+ if (cnt >= 20)
+ return -1;
+
+ if (qlcnic_api_lock(adapter))
+ return -1;
+
+ val = QLCRD32(adapter, QLCNIC_CRB_DRV_STATE);
+ val &= ~((u32)0x3 << (portnum * 4));
+ QLCWR32(adapter, QLCNIC_CRB_DRV_STATE, val);
+
+ qlcnic_api_unlock(adapter);
+
+ return 0;
+}
+
+static void
+qlcnic_fwinit_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ int dev_state;
+
+ if (++adapter->fw_wait_cnt > FW_POLL_THRESH)
+ goto err_ret;
+
+ if (test_bit(__QLCNIC_START_FW, &adapter->state)) {
+
+ if (qlcnic_check_drv_state(adapter)) {
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, FW_POLL_DELAY);
+ return;
+ }
+
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
+
+ goto err_ret;
+ }
+
+ dev_state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ switch (dev_state) {
+ case QLCNIC_DEV_READY:
+ if (!qlcnic_start_firmware(adapter)) {
+ qlcnic_schedule_work(adapter, qlcnic_attach_work, 0);
+ return;
+ }
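+ /* fall through: firmware start failed */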
+ case QLCNIC_DEV_FAILED:
+ break;
+
+ default:
+ qlcnic_schedule_work(adapter,
+ qlcnic_fwinit_work, 2 * FW_POLL_DELAY);
+ return;
+ }
+
+err_ret:
+ qlcnic_clr_all_drv_state(adapter);
+}
+
+static void
+qlcnic_detach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ u32 status;
+
+ netif_device_detach(netdev);
+
+ qlcnic_down(adapter, netdev);
+
+ rtnl_lock();
+ qlcnic_detach(adapter);
+ rtnl_unlock();
+
+ status = QLCRD32(adapter, QLCNIC_PEG_HALT_STATUS1);
+
+ if (status & QLCNIC_RCODE_FATAL_ERROR)
+ goto err_ret;
+
+ if (adapter->temp == QLCNIC_TEMP_PANIC)
+ goto err_ret;
+
+ qlcnic_set_drv_state(adapter, adapter->dev_state);
+
+ adapter->fw_wait_cnt = 0;
+
+ qlcnic_schedule_work(adapter, qlcnic_fwinit_work, FW_POLL_DELAY);
+
+ return;
+
+err_ret:
+ qlcnic_clr_all_drv_state(adapter);
+
+}
+
+static void
+qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
+{
+ u32 state;
+
+ if (qlcnic_api_lock(adapter))
+ return;
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+
+ if (state != QLCNIC_DEV_INITALIZING && state != QLCNIC_DEV_NEED_RESET) {
+ QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_RESET);
+ set_bit(__QLCNIC_START_FW, &adapter->state);
+ }
+
+ qlcnic_api_unlock(adapter);
+}
+
+static void
+qlcnic_schedule_work(struct qlcnic_adapter *adapter,
+ work_func_t func, int delay)
+{
+ INIT_DELAYED_WORK(&adapter->fw_work, func);
+ schedule_delayed_work(&adapter->fw_work, round_jiffies_relative(delay));
+}
+
+static void
+qlcnic_cancel_fw_work(struct qlcnic_adapter *adapter)
+{
+ while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ msleep(10);
+
+ cancel_delayed_work_sync(&adapter->fw_work);
+}
+
+static void
+qlcnic_attach_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+ struct net_device *netdev = adapter->netdev;
+ int err;
+
+ if (netif_running(netdev)) {
+ err = qlcnic_attach(adapter);
+ if (err)
+ goto done;
+
+ err = qlcnic_up(adapter, netdev);
+ if (err) {
+ qlcnic_detach(adapter);
+ goto done;
+ }
+
+ qlcnic_config_indev_addr(netdev, NETDEV_UP);
+ }
+
+ netif_device_attach(netdev);
+
+done:
+ adapter->fw_fail_cnt = 0;
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
+
+ if (!qlcnic_clr_drv_state(adapter))
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work,
+ FW_POLL_DELAY);
+}
+
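+/*
+ * Periodic health check: monitor temperature, the shared device state
+ * and the firmware heartbeat counter.  If the heartbeat stops advancing
+ * for FW_FAIL_THRESH polls, request a firmware reset.  Returns non-zero
+ * when the adapter should be detached for recovery.
+ */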
+static int
+qlcnic_check_health(struct qlcnic_adapter *adapter)
+{
+ u32 state = 0, heartbit;
+ struct net_device *netdev = adapter->netdev;
+
+ if (qlcnic_check_temp(adapter))
+ goto detach;
+
+ if (adapter->need_fw_reset) {
+ qlcnic_dev_request_reset(adapter);
+ goto detach;
+ }
+
+ state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
+ if (state == QLCNIC_DEV_NEED_RESET || state == QLCNIC_DEV_NEED_QUISCENT)
+ adapter->need_fw_reset = 1;
+
+ heartbit = QLCRD32(adapter, QLCNIC_PEG_ALIVE_COUNTER);
+ if (heartbit != adapter->heartbit) {
+ adapter->heartbit = heartbit;
+ adapter->fw_fail_cnt = 0;
+ if (adapter->need_fw_reset)
+ goto detach;
+ return 0;
+ }
+
+ if (++adapter->fw_fail_cnt < FW_FAIL_THRESH)
+ return 0;
+
+ qlcnic_dev_request_reset(adapter);
+
+ clear_bit(__QLCNIC_FW_ATTACHED, &adapter->state);
+
+ dev_info(&netdev->dev, "firmware hang detected\n");
+
+detach:
+ adapter->dev_state = (state == QLCNIC_DEV_NEED_QUISCENT) ? state :
+ QLCNIC_DEV_NEED_RESET;
+
+ if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
+ !test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
+ qlcnic_schedule_work(adapter, qlcnic_detach_work, 0);
+
+ return 1;
+}
+
+static void
+qlcnic_fw_poll_work(struct work_struct *work)
+{
+ struct qlcnic_adapter *adapter = container_of(work,
+ struct qlcnic_adapter, fw_work.work);
+
+ if (test_bit(__QLCNIC_RESETTING, &adapter->state))
+ goto reschedule;
+
+ if (qlcnic_check_health(adapter))
+ return;
+
+reschedule:
+ qlcnic_schedule_work(adapter, qlcnic_fw_poll_work, FW_POLL_DELAY);
+}
+
+static ssize_t
+qlcnic_store_bridged_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!(adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG))
+ goto err_out;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto err_out;
+
+ if (strict_strtoul(buf, 2, &new))
+ goto err_out;
+
+ if (!qlcnic_config_bridged_mode(adapter, !!new))
+ ret = len;
+
+err_out:
+ return ret;
+}
+
+static ssize_t
+qlcnic_show_bridged_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ int bridged_mode = 0;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ bridged_mode = !!(adapter->flags & QLCNIC_BRIDGE_ENABLED);
+
+ return sprintf(buf, "%d\n", bridged_mode);
+}
+
+static struct device_attribute dev_attr_bridged_mode = {
+ .attr = {.name = "bridged_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_bridged_mode,
+ .store = qlcnic_store_bridged_mode,
+};
+
+static ssize_t
+qlcnic_store_diag_mode(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned long new;
+
+ if (strict_strtoul(buf, 2, &new))
+ return -EINVAL;
+
+ if (!!new != !!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ adapter->flags ^= QLCNIC_DIAG_ENABLED;
+
+ return len;
+}
+
+static ssize_t
+qlcnic_show_diag_mode(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n",
+ !!(adapter->flags & QLCNIC_DIAG_ENABLED));
+}
+
+static struct device_attribute dev_attr_diag_mode = {
+ .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
+ .show = qlcnic_show_diag_mode,
+ .store = qlcnic_store_diag_mode,
+};
+
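+/* The crb and mem sysfs nodes are usable only in diag mode and require
+ * naturally aligned accesses: 4-byte words for CRB, 8-byte for memory.
+ */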
+static int
+qlcnic_sysfs_validate_crb(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 4) || (offset & 0x3))
+ return -EINVAL;
+
+ if (offset < QLCNIC_PCI_CRBSPACE)
+ return -EINVAL;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ data = QLCRD32(adapter, offset);
+ memcpy(buf, &data, size);
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u32 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_crb(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+ QLCWR32(adapter, offset, data);
+ return size;
+}
+
+static int
+qlcnic_sysfs_validate_mem(struct qlcnic_adapter *adapter,
+ loff_t offset, size_t size)
+{
+ if (!(adapter->flags & QLCNIC_DIAG_ENABLED))
+ return -EIO;
+
+ if ((size != 8) || (offset & 0x7))
+ return -EIO;
+
+ return 0;
+}
+
+static ssize_t
+qlcnic_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ if (qlcnic_pci_mem_read_2M(adapter, offset, &data))
+ return -EIO;
+
+ memcpy(buf, &data, size);
+
+ return size;
+}
+
+static ssize_t
+qlcnic_sysfs_write_mem(struct kobject *kobj, struct bin_attribute *attr,
+ char *buf, loff_t offset, size_t size)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ u64 data;
+ int ret;
+
+ ret = qlcnic_sysfs_validate_mem(adapter, offset, size);
+ if (ret != 0)
+ return ret;
+
+ memcpy(&data, buf, size);
+
+ if (qlcnic_pci_mem_write_2M(adapter, offset, data))
+ return -EIO;
+
+ return size;
+}
+
+static struct bin_attribute bin_attr_crb = {
+ .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_crb,
+ .write = qlcnic_sysfs_write_crb,
+};
+
+static struct bin_attribute bin_attr_mem = {
+ .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
+ .size = 0,
+ .read = qlcnic_sysfs_read_mem,
+ .write = qlcnic_sysfs_write_mem,
+};
+
+static void
+qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ if (device_create_file(dev, &dev_attr_bridged_mode))
+ dev_warn(dev,
+ "failed to create bridged_mode sysfs entry\n");
+}
+
+static void
+qlcnic_remove_sysfs_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (adapter->capabilities & QLCNIC_FW_CAPABILITY_BDG)
+ device_remove_file(dev, &dev_attr_bridged_mode);
+}
+
+static void
+qlcnic_create_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ if (device_create_file(dev, &dev_attr_diag_mode))
+ dev_info(dev, "failed to create diag_mode sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_crb))
+ dev_info(dev, "failed to create crb sysfs entry\n");
+ if (device_create_bin_file(dev, &bin_attr_mem))
+ dev_info(dev, "failed to create mem sysfs entry\n");
+}
+
+static void
+qlcnic_remove_diag_entries(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+
+ device_remove_file(dev, &dev_attr_diag_mode);
+ device_remove_bin_file(dev, &bin_attr_crb);
+ device_remove_bin_file(dev, &bin_attr_mem);
+}
+
+#ifdef CONFIG_INET
+
+#define is_qlcnic_netdev(dev) ((dev)->netdev_ops == &qlcnic_netdev_ops)
+
+static int
+qlcnic_destip_supported(struct qlcnic_adapter *adapter)
+{
+ if (adapter->ahw.cut_through)
+ return 0;
+
+ return 1;
+}
+
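+/* Walk the interface's IPv4 addresses and tell the firmware that each
+ * one has come up or gone down (not supported on cut-through adapters).
+ */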
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{
+ struct in_device *indev;
+ struct qlcnic_adapter *adapter = netdev_priv(dev);
+
+ if (!qlcnic_destip_supported(adapter))
+ return;
+
+ indev = in_dev_get(dev);
+ if (!indev)
+ return;
+
+ for_ifa(indev) {
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter,
+ ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+ } endfor_ifa(indev);
+
+ in_dev_put(indev);
+}
+
+static int qlcnic_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev = (struct net_device *)ptr;
+
+recheck:
+ if (dev == NULL)
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter)
+ goto done;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto done;
+
+ qlcnic_config_indev_addr(dev, event);
+done:
+ return NOTIFY_DONE;
+}
+
+static int
+qlcnic_inetaddr_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct qlcnic_adapter *adapter;
+ struct net_device *dev;
+
+ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
+
+ dev = ifa->ifa_dev ? ifa->ifa_dev->dev : NULL;
+
+recheck:
+ if (dev == NULL || !netif_running(dev))
+ goto done;
+
+ if (dev->priv_flags & IFF_802_1Q_VLAN) {
+ dev = vlan_dev_real_dev(dev);
+ goto recheck;
+ }
+
+ if (!is_qlcnic_netdev(dev))
+ goto done;
+
+ adapter = netdev_priv(dev);
+
+ if (!adapter || !qlcnic_destip_supported(adapter))
+ goto done;
+
+ if (adapter->is_up != QLCNIC_ADAPTER_UP_MAGIC)
+ goto done;
+
+ switch (event) {
+ case NETDEV_UP:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_UP);
+ break;
+ case NETDEV_DOWN:
+ qlcnic_config_ipaddr(adapter, ifa->ifa_address, QLCNIC_IP_DOWN);
+ break;
+ default:
+ break;
+ }
+
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block qlcnic_netdev_cb = {
+ .notifier_call = qlcnic_netdev_event,
+};
+
+static struct notifier_block qlcnic_inetaddr_cb = {
+ .notifier_call = qlcnic_inetaddr_event,
+};
+#else
+static void
+qlcnic_config_indev_addr(struct net_device *dev, unsigned long event)
+{ }
+#endif
+
+static struct pci_driver qlcnic_driver = {
+ .name = qlcnic_driver_name,
+ .id_table = qlcnic_pci_tbl,
+ .probe = qlcnic_probe,
+ .remove = __devexit_p(qlcnic_remove),
+#ifdef CONFIG_PM
+ .suspend = qlcnic_suspend,
+ .resume = qlcnic_resume,
+#endif
+ .shutdown = qlcnic_shutdown
+};
+
+static int __init qlcnic_init_module(void)
+{
+
+ printk(KERN_INFO "%s\n", qlcnic_driver_string);
+
+#ifdef CONFIG_INET
+ register_netdevice_notifier(&qlcnic_netdev_cb);
+ register_inetaddr_notifier(&qlcnic_inetaddr_cb);
+#endif
+
+ return pci_register_driver(&qlcnic_driver);
+}
+
+module_init(qlcnic_init_module);
+
+static void __exit qlcnic_exit_module(void)
+{
+
+ pci_unregister_driver(&qlcnic_driver);
+
+#ifdef CONFIG_INET
+ unregister_inetaddr_notifier(&qlcnic_inetaddr_cb);
+ unregister_netdevice_notifier(&qlcnic_netdev_cb);
+#endif
+}
+
+module_exit(qlcnic_exit_module);
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 862c1aaf386..8b742b639ce 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -19,14 +19,6 @@
#define DRV_VERSION "v1.00.00.23.00.00-01"
#define PFX "qlge: "
-#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
- do { \
- if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
- ; \
- else \
- dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
- "%s: " fmt, __func__, ##args); \
- } while (0)
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
@@ -54,12 +46,8 @@
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
-#define SMALL_BUFFER_SIZE 512
-#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
#define LARGE_BUFFER_MAX_SIZE 8192
#define LARGE_BUFFER_MIN_SIZE 2048
-#define MAX_SPLIT_SIZE 1023
-#define QLGE_SB_PAD 32
#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
@@ -79,15 +67,43 @@
#define TX_DESC_PER_OAL 0
#endif
+/* Word shifting for converting a 64-bit
+ * address into 16-bit words (LSW/MSW) and
+ * 32-bit dwords (LSD/MSD). This is used
+ * for some MPI firmware mailbox commands.
+ */
+#define LSW(x) ((u16)(x))
+#define MSW(x) ((u16)((u32)(x) >> 16))
+#define LSD(x) ((u32)((u64)(x)))
+#define MSD(x) ((u32)((((u64)(x)) >> 32)))
+
/* MPI test register definitions. This register
* is used for determining alternate NIC function's
* PCI->func number.
*/
enum {
MPI_TEST_FUNC_PORT_CFG = 0x1002,
+ MPI_TEST_FUNC_PRB_CTL = 0x100e,
+ MPI_TEST_FUNC_PRB_EN = 0x18a20000,
+ MPI_TEST_FUNC_RST_STS = 0x100a,
+ MPI_TEST_FUNC_RST_FRC = 0x00000003,
+ MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_NIC1_FUNCTION_ENABLE = (1 << 0),
+ MPI_TEST_NIC1_FUNCTION_MASK = 0x0000000e,
MPI_TEST_NIC1_FUNC_SHIFT = 1,
+ MPI_TEST_NIC2_FUNCTION_ENABLE = (1 << 4),
+ MPI_TEST_NIC2_FUNCTION_MASK = 0x000000e0,
MPI_TEST_NIC2_FUNC_SHIFT = 5,
- MPI_TEST_NIC_FUNC_MASK = 0x00000007,
+ MPI_TEST_FC1_FUNCTION_ENABLE = (1 << 8),
+ MPI_TEST_FC1_FUNCTION_MASK = 0x00000e00,
+ MPI_TEST_FC1_FUNCTION_SHIFT = 9,
+ MPI_TEST_FC2_FUNCTION_ENABLE = (1 << 12),
+ MPI_TEST_FC2_FUNCTION_MASK = 0x0000e000,
+ MPI_TEST_FC2_FUNCTION_SHIFT = 13,
+
+ MPI_NIC_READ = 0x00000000,
+ MPI_NIC_REG_BLOCK = 0x00020000,
+ MPI_NIC_FUNCTION_SHIFT = 6,
};
/*
@@ -468,7 +484,7 @@ enum {
MDIO_PORT = 0x00000440,
MDIO_STATUS = 0x00000450,
- /* XGMAC AUX statistics registers */
+ XGMAC_REGISTER_END = 0x00000740,
};
/*
@@ -509,6 +525,7 @@ enum {
enum {
MAC_ADDR_IDX_SHIFT = 4,
MAC_ADDR_TYPE_SHIFT = 16,
+ MAC_ADDR_TYPE_COUNT = 10,
MAC_ADDR_TYPE_MASK = 0x000f0000,
MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
@@ -526,6 +543,30 @@ enum {
MAC_ADDR_MR = (1 << 30),
MAC_ADDR_MW = (1 << 31),
MAX_MULTICAST_ENTRIES = 32,
+
+ /* Entry count and words per entry
+ * for each address type in the filter.
+ */
+ MAC_ADDR_MAX_CAM_ENTRIES = 512,
+ MAC_ADDR_MAX_CAM_WCOUNT = 3,
+ MAC_ADDR_MAX_MULTICAST_ENTRIES = 32,
+ MAC_ADDR_MAX_MULTICAST_WCOUNT = 2,
+ MAC_ADDR_MAX_VLAN_ENTRIES = 4096,
+ MAC_ADDR_MAX_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MCAST_FLTR_ENTRIES = 4096,
+ MAC_ADDR_MAX_MCAST_FLTR_WCOUNT = 1,
+ MAC_ADDR_MAX_FC_MAC_ENTRIES = 4,
+ MAC_ADDR_MAX_FC_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_MAC_ENTRIES = 8,
+ MAC_ADDR_MAX_MGMT_MAC_WCOUNT = 2,
+ MAC_ADDR_MAX_MGMT_VLAN_ENTRIES = 16,
+ MAC_ADDR_MAX_MGMT_VLAN_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V4_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V4_WCOUNT = 1,
+ MAC_ADDR_MAX_MGMT_V6_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_V6_WCOUNT = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES = 4,
+ MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT = 1,
};
/*
@@ -596,6 +637,7 @@ enum {
enum {
RT_IDX_IDX_SHIFT = 8,
RT_IDX_TYPE_MASK = 0x000f0000,
+ RT_IDX_TYPE_SHIFT = 16,
RT_IDX_TYPE_RT = 0x00000000,
RT_IDX_TYPE_RT_INV = 0x00010000,
RT_IDX_TYPE_NICQ = 0x00020000,
@@ -664,7 +706,89 @@ enum {
RT_IDX_UNUSED013 = 13,
RT_IDX_UNUSED014 = 14,
RT_IDX_PROMISCUOUS_SLOT = 15,
- RT_IDX_MAX_SLOTS = 16,
+ RT_IDX_MAX_RT_SLOTS = 8,
+ RT_IDX_MAX_NIC_SLOTS = 16,
+};
+
+/*
+ * Serdes Address Register (XG_SERDES_ADDR) bit definitions.
+ */
+enum {
+ XG_SERDES_ADDR_RDY = (1 << 31),
+ XG_SERDES_ADDR_R = (1 << 30),
+
+ XG_SERDES_ADDR_STS = 0x00001E06,
+ XG_SERDES_ADDR_XFI1_PWR_UP = 0x00000005,
+ XG_SERDES_ADDR_XFI2_PWR_UP = 0x0000000a,
+ XG_SERDES_ADDR_XAUI_PWR_DOWN = 0x00000001,
+
+ /* Serdes coredump definitions. */
+ XG_SERDES_XAUI_AN_START = 0x00000000,
+ XG_SERDES_XAUI_AN_END = 0x00000034,
+ XG_SERDES_XAUI_HSS_PCS_START = 0x00000800,
+ XG_SERDES_XAUI_HSS_PCS_END = 0x00000880,
+ XG_SERDES_XFI_AN_START = 0x00001000,
+ XG_SERDES_XFI_AN_END = 0x00001034,
+ XG_SERDES_XFI_TRAIN_START = 0x10001050,
+ XG_SERDES_XFI_TRAIN_END = 0x1000107C,
+ XG_SERDES_XFI_HSS_PCS_START = 0x00001800,
+ XG_SERDES_XFI_HSS_PCS_END = 0x00001838,
+ XG_SERDES_XFI_HSS_TX_START = 0x00001c00,
+ XG_SERDES_XFI_HSS_TX_END = 0x00001c1f,
+ XG_SERDES_XFI_HSS_RX_START = 0x00001c40,
+ XG_SERDES_XFI_HSS_RX_END = 0x00001c5f,
+ XG_SERDES_XFI_HSS_PLL_START = 0x00001e00,
+ XG_SERDES_XFI_HSS_PLL_END = 0x00001e1f,
+};
+
+/*
+ * NIC Probe Mux Address Register (PRB_MX_ADDR) bit definitions.
+ */
+enum {
+ PRB_MX_ADDR_ARE = (1 << 16),
+ PRB_MX_ADDR_UP = (1 << 15),
+ PRB_MX_ADDR_SWP = (1 << 14),
+
+ /* Module select values. */
+ PRB_MX_ADDR_MAX_MODS = 21,
+ PRB_MX_ADDR_MOD_SEL_SHIFT = 9,
+ PRB_MX_ADDR_MOD_SEL_TBD = 0,
+ PRB_MX_ADDR_MOD_SEL_IDE1 = 1,
+ PRB_MX_ADDR_MOD_SEL_IDE2 = 2,
+ PRB_MX_ADDR_MOD_SEL_FRB = 3,
+ PRB_MX_ADDR_MOD_SEL_ODE1 = 4,
+ PRB_MX_ADDR_MOD_SEL_ODE2 = 5,
+ PRB_MX_ADDR_MOD_SEL_DA1 = 6,
+ PRB_MX_ADDR_MOD_SEL_DA2 = 7,
+ PRB_MX_ADDR_MOD_SEL_IMP1 = 8,
+ PRB_MX_ADDR_MOD_SEL_IMP2 = 9,
+ PRB_MX_ADDR_MOD_SEL_OMP1 = 10,
+ PRB_MX_ADDR_MOD_SEL_OMP2 = 11,
+ PRB_MX_ADDR_MOD_SEL_ORS1 = 12,
+ PRB_MX_ADDR_MOD_SEL_ORS2 = 13,
+ PRB_MX_ADDR_MOD_SEL_REG = 14,
+ PRB_MX_ADDR_MOD_SEL_MAC1 = 16,
+ PRB_MX_ADDR_MOD_SEL_MAC2 = 17,
+ PRB_MX_ADDR_MOD_SEL_VQM1 = 18,
+ PRB_MX_ADDR_MOD_SEL_VQM2 = 19,
+ PRB_MX_ADDR_MOD_SEL_MOP = 20,
+ /* Bit fields indicating which modules
+ * are valid for each clock domain.
+ */
+ PRB_MX_ADDR_VALID_SYS_MOD = 0x000f7ff7,
+ PRB_MX_ADDR_VALID_PCI_MOD = 0x000040c1,
+ PRB_MX_ADDR_VALID_XGM_MOD = 0x00037309,
+ PRB_MX_ADDR_VALID_FC_MOD = 0x00003001,
+ PRB_MX_ADDR_VALID_TOTAL = 34,
+
+ /* Clock domain values. */
+ PRB_MX_ADDR_CLOCK_SHIFT = 6,
+ PRB_MX_ADDR_SYS_CLOCK = 0,
+ PRB_MX_ADDR_PCI_CLOCK = 2,
+ PRB_MX_ADDR_FC_CLOCK = 5,
+ PRB_MX_ADDR_XGM_CLOCK = 6,
+
+ PRB_MX_ADDR_MAX_MUX = 64,
};
/*
@@ -737,6 +861,21 @@ enum {
PRB_MX_DATA = 0xfc, /* Use semaphore */
};
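+/* Small receive-buffer geometry: sizes, split setting and padding differ
+ * depending on whether the platform handles unaligned accesses efficiently.
+ */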
+#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
+#define SMALL_BUFFER_SIZE 256
+#define SMALL_BUF_MAP_SIZE SMALL_BUFFER_SIZE
+#define SPLT_SETTING FSC_DBRST_1024
+#define SPLT_LEN 0
+#define QLGE_SB_PAD 0
+#else
+#define SMALL_BUFFER_SIZE 512
+#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
+#define SPLT_SETTING FSC_SH
+#define SPLT_LEN (SPLT_HDR_EP | \
+ min(SMALL_BUF_MAP_SIZE, 1023))
+#define QLGE_SB_PAD 32
+#endif
+
/*
* CAM output format.
*/
@@ -1421,7 +1560,7 @@ struct nic_stats {
u64 rx_nic_fifo_drop;
};
-/* Address/Length pairs for the coredump. */
+/* Firmware coredump internal register address/length pairs. */
enum {
MPI_CORE_REGS_ADDR = 0x00030000,
MPI_CORE_REGS_CNT = 127,
@@ -1476,7 +1615,7 @@ struct mpi_coredump_segment_header {
u8 description[16];
};
-/* Reg dump segment numbers. */
+/* Firmware coredump header segment numbers. */
enum {
CORE_SEG_NUM = 1,
TEST_LOGIC_SEG_NUM = 2,
@@ -1527,6 +1666,67 @@ enum {
};
+/* There are 64 generic NIC registers. */
+#define NIC_REGS_DUMP_WORD_COUNT 64
+/* XGMAC word count. */
+#define XGMAC_DUMP_WORD_COUNT (XGMAC_REGISTER_END / 4)
+/* Word counts for the SERDES blocks. */
+#define XG_SERDES_XAUI_AN_COUNT 14
+#define XG_SERDES_XAUI_HSS_PCS_COUNT 33
+#define XG_SERDES_XFI_AN_COUNT 14
+#define XG_SERDES_XFI_TRAIN_COUNT 12
+#define XG_SERDES_XFI_HSS_PCS_COUNT 15
+#define XG_SERDES_XFI_HSS_TX_COUNT 32
+#define XG_SERDES_XFI_HSS_RX_COUNT 32
+#define XG_SERDES_XFI_HSS_PLL_COUNT 32
+
+/* There are 2 CNA ETS and 8 NIC ETS registers. */
+#define ETS_REGS_DUMP_WORD_COUNT 10
+
+/* Each probe mux entry stores the probe type plus 64 entries
+ * that are each 64 bits in length. There are a total of
+ * 34 (PRB_MX_ADDR_VALID_TOTAL) valid probes.
+ */
+#define PRB_MX_ADDR_PRB_WORD_COUNT (1 + (PRB_MX_ADDR_MAX_MUX * 2))
+#define PRB_MX_DUMP_TOT_COUNT (PRB_MX_ADDR_PRB_WORD_COUNT * \
+ PRB_MX_ADDR_VALID_TOTAL)
+/* Each routing entry consists of 4 32-bit words.
+ * They are route type, index, index word, and result.
+ * There are 2 route blocks with 8 entries each and
+ * 2 NIC blocks with 16 entries each.
+ * The total is 48 entries with 4 words each.
+ */
+#define RT_IDX_DUMP_ENTRIES 48
+#define RT_IDX_DUMP_WORDS_PER_ENTRY 4
+#define RT_IDX_DUMP_TOT_WORDS (RT_IDX_DUMP_ENTRIES * \
+ RT_IDX_DUMP_WORDS_PER_ENTRY)
+/* There are 10 address blocks in the filter, each with
+ * a different entry count and word count per entry.
+ */
+#define MAC_ADDR_DUMP_ENTRIES \
+ ((MAC_ADDR_MAX_CAM_ENTRIES * MAC_ADDR_MAX_CAM_WCOUNT) + \
+ (MAC_ADDR_MAX_MULTICAST_ENTRIES * MAC_ADDR_MAX_MULTICAST_WCOUNT) + \
+ (MAC_ADDR_MAX_VLAN_ENTRIES * MAC_ADDR_MAX_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MCAST_FLTR_ENTRIES * MAC_ADDR_MAX_MCAST_FLTR_WCOUNT) + \
+ (MAC_ADDR_MAX_FC_MAC_ENTRIES * MAC_ADDR_MAX_FC_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_MAC_ENTRIES * MAC_ADDR_MAX_MGMT_MAC_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_VLAN_ENTRIES * MAC_ADDR_MAX_MGMT_VLAN_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V4_ENTRIES * MAC_ADDR_MAX_MGMT_V4_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_V6_ENTRIES * MAC_ADDR_MAX_MGMT_V6_WCOUNT) + \
+ (MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES * MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT))
+#define MAC_ADDR_DUMP_WORDS_PER_ENTRY 2
+#define MAC_ADDR_DUMP_TOT_WORDS (MAC_ADDR_DUMP_ENTRIES * \
+ MAC_ADDR_DUMP_WORDS_PER_ENTRY)
+/* Maximum of 4 functions whose semaphore registers are
+ * in the coredump.
+ */
+#define MAX_SEMAPHORE_FUNCTIONS 4
+/* Defines for accessing the MPI shadow registers. */
+#define RISC_124 0x0003007c
+#define RISC_127 0x0003007f
+#define SHADOW_OFFSET 0xb0000000
+#define SHADOW_REG_SHIFT 20
+
struct ql_nic_misc {
u32 rx_ring_count;
u32 tx_ring_count;
@@ -1568,6 +1768,199 @@ struct ql_reg_dump {
u32 ets[8+2];
};
+struct ql_mpi_coredump {
+ /* segment 0 */
+ struct mpi_coredump_global_header mpi_global_header;
+
+ /* segment 1 */
+ struct mpi_coredump_segment_header core_regs_seg_hdr;
+ u32 mpi_core_regs[MPI_CORE_REGS_CNT];
+ u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
+
+ /* segment 2 */
+ struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
+ u32 test_logic_regs[TEST_REGS_CNT];
+
+ /* segment 3 */
+ struct mpi_coredump_segment_header rmii_regs_seg_hdr;
+ u32 rmii_regs[RMII_REGS_CNT];
+
+ /* segment 4 */
+ struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
+ u32 fcmac1_regs[FCMAC_REGS_CNT];
+
+ /* segment 5 */
+ struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
+ u32 fcmac2_regs[FCMAC_REGS_CNT];
+
+ /* segment 6 */
+ struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
+ u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 7 */
+ struct mpi_coredump_segment_header ide_regs_seg_hdr;
+ u32 ide_regs[IDE_REGS_CNT];
+
+ /* segment 8 */
+ struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
+ u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 9 */
+ struct mpi_coredump_segment_header smbus_regs_seg_hdr;
+ u32 smbus_regs[SMBUS_REGS_CNT];
+
+ /* segment 10 */
+ struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
+ u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
+
+ /* segment 11 */
+ struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
+ u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
+
+ /* segment 12 */
+ struct mpi_coredump_segment_header i2c_regs_seg_hdr;
+ u32 i2c_regs[I2C_REGS_CNT];
+ /* segment 13 */
+ struct mpi_coredump_segment_header memc_regs_seg_hdr;
+ u32 memc_regs[MEMC_REGS_CNT];
+
+ /* segment 14 */
+ struct mpi_coredump_segment_header pbus_regs_seg_hdr;
+ u32 pbus_regs[PBUS_REGS_CNT];
+
+ /* segment 15 */
+ struct mpi_coredump_segment_header mde_regs_seg_hdr;
+ u32 mde_regs[MDE_REGS_CNT];
+
+ /* segment 16 */
+ struct mpi_coredump_segment_header nic_regs_seg_hdr;
+ u32 nic_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 17 */
+ struct mpi_coredump_segment_header nic2_regs_seg_hdr;
+ u32 nic2_regs[NIC_REGS_DUMP_WORD_COUNT];
+
+ /* segment 18 */
+ struct mpi_coredump_segment_header xgmac1_seg_hdr;
+ u32 xgmac1[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 19 */
+ struct mpi_coredump_segment_header xgmac2_seg_hdr;
+ u32 xgmac2[XGMAC_DUMP_WORD_COUNT];
+
+ /* segment 20 */
+ struct mpi_coredump_segment_header code_ram_seg_hdr;
+ u32 code_ram[CODE_RAM_CNT];
+
+ /* segment 21 */
+ struct mpi_coredump_segment_header memc_ram_seg_hdr;
+ u32 memc_ram[MEMC_RAM_CNT];
+
+ /* segment 22 */
+ struct mpi_coredump_segment_header xaui_an_hdr;
+ u32 serdes_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 23 */
+ struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
+ u32 serdes_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 24 */
+ struct mpi_coredump_segment_header xfi_an_hdr;
+ u32 serdes_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 25 */
+ struct mpi_coredump_segment_header xfi_train_hdr;
+ u32 serdes_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 26 */
+ struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
+ u32 serdes_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 27 */
+ struct mpi_coredump_segment_header xfi_hss_tx_hdr;
+ u32 serdes_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 28 */
+ struct mpi_coredump_segment_header xfi_hss_rx_hdr;
+ u32 serdes_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 29 */
+ struct mpi_coredump_segment_header xfi_hss_pll_hdr;
+ u32 serdes_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 30 */
+ struct mpi_coredump_segment_header misc_nic_seg_hdr;
+ struct ql_nic_misc misc_nic_info;
+
+ /* segment 31 */
+ /* one interrupt state for each CQ */
+ struct mpi_coredump_segment_header intr_states_seg_hdr;
+ u32 intr_states[MAX_RX_RINGS];
+
+ /* segment 32 */
+ /* 3 cam words each for 16 unicast,
+ * 2 cam words for each of 32 multicast.
+ */
+ struct mpi_coredump_segment_header cam_entries_seg_hdr;
+ u32 cam_entries[(16 * 3) + (32 * 3)];
+
+ /* segment 33 */
+ struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
+ u32 nic_routing_words[16];
+ /* segment 34 */
+ struct mpi_coredump_segment_header ets_seg_hdr;
+ u32 ets[ETS_REGS_DUMP_WORD_COUNT];
+
+ /* segment 35 */
+ struct mpi_coredump_segment_header probe_dump_seg_hdr;
+ u32 probe_dump[PRB_MX_DUMP_TOT_COUNT];
+
+ /* segment 36 */
+ struct mpi_coredump_segment_header routing_reg_seg_hdr;
+ u32 routing_regs[RT_IDX_DUMP_TOT_WORDS];
+
+ /* segment 37 */
+ struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
+ u32 mac_prot_regs[MAC_ADDR_DUMP_TOT_WORDS];
+
+ /* segment 38 */
+ struct mpi_coredump_segment_header xaui2_an_hdr;
+ u32 serdes2_xaui_an[XG_SERDES_XAUI_AN_COUNT];
+
+ /* segment 39 */
+ struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
+ u32 serdes2_xaui_hss_pcs[XG_SERDES_XAUI_HSS_PCS_COUNT];
+
+ /* segment 40 */
+ struct mpi_coredump_segment_header xfi2_an_hdr;
+ u32 serdes2_xfi_an[XG_SERDES_XFI_AN_COUNT];
+
+ /* segment 41 */
+ struct mpi_coredump_segment_header xfi2_train_hdr;
+ u32 serdes2_xfi_train[XG_SERDES_XFI_TRAIN_COUNT];
+
+ /* segment 42 */
+ struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
+ u32 serdes2_xfi_hss_pcs[XG_SERDES_XFI_HSS_PCS_COUNT];
+
+ /* segment 43 */
+ struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
+ u32 serdes2_xfi_hss_tx[XG_SERDES_XFI_HSS_TX_COUNT];
+
+ /* segment 44 */
+ struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
+ u32 serdes2_xfi_hss_rx[XG_SERDES_XFI_HSS_RX_COUNT];
+
+ /* segment 45 */
+ struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
+ u32 serdes2_xfi_hss_pll[XG_SERDES_XFI_HSS_PLL_COUNT];
+
+ /* segment 50 */
+ /* semaphore registers, one per function */
+ struct mpi_coredump_segment_header sem_regs_seg_hdr;
+ u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
+};
+
/*
* intr_context structure is used during initialization
* to hook the interrupts. It is also used in a single
@@ -1603,6 +1996,8 @@ enum {
QL_CAM_RT_SET = 8,
QL_SELFTEST = 9,
QL_LB_LINK_UP = 10,
+ QL_FRC_COREDUMP = 11,
+ QL_EEH_FATAL = 12,
};
/* link_status bit definitions */
@@ -1724,6 +2119,8 @@ struct ql_adapter {
u32 port_link_up;
u32 port_init;
u32 link_status;
+ struct ql_mpi_coredump *mpi_coredump;
+ u32 core_is_dumped;
u32 link_config;
u32 led_config;
u32 max_frame_size;
@@ -1736,10 +2133,14 @@ struct ql_adapter {
struct delayed_work mpi_work;
struct delayed_work mpi_port_cfg_work;
struct delayed_work mpi_idc_work;
+ struct delayed_work mpi_core_to_log;
struct completion ide_completion;
struct nic_operations *nic_ops;
u16 device_id;
+ struct timer_list timer;
atomic_t lb_count;
+ /* Keep local copy of current mac address. */
+ char current_mac_addr[6];
};
/*
@@ -1807,6 +2208,7 @@ extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(struct work_struct *work);
void ql_mpi_reset_work(struct work_struct *work);
+void ql_mpi_core_to_log(struct work_struct *work);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
@@ -1817,6 +2219,15 @@ void ql_mpi_port_cfg_work(struct work_struct *work);
int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_cam_route_initialize(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
+int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
+int ql_unpause_mpi_risc(struct ql_adapter *qdev);
+int ql_pause_mpi_risc(struct ql_adapter *qdev);
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count);
+int ql_core_dump(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump);
+int ql_mb_sys_err(struct ql_adapter *qdev);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
@@ -1833,6 +2244,7 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump);
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
+int ql_own_firmware(struct ql_adapter *qdev);
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
#if 1
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 9f58c471076..ff8550d2ca8 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,405 @@
#include "qlge.h"
+/* Read a NIC register from the alternate function. */
+static u32 ql_read_other_func_reg(struct ql_adapter *qdev,
+ u32 reg)
+{
+ u32 register_to_read;
+ u32 reg_val;
+ unsigned int status = 0;
+
+ register_to_read = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_read_mpi_reg(qdev, register_to_read, &reg_val);
+ if (status != 0)
+ return 0xffffffff;
+
+ return reg_val;
+}
+
+/* Write a NIC register on the alternate function. */
+static int ql_write_other_func_reg(struct ql_adapter *qdev,
+ u32 reg, u32 reg_val)
+{
+ u32 register_to_write;
+ int status = 0;
+
+ register_to_write = MPI_NIC_REG_BLOCK
+ | MPI_NIC_READ
+ | (qdev->alt_func << MPI_NIC_FUNCTION_SHIFT)
+ | reg;
+ status = ql_write_mpi_reg(qdev, register_to_write, reg_val);
+
+ return status;
+}
+
+static int ql_wait_other_func_reg_rdy(struct ql_adapter *qdev, u32 reg,
+ u32 bit, u32 err_bit)
+{
+ u32 temp;
+ int count = 10;
+
+ while (count) {
+ temp = ql_read_other_func_reg(qdev, reg);
+
+ /* check for errors */
+ if (temp & err_bit)
+ return -1;
+ else if (temp & bit)
+ return 0;
+ mdelay(10);
+ count--;
+ }
+ return -1;
+}
+
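+/* Read a SERDES register on the alternate function by going through its
+ * XG_SERDES_ADDR/XG_SERDES_DATA pair via the MPI register window.
+ */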
+static int ql_read_other_func_serdes_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XG_SERDES_ADDR/4, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XG_SERDES_ADDR / 4,
+ XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, (XG_SERDES_DATA / 4));
+exit:
+ return status;
+}
+
+/* Read out the SERDES registers */
+static int ql_read_serdes_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
+{
+ int status;
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write32(qdev, XG_SERDES_ADDR, reg | PROC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_reg_rdy(qdev, XG_SERDES_ADDR, XG_SERDES_ADDR_RDY, 0);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read32(qdev, XG_SERDES_DATA);
+exit:
+ return status;
+}
+
+static void ql_get_both_serdes(struct ql_adapter *qdev, u32 addr,
+ u32 *direct_ptr, u32 *indirect_ptr,
+ unsigned int direct_valid, unsigned int indirect_valid)
+{
+ unsigned int status;
+
+ status = 1;
+ if (direct_valid)
+ status = ql_read_serdes_reg(qdev, addr, direct_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *direct_ptr = 0xDEADBEEF;
+
+ status = 1;
+ if (indirect_valid)
+ status = ql_read_other_func_serdes_reg(
+ qdev, addr, indirect_ptr);
+ /* Dead fill any failures or invalids. */
+ if (status)
+ *indirect_ptr = 0xDEADBEEF;
+}
+
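+/* Dump the SERDES register blocks for both functions: the local function
+ * is read directly, the alternate one indirectly; blocks that are powered
+ * down or fail to read are dead-filled with 0xDEADBEEF.
+ */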
+static int ql_get_serdes_regs(struct ql_adapter *qdev,
+ struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ unsigned int xfi_direct_valid, xfi_indirect_valid, xaui_direct_valid;
+ unsigned int xaui_indirect_valid, i;
+ u32 *direct_ptr, temp;
+ u32 *indirect_ptr;
+
+ xfi_direct_valid = xfi_indirect_valid = 0;
+ xaui_direct_valid = xaui_indirect_valid = 1;
+
+ /* The XAUI needs to be read out per port */
+ if (qdev->func & 1) {
+ /* We are NIC 2 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ } else {
+ /* We are NIC 1 */
+ status = ql_read_other_func_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_indirect_valid = 0;
+
+ status = ql_read_serdes_reg(qdev,
+ XG_SERDES_XAUI_HSS_PCS_START, &temp);
+ if (status)
+ temp = XG_SERDES_ADDR_XAUI_PWR_DOWN;
+ if ((temp & XG_SERDES_ADDR_XAUI_PWR_DOWN) ==
+ XG_SERDES_ADDR_XAUI_PWR_DOWN)
+ xaui_direct_valid = 0;
+ }
+
+ /*
+ * The XFI register is shared, so we only need to read it
+ * from one function and then check the bits.
+ */
+ status = ql_read_serdes_reg(qdev, XG_SERDES_ADDR_STS, &temp);
+ if (status)
+ temp = 0;
+
+ if ((temp & XG_SERDES_ADDR_XFI1_PWR_UP) ==
+ XG_SERDES_ADDR_XFI1_PWR_UP) {
+ /* now see if i'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_indirect_valid = 1;
+ else
+ xfi_direct_valid = 1;
+ }
+ if ((temp & XG_SERDES_ADDR_XFI2_PWR_UP) ==
+ XG_SERDES_ADDR_XFI2_PWR_UP) {
+ /* now see if i'm NIC 1 or NIC 2 */
+ if (qdev->func & 1)
+ /* I'm NIC 2, so the indirect (NIC1) xfi is up. */
+ xfi_direct_valid = 1;
+ else
+ xfi_indirect_valid = 1;
+ }
+
+ /* Get XAUI_AN register block. */
+ if (qdev->func & 1) {
+ /* Function 2 is direct */
+ direct_ptr = mpi_coredump->serdes2_xaui_an;
+ indirect_ptr = mpi_coredump->serdes_xaui_an;
+ } else {
+ /* Function 1 is direct */
+ direct_ptr = mpi_coredump->serdes_xaui_an;
+ indirect_ptr = mpi_coredump->serdes2_xaui_an;
+ }
+
+ for (i = 0; i <= 0x00000034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xaui_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes2_xaui_hss_pcs;
+ }
+
+ for (i = 0x800; i <= 0x880; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xaui_direct_valid, xaui_indirect_valid);
+
+ /* Get XAUI_XFI_AN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_an;
+ indirect_ptr = mpi_coredump->serdes_xfi_an;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_an;
+ indirect_ptr = mpi_coredump->serdes2_xfi_an;
+ }
+
+ for (i = 0x1000; i <= 0x1034; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_TRAIN register block. */
+ if (qdev->func & 1) {
+ direct_ptr = mpi_coredump->serdes2_xfi_train;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_train;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_train;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_train;
+ }
+
+ for (i = 0x1050; i <= 0x107c; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_PCS register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_pcs;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xfi_hss_pcs;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_pcs;
+ }
+
+ for (i = 0x1800; i <= 0x1838; i += 4, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_TX register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_tx;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_tx;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_hss_tx;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_tx;
+ }
+ for (i = 0x1c00; i <= 0x1c1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_RX register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_rx;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_rx;
+ } else {
+ direct_ptr = mpi_coredump->serdes_xfi_hss_rx;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_rx;
+ }
+
+ for (i = 0x1c40; i <= 0x1c5f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+
+ /* Get XAUI_XFI_HSS_PLL register block. */
+ if (qdev->func & 1) {
+ direct_ptr =
+ mpi_coredump->serdes2_xfi_hss_pll;
+ indirect_ptr =
+ mpi_coredump->serdes_xfi_hss_pll;
+ } else {
+ direct_ptr =
+ mpi_coredump->serdes_xfi_hss_pll;
+ indirect_ptr =
+ mpi_coredump->serdes2_xfi_hss_pll;
+ }
+ for (i = 0x1e00; i <= 0x1e1f; i++, direct_ptr++, indirect_ptr++)
+ ql_get_both_serdes(qdev, i, direct_ptr, indirect_ptr,
+ xfi_direct_valid, xfi_indirect_valid);
+ return 0;
+}
+
+static int ql_read_other_func_xgmac_reg(struct ql_adapter *qdev, u32 reg,
+ u32 *data)
+{
+ int status = 0;
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* set up for reg read */
+ ql_write_other_func_reg(qdev, XGMAC_ADDR / 4, reg | XGMAC_ADDR_R);
+
+ /* wait for reg to come ready */
+ status = ql_wait_other_func_reg_rdy(qdev, XGMAC_ADDR / 4,
+ XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
+ if (status)
+ goto exit;
+
+ /* get the data */
+ *data = ql_read_other_func_reg(qdev, XGMAC_DATA / 4);
+exit:
+ return status;
+}
+
+/* Read the 400 xgmac control/statistics registers
+ * skipping unused locations.
+ */
+static int ql_get_xgmac_regs(struct ql_adapter *qdev, u32 *buf,
+ unsigned int other_function)
+{
+ int status = 0;
+ int i;
+
+ for (i = PAUSE_SRC_LO; i < XGMAC_REGISTER_END; i += 4, buf++) {
+ /* We're reading 400 xgmac registers, but we filter out
+ * several locations that are non-responsive to reads.
+ */
+ if ((i == 0x00000114) ||
+ (i == 0x00000118) ||
+ (i == 0x0000013c) ||
+ (i == 0x00000140) ||
+ (i > 0x00000150 && i < 0x000001fc) ||
+ (i > 0x00000278 && i < 0x000002a0) ||
+ (i > 0x000002c0 && i < 0x000002cf) ||
+ (i > 0x000002dc && i < 0x000002f0) ||
+ (i > 0x000003c8 && i < 0x00000400) ||
+ (i > 0x00000400 && i < 0x00000410) ||
+ (i > 0x00000410 && i < 0x00000420) ||
+ (i > 0x00000420 && i < 0x00000430) ||
+ (i > 0x00000430 && i < 0x00000440) ||
+ (i > 0x00000440 && i < 0x00000450) ||
+ (i > 0x00000450 && i < 0x00000500) ||
+ (i > 0x0000054c && i < 0x00000568) ||
+ (i > 0x000005c8 && i < 0x00000600))
+ continue;
+
+ if (other_function)
+ status = ql_read_other_func_xgmac_reg(qdev, i, buf);
+ else
+ status = ql_read_xgmac_reg(qdev, i, buf);
+
+ /* Dead fill any register that fails to read. */
+ if (status)
+ *buf = 0xdeadbeef;
+ }
+ return status;
+}
static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
{
@@ -43,8 +443,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
status = ql_get_mac_addr_reg(qdev,
MAC_ADDR_TYPE_CAM_MAC, i, value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of mac index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register.\n");
goto err;
}
*buf++ = value[0]; /* lower MAC address */
@@ -55,8 +455,8 @@ static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
status = ql_get_mac_addr_reg(qdev,
MAC_ADDR_TYPE_MULTI_MAC, i, value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of mac index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of mac index register.\n");
goto err;
}
*buf++ = value[0]; /* lower Mcast address */
@@ -79,8 +479,8 @@ static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
for (i = 0; i < 16; i++) {
status = ql_get_routing_reg(qdev, i, &value);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Failed read of routing index register.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed read of routing index register.\n");
goto err;
} else {
*buf++ = value;
@@ -91,6 +491,226 @@ err:
return status;
}
+/* Read the MPI Processor shadow registers */
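+/* Each shadow register is selected by writing (SHADOW_OFFSET |
+ * i << SHADOW_REG_SHIFT) to RISC_124; its value is then read back
+ * through RISC_127.
+ */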
+static int ql_get_mpi_shadow_regs(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 i;
+ int status;
+
+ for (i = 0; i < MPI_CORE_SH_REGS_CNT; i++, buf++) {
+ status = ql_write_mpi_reg(qdev, RISC_124,
+ (SHADOW_OFFSET | i << SHADOW_REG_SHIFT));
+ if (status)
+ goto end;
+ status = ql_read_mpi_reg(qdev, RISC_127, buf);
+ if (status)
+ goto end;
+ }
+end:
+ return status;
+}
+
+/* Read the MPI Processor core registers */
+static int ql_get_mpi_regs(struct ql_adapter *qdev, u32 *buf,
+ u32 offset, u32 count)
+{
+ int i, status = 0;
+ for (i = 0; i < count; i++, buf++) {
+ status = ql_read_mpi_reg(qdev, offset + i, buf);
+ if (status)
+ return status;
+ }
+ return status;
+}
+
+/* Read the ASIC probe dump */
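+/* For each module valid on the given clock domain, step through all
+ * PRB_MX_ADDR_MAX_MUX mux selects and store the low and high probe data
+ * words; the probe control word is recorded once per module.
+ */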
+static unsigned int *ql_get_probe(struct ql_adapter *qdev, u32 clock,
+ u32 valid, u32 *buf)
+{
+ u32 module, mux_sel, probe, lo_val, hi_val;
+
+ for (module = 0; module < PRB_MX_ADDR_MAX_MODS; module++) {
+ if (!((valid >> module) & 1))
+ continue;
+ for (mux_sel = 0; mux_sel < PRB_MX_ADDR_MAX_MUX; mux_sel++) {
+ probe = clock
+ | PRB_MX_ADDR_ARE
+ | mux_sel
+ | (module << PRB_MX_ADDR_MOD_SEL_SHIFT);
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ lo_val = ql_read32(qdev, PRB_MX_DATA);
+ if (mux_sel == 0) {
+ *buf = probe;
+ buf++;
+ }
+ probe |= PRB_MX_ADDR_UP;
+ ql_write32(qdev, PRB_MX_ADDR, probe);
+ hi_val = ql_read32(qdev, PRB_MX_DATA);
+ *buf = lo_val;
+ buf++;
+ *buf = hi_val;
+ buf++;
+ }
+ }
+ return buf;
+}
+
+static int ql_get_probe_dump(struct ql_adapter *qdev, unsigned int *buf)
+{
+ /* First we have to enable the probe mux */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_PRB_CTL, MPI_TEST_FUNC_PRB_EN);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_SYS_CLOCK,
+ PRB_MX_ADDR_VALID_SYS_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_PCI_CLOCK,
+ PRB_MX_ADDR_VALID_PCI_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_XGM_CLOCK,
+ PRB_MX_ADDR_VALID_XGM_MOD, buf);
+ buf = ql_get_probe(qdev, PRB_MX_ADDR_FC_CLOCK,
+ PRB_MX_ADDR_VALID_FC_MOD, buf);
+ return 0;
+
+}
+
+/* Read out the routing index registers */
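+/* Two routing blocks of 8 entries and two NIC blocks of 16 entries;
+ * each entry is dumped as four words: type, index, index register
+ * and result data.
+ */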
+static int ql_get_routing_index_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ int status;
+ u32 type, index, index_max;
+ u32 result_index;
+ u32 result_data;
+ u32 val;
+
+ status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
+ if (status)
+ return status;
+
+ for (type = 0; type < 4; type++) {
+ if (type < 2)
+ index_max = 8;
+ else
+ index_max = 16;
+ for (index = 0; index < index_max; index++) {
+ val = RT_IDX_RS
+ | (type << RT_IDX_TYPE_SHIFT)
+ | (index << RT_IDX_IDX_SHIFT);
+ ql_write32(qdev, RT_IDX, val);
+ result_index = 0;
+ while ((result_index & RT_IDX_MR) == 0)
+ result_index = ql_read32(qdev, RT_IDX);
+ result_data = ql_read32(qdev, RT_DATA);
+ *buf = type;
+ buf++;
+ *buf = index;
+ buf++;
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
+ return status;
+}
+
+/* Read out the MAC protocol registers */
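+/* Walk all MAC_ADDR_TYPE_COUNT address types and dump two words (index
+ * register and data) for every index/offset pair defined for the type.
+ */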
+static void ql_get_mac_protocol_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 result_index, result_data;
+ u32 type;
+ u32 index;
+ u32 offset;
+ u32 val;
+ u32 initial_val = MAC_ADDR_RS;
+ u32 max_index;
+ u32 max_offset;
+
+ for (type = 0; type < MAC_ADDR_TYPE_COUNT; type++) {
+ switch (type) {
+
+ case 0: /* CAM */
+ initial_val |= MAC_ADDR_ADR;
+ max_index = MAC_ADDR_MAX_CAM_ENTRIES;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 1: /* Multicast MAC Address */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 2: /* VLAN filter mask */
+ case 3: /* MC filter mask */
+ max_index = MAC_ADDR_MAX_CAM_WCOUNT;
+ max_offset = MAC_ADDR_MAX_CAM_WCOUNT;
+ break;
+ case 4: /* FC MAC addresses */
+ max_index = MAC_ADDR_MAX_FC_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_FC_MAC_WCOUNT;
+ break;
+ case 5: /* Mgmt MAC addresses */
+ max_index = MAC_ADDR_MAX_MGMT_MAC_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_MAC_WCOUNT;
+ break;
+ case 6: /* Mgmt VLAN addresses */
+ max_index = MAC_ADDR_MAX_MGMT_VLAN_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_VLAN_WCOUNT;
+ break;
+ case 7: /* Mgmt IPv4 address */
+ max_index = MAC_ADDR_MAX_MGMT_V4_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V4_WCOUNT;
+ break;
+ case 8: /* Mgmt IPv6 address */
+ max_index = MAC_ADDR_MAX_MGMT_V6_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_V6_WCOUNT;
+ break;
+ case 9: /* Mgmt TCP/UDP Dest port */
+ max_index = MAC_ADDR_MAX_MGMT_TU_DP_ENTRIES;
+ max_offset = MAC_ADDR_MAX_MGMT_TU_DP_WCOUNT;
+ break;
+ default:
+ printk(KERN_ERR "Bad type!!! 0x%08x\n", type);
+ max_index = 0;
+ max_offset = 0;
+ break;
+ }
+ for (index = 0; index < max_index; index++) {
+ for (offset = 0; offset < max_offset; offset++) {
+ val = initial_val
+ | (type << MAC_ADDR_TYPE_SHIFT)
+ | (index << MAC_ADDR_IDX_SHIFT)
+ | (offset);
+ ql_write32(qdev, MAC_ADDR_IDX, val);
+ result_index = 0;
+ while ((result_index & MAC_ADDR_MR) == 0) {
+ result_index = ql_read32(qdev,
+ MAC_ADDR_IDX);
+ }
+ result_data = ql_read32(qdev, MAC_ADDR_DATA);
+ *buf = result_index;
+ buf++;
+ *buf = result_data;
+ buf++;
+ }
+ }
+ }
+}
+
+static void ql_get_sem_registers(struct ql_adapter *qdev, u32 *buf)
+{
+ u32 func_num, reg, reg_val;
+ int status;
+
+ for (func_num = 0; func_num < MAX_SEMAPHORE_FUNCTIONS; func_num++) {
+ reg = MPI_NIC_REG_BLOCK
+ | (func_num << MPI_NIC_FUNCTION_SHIFT)
+ | (SEM / 4);
+ status = ql_read_mpi_reg(qdev, reg, &reg_val);
+ *buf = reg_val;
+ /* if the read failed then dead fill the element. */
+ if (status)
+ *buf = 0xdeadbeef;
+ buf++;
+ }
+}
+
/* Create a coredump segment header */
static void ql_build_coredump_seg_header(
struct mpi_coredump_segment_header *seg_hdr,
@@ -103,6 +723,526 @@ static void ql_build_coredump_seg_header(
memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
}
+/*
+ * This function should be called when a coredump / probedump
+ * is to be extracted from the HBA. It is assumed there is a
+ * qdev structure that contains the base address of the register
+ * space for this function as well as a coredump structure that
+ * will contain the dump.
+ */
+int ql_core_dump(struct ql_adapter *qdev, struct ql_mpi_coredump *mpi_coredump)
+{
+ int status;
+ int i;
+
+ if (!mpi_coredump) {
+ netif_err(qdev, drv, qdev->ndev, "No memory available.\n");
+ return -ENOMEM;
+ }
+
+ /* Try to get the spinlock, but don't worry if
+ * it isn't available. If the firmware died it
+ * might be holding the sem.
+ */
+ ql_sem_spinlock(qdev, SEM_PROC_REG_MASK);
+
+ status = ql_pause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC pause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Insert the global header */
+ memset(&(mpi_coredump->mpi_global_header), 0,
+ sizeof(struct mpi_coredump_global_header));
+ mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
+ mpi_coredump->mpi_global_header.headerSize =
+ sizeof(struct mpi_coredump_global_header);
+ mpi_coredump->mpi_global_header.imageSize =
+ sizeof(struct ql_mpi_coredump);
+ memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
+ sizeof(mpi_coredump->mpi_global_header.idString));
+
+ /* Get generic NIC reg dump */
+ ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
+ NIC1_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic_regs), "NIC1 Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_regs_seg_hdr,
+ NIC2_CONTROL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->nic2_regs), "NIC2 Registers");
+
+ /* Get XGMac registers. (Segment 18, Rev C. step 21) */
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac1_seg_hdr,
+ NIC1_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac1), "NIC1 XGMac Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xgmac2_seg_hdr,
+ NIC2_XGMAC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->xgmac2), "NIC2 XGMac Registers");
+
+ if (qdev->func & 1) {
+ /* Odd means our function is NIC 2 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 1);
+ } else {
+ /* Even means our function is NIC 1 */
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic_regs[i] =
+ ql_read32(qdev, i * sizeof(u32));
+ for (i = 0; i < NIC_REGS_DUMP_WORD_COUNT; i++)
+ mpi_coredump->nic2_regs[i] =
+ ql_read_other_func_reg(qdev, (i * sizeof(u32)) / 4);
+
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac1[0], 0);
+ ql_get_xgmac_regs(qdev, &mpi_coredump->xgmac2[0], 1);
+ }
+
+ /* Rev C. Step 20a */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_an_hdr,
+ XAUI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_an),
+ "XAUI AN Registers");
+
+ /* Rev C. Step 20b */
+ ql_build_coredump_seg_header(&mpi_coredump->xaui_hss_pcs_hdr,
+ XAUI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xaui_hss_pcs),
+ "XAUI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_an_hdr, XFI_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_an),
+ "XFI AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_train_hdr,
+ XFI_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_train),
+ "XFI TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pcs_hdr,
+ XFI_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pcs),
+ "XFI HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_tx_hdr,
+ XFI_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_tx),
+ "XFI HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_rx_hdr,
+ XFI_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_rx),
+ "XFI HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi_hss_pll_hdr,
+ XFI_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes_xfi_hss_pll),
+ "XFI HSS PLL Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_an_hdr,
+ XAUI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_an),
+ "XAUI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xaui2_hss_pcs_hdr,
+ XAUI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xaui_hss_pcs),
+ "XAUI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_an_hdr,
+ XFI2_AN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_an),
+ "XFI2 AN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_train_hdr,
+ XFI2_TRAIN_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_train),
+ "XFI2 TRAIN Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pcs_hdr,
+ XFI2_HSS_PCS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pcs),
+ "XFI2 HSS PCS Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_tx_hdr,
+ XFI2_HSS_TX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_tx),
+ "XFI2 HSS TX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_rx_hdr,
+ XFI2_HSS_RX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_rx),
+ "XFI2 HSS RX Registers");
+
+ ql_build_coredump_seg_header(&mpi_coredump->xfi2_hss_pll_hdr,
+ XFI2_HSS_PLL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->serdes2_xfi_hss_pll),
+ "XFI2 HSS PLL Registers");
+
+ status = ql_get_serdes_regs(qdev, mpi_coredump);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of Serdes Registers. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->core_regs_seg_hdr,
+ CORE_SEG_NUM,
+ sizeof(mpi_coredump->core_regs_seg_hdr) +
+ sizeof(mpi_coredump->mpi_core_regs) +
+ sizeof(mpi_coredump->mpi_core_sh_regs),
+ "Core Registers");
+
+ /* Get the MPI Core Registers */
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mpi_core_regs[0],
+ MPI_CORE_REGS_ADDR, MPI_CORE_REGS_CNT);
+ if (status)
+ goto err;
+ /* Get the 16 MPI shadow registers */
+ status = ql_get_mpi_shadow_regs(qdev,
+ &mpi_coredump->mpi_core_sh_regs[0]);
+ if (status)
+ goto err;
+
+ /* Get the Test Logic Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->test_logic_regs_seg_hdr,
+ TEST_LOGIC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->test_logic_regs),
+ "Test Logic Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->test_logic_regs[0],
+ TEST_REGS_ADDR, TEST_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the RMII Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->rmii_regs_seg_hdr,
+ RMII_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->rmii_regs),
+ "RMII Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->rmii_regs[0],
+ RMII_REGS_ADDR, RMII_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC1 Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac1_regs_seg_hdr,
+ FCMAC1_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac1_regs),
+ "FCMAC1 Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac1_regs[0],
+ FCMAC1_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FCMAC2 Registers */
+
+ ql_build_coredump_seg_header(&mpi_coredump->fcmac2_regs_seg_hdr,
+ FCMAC2_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fcmac2_regs),
+ "FCMAC2 Registers");
+
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fcmac2_regs[0],
+ FCMAC2_REGS_ADDR, FCMAC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc1_mbx_regs_seg_hdr,
+ FC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc1_mbx_regs),
+ "FC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc1_mbx_regs[0],
+ FC1_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the IDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->ide_regs_seg_hdr,
+ IDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ide_regs),
+ "IDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->ide_regs[0],
+ IDE_REGS_ADDR, IDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC1 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic1_mbx_regs_seg_hdr,
+ NIC1_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic1_mbx_regs),
+ "NIC1 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic1_mbx_regs[0],
+ NIC1_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the SMBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->smbus_regs_seg_hdr,
+ SMBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->smbus_regs),
+ "SMBus Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->smbus_regs[0],
+ SMBUS_REGS_ADDR, SMBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the FC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->fc2_mbx_regs_seg_hdr,
+ FC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->fc2_mbx_regs),
+ "FC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->fc2_mbx_regs[0],
+ FC2_MBX_REGS_ADDR, FC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the NIC2 MBX Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->nic2_mbx_regs_seg_hdr,
+ NIC2_MBOX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic2_mbx_regs),
+ "NIC2 MBox Regs");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->nic2_mbx_regs[0],
+ NIC2_MBX_REGS_ADDR, NIC_MBX_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the I2C Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->i2c_regs_seg_hdr,
+ I2C_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->i2c_regs),
+ "I2C Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->i2c_regs[0],
+ I2C_REGS_ADDR, I2C_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MEMC Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_regs_seg_hdr,
+ MEMC_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_regs),
+ "MEMC Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->memc_regs[0],
+ MEMC_REGS_ADDR, MEMC_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the PBus Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->pbus_regs_seg_hdr,
+ PBUS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->pbus_regs),
+ "PBUS Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->pbus_regs[0],
+ PBUS_REGS_ADDR, PBUS_REGS_CNT);
+ if (status)
+ goto err;
+
+ /* Get the MDE Registers */
+ ql_build_coredump_seg_header(&mpi_coredump->mde_regs_seg_hdr,
+ MDE_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mde_regs),
+ "MDE Registers");
+ status = ql_get_mpi_regs(qdev, &mpi_coredump->mde_regs[0],
+ MDE_REGS_ADDR, MDE_REGS_CNT);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
+ MISC_NIC_INFO_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->misc_nic_info),
+ "MISC NIC INFO");
+ mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
+ mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
+ mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
+ mpi_coredump->misc_nic_info.function = qdev->func;
+
+ /* Segment 31 */
+ /* Get indexed register values. */
+ ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
+ INTR_STATES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->intr_states),
+ "INTR States");
+ ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
+ CAM_ENTRIES_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->cam_entries),
+ "CAM Entries");
+ status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
+ ROUTING_WORDS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->nic_routing_words),
+ "Routing Words");
+ status = ql_get_routing_entries(qdev,
+ &mpi_coredump->nic_routing_words[0]);
+ if (status)
+ goto err;
+
+ /* Segment 34 (Rev C. step 23) */
+ ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
+ ETS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->ets),
+ "ETS Registers");
+ status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->probe_dump_seg_hdr,
+ PROBE_DUMP_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->probe_dump),
+ "Probe Dump");
+ ql_get_probe_dump(qdev, &mpi_coredump->probe_dump[0]);
+
+ ql_build_coredump_seg_header(&mpi_coredump->routing_reg_seg_hdr,
+ ROUTING_INDEX_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->routing_regs),
+ "Routing Regs");
+ status = ql_get_routing_index_registers(qdev,
+ &mpi_coredump->routing_regs[0]);
+ if (status)
+ goto err;
+
+ ql_build_coredump_seg_header(&mpi_coredump->mac_prot_reg_seg_hdr,
+ MAC_PROTOCOL_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->mac_prot_regs),
+ "MAC Prot Regs");
+ ql_get_mac_protocol_registers(qdev, &mpi_coredump->mac_prot_regs[0]);
+
+ /* Get the semaphore registers for all 5 functions */
+ ql_build_coredump_seg_header(&mpi_coredump->sem_regs_seg_hdr,
+ SEM_REGS_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header) +
+ sizeof(mpi_coredump->sem_regs), "Sem Registers");
+
+ ql_get_sem_registers(qdev, &mpi_coredump->sem_regs[0]);
+
+ /* Prevent the MPI from restarting while we dump the memory. */
+ ql_write_mpi_reg(qdev, MPI_TEST_FUNC_RST_STS, MPI_TEST_FUNC_RST_FRC);
+
+ /* clear the pause */
+ status = ql_unpause_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC unpause. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ /* Reset the RISC so we can dump RAM */
+ status = ql_hard_reset_mpi_risc(qdev);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed RISC reset. Status = 0x%.08x\n", status);
+ goto err;
+ }
+
+ ql_build_coredump_seg_header(&mpi_coredump->code_ram_seg_hdr,
+ WCS_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->code_ram),
+ "WCS RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->code_ram[0],
+ CODE_RAM_ADDR, CODE_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of CODE RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+
+ /* Insert the segment header */
+ ql_build_coredump_seg_header(&mpi_coredump->memc_ram_seg_hdr,
+ MEMC_RAM_SEG_NUM,
+ sizeof(struct mpi_coredump_segment_header)
+ + sizeof(mpi_coredump->memc_ram),
+ "MEMC RAM");
+ status = ql_dump_risc_ram_area(qdev, &mpi_coredump->memc_ram[0],
+ MEMC_RAM_ADDR, MEMC_RAM_CNT);
+ if (status) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Dump of MEMC RAM. Status = 0x%.08x\n",
+ status);
+ goto err;
+ }
+err:
+ ql_sem_unlock(qdev, SEM_PROC_REG_MASK); /* does flush too */
+ return status;
+
+}
+
+static void ql_get_core_dump(struct ql_adapter *qdev)
+{
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!netif_running(qdev->ndev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Force Coredump can only be done from interface that is up.\n");
+ return;
+ }
+
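+ /* Trigger a system error via the MPI mailbox; the resulting error
+ * event lets the driver's normal error handling capture the
+ * firmware core.
+ */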
+ if (ql_mb_sys_err(qdev)) {
+ netif_err(qdev, ifup, qdev->ndev,
+ "Fail force coredump with ql_mb_sys_err().\n");
+ return;
+ }
+}
+
void ql_gen_reg_dump(struct ql_adapter *qdev,
struct ql_reg_dump *mpi_coredump)
{
@@ -178,6 +1318,37 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
if (status)
return;
+
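+ /* Optionally force a full firmware coredump after the register dump. */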
+ if (test_bit(QL_FRC_COREDUMP, &qdev->flags))
+ ql_get_core_dump(qdev);
+}
+
+/* Coredump to the messages log file using a separate worker thread */
+void ql_mpi_core_to_log(struct work_struct *work)
+{
+ struct ql_adapter *qdev =
+ container_of(work, struct ql_adapter, mpi_core_to_log.work);
+ u32 *tmp, count;
+ int i;
+
+ count = sizeof(struct ql_mpi_coredump) / sizeof(u32);
+ tmp = (u32 *)qdev->mpi_coredump;
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Core is dumping to log file!\n");
+
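+ /* Print eight 32-bit words per line, pausing briefly between lines
+ * to avoid flooding the log.
+ */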
+ for (i = 0; i < count; i += 8) {
+ printk(KERN_ERR "%.08x: %.08x %.08x %.08x %.08x %.08x "
+ "%.08x %.08x %.08x \n", i,
+ tmp[i + 0],
+ tmp[i + 1],
+ tmp[i + 2],
+ tmp[i + 3],
+ tmp[i + 4],
+ tmp[i + 5],
+ tmp[i + 6],
+ tmp[i + 7]);
+ msleep(5);
+ }
}
#ifdef QL_REG_DUMP
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 058fa0a48c6..05b8bde9980 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -67,8 +67,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
goto exit;
}
}
@@ -89,8 +89,8 @@ static int ql_update_ring_coalescing(struct ql_adapter *qdev)
status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
CFG_LCQ, rx_ring->cq_id);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to load CQICB.\n");
goto exit;
}
}
@@ -107,8 +107,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
spin_lock(&qdev->stats_lock);
if (ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
- QPRINTK(qdev, DRV, ERR,
- "Couldn't get xgmac sem.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get xgmac sem.\n");
goto quit;
}
/*
@@ -116,8 +116,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x200; i < 0x280; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -129,8 +130,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x300; i < 0x3d0; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -142,8 +144,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x500; i < 0x540; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -155,8 +158,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
*/
for (i = 0x568; i < 0x5a8; i += 8) {
if (ql_read_xgmac_reg64(qdev, i, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n",
+ i);
goto end;
} else
*iter = data;
@@ -167,8 +171,8 @@ static void ql_update_stats(struct ql_adapter *qdev)
* Get RX NIC FIFO DROP statistics.
*/
if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
- QPRINTK(qdev, DRV, ERR,
- "Error reading status register 0x%.04x.\n", i);
+ netif_err(qdev, drv, qdev->ndev,
+ "Error reading status register 0x%.04x.\n", i);
goto end;
} else
*iter = data;
@@ -396,14 +400,13 @@ static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
return -EINVAL;
qdev->wol = wol->wolopts;
- QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
- qdev->wol, ndev->name);
+ netif_info(qdev, drv, qdev->ndev, "Set wol option 0x%x\n", qdev->wol);
if (!qdev->wol) {
u32 wol = 0;
status = ql_mb_wol_mode(qdev, wol);
- QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "cleared sucessfully" : "clear failed",
- wol, qdev->ndev->name);
+ netif_err(qdev, drv, qdev->ndev, "WOL %s (wol code 0x%x)\n",
+ status == 0 ? "cleared sucessfully" : "clear failed",
+ wol);
}
return 0;
@@ -500,7 +503,8 @@ static int ql_run_loopback_test(struct ql_adapter *qdev)
return -EPIPE;
atomic_inc(&qdev->lb_count);
}
-
+ /* Give queue time to settle before testing results. */
+ msleep(2);
ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
return atomic_read(&qdev->lb_count) ? -EIO : 0;
}
@@ -533,9 +537,13 @@ static void ql_self_test(struct net_device *ndev,
data[0] = 0;
}
clear_bit(QL_SELFTEST, &qdev->flags);
+ /* Give link time to come up after
+ * port configuration changes.
+ */
+ msleep_interruptible(4 * 1000);
} else {
- QPRINTK(qdev, DRV, ERR,
- "%s: is down, Loopback test will fail.\n", ndev->name);
+ netif_err(qdev, drv, qdev->ndev,
+ "is down, Loopback test will fail.\n");
eth_test->flags |= ETH_TEST_FL_FAILED;
}
}
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 894a7c84fae..c26ec5d740f 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -73,7 +73,19 @@ static int qlge_irq_type = MSIX_IRQ;
module_param(qlge_irq_type, int, MSIX_IRQ);
MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
-static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
+static int qlge_mpi_coredump;
+module_param(qlge_mpi_coredump, int, 0);
+MODULE_PARM_DESC(qlge_mpi_coredump,
+ "Option to enable MPI firmware dump. "
+ "Default is OFF - Do Not allocate memory. ");
+
+static int qlge_force_coredump;
+module_param(qlge_force_coredump, int, 0);
+MODULE_PARM_DESC(qlge_force_coredump,
+ "Option to allow force of firmware core dump. "
+ "Default is OFF - Do not allow.");
+
+static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
/* required last entry */
@@ -116,7 +128,7 @@ static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
break;
default:
- QPRINTK(qdev, PROBE, ALERT, "Bad Semaphore mask!.\n");
+ netif_alert(qdev, probe, qdev->ndev, "Bad semaphore mask!\n");
return -EINVAL;
}
@@ -156,17 +168,17 @@ int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
/* check for errors */
if (temp & err_bit) {
- QPRINTK(qdev, PROBE, ALERT,
- "register 0x%.08x access error, value = 0x%.08x!.\n",
- reg, temp);
+ netif_alert(qdev, probe, qdev->ndev,
+ "register 0x%.08x access error, value = 0x%.08x!.\n",
+ reg, temp);
return -EIO;
} else if (temp & bit)
return 0;
udelay(UDELAY_DELAY);
count--;
}
- QPRINTK(qdev, PROBE, ALERT,
- "Timed out waiting for reg %x to come ready.\n", reg);
+ netif_alert(qdev, probe, qdev->ndev,
+ "Timed out waiting for reg %x to come ready.\n", reg);
return -ETIMEDOUT;
}
@@ -209,7 +221,7 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
map = pci_map_single(qdev->pdev, ptr, size, direction);
if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR, "Couldn't map DMA area.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
return -ENOMEM;
}
@@ -219,8 +231,8 @@ int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
status = ql_wait_cfg(qdev, bit);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Timed out waiting for CFG to come ready.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for CFG to come ready.\n");
goto exit;
}
@@ -301,8 +313,8 @@ int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
case MAC_ADDR_TYPE_VLAN:
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
- QPRINTK(qdev, IFUP, CRIT,
- "Address type %d not yet supported.\n", type);
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
@@ -359,12 +371,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
(addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
(addr[5]);
- QPRINTK(qdev, IFUP, DEBUG,
- "Adding %s address %pM"
- " at index %d in the CAM.\n",
- ((type ==
- MAC_ADDR_TYPE_MULTI_MAC) ? "MULTICAST" :
- "UNICAST"), addr, index);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Adding %s address %pM at index %d in the CAM.\n",
+ type == MAC_ADDR_TYPE_MULTI_MAC ?
+ "MULTICAST" : "UNICAST",
+ addr, index);
status =
ql_wait_reg_rdy(qdev,
@@ -414,9 +425,11 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
* addressing. It's either MAC_ADDR_E on or off.
* That's bit-27 we're talking about.
*/
- QPRINTK(qdev, IFUP, INFO, "%s VLAN ID %d %s the CAM.\n",
- (enable_bit ? "Adding" : "Removing"),
- index, (enable_bit ? "to" : "from"));
+ netif_info(qdev, ifup, qdev->ndev,
+ "%s VLAN ID %d %s the CAM.\n",
+ enable_bit ? "Adding" : "Removing",
+ index,
+ enable_bit ? "to" : "from");
status =
ql_wait_reg_rdy(qdev,
@@ -431,8 +444,8 @@ static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
}
case MAC_ADDR_TYPE_MULTI_FLTR:
default:
- QPRINTK(qdev, IFUP, CRIT,
- "Address type %d not yet supported.\n", type);
+ netif_crit(qdev, ifup, qdev->ndev,
+ "Address type %d not yet supported.\n", type);
status = -EPERM;
}
exit:
@@ -450,17 +463,14 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
char *addr;
if (set) {
- addr = &qdev->ndev->dev_addr[0];
- QPRINTK(qdev, IFUP, DEBUG,
- "Set Mac addr %02x:%02x:%02x:%02x:%02x:%02x\n",
- addr[0], addr[1], addr[2], addr[3],
- addr[4], addr[5]);
+ addr = &qdev->current_mac_addr[0];
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Set Mac addr %pM\n", addr);
} else {
memset(zero_mac_addr, 0, ETH_ALEN);
addr = &zero_mac_addr[0];
- QPRINTK(qdev, IFUP, DEBUG,
- "Clearing MAC address on %s\n",
- qdev->ndev->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Clearing MAC address\n");
}
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
@@ -469,23 +479,21 @@ static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to init mac "
- "address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init mac address.\n");
return status;
}
void ql_link_on(struct ql_adapter *qdev)
{
- QPRINTK(qdev, LINK, ERR, "%s: Link is up.\n",
- qdev->ndev->name);
+ netif_err(qdev, link, qdev->ndev, "Link is up.\n");
netif_carrier_on(qdev->ndev);
ql_set_mac_addr(qdev, 1);
}
void ql_link_off(struct ql_adapter *qdev)
{
- QPRINTK(qdev, LINK, ERR, "%s: Link is down.\n",
- qdev->ndev->name);
+ netif_err(qdev, link, qdev->ndev, "Link is down.\n");
netif_carrier_off(qdev->ndev);
ql_set_mac_addr(qdev, 0);
}
@@ -522,27 +530,27 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
int status = -EINVAL; /* Return error if no mask match. */
u32 value = 0;
- QPRINTK(qdev, IFUP, DEBUG,
- "%s %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s mask %s the routing reg.\n",
- (enable ? "Adding" : "Removing"),
- ((index == RT_IDX_ALL_ERR_SLOT) ? "MAC ERROR/ALL ERROR" : ""),
- ((index == RT_IDX_IP_CSUM_ERR_SLOT) ? "IP CSUM ERROR" : ""),
- ((index ==
- RT_IDX_TCP_UDP_CSUM_ERR_SLOT) ? "TCP/UDP CSUM ERROR" : ""),
- ((index == RT_IDX_BCAST_SLOT) ? "BROADCAST" : ""),
- ((index == RT_IDX_MCAST_MATCH_SLOT) ? "MULTICAST MATCH" : ""),
- ((index == RT_IDX_ALLMULTI_SLOT) ? "ALL MULTICAST MATCH" : ""),
- ((index == RT_IDX_UNUSED6_SLOT) ? "UNUSED6" : ""),
- ((index == RT_IDX_UNUSED7_SLOT) ? "UNUSED7" : ""),
- ((index == RT_IDX_RSS_MATCH_SLOT) ? "RSS ALL/IPV4 MATCH" : ""),
- ((index == RT_IDX_RSS_IPV6_SLOT) ? "RSS IPV6" : ""),
- ((index == RT_IDX_RSS_TCP4_SLOT) ? "RSS TCP4" : ""),
- ((index == RT_IDX_RSS_TCP6_SLOT) ? "RSS TCP6" : ""),
- ((index == RT_IDX_CAM_HIT_SLOT) ? "CAM HIT" : ""),
- ((index == RT_IDX_UNUSED013) ? "UNUSED13" : ""),
- ((index == RT_IDX_UNUSED014) ? "UNUSED14" : ""),
- ((index == RT_IDX_PROMISCUOUS_SLOT) ? "PROMISCUOUS" : ""),
- (enable ? "to" : "from"));
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s %s mask %s the routing reg.\n",
+ enable ? "Adding" : "Removing",
+ index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
+ index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
+ index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
+ index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
+ index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
+ index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
+ index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
+ index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
+ index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
+ index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
+ index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
+ index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
+ index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
+ index == RT_IDX_UNUSED013 ? "UNUSED13" :
+ index == RT_IDX_UNUSED014 ? "UNUSED14" :
+ index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
+ "(Bad index != RT_IDX)",
+ enable ? "to" : "from");
switch (mask) {
case RT_IDX_CAM_HIT:
@@ -602,8 +610,8 @@ static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
break;
}
default:
- QPRINTK(qdev, IFUP, ERR, "Mask type %d not yet supported.\n",
- mask);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Mask type %d not yet supported.\n", mask);
status = -EPERM;
goto exit;
}
@@ -709,7 +717,7 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
status = strncmp((char *)&qdev->flash, str, 4);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash signature.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
return status;
}
@@ -717,8 +725,8 @@ static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
csum += le16_to_cpu(*flash++);
if (csum)
- QPRINTK(qdev, IFUP, ERR,
- "Invalid flash checksum, csum = 0x%.04x.\n", csum);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Invalid flash checksum, csum = 0x%.04x.\n", csum);
return csum;
}
@@ -770,7 +778,8 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
goto exit;
}
}
@@ -779,7 +788,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
sizeof(struct flash_params_8000) / sizeof(u16),
"8000");
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
@@ -797,7 +806,7 @@ static int ql_get_8000_flash_params(struct ql_adapter *qdev)
qdev->ndev->addr_len);
if (!is_valid_ether_addr(mac_addr)) {
- QPRINTK(qdev, IFUP, ERR, "Invalid MAC address.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
status = -EINVAL;
goto exit;
}
@@ -831,7 +840,8 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
for (i = 0; i < size; i++, p++) {
status = ql_read_flash_word(qdev, i+offset, p);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Error reading flash.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Error reading flash.\n");
goto exit;
}
@@ -841,7 +851,7 @@ static int ql_get_8012_flash_params(struct ql_adapter *qdev)
sizeof(struct flash_params_8012) / sizeof(u16),
"8012");
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Invalid flash.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
status = -EINVAL;
goto exit;
}
@@ -959,17 +969,17 @@ static int ql_8012_port_initialize(struct ql_adapter *qdev)
/* Another function has the semaphore, so
* wait for the port init bit to come ready.
*/
- QPRINTK(qdev, LINK, INFO,
- "Another function has the semaphore, so wait for the port init bit to come ready.\n");
+ netif_info(qdev, link, qdev->ndev,
+ "Another function has the semaphore, so wait for the port init bit to come ready.\n");
status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
if (status) {
- QPRINTK(qdev, LINK, CRIT,
- "Port initialize timed out.\n");
+ netif_crit(qdev, link, qdev->ndev,
+ "Port initialize timed out.\n");
}
return status;
}
- QPRINTK(qdev, LINK, INFO, "Got xgmac semaphore!.\n");
+ netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
/* Set the core reset. */
status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
if (status)
@@ -1099,8 +1109,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
GFP_ATOMIC,
qdev->lbq_buf_order);
if (unlikely(!rx_ring->pg_chunk.page)) {
- QPRINTK(qdev, DRV, ERR,
- "page allocation failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "page allocation failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.offset = 0;
@@ -1110,8 +1120,8 @@ static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
if (pci_dma_mapping_error(qdev->pdev, map)) {
__free_pages(rx_ring->pg_chunk.page,
qdev->lbq_buf_order);
- QPRINTK(qdev, DRV, ERR,
- "PCI mapping failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "PCI mapping failed.\n");
return -ENOMEM;
}
rx_ring->pg_chunk.map = map;
@@ -1148,15 +1158,15 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
while (rx_ring->lbq_free_cnt > 32) {
for (i = 0; i < 16; i++) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "lbq: try cleaning clean_idx = %d.\n",
- clean_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
lbq_desc = &rx_ring->lbq[clean_idx];
if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
- QPRINTK(qdev, IFUP, ERR,
- "Could not get a page chunk.\n");
- return;
- }
+ netif_err(qdev, ifup, qdev->ndev,
+ "Could not get a page chunk.\n");
+ return;
+ }
map = lbq_desc->p.pg_chunk.map +
lbq_desc->p.pg_chunk.offset;
@@ -1181,9 +1191,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
if (start_idx != clean_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "lbq: updating prod idx = %d.\n",
- rx_ring->lbq_prod_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "lbq: updating prod idx = %d.\n",
+ rx_ring->lbq_prod_idx);
ql_write_db_reg(rx_ring->lbq_prod_idx,
rx_ring->lbq_prod_idx_db_reg);
}
@@ -1201,19 +1211,20 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
while (rx_ring->sbq_free_cnt > 16) {
for (i = 0; i < 16; i++) {
sbq_desc = &rx_ring->sbq[clean_idx];
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: try cleaning clean_idx = %d.\n",
- clean_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: try cleaning clean_idx = %d.\n",
+ clean_idx);
if (sbq_desc->p.skb == NULL) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: getting new skb for index %d.\n",
- sbq_desc->index);
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "sbq: getting new skb for index %d.\n",
+ sbq_desc->index);
sbq_desc->p.skb =
netdev_alloc_skb(qdev->ndev,
SMALL_BUFFER_SIZE);
if (sbq_desc->p.skb == NULL) {
- QPRINTK(qdev, PROBE, ERR,
- "Couldn't get an skb.\n");
+ netif_err(qdev, probe, qdev->ndev,
+ "Couldn't get an skb.\n");
rx_ring->sbq_clean_idx = clean_idx;
return;
}
@@ -1223,7 +1234,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
rx_ring->sbq_buf_size,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
- QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "PCI mapping failed.\n");
rx_ring->sbq_clean_idx = clean_idx;
dev_kfree_skb_any(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
@@ -1247,9 +1259,9 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
}
if (start_idx != clean_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "sbq: updating prod idx = %d.\n",
- rx_ring->sbq_prod_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "sbq: updating prod idx = %d.\n",
+ rx_ring->sbq_prod_idx);
ql_write_db_reg(rx_ring->sbq_prod_idx,
rx_ring->sbq_prod_idx_db_reg);
}
@@ -1281,8 +1293,9 @@ static void ql_unmap_send(struct ql_adapter *qdev,
* then its an OAL.
*/
if (i == 7) {
- QPRINTK(qdev, TX_DONE, DEBUG,
- "unmapping OAL area.\n");
+ netif_printk(qdev, tx_done, KERN_DEBUG,
+ qdev->ndev,
+ "unmapping OAL area.\n");
}
pci_unmap_single(qdev->pdev,
pci_unmap_addr(&tx_ring_desc->map[i],
@@ -1291,8 +1304,8 @@ static void ql_unmap_send(struct ql_adapter *qdev,
maplen),
PCI_DMA_TODEVICE);
} else {
- QPRINTK(qdev, TX_DONE, DEBUG, "unmapping frag %d.\n",
- i);
+ netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
+ "unmapping frag %d.\n", i);
pci_unmap_page(qdev->pdev,
pci_unmap_addr(&tx_ring_desc->map[i],
mapaddr),
@@ -1317,7 +1330,8 @@ static int ql_map_send(struct ql_adapter *qdev,
int frag_cnt = skb_shinfo(skb)->nr_frags;
if (frag_cnt) {
- QPRINTK(qdev, TX_QUEUED, DEBUG, "frag_cnt = %d.\n", frag_cnt);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "frag_cnt = %d.\n", frag_cnt);
}
/*
* Map the skb buffer first.
@@ -1326,8 +1340,8 @@ static int ql_map_send(struct ql_adapter *qdev,
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping failed with error: %d\n", err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping failed with error: %d\n", err);
return NETDEV_TX_BUSY;
}
@@ -1373,9 +1387,9 @@ static int ql_map_send(struct ql_adapter *qdev,
PCI_DMA_TODEVICE);
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping outbound address list with error: %d\n",
- err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping outbound address list with error: %d\n",
+ err);
goto map_error;
}
@@ -1403,9 +1417,9 @@ static int ql_map_send(struct ql_adapter *qdev,
err = pci_dma_mapping_error(qdev->pdev, map);
if (err) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "PCI mapping frags failed with error: %d.\n",
- err);
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "PCI mapping frags failed with error: %d.\n",
+ err);
goto map_error;
}
@@ -1433,6 +1447,260 @@ map_error:
return NETDEV_TX_BUSY;
}
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct sk_buff *skb;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct skb_frag_struct *rx_frag;
+ int nr_frags;
+ struct napi_struct *napi = &rx_ring->napi;
+
+ napi->dev = qdev->ndev;
+
+ skb = napi_get_frags(napi);
+ if (!skb) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, exiting.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+ prefetch(lbq_desc->p.pg_chunk.va);
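+ /* Attach the large-buffer page chunk as the next frag of the GRO
+ * skb instead of copying the data.
+ */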
+ rx_frag = skb_shinfo(skb)->frags;
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ rx_frag += nr_frags;
+ rx_frag->page = lbq_desc->p.pg_chunk.page;
+ rx_frag->page_offset = lbq_desc->p.pg_chunk.offset;
+ rx_frag->size = length;
+
+ skb->len += length;
+ skb->data_len += length;
+ skb->truesize += length;
+ skb_shinfo(skb)->nr_frags++;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += length;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_frags(&rx_ring->napi, qdev->vlgrp, vlan_id);
+ else
+ napi_gro_frags(napi);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_page(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ void *addr;
+ struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
+ struct napi_struct *napi = &rx_ring->napi;
+
+ skb = netdev_alloc_skb(ndev, length);
+ if (!skb) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Couldn't get an skb, need to unwind!.\n");
+ rx_ring->rx_dropped++;
+ put_page(lbq_desc->p.pg_chunk.page);
+ return;
+ }
+
+ addr = lbq_desc->p.pg_chunk.va;
+ prefetch(addr);
+
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+ rx_ring->rx_errors++;
+ goto err_out;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Segment too small, dropping.\n");
+ rx_ring->rx_dropped++;
+ goto err_out;
+ }
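+ /* Copy only the Ethernet header into the linear area and chain the
+ * rest of the page chunk to the skb as a fragment.
+ */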
+ memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
+ skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
+ lbq_desc->p.pg_chunk.offset+ETH_HLEN,
+ length-ETH_HLEN);
+ skb->len += length-ETH_HLEN;
+ skb->data_len += length-ETH_HLEN;
+ skb->truesize += length-ETH_HLEN;
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(napi, qdev->vlgrp, vlan_id, skb);
+ else
+ napi_gro_receive(napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+ return;
+err_out:
+ dev_kfree_skb_any(skb);
+ put_page(lbq_desc->p.pg_chunk.page);
+}
+
+/* Process an inbound completion from an rx ring. */
+static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u32 length,
+ u16 vlan_id)
+{
+ struct net_device *ndev = qdev->ndev;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *new_skb = NULL;
+ struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
+
+ skb = sbq_desc->p.skb;
+ /* Allocate new_skb and copy */
+ new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
+ if (new_skb == NULL) {
+ netif_err(qdev, probe, qdev->ndev,
+ "No skb available, drop the packet.\n");
+ rx_ring->rx_dropped++;
+ return;
+ }
+ skb_reserve(new_skb, NET_IP_ALIGN);
+ memcpy(skb_put(new_skb, length), skb->data, length);
+ skb = new_skb;
+
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_errors++;
+ return;
+ }
+
+ /* loopback self test for ethtool */
+ if (test_bit(QL_SELFTEST, &qdev->flags)) {
+ ql_check_lb_frame(qdev, skb);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* The max framesize filter on this chip is set higher than
+ * MTU since FCoE uses 2k frames.
+ */
+ if (skb->len > ndev->mtu + ETH_HLEN) {
+ dev_kfree_skb_any(skb);
+ rx_ring->rx_dropped++;
+ return;
+ }
+
+ prefetch(skb->data);
+ skb->dev = ndev;
+ if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ }
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
+
+ rx_ring->rx_packets++;
+ rx_ring->rx_bytes += skb->len;
+ skb->protocol = eth_type_trans(skb, ndev);
+ skb->ip_summed = CHECKSUM_NONE;
+
+ /* If rx checksum is on, and there are no
+ * csum or frame errors.
+ */
+ if (qdev->rx_csum &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
+ /* TCP frame. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
+ (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
+ /* Unfragmented ipv4 UDP frame. */
+ struct iphdr *iph = (struct iphdr *) skb->data;
+ if (!(iph->frag_off &
+ cpu_to_be16(IP_MF|IP_OFFSET))) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ netif_printk(qdev, rx_status, KERN_DEBUG,
+ qdev->ndev,
+ "TCP checksum done!\n");
+ }
+ }
+ }
+
+ skb_record_rx_queue(skb, rx_ring->cq_id);
+ if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_gro_receive(&rx_ring->napi, qdev->vlgrp,
+ vlan_id, skb);
+ else
+ napi_gro_receive(&rx_ring->napi, skb);
+ } else {
+ if (qdev->vlgrp && (vlan_id != 0xffff))
+ vlan_hwaccel_receive_skb(skb, qdev->vlgrp, vlan_id);
+ else
+ netif_receive_skb(skb);
+ }
+}
+
static void ql_realign_skb(struct sk_buff *skb, int len)
{
void *temp_addr = skb->data;
@@ -1467,7 +1735,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
*/
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "Header of %d bytes in small buffer.\n", hdr_len);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header of %d bytes in small buffer.\n", hdr_len);
/*
* Headers fit nicely into a small buffer.
*/
@@ -1486,15 +1755,16 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* Handle the data buffer(s).
*/
if (unlikely(!length)) { /* Is there data too? */
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "No Data buffer in this packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No Data buffer in this packet.\n");
return skb;
}
if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Headers in small, data of %d bytes in small, combine them.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Headers in small, data of %d bytes in small, combine them.\n",
+ length);
/*
* Data is less than small buffer size so it's
* stuffed in a small buffer.
@@ -1520,8 +1790,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
maplen),
PCI_DMA_FROMDEVICE);
} else {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes in a single small buffer.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes in a single small buffer.\n",
+ length);
sbq_desc = ql_get_curr_sbuf(rx_ring);
skb = sbq_desc->p.skb;
ql_realign_skb(skb, length);
@@ -1536,18 +1807,18 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Header in small, %d bytes in large. Chain large to small!\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Header in small, %d bytes in large. Chain large to small!\n",
+ length);
/*
* The data is in a single large buffer. We
* chain it to the header buffer's skb and let
* it rip.
*/
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Chaining page at offset = %d,"
- "for %d bytes to skb.\n",
- lbq_desc->p.pg_chunk.offset, length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Chaining page at offset = %d, for %d bytes to skb.\n",
+ lbq_desc->p.pg_chunk.offset, length);
skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
length);
@@ -1563,8 +1834,8 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
skb = netdev_alloc_skb(qdev->ndev, length);
if (skb == NULL) {
- QPRINTK(qdev, PROBE, DEBUG,
- "No skb available, drop the packet.\n");
+ netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop the packet.\n");
return NULL;
}
pci_unmap_page(qdev->pdev,
@@ -1573,8 +1844,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
pci_unmap_len(lbq_desc, maplen),
PCI_DMA_FROMDEVICE);
skb_reserve(skb, NET_IP_ALIGN);
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
+ length);
skb_fill_page_desc(skb, 0,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
@@ -1615,8 +1887,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
* a local buffer and use it to find the
* pages to chain.
*/
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "%d bytes of headers & data in chain of large.\n", length);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "%d bytes of headers & data in chain of large.\n",
+ length);
skb = sbq_desc->p.skb;
sbq_desc->p.skb = NULL;
skb_reserve(skb, NET_IP_ALIGN);
@@ -1626,9 +1899,9 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
size = (length < rx_ring->lbq_buf_size) ? length :
rx_ring->lbq_buf_size;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Adding page %d to skb for %d bytes.\n",
- i, size);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Adding page %d to skb for %d bytes.\n",
+ i, size);
skb_fill_page_desc(skb, i,
lbq_desc->p.pg_chunk.page,
lbq_desc->p.pg_chunk.offset,
@@ -1646,29 +1919,28 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
}
/* Process an inbound completion from an rx ring. */
-static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
+static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
struct rx_ring *rx_ring,
- struct ib_mac_iocb_rsp *ib_mac_rsp)
+ struct ib_mac_iocb_rsp *ib_mac_rsp,
+ u16 vlan_id)
{
struct net_device *ndev = qdev->ndev;
struct sk_buff *skb = NULL;
- u16 vlan_id = (le16_to_cpu(ib_mac_rsp->vlan_id) &
- IB_MAC_IOCB_RSP_VLAN_MASK)
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
if (unlikely(!skb)) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "No skb available, drop packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "No skb available, drop packet.\n");
rx_ring->rx_dropped++;
return;
}
/* Frame error, so drop the packet. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
- ib_mac_rsp->flags2);
+ netif_err(qdev, drv, qdev->ndev,
+ "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
dev_kfree_skb_any(skb);
rx_ring->rx_errors++;
return;
@@ -1693,17 +1965,18 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
prefetch(skb->data);
skb->dev = ndev;
if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "%s%s%s Multicast.\n",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_HASH ? "Hash" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
- (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
- IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_REG ? "Registered" :
+ (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
+ IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
rx_ring->rx_multicast++;
}
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
- QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Promiscuous Packet.\n");
}
skb->protocol = eth_type_trans(skb, ndev);
@@ -1716,8 +1989,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
/* TCP frame. */
if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
@@ -1726,8 +1999,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
if (!(iph->frag_off &
cpu_to_be16(IP_MF|IP_OFFSET))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "TCP checksum done!\n");
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "TCP checksum done!\n");
}
}
}
@@ -1753,6 +2026,56 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
}
}
+/* Process an inbound completion from an rx ring. */
+static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
+ struct rx_ring *rx_ring,
+ struct ib_mac_iocb_rsp *ib_mac_rsp)
+{
+ u32 length = le32_to_cpu(ib_mac_rsp->data_len);
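+ /* A vlan_id of 0xffff flags the frame as untagged to the rx helpers. */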
+ u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
+ ((le16_to_cpu(ib_mac_rsp->vlan_id) &
+ IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
+
+ QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+
+ if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
+ /* The data and headers are split into
+ * separate buffers.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
+ /* The data fit in a single small buffer.
+ * Allocate a new skb, copy the data and
+ * return the buffer to the free pool.
+ */
+ ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
+ !(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
+ (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
+ /* TCP packet in a page chunk that's been checksummed.
+ * Tack it on to our GRO skb and let it go.
+ */
+ ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
+ /* Non-TCP packet in a page chunk. Allocate an
+ * skb, tack it on frags, and send it up.
+ */
+ ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
+ length, vlan_id);
+ } else {
+ /* Non-TCP/UDP large frames that span multiple buffers
+ * can be processed correctly by the split frame logic.
+ */
+ ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
+ vlan_id);
+ }
+
+ return (unsigned long)length;
+}
+
/* Process an outbound completion from an rx ring. */
static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
struct ob_mac_iocb_rsp *mac_rsp)
@@ -1774,20 +2097,20 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
OB_MAC_IOCB_RSP_L |
OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Total descriptor length did not match transfer length.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Total descriptor length did not match transfer length.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Frame too short to be legal, not sent.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too short to be valid, not sent.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "Frame too long, but sent anyway.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "Frame too long, but sent anyway.\n");
}
if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
- QPRINTK(qdev, TX_DONE, WARNING,
- "PCI backplane error. Frame not sent.\n");
+ netif_warn(qdev, tx_done, qdev->ndev,
+ "PCI backplane error. Frame not sent.\n");
}
}
atomic_inc(&tx_ring->tx_count);
@@ -1817,33 +2140,35 @@ static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
{
switch (ib_ae_rsp->event) {
case MGMT_ERR_EVENT:
- QPRINTK(qdev, RX_ERR, ERR,
- "Management Processor Fatal Error.\n");
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Management Processor Fatal Error.\n");
ql_queue_fw_error(qdev);
return;
case CAM_LOOKUP_ERR_EVENT:
- QPRINTK(qdev, LINK, ERR,
- "Multiple CAM hits lookup occurred.\n");
- QPRINTK(qdev, DRV, ERR, "This event shouldn't occur.\n");
+ netif_err(qdev, link, qdev->ndev,
+ "Multiple CAM hits lookup occurred.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "This event shouldn't occur.\n");
ql_queue_asic_error(qdev);
return;
case SOFT_ECC_ERROR_EVENT:
- QPRINTK(qdev, RX_ERR, ERR, "Soft ECC error detected.\n");
+ netif_err(qdev, rx_err, qdev->ndev,
+ "Soft ECC error detected.\n");
ql_queue_asic_error(qdev);
break;
case PCI_ERR_ANON_BUF_RD:
- QPRINTK(qdev, RX_ERR, ERR,
- "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
- ib_ae_rsp->q_id);
+ netif_err(qdev, rx_err, qdev->ndev,
+ "PCI error occurred when reading anonymous buffers from rx_ring %d.\n",
+ ib_ae_rsp->q_id);
ql_queue_asic_error(qdev);
break;
default:
- QPRINTK(qdev, DRV, ERR, "Unexpected event %d.\n",
- ib_ae_rsp->event);
+ netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
+ ib_ae_rsp->event);
ql_queue_asic_error(qdev);
break;
}
@@ -1860,9 +2185,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
- prod, rx_ring->cnsmr_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
rmb();
@@ -1873,9 +2198,9 @@ static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
ql_process_mac_tx_intr(qdev, net_rsp);
break;
default:
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
}
count++;
ql_update_cq(rx_ring);
@@ -1907,9 +2232,9 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
/* While there are entries in the completion queue. */
while (prod != rx_ring->cnsmr_idx) {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "cq_id = %d, prod = %d, cnsmr = %d.\n.", rx_ring->cq_id,
- prod, rx_ring->cnsmr_idx);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "cq_id = %d, prod = %d, cnsmr = %d.\n.",
+ rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
net_rsp = rx_ring->curr_entry;
rmb();
@@ -1925,11 +2250,10 @@ static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
net_rsp);
break;
default:
- {
- QPRINTK(qdev, RX_STATUS, DEBUG,
- "Hit default case, not handled! dropping the packet, opcode = %x.\n",
- net_rsp->opcode);
- }
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Hit default case, not handled! dropping the packet, opcode = %x.\n",
+ net_rsp->opcode);
+ break;
}
count++;
ql_update_cq(rx_ring);
@@ -1950,8 +2274,8 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
int i, work_done = 0;
struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
- QPRINTK(qdev, RX_STATUS, DEBUG, "Enter, NAPI POLL cq_id = %d.\n",
- rx_ring->cq_id);
+ netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
+ "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
/* Service the TX rings first. They start
* right after the RSS rings. */
@@ -1963,9 +2287,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
trx_ring->cnsmr_idx)) {
- QPRINTK(qdev, INTR, DEBUG,
- "%s: Servicing TX completion ring %d.\n",
- __func__, trx_ring->cq_id);
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing TX completion ring %d.\n",
+ __func__, trx_ring->cq_id);
ql_clean_outbound_rx_ring(trx_ring);
}
}
@@ -1975,9 +2299,9 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
*/
if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
rx_ring->cnsmr_idx) {
- QPRINTK(qdev, INTR, DEBUG,
- "%s: Servicing RX completion ring %d.\n",
- __func__, rx_ring->cq_id);
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "%s: Servicing RX completion ring %d.\n",
+ __func__, rx_ring->cq_id);
work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
}
@@ -1994,12 +2318,13 @@ static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *gr
qdev->vlgrp = grp;
if (grp) {
- QPRINTK(qdev, IFUP, DEBUG, "Turning on VLAN in NIC_RCV_CFG.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Turning on VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
NIC_RCV_CFG_VLAN_MATCH_AND_NON);
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "Turning off VLAN in NIC_RCV_CFG.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Turning off VLAN in NIC_RCV_CFG.\n");
ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
}
}
@@ -2015,7 +2340,8 @@ static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
return;
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init vlan address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init vlan address.\n");
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
}
@@ -2032,7 +2358,8 @@ static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
if (ql_set_mac_addr_reg
(qdev, (u8 *) &enable_bit, MAC_ADDR_TYPE_VLAN, vid)) {
- QPRINTK(qdev, IFUP, ERR, "Failed to clear vlan address.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to clear vlan address.\n");
}
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
@@ -2061,7 +2388,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
spin_lock(&qdev->hw_lock);
if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
- QPRINTK(qdev, INTR, DEBUG, "Shared Interrupt, Not ours!\n");
+ netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
+ "Shared Interrupt, Not ours!\n");
spin_unlock(&qdev->hw_lock);
return IRQ_NONE;
}
@@ -2074,10 +2402,11 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
if (var & STS_FE) {
ql_queue_asic_error(qdev);
- QPRINTK(qdev, INTR, ERR, "Got fatal error, STS = %x.\n", var);
+ netif_err(qdev, intr, qdev->ndev,
+ "Got fatal error, STS = %x.\n", var);
var = ql_read32(qdev, ERR_STS);
- QPRINTK(qdev, INTR, ERR,
- "Resetting chip. Error Status Register = 0x%x\n", var);
+ netif_err(qdev, intr, qdev->ndev,
+ "Resetting chip. Error Status Register = 0x%x\n", var);
return IRQ_HANDLED;
}
@@ -2090,7 +2419,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
* We've got an async event or mailbox completion.
* Handle it and clear the source of the interrupt.
*/
- QPRINTK(qdev, INTR, ERR, "Got MPI processor interrupt.\n");
+ netif_err(qdev, intr, qdev->ndev,
+ "Got MPI processor interrupt.\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
queue_delayed_work_on(smp_processor_id(),
@@ -2105,8 +2435,8 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
*/
var = ql_read32(qdev, ISR1);
if (var & intr_context->irq_mask) {
- QPRINTK(qdev, INTR, INFO,
- "Waking handler for rx_ring[0].\n");
+ netif_info(qdev, intr, qdev->ndev,
+ "Waking handler for rx_ring[0].\n");
ql_disable_completion_interrupt(qdev, intr_context->intr);
napi_schedule(&rx_ring->napi);
work_done++;
@@ -2203,9 +2533,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
- QPRINTK(qdev, TX_QUEUED, INFO,
- "%s: shutting down tx queue %d du to lack of resources.\n",
- __func__, tx_ring_idx);
+ netif_info(qdev, tx_queued, qdev->ndev,
+ "%s: shutting down tx queue %d du to lack of resources.\n",
+ __func__, tx_ring_idx);
netif_stop_subqueue(ndev, tx_ring->wq_id);
atomic_inc(&tx_ring->queue_stopped);
tx_ring->tx_errors++;
@@ -2226,8 +2556,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
if (qdev->vlgrp && vlan_tx_tag_present(skb)) {
- QPRINTK(qdev, TX_QUEUED, DEBUG, "Adding a vlan tag %d.\n",
- vlan_tx_tag_get(skb));
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
}
@@ -2241,8 +2571,8 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
}
if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
NETDEV_TX_OK) {
- QPRINTK(qdev, TX_QUEUED, ERR,
- "Could not map the segments.\n");
+ netif_err(qdev, tx_queued, qdev->ndev,
+ "Could not map the segments.\n");
tx_ring->tx_errors++;
return NETDEV_TX_BUSY;
}
@@ -2253,8 +2583,9 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
wmb();
ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
- QPRINTK(qdev, TX_QUEUED, DEBUG, "tx queued, slot %d, len %d\n",
- tx_ring->prod_idx, skb->len);
+ netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
+ "tx queued, slot %d, len %d\n",
+ tx_ring->prod_idx, skb->len);
atomic_dec(&tx_ring->tx_count);
return NETDEV_TX_OK;
@@ -2285,8 +2616,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
pci_alloc_consistent(qdev->pdev,
PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
if (qdev->rx_ring_shadow_reg_area == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Allocation of RX shadow space failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of RX shadow space failed.\n");
return -ENOMEM;
}
memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2294,8 +2625,8 @@ static int ql_alloc_shadow_space(struct ql_adapter *qdev)
pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
&qdev->tx_ring_shadow_reg_dma);
if (qdev->tx_ring_shadow_reg_area == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Allocation of TX shadow space failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Allocation of TX shadow space failed.\n");
goto err_wqp_sh_area;
}
memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
@@ -2349,7 +2680,7 @@ static int ql_alloc_tx_resources(struct ql_adapter *qdev,
if ((tx_ring->wq_base == NULL) ||
tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
- QPRINTK(qdev, IFUP, ERR, "tx_ring alloc failed.\n");
+ netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
return -ENOMEM;
}
tx_ring->q =
@@ -2400,7 +2731,8 @@ static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring
for (i = 0; i < rx_ring->sbq_len; i++) {
sbq_desc = &rx_ring->sbq[i];
if (sbq_desc == NULL) {
- QPRINTK(qdev, IFUP, ERR, "sbq_desc %d is NULL.\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "sbq_desc %d is NULL.\n", i);
return;
}
if (sbq_desc->p.skb) {
@@ -2527,7 +2859,7 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->cq_base_dma);
if (rx_ring->cq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR, "rx_ring alloc failed.\n");
+ netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
return -ENOMEM;
}
@@ -2540,8 +2872,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->sbq_base_dma);
if (rx_ring->sbq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Small buffer queue allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue allocation failed.\n");
goto err_mem;
}
@@ -2552,8 +2884,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->sbq == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Small buffer queue control block allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Small buffer queue control block allocation failed.\n");
goto err_mem;
}
@@ -2569,8 +2901,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
&rx_ring->lbq_base_dma);
if (rx_ring->lbq_base == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Large buffer queue allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue allocation failed.\n");
goto err_mem;
}
/*
@@ -2580,8 +2912,8 @@ static int ql_alloc_rx_resources(struct ql_adapter *qdev,
kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
GFP_KERNEL);
if (rx_ring->lbq == NULL) {
- QPRINTK(qdev, IFUP, ERR,
- "Large buffer queue control block allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Large buffer queue control block allocation failed.\n");
goto err_mem;
}
@@ -2610,10 +2942,10 @@ static void ql_tx_ring_clean(struct ql_adapter *qdev)
for (i = 0; i < tx_ring->wq_len; i++) {
tx_ring_desc = &tx_ring->q[i];
if (tx_ring_desc && tx_ring_desc->skb) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Freeing lost SKB %p, from queue %d, index %d.\n",
- tx_ring_desc->skb, j,
- tx_ring_desc->index);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Freeing lost SKB %p, from queue %d, index %d.\n",
+ tx_ring_desc->skb, j,
+ tx_ring_desc->index);
ql_unmap_send(qdev, tx_ring_desc,
tx_ring_desc->map_cnt);
dev_kfree_skb(tx_ring_desc->skb);
@@ -2644,16 +2976,16 @@ static int ql_alloc_mem_resources(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
- QPRINTK(qdev, IFUP, ERR,
- "RX resource allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "RX resource allocation failed.\n");
goto err_mem;
}
}
/* Allocate tx queue resources */
for (i = 0; i < qdev->tx_ring_count; i++) {
if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
- QPRINTK(qdev, IFUP, ERR,
- "TX resource allocation failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "TX resource allocation failed.\n");
goto err_mem;
}
}
@@ -2788,14 +3120,15 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
break;
default:
- QPRINTK(qdev, IFUP, DEBUG, "Invalid rx_ring->type = %d.\n",
- rx_ring->type);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Invalid rx_ring->type = %d.\n", rx_ring->type);
}
- QPRINTK(qdev, IFUP, DEBUG, "Initializing rx work queue.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Initializing rx work queue.\n");
err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
CFG_LCQ, rx_ring->cq_id);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load CQICB.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
return err;
}
return err;
@@ -2841,10 +3174,11 @@ static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
(u16) tx_ring->wq_id);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load tx_ring.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
return err;
}
- QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded WQICB.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded WQICB.\n");
return err;
}
@@ -2898,15 +3232,15 @@ static void ql_enable_msix(struct ql_adapter *qdev)
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
- QPRINTK(qdev, IFUP, WARNING,
- "MSI-X Enable failed, trying MSI.\n");
+ netif_warn(qdev, ifup, qdev->ndev,
+ "MSI-X Enable failed, trying MSI.\n");
qdev->intr_count = 1;
qlge_irq_type = MSI_IRQ;
} else if (err == 0) {
set_bit(QL_MSIX_ENABLED, &qdev->flags);
- QPRINTK(qdev, IFUP, INFO,
- "MSI-X Enabled, got %d vectors.\n",
- qdev->intr_count);
+ netif_info(qdev, ifup, qdev->ndev,
+ "MSI-X Enabled, got %d vectors.\n",
+ qdev->intr_count);
return;
}
}
@@ -2915,13 +3249,14 @@ msi:
if (qlge_irq_type == MSI_IRQ) {
if (!pci_enable_msi(qdev->pdev)) {
set_bit(QL_MSI_ENABLED, &qdev->flags);
- QPRINTK(qdev, IFUP, INFO,
- "Running with MSI interrupts.\n");
+ netif_info(qdev, ifup, qdev->ndev,
+ "Running with MSI interrupts.\n");
return;
}
}
qlge_irq_type = LEG_IRQ;
- QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Running with legacy interrupts.\n");
}
/* Each vector services 1 RSS ring and 1 or more
@@ -3093,12 +3428,12 @@ static void ql_free_irq(struct ql_adapter *qdev)
if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
free_irq(qdev->msi_x_entry[i].vector,
&qdev->rx_ring[i]);
- QPRINTK(qdev, IFDOWN, DEBUG,
- "freeing msix interrupt %d.\n", i);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msix interrupt %d.\n", i);
} else {
free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
- QPRINTK(qdev, IFDOWN, DEBUG,
- "freeing msi interrupt %d.\n", i);
+ netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
+ "freeing msi interrupt %d.\n", i);
}
}
}
@@ -3123,32 +3458,33 @@ static int ql_request_irq(struct ql_adapter *qdev)
intr_context->name,
&qdev->rx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed request for MSIX interrupt %d.\n",
- i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed request for MSIX interrupt %d.\n",
+ i);
goto err_irq;
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "Hooked intr %d, queue type %s%s%s, with name %s.\n",
- i,
- qdev->rx_ring[i].type ==
- DEFAULT_Q ? "DEFAULT_Q" : "",
- qdev->rx_ring[i].type ==
- TX_Q ? "TX_Q" : "",
- qdev->rx_ring[i].type ==
- RX_Q ? "RX_Q" : "", intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[i].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[i].type == TX_Q ?
+ "TX_Q" :
+ qdev->rx_ring[i].type == RX_Q ?
+ "RX_Q" : "",
+ intr_context->name);
}
} else {
- QPRINTK(qdev, IFUP, DEBUG,
- "trying msi or legacy interrupts.\n");
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: irq = %d.\n", __func__, pdev->irq);
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: context->name = %s.\n", __func__,
- intr_context->name);
- QPRINTK(qdev, IFUP, DEBUG,
- "%s: dev_id = 0x%p.\n", __func__,
- &qdev->rx_ring[0]);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "trying msi or legacy interrupts.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: irq = %d.\n", __func__, pdev->irq);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: context->name = %s.\n", __func__,
+ intr_context->name);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "%s: dev_id = 0x%p.\n", __func__,
+ &qdev->rx_ring[0]);
status =
request_irq(pdev->irq, qlge_isr,
test_bit(QL_MSI_ENABLED,
@@ -3158,20 +3494,20 @@ static int ql_request_irq(struct ql_adapter *qdev)
if (status)
goto err_irq;
- QPRINTK(qdev, IFUP, ERR,
- "Hooked intr %d, queue type %s%s%s, with name %s.\n",
- i,
- qdev->rx_ring[0].type ==
- DEFAULT_Q ? "DEFAULT_Q" : "",
- qdev->rx_ring[0].type == TX_Q ? "TX_Q" : "",
- qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
- intr_context->name);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Hooked intr %d, queue type %s, with name %s.\n",
+ i,
+ qdev->rx_ring[0].type == DEFAULT_Q ?
+ "DEFAULT_Q" :
+ qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
+ qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
+ intr_context->name);
}
intr_context->hooked = 1;
}
return status;
err_irq:
- QPRINTK(qdev, IFUP, ERR, "Failed to get the interrupts!!!/n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
ql_free_irq(qdev);
return status;
}
@@ -3205,14 +3541,15 @@ static int ql_start_rss(struct ql_adapter *qdev)
memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
- QPRINTK(qdev, IFUP, DEBUG, "Initializing RSS.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to load RICB.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
return status;
}
- QPRINTK(qdev, IFUP, DEBUG, "Successfully loaded RICB.\n");
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Successfully loaded RICB.\n");
return status;
}
@@ -3227,9 +3564,8 @@ static int ql_clear_routing_entries(struct ql_adapter *qdev)
for (i = 0; i < 16; i++) {
status = ql_set_routing_reg(qdev, i, 0, 0);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM "
- "packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
break;
}
}
@@ -3253,14 +3589,14 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_ALL_ERR_SLOT, RT_IDX_ERR, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for error packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for error packets.\n");
goto exit;
}
status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for broadcast packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for broadcast packets.\n");
goto exit;
}
/* If we have more than one inbound queue, then turn on RSS in the
@@ -3270,8 +3606,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
RT_IDX_RSS_MATCH, 1);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for MATCH RSS packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for MATCH RSS packets.\n");
goto exit;
}
}
@@ -3279,8 +3615,8 @@ static int ql_route_initialize(struct ql_adapter *qdev)
status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
RT_IDX_CAM_HIT, 1);
if (status)
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init routing register for CAM packets.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init routing register for CAM packets.\n");
exit:
ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
return status;
@@ -3298,13 +3634,13 @@ int ql_cam_route_initialize(struct ql_adapter *qdev)
set &= qdev->port_link_up;
status = ql_set_mac_addr(qdev, set);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to init mac address.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
return status;
}
status = ql_route_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to init routing table.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
return status;
}
@@ -3332,15 +3668,15 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Enable the function, set pagesize, enable error checking. */
value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
- FSC_EC | FSC_VM_PAGE_4K | FSC_SH;
+ FSC_EC | FSC_VM_PAGE_4K;
+ value |= SPLT_SETTING;
/* Set/clear header splitting. */
mask = FSC_VM_PAGESIZE_MASK |
FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
ql_write32(qdev, FSC, mask | value);
- ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
- min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
+ ql_write32(qdev, SPLT_HDR, SPLT_LEN);
/* Set RX packet routing to use port/pci function on which the
* packet arrived on in addition to usual frame routing.
@@ -3369,8 +3705,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
for (i = 0; i < qdev->rx_ring_count; i++) {
status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to start rx ring[%d].\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start rx ring[%d].\n", i);
return status;
}
}
@@ -3381,7 +3717,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
if (qdev->rss_ring_count > 1) {
status = ql_start_rss(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to start RSS.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
return status;
}
}
@@ -3390,8 +3726,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
for (i = 0; i < qdev->tx_ring_count; i++) {
status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to start tx ring[%d].\n", i);
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to start tx ring[%d].\n", i);
return status;
}
}
@@ -3399,20 +3735,20 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
/* Initialize the port and set the max framesize. */
status = qdev->nic_ops->port_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
/* Set up the MAC address and frame routing filter. */
status = ql_cam_route_initialize(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
return status;
}
/* Start NAPI for the RSS queues. */
for (i = 0; i < qdev->rss_ring_count; i++) {
- QPRINTK(qdev, IFUP, DEBUG, "Enabling NAPI for rx_ring[%d].\n",
- i);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "Enabling NAPI for rx_ring[%d].\n", i);
napi_enable(&qdev->rx_ring[i].napi);
}
@@ -3429,7 +3765,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
/* Clear all the entries in the routing table. */
status = ql_clear_routing_entries(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR, "Failed to clear routing bits.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
return status;
}
@@ -3452,8 +3788,8 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
} while (time_before(jiffies, end_jiffies));
if (value & RST_FO_FR) {
- QPRINTK(qdev, IFDOWN, ERR,
- "ETIMEDOUT!!! errored out of resetting the chip!\n");
+ netif_err(qdev, ifdown, qdev->ndev,
+ "ETIMEDOUT!!! errored out of resetting the chip!\n");
status = -ETIMEDOUT;
}
@@ -3466,16 +3802,17 @@ static void ql_display_dev_info(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
- QPRINTK(qdev, PROBE, INFO,
- "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
- "XG Roll = %d, XG Rev = %d.\n",
- qdev->func,
- qdev->port,
- qdev->chip_rev_id & 0x0000000f,
- qdev->chip_rev_id >> 4 & 0x0000000f,
- qdev->chip_rev_id >> 8 & 0x0000000f,
- qdev->chip_rev_id >> 12 & 0x0000000f);
- QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
+ netif_info(qdev, probe, qdev->ndev,
+ "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
+ "XG Roll = %d, XG Rev = %d.\n",
+ qdev->func,
+ qdev->port,
+ qdev->chip_rev_id & 0x0000000f,
+ qdev->chip_rev_id >> 4 & 0x0000000f,
+ qdev->chip_rev_id >> 8 & 0x0000000f,
+ qdev->chip_rev_id >> 12 & 0x0000000f);
+ netif_info(qdev, probe, qdev->ndev,
+ "MAC address %pM\n", ndev->dev_addr);
}
int ql_wol(struct ql_adapter *qdev)
@@ -3492,23 +3829,23 @@ int ql_wol(struct ql_adapter *qdev)
if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
WAKE_MCAST | WAKE_BCAST)) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
- qdev->wol);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
+ qdev->wol);
return -EINVAL;
}
if (qdev->wol & WAKE_MAGIC) {
status = ql_mb_wol_set_magic(qdev, 1);
if (status) {
- QPRINTK(qdev, IFDOWN, ERR,
- "Failed to set magic packet on %s.\n",
- qdev->ndev->name);
+ netif_err(qdev, ifdown, qdev->ndev,
+ "Failed to set magic packet on %s.\n",
+ qdev->ndev->name);
return status;
} else
- QPRINTK(qdev, DRV, INFO,
- "Enabled magic packet successfully on %s.\n",
- qdev->ndev->name);
+ netif_info(qdev, drv, qdev->ndev,
+ "Enabled magic packet successfully on %s.\n",
+ qdev->ndev->name);
wol |= MB_WOL_MAGIC_PKT;
}
@@ -3516,9 +3853,10 @@ int ql_wol(struct ql_adapter *qdev)
if (qdev->wol) {
wol |= MB_WOL_MODE_ON;
status = ql_mb_wol_mode(qdev, wol);
- QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
- (status == 0) ? "Sucessfully set" : "Failed", wol,
- qdev->ndev->name);
+ netif_err(qdev, drv, qdev->ndev,
+ "WOL %s (wol code 0x%x) on %s\n",
+ (status == 0) ? "Successfully set" : "Failed",
+ wol, qdev->ndev->name);
}
return status;
@@ -3538,6 +3876,7 @@ static int ql_adapter_down(struct ql_adapter *qdev)
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -3558,8 +3897,8 @@ static int ql_adapter_down(struct ql_adapter *qdev)
status = ql_adapter_reset(qdev);
if (status)
- QPRINTK(qdev, IFDOWN, ERR, "reset(func #%d) FAILED!\n",
- qdev->func);
+ netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
+ qdev->func);
return status;
}
@@ -3569,7 +3908,7 @@ static int ql_adapter_up(struct ql_adapter *qdev)
err = ql_adapter_initialize(qdev);
if (err) {
- QPRINTK(qdev, IFUP, INFO, "Unable to initialize adapter.\n");
+ netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
goto err_init;
}
set_bit(QL_ADAPTER_UP, &qdev->flags);
@@ -3601,7 +3940,7 @@ static int ql_get_adapter_resources(struct ql_adapter *qdev)
int status = 0;
if (ql_alloc_mem_resources(qdev)) {
- QPRINTK(qdev, IFUP, ERR, "Unable to allocate memory.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
return -ENOMEM;
}
status = ql_request_irq(qdev);
@@ -3612,6 +3951,16 @@ static int qlge_close(struct net_device *ndev)
{
struct ql_adapter *qdev = netdev_priv(ndev);
+ /* If we hit the pci_channel_io_perm_failure
+ * condition, then we already
+ * brought the adapter down.
+ */
+ if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
+ netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
+ clear_bit(QL_EEH_FATAL, &qdev->flags);
+ return 0;
+ }
+
/*
* Wait for device to recover from a reset.
* (Rarely happens, but possible.)
@@ -3681,9 +4030,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
rx_ring->lbq_size =
rx_ring->lbq_len * sizeof(__le64);
rx_ring->lbq_buf_size = (u16)lbq_buf_len;
- QPRINTK(qdev, IFUP, DEBUG,
- "lbq_buf_size %d, order = %d\n",
- rx_ring->lbq_buf_size, qdev->lbq_buf_order);
+ netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
+ "lbq_buf_size %d, order = %d\n",
+ rx_ring->lbq_buf_size,
+ qdev->lbq_buf_order);
rx_ring->sbq_len = NUM_SMALL_BUFFERS;
rx_ring->sbq_size =
rx_ring->sbq_len * sizeof(__le64);
@@ -3747,14 +4097,14 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
int i = 3;
while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
- QPRINTK(qdev, IFUP, ERR,
- "Waiting for adapter UP...\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Waiting for adapter UP...\n");
ssleep(1);
}
if (!i) {
- QPRINTK(qdev, IFUP, ERR,
- "Timed out waiting for adapter UP\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Timed out waiting for adapter UP\n");
return -ETIMEDOUT;
}
}
@@ -3780,8 +4130,8 @@ static int ql_change_rx_buffers(struct ql_adapter *qdev)
return status;
error:
- QPRINTK(qdev, IFUP, ALERT,
- "Driver up/down cycle failed, closing device.\n");
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device.\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
return status;
@@ -3793,28 +4143,25 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
int status;
if (ndev->mtu == 1500 && new_mtu == 9000) {
- QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
+ netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
} else if (ndev->mtu == 9000 && new_mtu == 1500) {
- QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
- } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
- (ndev->mtu == 9000 && new_mtu == 9000)) {
- return 0;
+ netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
} else
return -EINVAL;
queue_delayed_work(qdev->workqueue,
&qdev->mpi_port_cfg_work, 3*HZ);
+ ndev->mtu = new_mtu;
+
if (!netif_running(qdev->ndev)) {
- ndev->mtu = new_mtu;
return 0;
}
- ndev->mtu = new_mtu;
status = ql_change_rx_buffers(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Changing MTU failed.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Changing MTU failed.\n");
}
return status;
@@ -3874,8 +4221,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set promiscous mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set promiscous mode.\n");
} else {
set_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -3884,8 +4231,8 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to clear promiscous mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear promiscous mode.\n");
} else {
clear_bit(QL_PROMISCUOUS, &qdev->flags);
}
@@ -3897,12 +4244,12 @@ static void qlge_set_multicast_list(struct net_device *ndev)
* transition is taking place.
*/
if ((ndev->flags & IFF_ALLMULTI) ||
- (ndev->mc_count > MAX_MULTICAST_ENTRIES)) {
+ (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set all-multi mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set all-multi mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
@@ -3911,32 +4258,34 @@ static void qlge_set_multicast_list(struct net_device *ndev)
if (test_bit(QL_ALLMULTI, &qdev->flags)) {
if (ql_set_routing_reg
(qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to clear all-multi mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to clear all-multi mode.\n");
} else {
clear_bit(QL_ALLMULTI, &qdev->flags);
}
}
}
- if (ndev->mc_count) {
+ if (!netdev_mc_empty(ndev)) {
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
goto exit;
- for (i = 0, mc_ptr = ndev->mc_list; mc_ptr;
- i++, mc_ptr = mc_ptr->next)
+ i = 0;
+ netdev_for_each_mc_addr(mc_ptr, ndev) {
if (ql_set_mac_addr_reg(qdev, (u8 *) mc_ptr->dmi_addr,
MAC_ADDR_TYPE_MULTI_MAC, i)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to loadmulticast address.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to loadmulticast address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
goto exit;
}
+ i++;
+ }
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
if (ql_set_routing_reg
(qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
- QPRINTK(qdev, HW, ERR,
- "Failed to set multicast match mode.\n");
+ netif_err(qdev, hw, qdev->ndev,
+ "Failed to set multicast match mode.\n");
} else {
set_bit(QL_ALLMULTI, &qdev->flags);
}
@@ -3954,6 +4303,8 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+ /* Update local copy of current mac address. */
+ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
if (status)
@@ -3961,7 +4312,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
if (status)
- QPRINTK(qdev, HW, ERR, "Failed to load MAC address.\n");
+ netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
return status;
}
@@ -3994,8 +4345,8 @@ static void ql_asic_reset_work(struct work_struct *work)
rtnl_unlock();
return;
error:
- QPRINTK(qdev, IFUP, ALERT,
- "Driver up/down cycle failed, closing device\n");
+ netif_alert(qdev, ifup, qdev->ndev,
+ "Driver up/down cycle failed, closing device\n");
set_bit(QL_ADAPTER_UP, &qdev->flags);
dev_close(qdev->ndev);
@@ -4094,6 +4445,7 @@ static void ql_release_all(struct pci_dev *pdev)
iounmap(qdev->reg_base);
if (qdev->doorbell_area)
iounmap(qdev->doorbell_area);
+ vfree(qdev->mpi_coredump);
pci_release_regions(pdev);
pci_set_drvdata(pdev, NULL);
}
@@ -4175,6 +4527,17 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
spin_lock_init(&qdev->hw_lock);
spin_lock_init(&qdev->stats_lock);
+ if (qlge_mpi_coredump) {
+ qdev->mpi_coredump =
+ vmalloc(sizeof(struct ql_mpi_coredump));
+ if (qdev->mpi_coredump == NULL) {
+ dev_err(&pdev->dev, "Coredump alloc failed.\n");
+ err = -ENOMEM;
+ goto err_out2;
+ }
+ if (qlge_force_coredump)
+ set_bit(QL_FRC_COREDUMP, &qdev->flags);
+ }
/* make sure the EEPROM is good */
err = qdev->nic_ops->get_flash(qdev);
if (err) {
@@ -4183,6 +4546,8 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
}
memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+ /* Keep local copy of current mac address. */
+ memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
/* Set up the default ring sizes. */
qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
@@ -4204,6 +4569,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
+ INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
init_completion(&qdev->ide_completion);
if (!cards_found) {
@@ -4234,6 +4600,21 @@ static const struct net_device_ops qlge_netdev_ops = {
.ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
};
+static void ql_timer(unsigned long data)
+{
+ struct ql_adapter *qdev = (struct ql_adapter *)data;
+ u32 var = 0;
+
+ var = ql_read32(qdev, STS);
+ if (pci_channel_offline(qdev->pdev)) {
+ netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
+ return;
+ }
+
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
+}
+
static int __devinit qlge_probe(struct pci_dev *pdev,
const struct pci_device_id *pci_entry)
{
@@ -4285,6 +4666,14 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
pci_disable_device(pdev);
return err;
}
+ /* Start up the timer to trigger EEH if
+ * the bus goes dead
+ */
+ init_timer_deferrable(&qdev->timer);
+ qdev->timer.data = (unsigned long)qdev;
+ qdev->timer.function = ql_timer;
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
ql_link_off(qdev);
ql_display_dev_info(ndev);
atomic_set(&qdev->lb_count, 0);
@@ -4305,6 +4694,8 @@ int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
static void __devexit qlge_remove(struct pci_dev *pdev)
{
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
+ del_timer_sync(&qdev->timer);
unregister_netdev(ndev);
ql_release_all(pdev);
pci_disable_device(pdev);
@@ -4327,6 +4718,7 @@ static void ql_eeh_close(struct net_device *ndev)
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ cancel_delayed_work_sync(&qdev->mpi_core_to_log);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
for (i = 0; i < qdev->rss_ring_count; i++)
@@ -4346,6 +4738,7 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
enum pci_channel_state state)
{
struct net_device *ndev = pci_get_drvdata(pdev);
+ struct ql_adapter *qdev = netdev_priv(ndev);
switch (state) {
case pci_channel_io_normal:
@@ -4359,6 +4752,8 @@ static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
case pci_channel_io_perm_failure:
dev_err(&pdev->dev,
"%s: pci_channel_io_perm_failure.\n", __func__);
+ ql_eeh_close(ndev);
+ set_bit(QL_EEH_FATAL, &qdev->flags);
return PCI_ERS_RESULT_DISCONNECT;
}
@@ -4381,11 +4776,18 @@ static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
pci_restore_state(pdev);
if (pci_enable_device(pdev)) {
- QPRINTK(qdev, IFUP, ERR,
- "Cannot re-enable PCI device after reset.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Cannot re-enable PCI device after reset.\n");
return PCI_ERS_RESULT_DISCONNECT;
}
pci_set_master(pdev);
+
+ if (ql_adapter_reset(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
+ set_bit(QL_EEH_FATAL, &qdev->flags);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
return PCI_ERS_RESULT_RECOVERED;
}
@@ -4395,19 +4797,19 @@ static void qlge_io_resume(struct pci_dev *pdev)
struct ql_adapter *qdev = netdev_priv(ndev);
int err = 0;
- if (ql_adapter_reset(qdev))
- QPRINTK(qdev, DRV, ERR, "reset FAILED!\n");
if (netif_running(ndev)) {
err = qlge_open(ndev);
if (err) {
- QPRINTK(qdev, IFUP, ERR,
- "Device initialization failed after reset.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device initialization failed after reset.\n");
return;
}
} else {
- QPRINTK(qdev, IFUP, ERR,
- "Device was not running prior to EEH.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Device was not running prior to EEH.\n");
}
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
netif_device_attach(ndev);
}
@@ -4424,6 +4826,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
int err;
netif_device_detach(ndev);
+ del_timer_sync(&qdev->timer);
if (netif_running(ndev)) {
err = ql_adapter_down(qdev);
@@ -4454,7 +4857,7 @@ static int qlge_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
err = pci_enable_device(pdev);
if (err) {
- QPRINTK(qdev, IFUP, ERR, "Cannot enable PCI device from suspend\n");
+ netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
return err;
}
pci_set_master(pdev);
@@ -4468,6 +4871,8 @@ static int qlge_resume(struct pci_dev *pdev)
return err;
}
+ qdev->timer.expires = jiffies + (5*HZ);
+ add_timer(&qdev->timer);
netif_device_attach(ndev);
return 0;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index e2b2286102d..3c00462a5d2 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,5 +1,54 @@
#include "qlge.h"
+int ql_unpause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+
+ /* Un-pause the RISC */
+ tmp = ql_read32(qdev, CSR);
+ if (!(tmp & CSR_RP))
+ return -EIO;
+
+ ql_write32(qdev, CSR, CSR_CMD_CLR_PAUSE);
+ return 0;
+}
+
+int ql_pause_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Pause the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_PAUSE);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RP)
+ break;
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
+int ql_hard_reset_mpi_risc(struct ql_adapter *qdev)
+{
+ u32 tmp;
+ int count = UDELAY_COUNT;
+
+ /* Reset the RISC */
+ ql_write32(qdev, CSR, CSR_CMD_SET_RST);
+ do {
+ tmp = ql_read32(qdev, CSR);
+ if (tmp & CSR_RR) {
+ ql_write32(qdev, CSR, CSR_CMD_CLR_RST);
+ break;
+ }
+ mdelay(UDELAY_DELAY);
+ count--;
+ } while (count);
+ return (count == 0) ? -ETIMEDOUT : 0;
+}
+
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
{
int status;
@@ -45,6 +94,35 @@ int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
return status;
}
+/* Determine if we are in charge of the firmware: if
+ * we are the lower of the 2 NIC pcie functions, or if
+ * we are the higher function and the lower function
+ * is not enabled.
+ */
+int ql_own_firmware(struct ql_adapter *qdev)
+{
+ u32 temp;
+
+ /* If we are the lower of the 2 NIC functions
+ * on the chip then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ if (qdev->func < qdev->alt_func)
+ return 1;
+
+ /* If we are the higher of the 2 NIC functions
+ * on the chip and the lower function is not
+ * enabled, then we are responsible for
+ * core dump and firmware reset after an error.
+ */
+ temp = ql_read32(qdev, STS);
+ if (!(temp & (1 << (8 + qdev->alt_func))))
+ return 1;
+
+ return 0;
+
+}
+
static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
{
int i, status;
@@ -57,7 +135,7 @@ static int ql_get_mb_sts(struct ql_adapter *qdev, struct mbox_params *mbcp)
ql_read_mpi_reg(qdev, qdev->mailbox_out + i,
&mbcp->mbox_out[i]);
if (status) {
- QPRINTK(qdev, DRV, ERR, "Failed mailbox read.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed mailbox read.\n");
break;
}
}
@@ -130,7 +208,7 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
int status;
struct mbox_params *mbcp = &qdev->idc_mbc;
- QPRINTK(qdev, DRV, ERR, "Enter!\n");
+ netif_err(qdev, drv, qdev->ndev, "Enter!\n");
/* Get the status data and start up a thread to
* handle the request.
*/
@@ -138,8 +216,8 @@ static int ql_idc_req_aen(struct ql_adapter *qdev)
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting ASIC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
} else {
/* Begin polled mode early so
@@ -162,8 +240,8 @@ static int ql_idc_cmplt_aen(struct ql_adapter *qdev)
mbcp->out_count = 4;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting RISC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting RISC!\n");
ql_queue_fw_error(qdev);
} else
/* Wake up the sleeping mpi_idc_work thread that is
@@ -181,13 +259,13 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "%s: Could not get mailbox status.\n", __func__);
+ netif_err(qdev, drv, qdev->ndev,
+ "%s: Could not get mailbox status.\n", __func__);
return;
}
qdev->link_status = mbcp->mbox_out[1];
- QPRINTK(qdev, DRV, ERR, "Link Up.\n");
+ netif_err(qdev, drv, qdev->ndev, "Link Up.\n");
/* If we're coming back from an IDC event
* then set up the CAM and frame routing.
@@ -195,8 +273,8 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
if (test_bit(QL_CAM_RT_SET, &qdev->flags)) {
status = ql_cam_route_initialize(qdev);
if (status) {
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
return;
} else
clear_bit(QL_CAM_RT_SET, &qdev->flags);
@@ -207,7 +285,7 @@ static void ql_link_up(struct ql_adapter *qdev, struct mbox_params *mbcp)
* to our liking.
*/
if (!test_bit(QL_PORT_CFG, &qdev->flags)) {
- QPRINTK(qdev, DRV, ERR, "Queue Port Config Worker!\n");
+ netif_err(qdev, drv, qdev->ndev, "Queue Port Config Worker!\n");
set_bit(QL_PORT_CFG, &qdev->flags);
/* Begin polled mode early so
* we don't get another interrupt
@@ -229,7 +307,7 @@ static void ql_link_down(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "Link down AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "Link down AEN broken!\n");
ql_link_off(qdev);
}
@@ -242,9 +320,9 @@ static int ql_sfp_in(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "SFP in AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP in AEN broken!\n");
else
- QPRINTK(qdev, DRV, ERR, "SFP insertion detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP insertion detected.\n");
return status;
}
@@ -257,9 +335,9 @@ static int ql_sfp_out(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "SFP out AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP out AEN broken!\n");
else
- QPRINTK(qdev, DRV, ERR, "SFP removal detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "SFP removal detected.\n");
return status;
}
@@ -272,13 +350,13 @@ static int ql_aen_lost(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status)
- QPRINTK(qdev, DRV, ERR, "Lost AEN broken!\n");
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN broken!\n");
else {
int i;
- QPRINTK(qdev, DRV, ERR, "Lost AEN detected.\n");
+ netif_err(qdev, drv, qdev->ndev, "Lost AEN detected.\n");
for (i = 0; i < mbcp->out_count; i++)
- QPRINTK(qdev, DRV, ERR, "mbox_out[%d] = 0x%.08x.\n",
- i, mbcp->mbox_out[i]);
+ netif_err(qdev, drv, qdev->ndev, "mbox_out[%d] = 0x%.08x.\n",
+ i, mbcp->mbox_out[i]);
}
@@ -293,15 +371,15 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR, "Firmware did not initialize!\n");
+ netif_err(qdev, drv, qdev->ndev, "Firmware did not initialize!\n");
} else {
- QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
- mbcp->mbox_out[1]);
+ netif_err(qdev, drv, qdev->ndev, "Firmware Revision = 0x%.08x.\n",
+ mbcp->mbox_out[1]);
qdev->fw_rev_id = mbcp->mbox_out[1];
status = ql_cam_route_initialize(qdev);
if (status)
- QPRINTK(qdev, IFUP, ERR,
- "Failed to init CAM/Routing tables.\n");
+ netif_err(qdev, ifup, qdev->ndev,
+ "Failed to init CAM/Routing tables.\n");
}
}
@@ -320,8 +398,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
mbcp->out_count = 1;
status = ql_get_mb_sts(qdev, mbcp);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Could not read MPI, resetting ASIC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Could not read MPI, resetting ASIC!\n");
ql_queue_asic_error(qdev);
goto end;
}
@@ -410,15 +488,14 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
mbcp->mbox_out[0] = MB_CMD_STS_ERR;
return status;
}
- QPRINTK(qdev, DRV, ERR,
- "Firmware initialization failed.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware initialization failed.\n");
status = -EIO;
ql_queue_fw_error(qdev);
break;
case AEN_SYS_ERR:
- QPRINTK(qdev, DRV, ERR,
- "System Error.\n");
+ netif_err(qdev, drv, qdev->ndev, "System Error.\n");
ql_queue_fw_error(qdev);
status = -EIO;
break;
@@ -431,8 +508,8 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
/* Need to support AEN 8110 */
break;
default:
- QPRINTK(qdev, DRV, ERR,
- "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
+ netif_err(qdev, drv, qdev->ndev,
+ "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
/* Clear the MPI firmware status. */
}
end:
@@ -505,8 +582,8 @@ static int ql_mailbox_command(struct ql_adapter *qdev, struct mbox_params *mbcp)
goto done;
} while (time_before(jiffies, count));
- QPRINTK(qdev, DRV, ERR,
- "Timed out waiting for mailbox complete.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Timed out waiting for mailbox complete.\n");
status = -ETIMEDOUT;
goto end;
@@ -529,6 +606,22 @@ end:
return status;
}
+int ql_mb_sys_err(struct ql_adapter *qdev)
+{
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+ int status;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 1;
+ mbcp->out_count = 0;
+
+ mbcp->mbox_in[0] = MB_CMD_MAKE_SYS_ERR;
+
+ status = ql_mailbox_command(qdev, mbcp);
+ return status;
+}
/* Get MPI firmware version. This will be used for
* driver banner and for ethtool info.
@@ -552,8 +645,8 @@ int ql_mb_about_fw(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed about firmware command\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed about firmware command\n");
status = -EIO;
}
@@ -584,8 +677,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Get Firmware State.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Firmware State.\n");
status = -EIO;
}
@@ -594,8 +687,8 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev)
* happen.
*/
if (mbcp->mbox_out[1] & 1) {
- QPRINTK(qdev, DRV, ERR,
- "Firmware waiting for initialization.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Firmware waiting for initialization.\n");
status = -EIO;
}
@@ -627,8 +720,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed IDC ACK send.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed IDC ACK send.\n");
status = -EIO;
}
return status;
@@ -659,16 +751,72 @@ int ql_mb_set_port_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] == MB_CMD_STS_INTRMDT) {
- QPRINTK(qdev, DRV, ERR,
- "Port Config sent, wait for IDC.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Port Config sent, wait for IDC.\n");
} else if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Set Port Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Set Port Configuration.\n");
status = -EIO;
}
return status;
}
+int ql_mb_dump_ram(struct ql_adapter *qdev, u64 req_dma, u32 addr,
+ u32 size)
+{
+ int status = 0;
+ struct mbox_params mbc;
+ struct mbox_params *mbcp = &mbc;
+
+ memset(mbcp, 0, sizeof(struct mbox_params));
+
+ mbcp->in_count = 9;
+ mbcp->out_count = 1;
+
+ mbcp->mbox_in[0] = MB_CMD_DUMP_RISC_RAM;
+ mbcp->mbox_in[1] = LSW(addr);
+ mbcp->mbox_in[2] = MSW(req_dma);
+ mbcp->mbox_in[3] = LSW(req_dma);
+ mbcp->mbox_in[4] = MSW(size);
+ mbcp->mbox_in[5] = LSW(size);
+ mbcp->mbox_in[6] = MSW(MSD(req_dma));
+ mbcp->mbox_in[7] = LSW(MSD(req_dma));
+ mbcp->mbox_in[8] = MSW(addr);
+
+
+ status = ql_mailbox_command(qdev, mbcp);
+ if (status)
+ return status;
+
+ if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
+ netif_err(qdev, drv, qdev->ndev, "Failed to dump risc RAM.\n");
+ status = -EIO;
+ }
+ return status;
+}
+
+/* Issue a mailbox command to dump RISC RAM. */
+int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
+ u32 ram_addr, int word_count)
+{
+ int status;
+ char *my_buf;
+ dma_addr_t buf_dma;
+
+ my_buf = pci_alloc_consistent(qdev->pdev, word_count * sizeof(u32),
+ &buf_dma);
+ if (!my_buf)
+ return -EIO;
+
+ status = ql_mb_dump_ram(qdev, buf_dma, ram_addr, word_count);
+ if (!status)
+ memcpy(buf, my_buf, word_count * sizeof(u32));
+
+ pci_free_consistent(qdev->pdev, word_count * sizeof(u32), my_buf,
+ buf_dma);
+ return status;
+}
+
/* Get link settings and maximum frame size settings
* for the current port.
* Most likely will block.
@@ -691,12 +839,12 @@ int ql_mb_get_port_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed Get Port Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed Get Port Configuration.\n");
status = -EIO;
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "Passed Get Port Configuration.\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "Passed Get Port Configuration.\n");
qdev->link_config = mbcp->mbox_out[1];
qdev->max_frame_size = mbcp->mbox_out[2];
}
@@ -723,8 +871,7 @@ int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set WOL mode.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
status = -EIO;
}
return status;
@@ -766,8 +913,7 @@ int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set WOL mode.\n");
+ netif_err(qdev, drv, qdev->ndev, "Failed to set WOL mode.\n");
status = -EIO;
}
return status;
@@ -793,8 +939,7 @@ static int ql_idc_wait(struct ql_adapter *qdev)
wait_for_completion_timeout(&qdev->ide_completion,
wait_time);
if (!wait_time) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Timeout.\n");
+ netif_err(qdev, drv, qdev->ndev, "IDC Timeout.\n");
break;
}
/* Now examine the response from the IDC process.
@@ -802,18 +947,17 @@ static int ql_idc_wait(struct ql_adapter *qdev)
* more wait time.
*/
if (mbcp->mbox_out[0] == AEN_IDC_EXT) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Time Extension from function.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC Time Extension from function.\n");
wait_time += (mbcp->mbox_out[1] >> 8) & 0x0000000f;
} else if (mbcp->mbox_out[0] == AEN_IDC_CMPLT) {
- QPRINTK(qdev, DRV, ERR,
- "IDC Success.\n");
+ netif_err(qdev, drv, qdev->ndev, "IDC Success.\n");
status = 0;
break;
} else {
- QPRINTK(qdev, DRV, ERR,
- "IDC: Invalid State 0x%.04x.\n",
- mbcp->mbox_out[0]);
+ netif_err(qdev, drv, qdev->ndev,
+ "IDC: Invalid State 0x%.04x.\n",
+ mbcp->mbox_out[0]);
status = -EIO;
break;
}
@@ -842,8 +986,8 @@ int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to set LED Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to set LED Configuration.\n");
status = -EIO;
}
@@ -868,8 +1012,8 @@ int ql_mb_get_led_cfg(struct ql_adapter *qdev)
return status;
if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to get LED Configuration.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get LED Configuration.\n");
status = -EIO;
} else
qdev->led_config = mbcp->mbox_out[1];
@@ -899,16 +1043,16 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
return status;
if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- QPRINTK(qdev, DRV, ERR,
- "Command not supported by firmware.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
status = -EINVAL;
} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
/* This indicates that the firmware is
* already in the state we are trying to
* change it to.
*/
- QPRINTK(qdev, DRV, ERR,
- "Command parameters make no change.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command parameters make no change.\n");
}
return status;
}
@@ -938,12 +1082,12 @@ static int ql_mb_get_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 *control)
}
if (mbcp->mbox_out[0] == MB_CMD_STS_INVLD_CMD) {
- QPRINTK(qdev, DRV, ERR,
- "Command not supported by firmware.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Command not supported by firmware.\n");
status = -EINVAL;
} else if (mbcp->mbox_out[0] == MB_CMD_STS_ERR) {
- QPRINTK(qdev, DRV, ERR,
- "Failed to get MPI traffic control.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Failed to get MPI traffic control.\n");
status = -EIO;
}
return status;
@@ -999,8 +1143,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
status = ql_mb_get_port_cfg(qdev);
rtnl_unlock();
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Bug: Failed to get port config data.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to get port config data.\n");
goto err;
}
@@ -1013,8 +1157,8 @@ void ql_mpi_port_cfg_work(struct work_struct *work)
qdev->max_frame_size = CFG_DEFAULT_MAX_FRAME_SIZE;
status = ql_set_port_cfg(qdev);
if (status) {
- QPRINTK(qdev, DRV, ERR,
- "Bug: Failed to set port config data.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Failed to set port config data.\n");
goto err;
}
end:
@@ -1046,8 +1190,8 @@ void ql_mpi_idc_work(struct work_struct *work)
switch (aen) {
default:
- QPRINTK(qdev, DRV, ERR,
- "Bug: Unhandled IDC action.\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: Unhandled IDC action.\n");
break;
case MB_CMD_PORT_RESET:
case MB_CMD_STOP_FW:
@@ -1062,11 +1206,11 @@ void ql_mpi_idc_work(struct work_struct *work)
if (timeout) {
status = ql_mb_idc_ack(qdev);
if (status)
- QPRINTK(qdev, DRV, ERR,
- "Bug: No pending IDC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "IDC ACK not required\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
status = 0; /* success */
}
break;
@@ -1095,11 +1239,11 @@ void ql_mpi_idc_work(struct work_struct *work)
if (timeout) {
status = ql_mb_idc_ack(qdev);
if (status)
- QPRINTK(qdev, DRV, ERR,
- "Bug: No pending IDC!\n");
+ netif_err(qdev, drv, qdev->ndev,
+ "Bug: No pending IDC!\n");
} else {
- QPRINTK(qdev, DRV, DEBUG,
- "IDC ACK not required\n");
+ netif_printk(qdev, drv, KERN_DEBUG, qdev->ndev,
+ "IDC ACK not required\n");
status = 0; /* success */
}
break;
@@ -1143,5 +1287,19 @@ void ql_mpi_reset_work(struct work_struct *work)
cancel_delayed_work_sync(&qdev->mpi_work);
cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
cancel_delayed_work_sync(&qdev->mpi_idc_work);
+ /* If we're not the dominant NIC function,
+ * then there is nothing to do.
+ */
+ if (!ql_own_firmware(qdev)) {
+ netif_err(qdev, drv, qdev->ndev, "Don't own firmware!\n");
+ return;
+ }
+
+ if (!ql_core_dump(qdev, qdev->mpi_coredump)) {
+ netif_err(qdev, drv, qdev->ndev, "Core is dumped!\n");
+ qdev->core_is_dumped = 1;
+ queue_delayed_work(qdev->workqueue,
+ &qdev->mpi_core_to_log, 5 * HZ);
+ }
ql_soft_reset_mpi_risc(qdev);
}
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index f03e2e4a15a..15d5373dc8f 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -938,7 +938,7 @@ static void r6040_multicast_list(struct net_device *dev)
u16 *adrp;
u16 reg;
unsigned long flags;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
/* MAC Address */
@@ -958,25 +958,24 @@ static void r6040_multicast_list(struct net_device *dev)
}
/* Too many multicast addresses
* accept all traffic */
- else if ((dev->mc_count > MCAST_MAX) || (dev->flags & IFF_ALLMULTI))
+ else if ((netdev_mc_count(dev) > MCAST_MAX) ||
+ (dev->flags & IFF_ALLMULTI))
reg |= 0x0020;
iowrite16(reg, ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
/* Build the hash table */
- if (dev->mc_count > MCAST_MAX) {
+ if (netdev_mc_count(dev) > MCAST_MAX) {
u16 hash_table[4];
u32 crc;
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
char *addrs = dmi->dmi_addr;
- dmi = dmi->next;
-
if (!(*addrs & 1))
continue;
@@ -994,17 +993,19 @@ static void r6040_multicast_list(struct net_device *dev)
iowrite16(hash_table[3], ioaddr + MAR3);
}
/* Multicast Address 1~4 case */
- for (i = 0, dmi; (i < dev->mc_count) && (i < MCAST_MAX); i++) {
- adrp = (u16 *)dmi->dmi_addr;
- iowrite16(adrp[0], ioaddr + MID_1L + 8*i);
- iowrite16(adrp[1], ioaddr + MID_1M + 8*i);
- iowrite16(adrp[2], ioaddr + MID_1H + 8*i);
- dmi = dmi->next;
- }
- for (i = dev->mc_count; i < MCAST_MAX; i++) {
- iowrite16(0xffff, ioaddr + MID_0L + 8*i);
- iowrite16(0xffff, ioaddr + MID_0M + 8*i);
- iowrite16(0xffff, ioaddr + MID_0H + 8*i);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev) {
+ if (i < MCAST_MAX) {
+ adrp = (u16 *) dmi->dmi_addr;
+ iowrite16(adrp[0], ioaddr + MID_1L + 8 * i);
+ iowrite16(adrp[1], ioaddr + MID_1M + 8 * i);
+ iowrite16(adrp[2], ioaddr + MID_1H + 8 * i);
+ } else {
+ iowrite16(0xffff, ioaddr + MID_0L + 8 * i);
+ iowrite16(0xffff, ioaddr + MID_0M + 8 * i);
+ iowrite16(0xffff, ioaddr + MID_0H + 8 * i);
+ }
+ i++;
}
}
@@ -1222,7 +1223,7 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
}
-static struct pci_device_id r6040_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(r6040_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_RDC, 0x6040) },
{ 0 }
};
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 60f96c468a2..dfc3573c91b 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -168,7 +168,7 @@ static void rtl_hw_start_8169(struct net_device *);
static void rtl_hw_start_8168(struct net_device *);
static void rtl_hw_start_8101(struct net_device *);
-static struct pci_device_id rtl8169_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
@@ -187,7 +187,7 @@ static struct pci_device_id rtl8169_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
static int rx_copybreak = 200;
-static int use_dac;
+static int use_dac = -1;
static struct {
u32 msg_enable;
} debug = { -1 };
@@ -511,7 +511,8 @@ MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param(use_dac, int, 0);
-MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
+MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only."
+" Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_LICENSE("GPL");
@@ -744,12 +745,10 @@ static void rtl8169_check_link_status(struct net_device *dev,
spin_lock_irqsave(&tp->lock, flags);
if (tp->link_ok(ioaddr)) {
netif_carrier_on(dev);
- if (netif_msg_ifup(tp))
- printk(KERN_INFO PFX "%s: link up\n", dev->name);
+ netif_info(tp, ifup, dev, "link up\n");
} else {
- if (netif_msg_ifdown(tp))
- printk(KERN_INFO PFX "%s: link down\n", dev->name);
netif_carrier_off(dev);
+ netif_info(tp, ifdown, dev, "link down\n");
}
spin_unlock_irqrestore(&tp->lock, flags);
}
@@ -862,11 +861,8 @@ static int rtl8169_set_speed_tbi(struct net_device *dev,
} else if (autoneg == AUTONEG_ENABLE)
RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
else {
- if (netif_msg_link(tp)) {
- printk(KERN_WARNING "%s: "
- "incorrect speed setting refused in TBI mode\n",
- dev->name);
- }
+ netif_warn(tp, link, dev,
+ "incorrect speed setting refused in TBI mode\n");
ret = -EOPNOTSUPP;
}
@@ -901,9 +897,9 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
(tp->mac_version != RTL_GIGA_MAC_VER_15) &&
(tp->mac_version != RTL_GIGA_MAC_VER_16)) {
giga_ctrl |= ADVERTISE_1000FULL | ADVERTISE_1000HALF;
- } else if (netif_msg_link(tp)) {
- printk(KERN_INFO "%s: PHY does not support 1000Mbps.\n",
- dev->name);
+ } else {
+ netif_info(tp, link, dev,
+ "PHY does not support 1000Mbps\n");
}
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
@@ -2705,8 +2701,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
if (tp->link_ok(ioaddr))
goto out_unlock;
- if (netif_msg_link(tp))
- printk(KERN_WARNING "%s: PHY reset until link up\n", dev->name);
+ netif_warn(tp, link, dev, "PHY reset until link up\n");
tp->phy_reset_enable(ioaddr);
@@ -2776,8 +2771,7 @@ static void rtl8169_phy_reset(struct net_device *dev,
return;
msleep(1);
}
- if (netif_msg_link(tp))
- printk(KERN_ERR "%s: PHY reset failed.\n", dev->name);
+ netif_err(tp, link, dev, "PHY reset failed\n");
}
static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
@@ -2811,8 +2805,8 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
*/
rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
- if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
- printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
+ if (RTL_R8(PHYstatus) & TBI_Enable)
+ netif_info(tp, link, dev, "TBI auto-negotiating\n");
}
static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
@@ -2980,6 +2974,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *ioaddr;
unsigned int i;
int rc;
+ int this_use_dac = use_dac;
if (netif_msg_drv(&debug)) {
printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -3012,8 +3007,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* enable device (incl. PCI PM wakeup and hotplug setup) */
rc = pci_enable_device(pdev);
if (rc < 0) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "enable failure\n");
+ netif_err(tp, probe, dev, "enable failure\n");
goto err_out_free_dev_1;
}
@@ -3023,45 +3017,46 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* make sure PCI base addr 1 is MMIO */
if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "region #%d not an MMIO resource, aborting\n",
- region);
- }
+ netif_err(tp, probe, dev,
+ "region #%d not an MMIO resource, aborting\n",
+ region);
rc = -ENODEV;
goto err_out_mwi_3;
}
/* check for weird/broken PCI region reporting */
if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "Invalid PCI region size(s), aborting\n");
- }
+ netif_err(tp, probe, dev,
+ "Invalid PCI region size(s), aborting\n");
rc = -ENODEV;
goto err_out_mwi_3;
}
rc = pci_request_regions(pdev, MODULENAME);
if (rc < 0) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "could not request regions.\n");
+ netif_err(tp, probe, dev, "could not request regions\n");
goto err_out_mwi_3;
}
tp->cp_cmd = PCIMulRW | RxChkSum;
+ tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!tp->pcie_cap)
+ netif_info(tp, probe, dev, "no PCI Express capability\n");
+
+ if (this_use_dac < 0)
+ this_use_dac = tp->pcie_cap != 0;
+
if ((sizeof(dma_addr_t) > 4) &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
+ this_use_dac &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ netif_info(tp, probe, dev, "using 64-bit DMA\n");
tp->cp_cmd |= PCIDAC;
dev->features |= NETIF_F_HIGHDMA;
} else {
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- if (netif_msg_probe(tp)) {
- dev_err(&pdev->dev,
- "DMA configuration failed.\n");
- }
+ netif_err(tp, probe, dev, "DMA configuration failed\n");
goto err_out_free_res_4;
}
}
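A minimal sketch of the DMA-mask decision introduced in this hunk, assuming use_dac is now treated as a tristate module parameter (negative meaning "auto"); want_64bit_dma() is a hypothetical helper, not part of the driver:

/* use_dac < 0: auto - enable 64-bit DMA only when a PCI Express
 * capability was found; 0: force off; > 0: force on. */
static bool want_64bit_dma(int use_dac_param, bool has_pcie_cap)
{
	if (use_dac_param < 0)
		return has_pcie_cap;
	return use_dac_param != 0;
}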
@@ -3069,16 +3064,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* ioremap MMIO region */
ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
if (!ioaddr) {
- if (netif_msg_probe(tp))
- dev_err(&pdev->dev, "cannot remap MMIO, aborting\n");
+ netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
rc = -EIO;
goto err_out_free_res_4;
}
- tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (!tp->pcie_cap && netif_msg_probe(tp))
- dev_info(&pdev->dev, "no PCI Express capability\n");
-
RTL_W16(IntrMask, 0x0000);
/* Soft reset the chip. */
@@ -3100,10 +3090,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* Use appropriate default if unknown */
if (tp->mac_version == RTL_GIGA_MAC_NONE) {
- if (netif_msg_probe(tp)) {
- dev_notice(&pdev->dev,
- "unknown MAC, using family default\n");
- }
+ netif_notice(tp, probe, dev,
+ "unknown MAC, using family default\n");
tp->mac_version = cfg->default_ver;
}
@@ -3185,19 +3173,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
- if (netif_msg_probe(tp)) {
- u32 xid = RTL_R32(TxConfig) & 0x9cf0f8ff;
-
- printk(KERN_INFO "%s: %s at 0x%lx, "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
- "XID %08x IRQ %d\n",
- dev->name,
- rtl_chip_info[tp->chipset].name,
- dev->base_addr,
- dev->dev_addr[0], dev->dev_addr[1],
- dev->dev_addr[2], dev->dev_addr[3],
- dev->dev_addr[4], dev->dev_addr[5], xid, dev->irq);
- }
+ netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
+ rtl_chip_info[tp->chipset].name,
+ dev->base_addr, dev->dev_addr,
+ (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
rtl8169_init_phy(dev, tp);
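The probe banner above relies on the %pM printk extension, which prints a 6-byte MAC address as colon-separated hex and replaces the old run of six %2.2x conversions. A minimal illustration (example_print_mac() and the address are made up for the example):

static void example_print_mac(void)
{
	u8 mac[ETH_ALEN] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };

	/* %pM expands the 6 bytes as 00:1a:2b:3c:4d:5e */
	pr_info("MAC is %pM\n", mac);
}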
@@ -4136,10 +4115,10 @@ static void rtl8169_reinit_task(struct work_struct *work)
ret = rtl8169_open(dev);
if (unlikely(ret < 0)) {
- if (net_ratelimit() && netif_msg_drv(tp)) {
- printk(KERN_ERR PFX "%s: reinit failure (status = %d)."
- " Rescheduling.\n", dev->name, ret);
- }
+ if (net_ratelimit())
+ netif_err(tp, drv, dev,
+ "reinit failure (status = %d). Rescheduling\n",
+ ret);
rtl8169_schedule_work(dev, rtl8169_reinit_task);
}
@@ -4169,10 +4148,8 @@ static void rtl8169_reset_task(struct work_struct *work)
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
} else {
- if (net_ratelimit() && netif_msg_intr(tp)) {
- printk(KERN_EMERG PFX "%s: Rx buffers shortage\n",
- dev->name);
- }
+ if (net_ratelimit())
+ netif_emerg(tp, intr, dev, "Rx buffers shortage\n");
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
@@ -4260,11 +4237,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
u32 opts1;
if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
- if (netif_msg_drv(tp)) {
- printk(KERN_ERR
- "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
- }
+ netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
goto err_stop;
}
@@ -4326,11 +4299,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
pci_read_config_word(pdev, PCI_STATUS, &pci_status);
- if (netif_msg_intr(tp)) {
- printk(KERN_ERR
- "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
- dev->name, pci_cmd, pci_status);
- }
+ netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
+ pci_cmd, pci_status);
/*
* The recovery sequence below admits a very elaborated explanation:
@@ -4354,8 +4324,7 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
/* The infamous DAC f*ckup only happens at boot time */
if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
- if (netif_msg_intr(tp))
- printk(KERN_INFO "%s: disabling PCI DAC.\n", dev->name);
+ netif_info(tp, intr, dev, "disabling PCI DAC\n");
tp->cp_cmd &= ~PCIDAC;
RTL_W16(CPlusCmd, tp->cp_cmd);
dev->features &= ~NETIF_F_HIGHDMA;
@@ -4482,11 +4451,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
if (status & DescOwn)
break;
if (unlikely(status & RxRES)) {
- if (netif_msg_rx_err(tp)) {
- printk(KERN_INFO
- "%s: Rx ERROR. status = %08x\n",
- dev->name, status);
- }
+ netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
+ status);
dev->stats.rx_errors++;
if (status & (RxRWT | RxRUNT))
dev->stats.rx_length_errors++;
@@ -4549,8 +4515,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
tp->cur_rx = cur_rx;
delta = rtl8169_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
- if (!delta && count && netif_msg_intr(tp))
- printk(KERN_INFO "%s: no Rx buffer allocated\n", dev->name);
+ if (!delta && count)
+ netif_info(tp, intr, dev, "no Rx buffer allocated\n");
tp->dirty_rx += delta;
/*
@@ -4560,8 +4526,8 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
* after refill ?
* - how do others driver handle this condition (Uh oh...).
*/
- if ((tp->dirty_rx + NUM_RX_DESC == tp->cur_rx) && netif_msg_intr(tp))
- printk(KERN_EMERG "%s: Rx buffers exhausted\n", dev->name);
+ if (tp->dirty_rx + NUM_RX_DESC == tp->cur_rx)
+ netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
return count;
}
@@ -4616,10 +4582,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
if (likely(napi_schedule_prep(&tp->napi)))
__napi_schedule(&tp->napi);
- else if (netif_msg_intr(tp)) {
- printk(KERN_INFO "%s: interrupt %04x in poll\n",
- dev->name, status);
- }
+ else
+ netif_info(tp, intr, dev,
+ "interrupt %04x in poll\n", status);
}
/* We only get a new MSI interrupt when all active irq
@@ -4755,27 +4720,22 @@ static void rtl_set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) {
/* Unconditionally log net taps. */
- if (netif_msg_link(tp)) {
- printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
- dev->name);
- }
+ netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
rx_mode =
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct dev_mc_list *mclist;
- unsigned int i;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
rx_mode |= AcceptMulticast;
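For clarity, the per-address work inside the netdev_for_each_mc_addr() loop above amounts to the following; hash_mc_addr() is a hypothetical helper shown only to isolate the computation:

/* The top 6 bits of the Ethernet CRC select one of 64 filter bits,
 * spread across two 32-bit mc_filter registers. */
static void hash_mc_addr(const u8 *addr, u32 mc_filter[2])
{
	int bit_nr = ether_crc(ETH_ALEN, addr) >> 26;	/* 0..63 */

	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}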
diff --git a/drivers/net/rrunner.c b/drivers/net/rrunner.c
index 1c257098d0a..266baf53496 100644
--- a/drivers/net/rrunner.c
+++ b/drivers/net/rrunner.c
@@ -1688,7 +1688,7 @@ static int rr_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
}
}
-static struct pci_device_id rr_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rr_pci_tbl) = {
{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
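The DEFINE_PCI_DEVICE_TABLE() conversions in this patch should, as far as I can tell, be equivalent to declaring the ID table const and placing it in the devinit-const section. A sketch (see <linux/pci.h> for the authoritative definition; example_tbl is a made-up name):

/* Rough expansion:
 *	const struct pci_device_id example_tbl[] __devinitconst = { ... };
 */
static DEFINE_PCI_DEVICE_TABLE(example_tbl) = {
	{ PCI_VENDOR_ID_ESSENTIAL, PCI_DEVICE_ID_ESSENTIAL_ROADRUNNER,
	  PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }
};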
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 3c4836d0898..43bc66aa840 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -523,7 +523,7 @@ module_param_array(rts_frm_len, uint, NULL, 0);
* S2IO device table.
* This table lists all the devices that this driver supports.
*/
-static struct pci_device_id s2io_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(s2io_tbl) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
PCI_ANY_ID, PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
@@ -5055,8 +5055,8 @@ static void s2io_set_multicast(struct net_device *dev)
}
/* Update individual M_CAST address list */
- if ((!sp->m_cast_flg) && dev->mc_count) {
- if (dev->mc_count >
+ if ((!sp->m_cast_flg) && netdev_mc_count(dev)) {
+ if (netdev_mc_count(dev) >
(config->max_mc_addr - config->max_mac_addr)) {
DBG_PRINT(ERR_DBG,
"%s: No more Rx filters can be added - "
@@ -5066,7 +5066,7 @@ static void s2io_set_multicast(struct net_device *dev)
}
prev_cnt = sp->mc_addr_count;
- sp->mc_addr_count = dev->mc_count;
+ sp->mc_addr_count = netdev_mc_count(dev);
/* Clear out the previous list of Mc in the H/W. */
for (i = 0; i < prev_cnt; i++) {
@@ -5092,8 +5092,8 @@ static void s2io_set_multicast(struct net_device *dev)
}
/* Create the new Rx filter list and update the same in H/W. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
- i++, mclist = mclist->next) {
+ i = 0;
+ netdev_for_each_mc_addr(mclist, dev) {
memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
ETH_ALEN);
mac_addr = 0;
@@ -5121,6 +5121,7 @@ static void s2io_set_multicast(struct net_device *dev)
dev->name);
return;
}
+ i++;
}
}
}
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 564d4d7f855..9944e5d662c 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2161,13 +2161,13 @@ static void sbmac_setmulti(struct sbmac_softc *sc)
* XXX if the table overflows */
idx = 1; /* skip station address */
- mclist = dev->mc_list;
- while (mclist && (idx < MAC_ADDR_COUNT)) {
+ netdev_for_each_mc_addr(mclist, dev) {
+ if (idx == MAC_ADDR_COUNT)
+ break;
reg = sbmac_addr2reg(mclist->dmi_addr);
port = sc->sbm_base + R_MAC_ADDR_BASE+(idx * sizeof(uint64_t));
__raw_writeq(reg, port);
idx++;
- mclist = mclist->next;
}
/*
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index e35050322f9..d87c4787fff 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -429,13 +429,13 @@ static void _sc92031_set_mar(struct net_device *dev)
u32 mar0 = 0, mar1 = 0;
if ((dev->flags & IFF_PROMISC) ||
- dev->mc_count > multicast_filter_limit ||
+ netdev_mc_count(dev) > multicast_filter_limit ||
(dev->flags & IFF_ALLMULTI))
mar0 = mar1 = 0xffffffff;
else if (dev->flags & IFF_MULTICAST) {
struct dev_mc_list *mc_list;
- for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
+ netdev_for_each_mc_addr(mc_list, dev) {
u32 crc;
unsigned bit = 0;
@@ -1589,7 +1589,7 @@ out:
return 0;
}
-static struct pci_device_id sc92031_pci_device_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(sc92031_pci_device_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x2031) },
{ PCI_DEVICE(PCI_VENDOR_ID_SILAN, 0x8139) },
{ PCI_DEVICE(0x1088, 0x2031) },
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index 46997e177ee..88f2fb193ab 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -1602,11 +1602,10 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
static void efx_set_multicast_list(struct net_device *net_dev)
{
struct efx_nic *efx = netdev_priv(net_dev);
- struct dev_mc_list *mc_list = net_dev->mc_list;
+ struct dev_mc_list *mc_list;
union efx_multicast_hash *mc_hash = &efx->multicast_hash;
u32 crc;
int bit;
- int i;
efx->promiscuous = !!(net_dev->flags & IFF_PROMISC);
@@ -1615,11 +1614,10 @@ static void efx_set_multicast_list(struct net_device *net_dev)
memset(mc_hash, 0xff, sizeof(*mc_hash));
} else {
memset(mc_hash, 0x00, sizeof(*mc_hash));
- for (i = 0; i < net_dev->mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, net_dev) {
crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr);
bit = crc & (EFX_MCAST_HASH_ENTRIES - 1);
set_bit_le(bit, mc_hash->byte);
- mc_list = mc_list->next;
}
/* Broadcast packets go through the multicast hash filter.
@@ -1940,7 +1938,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
**************************************************************************/
/* PCI device ID table */
-static struct pci_device_id efx_pci_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID),
.driver_data = (unsigned long) &falcon_a1_nic_type},
{PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID),
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index a615ac05153..7eff0a615cb 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -79,8 +79,6 @@ extern int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok);
/* Global */
extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type);
-extern void efx_suspend(struct efx_nic *efx);
-extern void efx_resume(struct efx_nic *efx);
extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs,
int rx_usecs, bool rx_adaptive);
extern int efx_request_power(struct efx_nic *efx, int mw, const char *name);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 6c0bbed8c47..d9f9c02a928 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -196,7 +196,7 @@ int efx_ethtool_get_settings(struct net_device *net_dev,
efx->phy_op->get_settings(efx, ecmd);
mutex_unlock(&efx->mac_lock);
- /* Falcon GMAC does not support 1000Mbps HD */
+ /* GMAC does not support 1000Mbps HD */
ecmd->supported &= ~SUPPORTED_1000baseT_Half;
/* Both MACs support pause frames (bidirectional and respond-only) */
ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
@@ -216,7 +216,7 @@ int efx_ethtool_set_settings(struct net_device *net_dev,
struct efx_nic *efx = netdev_priv(net_dev);
int rc;
- /* Falcon GMAC does not support 1000Mbps HD */
+ /* GMAC does not support 1000Mbps HD */
if (ecmd->speed == SPEED_1000 && ecmd->duplex != DUPLEX_FULL) {
EFX_LOG(efx, "rejecting unsupported 1000Mbps HD"
" setting\n");
@@ -342,8 +342,8 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
unsigned int n = 0, i;
enum efx_loopback_mode mode;
- efx_fill_test(n++, strings, data, &tests->mdio,
- "core", 0, "mdio", NULL);
+ efx_fill_test(n++, strings, data, &tests->phy_alive,
+ "phy", 0, "alive", NULL);
efx_fill_test(n++, strings, data, &tests->nvram,
"core", 0, "nvram", NULL);
efx_fill_test(n++, strings, data, &tests->interrupt,
@@ -379,7 +379,7 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
if (name == NULL)
break;
- efx_fill_test(n++, strings, data, &tests->phy[i],
+ efx_fill_test(n++, strings, data, &tests->phy_ext[i],
"phy", 0, name, NULL);
}
}
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 9d009c46e96..1b8d83657aa 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -909,6 +909,8 @@ static int falcon_probe_port(struct efx_nic *efx)
efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
else
efx->wanted_fc = EFX_FC_RX;
+ if (efx->mdio.mmds & MDIO_DEVS_AN)
+ efx->wanted_fc |= EFX_FC_AUTO;
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
@@ -1006,7 +1008,7 @@ static int falcon_test_nvram(struct efx_nic *efx)
static const struct efx_nic_register_test falcon_b0_register_tests[] = {
{ FR_AZ_ADR_REGION,
- EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_AZ_RX_CFG,
EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
{ FR_AZ_TX_CFG,
@@ -1728,7 +1730,7 @@ static int falcon_set_wol(struct efx_nic *efx, u32 type)
/**************************************************************************
*
- * Revision-dependent attributes used by efx.c
+ * Revision-dependent attributes used by efx.c and nic.c
*
**************************************************************************
*/
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index f66b3da6ddf..c48669c7741 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -896,29 +896,73 @@ fail:
return rc;
}
-int efx_mcdi_handle_assertion(struct efx_nic *efx)
+static int efx_mcdi_nvram_test(struct efx_nic *efx, unsigned int type)
+{
+ u8 inbuf[MC_CMD_NVRAM_TEST_IN_LEN];
+ u8 outbuf[MC_CMD_NVRAM_TEST_OUT_LEN];
+ int rc;
+
+ MCDI_SET_DWORD(inbuf, NVRAM_TEST_IN_TYPE, type);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_TEST, inbuf, sizeof(inbuf),
+ outbuf, sizeof(outbuf), NULL);
+ if (rc)
+ return rc;
+
+ switch (MCDI_DWORD(outbuf, NVRAM_TEST_OUT_RESULT)) {
+ case MC_CMD_NVRAM_TEST_PASS:
+ case MC_CMD_NVRAM_TEST_NOTSUPP:
+ return 0;
+ default:
+ return -EIO;
+ }
+}
+
+int efx_mcdi_nvram_test_all(struct efx_nic *efx)
+{
+ u32 nvram_types;
+ unsigned int type;
+ int rc;
+
+ rc = efx_mcdi_nvram_types(efx, &nvram_types);
+ if (rc)
+ return rc;
+
+ type = 0;
+ while (nvram_types != 0) {
+ if (nvram_types & 1) {
+ rc = efx_mcdi_nvram_test(efx, type);
+ if (rc)
+ return rc;
+ }
+ type++;
+ nvram_types >>= 1;
+ }
+
+ return 0;
+}
+
+static int efx_mcdi_read_assertion(struct efx_nic *efx)
{
- union {
- u8 asserts[MC_CMD_GET_ASSERTS_IN_LEN];
- u8 reboot[MC_CMD_REBOOT_IN_LEN];
- } inbuf;
- u8 assertion[MC_CMD_GET_ASSERTS_OUT_LEN];
+ u8 inbuf[MC_CMD_GET_ASSERTS_IN_LEN];
+ u8 outbuf[MC_CMD_GET_ASSERTS_OUT_LEN];
unsigned int flags, index, ofst;
const char *reason;
size_t outlen;
int retry;
int rc;
- /* Check if the MC is in the assertion handler, retrying twice. Once
+ /* Attempt to read any stored assertion state before we reboot
+ * the mcfw out of the assertion handler. Retry twice, once
* because a boot-time assertion might cause this command to fail
* with EINTR. And once again because GET_ASSERTS can race with
* MC_CMD_REBOOT running on the other port. */
retry = 2;
do {
- MCDI_SET_DWORD(inbuf.asserts, GET_ASSERTS_IN_CLEAR, 0);
+ MCDI_SET_DWORD(inbuf, GET_ASSERTS_IN_CLEAR, 1);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_ASSERTS,
- inbuf.asserts, MC_CMD_GET_ASSERTS_IN_LEN,
- assertion, sizeof(assertion), &outlen);
+ inbuf, MC_CMD_GET_ASSERTS_IN_LEN,
+ outbuf, sizeof(outbuf), &outlen);
} while ((rc == -EINTR || rc == -EIO) && retry-- > 0);
if (rc)
@@ -926,21 +970,11 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
if (outlen < MC_CMD_GET_ASSERTS_OUT_LEN)
return -EINVAL;
- flags = MCDI_DWORD(assertion, GET_ASSERTS_OUT_GLOBAL_FLAGS);
+ /* Print out any recorded assertion state */
+ flags = MCDI_DWORD(outbuf, GET_ASSERTS_OUT_GLOBAL_FLAGS);
if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
return 0;
- /* Reset the hardware atomically such that only one port with succeed.
- * This command will succeed if a reboot is no longer required (because
- * the other port did it first), but fail with EIO if it succeeds.
- */
- BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
- MCDI_SET_DWORD(inbuf.reboot, REBOOT_IN_FLAGS,
- MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
- efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf.reboot, MC_CMD_REBOOT_IN_LEN,
- NULL, 0, NULL);
-
- /* Print out the assertion */
reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
? "system-level assertion"
: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
@@ -949,20 +983,45 @@ int efx_mcdi_handle_assertion(struct efx_nic *efx)
? "watchdog reset"
: "unknown assertion";
EFX_ERR(efx, "MCPU %s at PC = 0x%.8x in thread 0x%.8x\n", reason,
- MCDI_DWORD(assertion, GET_ASSERTS_OUT_SAVED_PC_OFFS),
- MCDI_DWORD(assertion, GET_ASSERTS_OUT_THREAD_OFFS));
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_SAVED_PC_OFFS),
+ MCDI_DWORD(outbuf, GET_ASSERTS_OUT_THREAD_OFFS));
/* Print out the registers */
ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
for (index = 1; index < 32; index++) {
EFX_ERR(efx, "R%.2d (?): 0x%.8x\n", index,
- MCDI_DWORD2(assertion, ofst));
+ MCDI_DWORD2(outbuf, ofst));
ofst += sizeof(efx_dword_t);
}
return 0;
}
+static void efx_mcdi_exit_assertion(struct efx_nic *efx)
+{
+ u8 inbuf[MC_CMD_REBOOT_IN_LEN];
+
+ /* Atomically reboot the mcfw out of the assertion handler */
+ BUILD_BUG_ON(MC_CMD_REBOOT_OUT_LEN != 0);
+ MCDI_SET_DWORD(inbuf, REBOOT_IN_FLAGS,
+ MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION);
+ efx_mcdi_rpc(efx, MC_CMD_REBOOT, inbuf, MC_CMD_REBOOT_IN_LEN,
+ NULL, 0, NULL);
+}
+
+int efx_mcdi_handle_assertion(struct efx_nic *efx)
+{
+ int rc;
+
+ rc = efx_mcdi_read_assertion(efx);
+ if (rc)
+ return rc;
+
+ efx_mcdi_exit_assertion(efx);
+
+ return 0;
+}
+
void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode)
{
u8 inbuf[MC_CMD_SET_ID_LED_IN_LEN];
diff --git a/drivers/net/sfc/mcdi.h b/drivers/net/sfc/mcdi.h
index 10ce98f4c0f..f1f89ad4075 100644
--- a/drivers/net/sfc/mcdi.h
+++ b/drivers/net/sfc/mcdi.h
@@ -116,6 +116,7 @@ extern int efx_mcdi_nvram_erase(struct efx_nic *efx, unsigned int type,
loff_t offset, size_t length);
extern int efx_mcdi_nvram_update_finish(struct efx_nic *efx,
unsigned int type);
+extern int efx_mcdi_nvram_test_all(struct efx_nic *efx);
extern int efx_mcdi_handle_assertion(struct efx_nic *efx);
extern void efx_mcdi_set_id_led(struct efx_nic *efx, enum efx_led_mode mode);
extern int efx_mcdi_reset_port(struct efx_nic *efx);
diff --git a/drivers/net/sfc/mcdi_pcol.h b/drivers/net/sfc/mcdi_pcol.h
index 73e71f42062..bd59302695b 100644
--- a/drivers/net/sfc/mcdi_pcol.h
+++ b/drivers/net/sfc/mcdi_pcol.h
@@ -786,16 +786,18 @@
#define MC_CMD_GET_PHY_CFG_OUT_FLAGS_OFST 0
#define MC_CMD_GET_PHY_CFG_PRESENT_LBN 0
#define MC_CMD_GET_PHY_CFG_PRESENT_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_SHORTBIST_LBN 1
-#define MC_CMD_GET_PHY_CFG_SHORTBIST_WIDTH 1
-#define MC_CMD_GET_PHY_CFG_LONGBIST_LBN 2
-#define MC_CMD_GET_PHY_CFG_LONGBIST_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_LBN 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_SHORT_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_LBN 2
+#define MC_CMD_GET_PHY_CFG_BIST_CABLE_LONG_WIDTH 1
#define MC_CMD_GET_PHY_CFG_LOWPOWER_LBN 3
#define MC_CMD_GET_PHY_CFG_LOWPOWER_WIDTH 1
#define MC_CMD_GET_PHY_CFG_POWEROFF_LBN 4
#define MC_CMD_GET_PHY_CFG_POWEROFF_WIDTH 1
#define MC_CMD_GET_PHY_CFG_TXDIS_LBN 5
#define MC_CMD_GET_PHY_CFG_TXDIS_WIDTH 1
+#define MC_CMD_GET_PHY_CFG_BIST_LBN 6
+#define MC_CMD_GET_PHY_CFG_BIST_WIDTH 1
#define MC_CMD_GET_PHY_CFG_OUT_TYPE_OFST 4
/* Bitmask of supported capabilities */
#define MC_CMD_GET_PHY_CFG_OUT_SUPPORTED_CAP_OFST 8
@@ -832,7 +834,7 @@
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_OFST 52
#define MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN 20
-/* MC_CMD_START_PHY_BIST:
+/* MC_CMD_START_BIST:
* Start a BIST test on the PHY.
*
* Locks required: PHY_LOCK if doing a PHY BIST
@@ -840,34 +842,71 @@
*/
#define MC_CMD_START_BIST 0x25
#define MC_CMD_START_BIST_IN_LEN 4
-#define MC_CMD_START_BIST_TYPE_OFST 0
+#define MC_CMD_START_BIST_IN_TYPE_OFST 0
+#define MC_CMD_START_BIST_OUT_LEN 0
-/* Run the PHY's short BIST */
-#define MC_CMD_PHY_BIST_SHORT 1
-/* Run the PHY's long BIST */
-#define MC_CMD_PHY_BIST_LONG 2
+/* Run the PHY's short cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_SHORT 1
+/* Run the PHY's long cable BIST */
+#define MC_CMD_PHY_BIST_CABLE_LONG 2
/* Run BIST on the currently selected BPX Serdes (XAUI or XFI) */
#define MC_CMD_BPX_SERDES_BIST 3
+/* Run the MC loopback tests */
+#define MC_CMD_MC_LOOPBACK_BIST 4
+/* Run the PHY's standard BIST */
+#define MC_CMD_PHY_BIST 5
/* MC_CMD_POLL_PHY_BIST: (variadic output)
* Poll for BIST completion
*
- * Returns a single status code, and a binary blob of phy-specific
- * bist output. If the driver can't succesfully parse the BIST output,
- * it should still respect the Pass/Fail in OUT.RESULT.
+ * Returns a single status code, and optionally some PHY specific
+ * bist output. The driver should only consume the BIST output
+ * after validating OUTLEN and PHY_CFG.PHY_TYPE.
*
- * Locks required: PHY_LOCK if doing a PHY BIST
+ * If a driver can't successfully parse the BIST output, it should
+ * still respect the Pass/Fail in OUT.RESULT
+ *
+ * Locks required: PHY_LOCK if doing a PHY BIST
* Return code: 0, EACCES (if PHY_LOCK is not held)
*/
#define MC_CMD_POLL_BIST 0x26
#define MC_CMD_POLL_BIST_IN_LEN 0
#define MC_CMD_POLL_BIST_OUT_LEN UNKNOWN
+#define MC_CMD_POLL_BIST_OUT_SFT9001_LEN 40
+#define MC_CMD_POLL_BIST_OUT_MRSFP_LEN 8
#define MC_CMD_POLL_BIST_OUT_RESULT_OFST 0
#define MC_CMD_POLL_BIST_RUNNING 1
#define MC_CMD_POLL_BIST_PASSED 2
#define MC_CMD_POLL_BIST_FAILED 3
#define MC_CMD_POLL_BIST_TIMEOUT 4
+/* Generic: */
#define MC_CMD_POLL_BIST_OUT_PRIVATE_OFST 4
+/* SFT9001-specific: */
+/* (offset 4 unused?) */
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_A_OFST 8
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_B_OFST 12
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_C_OFST 16
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_LENGTH_D_OFST 20
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_A_OFST 24
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_B_OFST 28
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_C_OFST 32
+#define MC_CMD_POLL_BIST_OUT_SFT9001_CABLE_STATUS_D_OFST 36
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OK 1
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_OPEN 2
+#define MC_CMD_POLL_BIST_SFT9001_INTRA_PAIR_SHORT 3
+#define MC_CMD_POLL_BIST_SFT9001_INTER_PAIR_SHORT 4
+#define MC_CMD_POLL_BIST_SFT9001_PAIR_BUSY 9
+/* mrsfp "PHY" driver: */
+#define MC_CMD_POLL_BIST_OUT_MRSFP_TEST_OFST 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_COMPLETE 0
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_WRITE 1
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_IO_EXP 2
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_OFF_I2C_NO_ACCESS_MODULE 3
+#define MC_CMD_POLL_BIST_MRSFP_TEST_IO_EXP_I2C_CONFIGURE 4
+#define MC_CMD_POLL_BIST_MRSFP_TEST_BUS_SWITCH_I2C_NO_CROSSTALK 5
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_PRESENCE 6
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_I2C_ACCESS 7
+#define MC_CMD_POLL_BIST_MRSFP_TEST_MODULE_ID_SANE_VALUE 8
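One way a driver might consume MC_CMD_POLL_BIST as documented above is sketched below: validate the returned length before touching any PHY-specific payload, and honour OUT.RESULT regardless. example_poll_bist() is illustrative only; efx_mcdi_rpc() and MCDI_DWORD() are used as at the other call sites in this patch:

static int example_poll_bist(struct efx_nic *efx)
{
	u8 outbuf[MC_CMD_POLL_BIST_OUT_SFT9001_LEN];
	size_t outlen;
	int rc;

	rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;

	/* OUTLEN is variadic: check it covers the RESULT dword before use */
	if (outlen < MC_CMD_POLL_BIST_OUT_RESULT_OFST + 4)
		return -EMSGSIZE;

	/* Respect the pass/fail verdict even if the payload is opaque */
	switch (MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT)) {
	case MC_CMD_POLL_BIST_PASSED:
		return 0;
	case MC_CMD_POLL_BIST_RUNNING:
		return -EAGAIN;
	default:
		return -EIO;
	}
}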
/* MC_CMD_PHY_SPI: (variadic in, variadic out)
* Read/Write/Erase the PHY SPI device
@@ -1206,6 +1245,13 @@
#define MC_CMD_WOL_FILTER_SET_IN_BITMAP_LAYER4_OFST \
(MC_CMD_WOL_FILTER_SET_IN_DATA_OFST + 178)
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_MASK_OFST \
+ MC_CMD_WOL_FILTER_SET_IN_DATA_OFST
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_LBN 0
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_UP_WIDTH 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_LBN 1
+#define MC_CMD_WOL_FILTER_SET_IN_LINK_DOWN_WIDTH 1
+
#define MC_CMD_WOL_FILTER_SET_OUT_LEN 4
#define MC_CMD_WOL_FILTER_SET_OUT_FILTER_ID_OFST 0
@@ -1216,7 +1262,8 @@
#define MC_CMD_WOL_TYPE_IPV4_SYN 0x3
#define MC_CMD_WOL_TYPE_IPV6_SYN 0x4
#define MC_CMD_WOL_TYPE_BITMAP 0x5
-#define MC_CMD_WOL_TYPE_MAX 0x6
+#define MC_CMD_WOL_TYPE_LINK 0x6
+#define MC_CMD_WOL_TYPE_MAX 0x7
#define MC_CMD_FILTER_MODE_SIMPLE 0x0
#define MC_CMD_FILTER_MODE_STRUCTURED 0xffffffff
@@ -1357,14 +1404,24 @@
* Returns: 0, EINVAL (bad type/offset/length), EACCES (if PHY_LOCK required and not held)
*/
#define MC_CMD_NVRAM_UPDATE_FINISH 0x3c
-#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 4
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_LEN 8
#define MC_CMD_NVRAM_UPDATE_FINISH_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_UPDATE_FINISH_IN_REBOOT_OFST 4
#define MC_CMD_NVRAM_UPDATE_FINISH_OUT_LEN 0
/* MC_CMD_REBOOT:
- * Reboot the MC. The AFTER_ASSERTION flag is intended to be used
- * when the driver notices an assertion failure, to allow two ports to
- * both recover (semi-)gracefully.
+ * Reboot the MC.
+ *
+ * The AFTER_ASSERTION flag is intended to be used when the driver notices
+ * an assertion failure (at which point it is expected to perform a complete
+ * tear down and reinitialise), to allow both ports to reset the MC once
+ * in an atomic fashion.
+ *
+ * Production mc firmwares are generally compiled with REBOOT_ON_ASSERT=1,
+ * which means that they will automatically reboot out of the assertion
+ * handler, so this is in practice an optional operation. It is still
+ * recommended that drivers execute this to support custom firmwares
+ * with REBOOT_ON_ASSERT=0.
*
* Locks required: NONE
* Returns: Nothing. You get back a response with ERR=1, DATALEN=0
@@ -1469,11 +1526,10 @@
((_ofst) + 6)
/* MC_CMD_READ_SENSORS
- * Returns the current (value, state) for each sensor
+ * Returns the current reading from each sensor
*
- * Returns the current (value, state) [each 16bit] of each sensor supported by
- * this board, by DMA'ing a sparse array (indexed by the sensor type) into host
- * memory.
+ * Returns a sparse array of sensor readings (indexed by the sensor
+ * type) into host memory. Each array element is a dword.
*
* The MC will send a SENSOREVT event every time any sensor changes state. The
* driver is responsible for ensuring that it doesn't miss any events. The board
@@ -1486,6 +1542,12 @@
#define MC_CMD_READ_SENSORS_IN_DMA_ADDR_HI_OFST 4
#define MC_CMD_READ_SENSORS_OUT_LEN 0
+/* Sensor reading fields */
+#define MC_CMD_READ_SENSOR_VALUE_LBN 0
+#define MC_CMD_READ_SENSOR_VALUE_WIDTH 16
+#define MC_CMD_READ_SENSOR_STATE_LBN 16
+#define MC_CMD_READ_SENSOR_STATE_WIDTH 8
+
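A small sketch of how one MC_CMD_READ_SENSORS array element could be decoded with the VALUE/STATE fields defined above; example_decode_sensor() is a hypothetical helper:

/* Each DMA'd array element is a dword: a 16-bit reading in bits 0..15
 * and an 8-bit state in bits 16..23. */
static void example_decode_sensor(u32 reading, u16 *value, u8 *state)
{
	*value = reading & ((1 << MC_CMD_READ_SENSOR_VALUE_WIDTH) - 1);
	*state = (reading >> MC_CMD_READ_SENSOR_STATE_LBN) &
		 ((1 << MC_CMD_READ_SENSOR_STATE_WIDTH) - 1);
}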
/* MC_CMD_GET_PHY_STATE:
* Report current state of PHY. A "zombie" PHY is a PHY that has failed to
@@ -1577,4 +1639,98 @@
#define MC_CMD_MAC_RESET_RESTORE_IN_LEN 0
#define MC_CMD_MAC_RESET_RESTORE_OUT_LEN 0
+
+/* MC_CMD_TEST_ASSERT:
+ * Deliberately trigger an assert-detonation in the firmware for testing
+ * purposes (i.e. to allow testing that the driver copes gracefully).
+ *
+ * Locks required: None
+ * Returns: 0
+ */
+
+#define MC_CMD_TESTASSERT 0x49
+#define MC_CMD_TESTASSERT_IN_LEN 0
+#define MC_CMD_TESTASSERT_OUT_LEN 0
+
+/* MC_CMD_WORKAROUND 0x4a
+ *
+ * Enable/Disable a given workaround. The mcfw will return EINVAL if it
+ * doesn't understand the given workaround number - which should not
+ * be treated as a hard error by client code.
+ *
+ * This op does not imply any semantics about each workaround; that's between
+ * the driver and the mcfw on a per-workaround basis.
+ *
+ * Locks required: None
+ * Returns: 0, EINVAL
+ */
+#define MC_CMD_WORKAROUND 0x4a
+#define MC_CMD_WORKAROUND_IN_LEN 8
+#define MC_CMD_WORKAROUND_IN_TYPE_OFST 0
+#define MC_CMD_WORKAROUND_BUG17230 1
+#define MC_CMD_WORKAROUND_IN_ENABLED_OFST 4
+#define MC_CMD_WORKAROUND_OUT_LEN 0
+
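Below is a sketch of enabling a workaround per the description above, treating EINVAL from an mcfw that does not know the number as non-fatal; example_enable_workaround() is illustrative, with the buffer layout taken from the *_OFST defines:

static int example_enable_workaround(struct efx_nic *efx, u32 type)
{
	u8 inbuf[MC_CMD_WORKAROUND_IN_LEN];
	int rc;

	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_TYPE, type);
	MCDI_SET_DWORD(inbuf, WORKAROUND_IN_ENABLED, 1);

	rc = efx_mcdi_rpc(efx, MC_CMD_WORKAROUND, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);

	/* EINVAL here just means the firmware has no such workaround */
	return (rc == -EINVAL) ? 0 : rc;
}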
+/* MC_CMD_GET_PHY_MEDIA_INFO:
+ * Read media-specific data from PHY (e.g. SFP/SFP+ module ID information for
+ * SFP+ PHYs).
+ *
+ * The "media type" can be found via GET_PHY_CFG (GET_PHY_CFG_OUT_MEDIA_TYPE);
+ * the valid "page number" input values, and the output data, are interpreted
+ * on a per-type basis.
+ *
+ * For SFP+: PAGE=0 or 1 returns a 128-byte block read from module I2C address
+ * 0xA0 offset 0 or 0x80.
+ * Anything else: currently undefined.
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_GET_PHY_MEDIA_INFO 0x4b
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN 4
+#define MC_CMD_GET_PHY_MEDIA_INFO_IN_PAGE_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(_num_bytes) (4 + (_num_bytes))
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATALEN_OFST 0
+#define MC_CMD_GET_PHY_MEDIA_INFO_OUT_DATA_OFST 4
+
+/* MC_CMD_NVRAM_TEST:
+ * Test a particular NVRAM partition for valid contents (where "valid"
+ * depends on the type of partition).
+ *
+ * Locks required: None
+ * Return code: 0
+ */
+#define MC_CMD_NVRAM_TEST 0x4c
+#define MC_CMD_NVRAM_TEST_IN_LEN 4
+#define MC_CMD_NVRAM_TEST_IN_TYPE_OFST 0
+#define MC_CMD_NVRAM_TEST_OUT_LEN 4
+#define MC_CMD_NVRAM_TEST_OUT_RESULT_OFST 0
+#define MC_CMD_NVRAM_TEST_PASS 0
+#define MC_CMD_NVRAM_TEST_FAIL 1
+#define MC_CMD_NVRAM_TEST_NOTSUPP 2
+
+/* MC_CMD_MRSFP_TWEAK: (debug)
+ * Read status and/or set parameters for the "mrsfp" driver in mr_rusty builds.
+ * I2C I/O expander bits are always read; if equaliser parameters are supplied,
+ * they are configured first.
+ *
+ * Locks required: None
+ * Return code: 0, EINVAL
+ */
+#define MC_CMD_MRSFP_TWEAK 0x4d
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_READ_ONLY 0
+#define MC_CMD_MRSFP_TWEAK_IN_LEN_EQ_CONFIG 16
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_LEVEL_OFST 0 /* 0-6 low->high de-emph. */
+#define MC_CMD_MRSFP_TWEAK_IN_TXEQ_DT_CFG_OFST 4 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_BOOST_OFST 8 /* 0-8 low->high boost */
+#define MC_CMD_MRSFP_TWEAK_IN_RXEQ_DT_CFG_OFST 12 /* 0-8 low->high ref.V */
+#define MC_CMD_MRSFP_TWEAK_OUT_LEN 12
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_INPUTS_OFST 0 /* input bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_OUTPUTS_OFST 4 /* output bits */
+#define MC_CMD_MRSFP_TWEAK_OUT_IOEXP_DIRECTION_OFST 8 /* dirs: 0=out, 1=in */
+
+/* Do NOT add new commands beyond 0x4f as part of 3.0 : 0x50 - 0x7f will be
+ * used for post-3.0 extensions. If you run out of space, look for gaps or
+ * commands that are unused in the existing range. */
+
#endif /* MCDI_PCOL_H */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index eb694af7a47..34c22fa986e 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -381,6 +381,18 @@ static int efx_mcdi_phy_probe(struct efx_nic *efx)
* but by convention we don't */
efx->loopback_modes &= ~(1 << LOOPBACK_NONE);
+ /* Set the initial link mode */
+ efx_mcdi_phy_decode_link(
+ efx, &efx->link_state,
+ MCDI_DWORD(outbuf, GET_LINK_OUT_LINK_SPEED),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FLAGS),
+ MCDI_DWORD(outbuf, GET_LINK_OUT_FCNTL));
+
+ /* Default to Autonegotiated flow control if the PHY supports it */
+ efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
+ if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
+ efx->wanted_fc |= EFX_FC_AUTO;
+
return 0;
fail:
@@ -436,7 +448,7 @@ void efx_mcdi_phy_check_fcntl(struct efx_nic *efx, u32 lpa)
/* The link partner capabilities are only relevent if the
* link supports flow control autonegotiation */
- if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_ASYM_LBN))
+ if (~phy_cfg->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
return;
/* If flow control autoneg is supported and enabled, then fine */
@@ -560,6 +572,27 @@ static int efx_mcdi_phy_set_settings(struct efx_nic *efx, struct ethtool_cmd *ec
return 0;
}
+static int efx_mcdi_phy_test_alive(struct efx_nic *efx)
+{
+ u8 outbuf[MC_CMD_GET_PHY_STATE_OUT_LEN];
+ size_t outlen;
+ int rc;
+
+ BUILD_BUG_ON(MC_CMD_GET_PHY_STATE_IN_LEN != 0);
+
+ rc = efx_mcdi_rpc(efx, MC_CMD_GET_PHY_STATE, NULL, 0,
+ outbuf, sizeof(outbuf), &outlen);
+ if (rc)
+ return rc;
+
+ if (outlen < MC_CMD_GET_PHY_STATE_OUT_LEN)
+ return -EMSGSIZE;
+ if (MCDI_DWORD(outbuf, GET_PHY_STATE_STATE) != MC_CMD_PHY_STATE_OK)
+ return -EINVAL;
+
+ return 0;
+}
+
struct efx_phy_operations efx_mcdi_phy_ops = {
.probe = efx_mcdi_phy_probe,
.init = efx_port_dummy_op_int,
@@ -569,6 +602,7 @@ struct efx_phy_operations efx_mcdi_phy_ops = {
.remove = efx_mcdi_phy_remove,
.get_settings = efx_mcdi_phy_get_settings,
.set_settings = efx_mcdi_phy_set_settings,
+ .test_alive = efx_mcdi_phy_test_alive,
.run_tests = NULL,
.test_name = NULL,
};
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 1574e52f059..0548fcbbdcd 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -335,3 +335,27 @@ enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
mii_advertise_flowctrl(efx->wanted_fc),
efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
}
+
+int efx_mdio_test_alive(struct efx_nic *efx)
+{
+ int rc;
+ int devad = __ffs(efx->mdio.mmds);
+ u16 physid1, physid2;
+
+ mutex_lock(&efx->mac_lock);
+
+ physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
+ physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
+
+ if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
+ (physid2 == 0x0000) || (physid2 == 0xffff)) {
+ EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
+ efx->mdio.prtad);
+ rc = -EINVAL;
+ } else {
+ rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
+ }
+
+ mutex_unlock(&efx->mac_lock);
+ return rc;
+}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index f6ac9503339..f89e7192960 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -106,4 +106,7 @@ efx_mdio_set_flag(struct efx_nic *efx, int devad, int addr,
mdio_set_flag(&efx->mdio, efx->mdio.prtad, devad, addr, mask, state);
}
+/* Liveness self-test for MDIO PHYs */
+extern int efx_mdio_test_alive(struct efx_nic *efx);
+
#endif /* EFX_MDIO_10G_H */
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index d5aab5b3fa0..cb018e27209 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -18,7 +18,6 @@
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
@@ -101,9 +100,6 @@ do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0)
* Special buffers are used for the event queues and the TX and RX
* descriptor queues for each channel. They are *not* used for the
* actual transmit and receive buffers.
- *
- * Note that for Falcon, TX and RX descriptor queues live in host memory.
- * Allocation and freeing procedures must take this into account.
*/
struct efx_special_buffer {
void *addr;
@@ -300,7 +296,7 @@ struct efx_rx_queue {
* @dma_addr: DMA base address of the buffer
* @len: Buffer length, in bytes
*
- * Falcon uses these buffers for its interrupt status registers and
+ * The NIC uses these buffers for its interrupt status registers and
* MAC stats dumps.
*/
struct efx_buffer {
@@ -516,8 +512,9 @@ struct efx_mac_operations {
* @set_settings: Set ethtool settings. Serialised by the mac_lock.
* @set_npage_adv: Set abilities advertised in (Extended) Next Page
* (only needed where AN bit is set in mmds)
+ * @test_alive: Test that PHY is 'alive' (online)
* @test_name: Get the name of a PHY-specific test/result
- * @run_tests: Run tests and record results as appropriate.
+ * @run_tests: Run tests and record results as appropriate (offline).
* Flags are the ethtool tests flags.
*/
struct efx_phy_operations {
@@ -532,6 +529,7 @@ struct efx_phy_operations {
int (*set_settings) (struct efx_nic *efx,
struct ethtool_cmd *ecmd);
void (*set_npage_adv) (struct efx_nic *efx, u32);
+ int (*test_alive) (struct efx_nic *efx);
const char *(*test_name) (struct efx_nic *efx, unsigned int index);
int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
};
@@ -672,7 +670,7 @@ union efx_multicast_hash {
* @irq_status: Interrupt status buffer
* @last_irq_cpu: Last CPU to handle interrupt.
* This register is written with the SMP processor ID whenever an
- * interrupt is handled. It is used by falcon_test_interrupt()
+ * interrupt is handled. It is used by efx_nic_test_interrupt()
* to verify that an interrupt has occurred.
* @spi_flash: SPI flash device
* This field will be %NULL if no flash device is present (or for Siena).
@@ -721,8 +719,7 @@ union efx_multicast_hash {
* @loopback_modes: Supported loopback mode bitmask
* @loopback_selftest: Offline self-test private state
*
- * The @priv field of the corresponding &struct net_device points to
- * this.
+ * This is stored in the private area of the &struct net_device.
*/
struct efx_nic {
char name[IFNAMSIZ];
@@ -995,7 +992,7 @@ static inline void clear_bit_le(unsigned nr, unsigned char *addr)
* that the net driver will program into the MAC as the maximum frame
* length.
*
- * The 10G MAC used in Falcon requires 8-byte alignment on the frame
+ * The 10G MAC requires 8-byte alignment on the frame
* length, so we round up to the nearest 8.
*
* Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index db44224ed2c..b06f8e34830 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -623,10 +623,6 @@ void efx_nic_remove_rx(struct efx_rx_queue *rx_queue)
*
* This writes the EVQ_RPTR_REG register for the specified channel's
* event queue.
- *
- * Note that EVQ_RPTR_REG contains the index of the "last read" event,
- * whereas channel->eventq_read_ptr contains the index of the "next to
- * read" event.
*/
void efx_nic_eventq_read_ack(struct efx_channel *channel)
{
@@ -1384,6 +1380,15 @@ static irqreturn_t efx_legacy_interrupt(int irq, void *dev_id)
efx->last_irq_cpu = raw_smp_processor_id();
EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
+ } else if (EFX_WORKAROUND_15783(efx)) {
+ /* We can't return IRQ_HANDLED more than once on seeing ISR0=0
+ * because this might be a shared interrupt, but we do need to
+ * check the channel every time and preemptively rearm it if
+ * it's idle. */
+ efx_for_each_channel(channel, efx) {
+ if (!channel->work_pending)
+ efx_nic_eventq_read_ack(channel);
+ }
}
return result;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 67eec7a6e48..1bee62c8300 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -445,4 +445,5 @@ struct efx_phy_operations falcon_qt202x_phy_ops = {
.remove = qt202x_phy_remove,
.get_settings = qt202x_phy_get_settings,
.set_settings = efx_mdio_set_settings,
+ .test_alive = efx_mdio_test_alive,
};
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 250c8827b84..cf0139a7d9a 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -24,9 +24,6 @@
#include "nic.h"
#include "selftest.h"
#include "workarounds.h"
-#include "spi.h"
-#include "io.h"
-#include "mdio_10g.h"
/*
* Loopback test packet structure
@@ -76,42 +73,15 @@ struct efx_loopback_state {
*
**************************************************************************/
-static int efx_test_mdio(struct efx_nic *efx, struct efx_self_tests *tests)
+static int efx_test_phy_alive(struct efx_nic *efx, struct efx_self_tests *tests)
{
int rc = 0;
- int devad;
- u16 physid1, physid2;
-
- if (efx->mdio.mode_support & MDIO_SUPPORTS_C45)
- devad = __ffs(efx->mdio.mmds);
- else if (efx->mdio.mode_support & MDIO_SUPPORTS_C22)
- devad = MDIO_DEVAD_NONE;
- else
- return 0;
-
- mutex_lock(&efx->mac_lock);
- tests->mdio = -1;
-
- physid1 = efx_mdio_read(efx, devad, MDIO_DEVID1);
- physid2 = efx_mdio_read(efx, devad, MDIO_DEVID2);
- if ((physid1 == 0x0000) || (physid1 == 0xffff) ||
- (physid2 == 0x0000) || (physid2 == 0xffff)) {
- EFX_ERR(efx, "no MDIO PHY present with ID %d\n",
- efx->mdio.prtad);
- rc = -EINVAL;
- goto out;
+ if (efx->phy_op->test_alive) {
+ rc = efx->phy_op->test_alive(efx);
+ tests->phy_alive = rc ? -1 : 1;
}
- if (EFX_IS10G(efx)) {
- rc = efx_mdio_check_mmds(efx, efx->mdio.mmds, 0);
- if (rc)
- goto out;
- }
-
-out:
- mutex_unlock(&efx->mac_lock);
- tests->mdio = rc ? -1 : 1;
return rc;
}
@@ -258,7 +228,7 @@ static int efx_test_phy(struct efx_nic *efx, struct efx_self_tests *tests,
return 0;
mutex_lock(&efx->mac_lock);
- rc = efx->phy_op->run_tests(efx, tests->phy, flags);
+ rc = efx->phy_op->run_tests(efx, tests->phy_ext, flags);
mutex_unlock(&efx->mac_lock);
return rc;
}
@@ -684,7 +654,7 @@ int efx_selftest(struct efx_nic *efx, struct efx_self_tests *tests,
/* Online (i.e. non-disruptive) testing
* This checks interrupt generation, event delivery and PHY presence. */
- rc = efx_test_mdio(efx, tests);
+ rc = efx_test_phy_alive(efx, tests);
if (rc && !rc_test)
rc_test = rc;
diff --git a/drivers/net/sfc/selftest.h b/drivers/net/sfc/selftest.h
index f6feee04c96..643bef72b99 100644
--- a/drivers/net/sfc/selftest.h
+++ b/drivers/net/sfc/selftest.h
@@ -32,7 +32,7 @@ struct efx_loopback_self_tests {
*/
struct efx_self_tests {
/* online tests */
- int mdio;
+ int phy_alive;
int nvram;
int interrupt;
int eventq_dma[EFX_MAX_CHANNELS];
@@ -40,7 +40,7 @@ struct efx_self_tests {
int eventq_poll[EFX_MAX_CHANNELS];
/* offline tests */
int registers;
- int phy[EFX_MAX_PHY_TESTS];
+ int phy_ext[EFX_MAX_PHY_TESTS];
struct efx_loopback_self_tests loopback[LOOPBACK_TEST_MAX + 1];
};
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index f8c6771e66d..1619fb5a64f 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -106,16 +106,11 @@ static int siena_probe_port(struct efx_nic *efx)
efx->mdio.mdio_read = siena_mdio_read;
efx->mdio.mdio_write = siena_mdio_write;
- /* Fill out MDIO structure and loopback modes */
+ /* Fill out MDIO structure, loopback modes, and initial link state */
rc = efx->phy_op->probe(efx);
if (rc != 0)
return rc;
- /* Initial assumption */
- efx->link_state.speed = 10000;
- efx->link_state.fd = true;
- efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
-
/* Allocate buffer for stats */
rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer,
MC_CMD_MAC_NSTATS * sizeof(u64));
@@ -139,7 +134,7 @@ void siena_remove_port(struct efx_nic *efx)
static const struct efx_nic_register_test siena_register_tests[] = {
{ FR_AZ_ADR_REGION,
- EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
+ EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) },
{ FR_CZ_USR_EV_CFG,
EFX_OWORD32(0x000103FF, 0x00000000, 0x00000000, 0x00000000) },
{ FR_AZ_RX_CFG,
@@ -181,6 +176,12 @@ static int siena_test_registers(struct efx_nic *efx)
static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
{
+ int rc;
+
+ /* Recover from a failed assertion pre-reset */
+ rc = efx_mcdi_handle_assertion(efx);
+ if (rc)
+ return rc;
if (method == RESET_TYPE_WORLD)
return efx_mcdi_reset_mc(efx);
@@ -582,6 +583,7 @@ struct efx_nic_type siena_a0_nic_type = {
.set_wol = siena_set_wol,
.resume_wol = siena_init_wol,
.test_registers = siena_test_registers,
+ .test_nvram = efx_mcdi_nvram_test_all,
.default_mac_ops = &efx_mcdi_mac_operations,
.revision = EFX_REV_SIENA_A0,
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 3009c297c13..10db071bd83 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -842,6 +842,7 @@ struct efx_phy_operations falcon_sfx7101_phy_ops = {
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sfx7101_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
.test_name = sfx7101_test_name,
.run_tests = sfx7101_run_tests,
};
@@ -856,6 +857,7 @@ struct efx_phy_operations falcon_sft9001_phy_ops = {
.get_settings = tenxpress_get_settings,
.set_settings = tenxpress_set_settings,
.set_npage_adv = sft9001_set_npage_adv,
+ .test_alive = efx_mdio_test_alive,
.test_name = sft9001_test_name,
.run_tests = sft9001_run_tests,
};
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index 6b364a6c6c6..ed999d31f1f 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -660,7 +660,7 @@ static void sgiseeq_set_multicast(struct net_device *dev)
if(dev->flags & IFF_PROMISC)
sp->mode = SEEQ_RCMD_RANY;
- else if ((dev->flags & IFF_ALLMULTI) || dev->mc_count)
+ else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
sp->mode = SEEQ_RCMD_RBMCAST;
else
sp->mode = SEEQ_RCMD_RBCAST;
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 7402b858cab..42a35f086a9 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -1473,13 +1473,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
if (ret)
goto out_unregister;
- /* pritnt device infomation */
- pr_info("Base address at 0x%x, ",
- (u32)ndev->base_addr);
-
- for (i = 0; i < 5; i++)
- printk("%02X:", ndev->dev_addr[i]);
- printk("%02X, IRQ %d.\n", ndev->dev_addr[i], ndev->irq);
+ /* print device infomation */
+ pr_info("Base address at 0x%x, %pM, IRQ %d.\n",
+ (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);
platform_set_drvdata(pdev, ndev);
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 31233b4c44a..760d9e83a46 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -17,7 +17,9 @@
See the file COPYING in this distribution for more information.
- */
+*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
@@ -32,25 +34,13 @@
#include <linux/dma-mapping.h>
#include <asm/irq.h>
-#define net_drv(p, arg...) if (netif_msg_drv(p)) \
- printk(arg)
-#define net_probe(p, arg...) if (netif_msg_probe(p)) \
- printk(arg)
-#define net_link(p, arg...) if (netif_msg_link(p)) \
- printk(arg)
-#define net_intr(p, arg...) if (netif_msg_intr(p)) \
- printk(arg)
-#define net_tx_err(p, arg...) if (netif_msg_tx_err(p)) \
- printk(arg)
-
#define PHY_MAX_ADDR 32
#define PHY_ID_ANY 0x1f
#define MII_REG_ANY 0x1f
-#define DRV_VERSION "1.3"
+#define DRV_VERSION "1.4"
#define DRV_NAME "sis190"
#define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION
-#define PFX DRV_NAME ": "
#define sis190_rx_skb netif_rx
#define sis190_rx_quota(count, quota) count
@@ -294,6 +284,12 @@ struct sis190_private {
struct mii_if_info mii_if;
struct list_head first_phy;
u32 features;
+ u32 negotiated_lpa;
+ enum {
+ LNK_OFF,
+ LNK_ON,
+ LNK_AUTONEG,
+ } link_status;
};
struct sis190_phy {
@@ -334,7 +330,7 @@ static const struct {
{ "SiS 191 PCI Gigabit Ethernet adapter" },
};
-static struct pci_device_id sis190_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
{ 0, },
@@ -381,7 +377,7 @@ static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
}
if (i > 99)
- printk(KERN_ERR PFX "PHY command failed !\n");
+ pr_err("PHY command failed !\n");
}
static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
@@ -493,18 +489,24 @@ static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
{
u32 rx_buf_sz = tp->rx_buf_sz;
struct sk_buff *skb;
+ dma_addr_t mapping;
skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
- if (likely(skb)) {
- dma_addr_t mapping;
-
- mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- sis190_map_to_asic(desc, mapping, rx_buf_sz);
- } else
- sis190_make_unusable_by_asic(desc);
+ if (unlikely(!skb))
+ goto skb_alloc_failed;
+ mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(tp->pci_dev, mapping))
+ goto out;
+ sis190_map_to_asic(desc, mapping, rx_buf_sz);
return skb;
+
+out:
+ dev_kfree_skb_any(skb);
+skb_alloc_failed:
+ sis190_make_unusable_by_asic(desc);
+ return NULL;
}
static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
@@ -589,8 +591,7 @@ static int sis190_rx_interrupt(struct net_device *dev,
status = le32_to_cpu(desc->PSize);
- // net_intr(tp, KERN_INFO "%s: Rx PSize = %08x.\n", dev->name,
- // status);
+ //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);
if (sis190_rx_pkt_err(status, stats) < 0)
sis190_give_to_asic(desc, tp->rx_buf_sz);
@@ -601,9 +602,8 @@ static int sis190_rx_interrupt(struct net_device *dev,
struct pci_dev *pdev = tp->pci_dev;
if (unlikely(pkt_size > tp->rx_buf_sz)) {
- net_intr(tp, KERN_INFO
- "%s: (frag) status = %08x.\n",
- dev->name, status);
+ netif_info(tp, intr, dev,
+ "(frag) status = %08x\n", status);
stats->rx_dropped++;
stats->rx_length_errors++;
sis190_give_to_asic(desc, tp->rx_buf_sz);
@@ -637,12 +637,12 @@ static int sis190_rx_interrupt(struct net_device *dev,
tp->cur_rx = cur_rx;
delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
- if (!delta && count && netif_msg_intr(tp))
- printk(KERN_INFO "%s: no Rx buffer allocated.\n", dev->name);
+ if (!delta && count)
+ netif_info(tp, intr, dev, "no Rx buffer allocated\n");
tp->dirty_rx += delta;
- if (((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) && netif_msg_intr(tp))
- printk(KERN_EMERG "%s: Rx buffers exhausted.\n", dev->name);
+ if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
+ netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");
return count;
}
@@ -751,10 +751,11 @@ static irqreturn_t sis190_interrupt(int irq, void *__dev)
SIS_W32(IntrStatus, status);
- // net_intr(tp, KERN_INFO "%s: status = %08x.\n", dev->name, status);
+// netif_info(tp, intr, dev, "status = %08x\n", status);
if (status & LinkChange) {
- net_intr(tp, KERN_INFO "%s: link change.\n", dev->name);
+ netif_info(tp, intr, dev, "link change\n");
+ del_timer(&tp->timer);
schedule_work(&tp->phy_task);
}
@@ -841,19 +842,17 @@ static void sis190_set_rx_mode(struct net_device *dev)
AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
AcceptAllPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to filter perfectly -- accept all multicasts. */
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0xffffffff;
} else {
struct dev_mc_list *mclist;
- unsigned int i;
rx_mode = AcceptBroadcast | AcceptMyPhys;
mc_filter[1] = mc_filter[0] = 0;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr =
ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
@@ -929,13 +928,15 @@ static void sis190_phy_task(struct work_struct *work)
if (val & BMCR_RESET) {
// FIXME: needlessly high ? -- FR 02/07/2005
mod_timer(&tp->timer, jiffies + HZ/10);
- } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) &
- BMSR_ANEGCOMPLETE)) {
+ goto out_unlock;
+ }
+
+ val = mdio_read_latched(ioaddr, phy_id, MII_BMSR);
+ if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) {
netif_carrier_off(dev);
- net_link(tp, KERN_WARNING "%s: auto-negotiating...\n",
- dev->name);
- mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
- } else {
+ netif_warn(tp, link, dev, "auto-negotiating...\n");
+ tp->link_status = LNK_AUTONEG;
+ } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) {
/* Rejoice ! */
struct {
int val;
@@ -959,13 +960,13 @@ static void sis190_phy_task(struct work_struct *work)
u16 adv, autoexp, gigadv, gigrec;
val = mdio_read(ioaddr, phy_id, 0x1f);
- net_link(tp, KERN_INFO "%s: mii ext = %04x.\n", dev->name, val);
+ netif_info(tp, link, dev, "mii ext = %04x\n", val);
val = mdio_read(ioaddr, phy_id, MII_LPA);
adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION);
- net_link(tp, KERN_INFO "%s: mii lpa=%04x adv=%04x exp=%04x.\n",
- dev->name, val, adv, autoexp);
+ netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n",
+ val, adv, autoexp);
if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) {
/* check for gigabit speed */
@@ -1004,10 +1005,14 @@ static void sis190_phy_task(struct work_struct *work)
SIS_W32(RGDelay, 0x0440);
}
- net_link(tp, KERN_INFO "%s: link on %s mode.\n", dev->name,
- p->msg);
+ tp->negotiated_lpa = p->val;
+
+ netif_info(tp, link, dev, "link on %s mode\n", p->msg);
netif_carrier_on(dev);
- }
+ tp->link_status = LNK_ON;
+ } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG)
+ tp->link_status = LNK_OFF;
+ mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT);
out_unlock:
rtnl_unlock();
@@ -1191,13 +1196,17 @@ static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
if (unlikely(le32_to_cpu(desc->status) & OWNbit)) {
netif_stop_queue(dev);
- net_tx_err(tp, KERN_ERR PFX
- "%s: BUG! Tx Ring full when queue awake!\n",
- dev->name);
+ netif_err(tp, tx_err, dev,
+ "BUG! Tx Ring full when queue awake!\n");
return NETDEV_TX_BUSY;
}
mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(tp->pci_dev, mapping)) {
+ netif_err(tp, tx_err, dev,
+ "PCI mapping failed, dropping packet");
+ return NETDEV_TX_BUSY;
+ }
tp->Tx_skbuff[entry] = skb;
@@ -1211,6 +1220,12 @@ static netdev_tx_t sis190_start_xmit(struct sk_buff *skb,
wmb();
desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit);
+ if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) {
+ /* Half Duplex */
+ desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN);
+ if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL))
+ desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */
+ }
tp->cur_tx++;
@@ -1287,9 +1302,9 @@ static u16 sis190_default_phy(struct net_device *dev)
if (mii_if->phy_id != phy_default->phy_id) {
mii_if->phy_id = phy_default->phy_id;
- net_probe(tp, KERN_INFO
- "%s: Using transceiver at address %d as default.\n",
- pci_name(tp->pci_dev), mii_if->phy_id);
+ if (netif_msg_probe(tp))
+ pr_info("%s: Using transceiver at address %d as default\n",
+ pci_name(tp->pci_dev), mii_if->phy_id);
}
status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR);
@@ -1327,14 +1342,15 @@ static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp,
((mii_status & (BMSR_100FULL | BMSR_100HALF)) ?
LAN : HOME) : p->type;
tp->features |= p->feature;
- net_probe(tp, KERN_INFO "%s: %s transceiver at address %d.\n",
- pci_name(tp->pci_dev), p->name, phy_id);
+ if (netif_msg_probe(tp))
+ pr_info("%s: %s transceiver at address %d\n",
+ pci_name(tp->pci_dev), p->name, phy_id);
} else {
phy->type = UNKNOWN;
- net_probe(tp, KERN_INFO
- "%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
- pci_name(tp->pci_dev),
- phy->id[0], (phy->id[1] & 0xfff0), phy_id);
+ if (netif_msg_probe(tp))
+ pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n",
+ pci_name(tp->pci_dev),
+ phy->id[0], (phy->id[1] & 0xfff0), phy_id);
}
}
@@ -1398,8 +1414,9 @@ static int __devinit sis190_mii_probe(struct net_device *dev)
}
if (list_empty(&tp->first_phy)) {
- net_probe(tp, KERN_INFO "%s: No MII transceivers found!\n",
- pci_name(tp->pci_dev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: No MII transceivers found!\n",
+ pci_name(tp->pci_dev));
rc = -EIO;
goto out;
}
@@ -1445,7 +1462,8 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
dev = alloc_etherdev(sizeof(*tp));
if (!dev) {
- net_drv(&debug, KERN_ERR PFX "unable to alloc new ethernet\n");
+ if (netif_msg_drv(&debug))
+ pr_err("unable to alloc new ethernet\n");
rc = -ENOMEM;
goto err_out_0;
}
@@ -1458,34 +1476,39 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
rc = pci_enable_device(pdev);
if (rc < 0) {
- net_probe(tp, KERN_ERR "%s: enable failure\n", pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: enable failure\n", pci_name(pdev));
goto err_free_dev_1;
}
rc = -ENODEV;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- net_probe(tp, KERN_ERR "%s: region #0 is no MMIO resource.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: region #0 is no MMIO resource\n",
+ pci_name(pdev));
goto err_pci_disable_2;
}
if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) {
- net_probe(tp, KERN_ERR "%s: invalid PCI region size(s).\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: invalid PCI region size(s)\n",
+ pci_name(pdev));
goto err_pci_disable_2;
}
rc = pci_request_regions(pdev, DRV_NAME);
if (rc < 0) {
- net_probe(tp, KERN_ERR PFX "%s: could not request regions.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: could not request regions\n",
+ pci_name(pdev));
goto err_pci_disable_2;
}
rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (rc < 0) {
- net_probe(tp, KERN_ERR "%s: DMA configuration failed.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: DMA configuration failed\n",
+ pci_name(pdev));
goto err_free_res_3;
}
@@ -1493,14 +1516,16 @@ static struct net_device * __devinit sis190_init_board(struct pci_dev *pdev)
ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE);
if (!ioaddr) {
- net_probe(tp, KERN_ERR "%s: cannot remap MMIO, aborting\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_err("%s: cannot remap MMIO, aborting\n",
+ pci_name(pdev));
rc = -EIO;
goto err_free_res_3;
}
tp->pci_dev = pdev;
tp->mmio_addr = ioaddr;
+ tp->link_status = LNK_OFF;
sis190_irq_mask_and_ack(ioaddr);
@@ -1530,9 +1555,8 @@ static void sis190_tx_timeout(struct net_device *dev)
if (tmp8 & CmdTxEnb)
SIS_W8(TxControl, tmp8 & ~CmdTxEnb);
-
- net_tx_err(tp, KERN_INFO "%s: Transmit timeout, status %08x %08x.\n",
- dev->name, SIS_R32(TxControl), SIS_R32(TxSts));
+ netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n",
+ SIS_R32(TxControl), SIS_R32(TxSts));
/* Disable interrupts by clearing the interrupt mask. */
SIS_W32(IntrMask, 0x0000);
@@ -1561,15 +1585,16 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
u16 sig;
int i;
- net_probe(tp, KERN_INFO "%s: Read MAC address from EEPROM\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev));
/* Check to see if there is a sane EEPROM */
sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature);
if ((sig == 0xffff) || (sig == 0x0000)) {
- net_probe(tp, KERN_INFO "%s: Error EEPROM read %x.\n",
- pci_name(pdev), sig);
+ if (netif_msg_probe(tp))
+ pr_info("%s: Error EEPROM read %x\n",
+ pci_name(pdev), sig);
return -EIO;
}
@@ -1603,8 +1628,8 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
u8 reg, tmp8;
unsigned int i;
- net_probe(tp, KERN_INFO "%s: Read MAC address from APC.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: Read MAC address from APC\n", pci_name(pdev));
for (i = 0; i < ARRAY_SIZE(ids); i++) {
isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL);
@@ -1613,8 +1638,9 @@ static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
}
if (!isa_bridge) {
- net_probe(tp, KERN_INFO "%s: Can not find ISA bridge.\n",
- pci_name(pdev));
+ if (netif_msg_probe(tp))
+ pr_info("%s: Can not find ISA bridge\n",
+ pci_name(pdev));
return -EIO;
}
@@ -1695,7 +1721,7 @@ static void sis190_set_speed_auto(struct net_device *dev)
int phy_id = tp->mii_if.phy_id;
int val;
- net_link(tp, KERN_INFO "%s: Enabling Auto-negotiation.\n", dev->name);
+ netif_info(tp, link, dev, "Enabling Auto-negotiation\n");
val = mdio_read(ioaddr, phy_id, MII_ADVERTISE);
@@ -1822,7 +1848,8 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
int rc;
if (!printed_version) {
- net_drv(&debug, KERN_INFO SIS190_DRIVER_NAME " loaded.\n");
+ if (netif_msg_drv(&debug))
+ pr_info(SIS190_DRIVER_NAME " loaded\n");
printed_version = 1;
}
@@ -1862,12 +1889,14 @@ static int __devinit sis190_init_one(struct pci_dev *pdev,
if (rc < 0)
goto err_remove_mii;
- net_probe(tp, KERN_INFO "%s: %s at %p (IRQ: %d), %pM\n",
- pci_name(pdev), sis_chip_info[ent->driver_data].name,
- ioaddr, dev->irq, dev->dev_addr);
-
- net_probe(tp, KERN_INFO "%s: %s mode.\n", dev->name,
- (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
+ if (netif_msg_probe(tp)) {
+ netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n",
+ pci_name(pdev),
+ sis_chip_info[ent->driver_data].name,
+ ioaddr, dev->irq, dev->dev_addr);
+ netdev_info(dev, "%s mode.\n",
+ (tp->features & F_HAS_RGMII) ? "RGMII" : "GMII");
+ }
netif_carrier_off(dev);
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 7360d4bbf75..cc0c731c4f0 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -106,7 +106,7 @@ static const char * card_names[] = {
"SiS 900 PCI Fast Ethernet",
"SiS 7016 PCI Fast Ethernet"
};
-static struct pci_device_id sis900_pci_tbl [] = {
+static DEFINE_PCI_DEVICE_TABLE(sis900_pci_tbl) = {
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_900,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_900},
{PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_7016,
@@ -2288,7 +2288,7 @@ static void set_rx_mode(struct net_device *net_dev)
rx_mode = RFPromiscuous;
for (i = 0; i < table_entries; i++)
mc_filter[i] = 0xffff;
- } else if ((net_dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(net_dev) > multicast_filter_limit) ||
(net_dev->flags & IFF_ALLMULTI)) {
/* too many multicast addresses or accept all multicast packet */
rx_mode = RFAAB | RFAAM;
@@ -2300,9 +2300,8 @@ static void set_rx_mode(struct net_device *net_dev)
* packets */
struct dev_mc_list *mclist;
rx_mode = RFAAB;
- for (i = 0, mclist = net_dev->mc_list;
- mclist && i < net_dev->mc_count;
- i++, mclist = mclist->next) {
+
+ netdev_for_each_mc_addr(mclist, net_dev) {
unsigned int bit_nr =
sis900_mcast_bitnr(mclist->dmi_addr, sis_priv->chipset_rev);
mc_filter[bit_nr >> 4] |= (1 << (bit_nr & 0xf));
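This is the generic shape of the multicast list conversion that repeats in skfddi, skge and sky2 below: the open-coded walk of dev->mc_list/dev->mc_count is replaced by the list helpers, e.g. (sketch only, set_filter_bit() is a placeholder for the per-driver hashing):

	struct dev_mc_list *mclist;

	netdev_for_each_mc_addr(mclist, dev)
		set_filter_bit(filter, mclist->dmi_addr);

netdev_mc_count() and netdev_mc_empty() likewise replace direct reads of dev->mc_count.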
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index db216a72850..1921a54ea99 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -149,7 +149,7 @@ extern void mac_drv_rx_mode(struct s_smc *smc, int mode);
extern void mac_drv_clear_rx_queue(struct s_smc *smc);
extern void enable_tx_irq(struct s_smc *smc, u_short queue);
-static struct pci_device_id skfddi_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(skfddi_pci_tbl) = {
{ PCI_VENDOR_ID_SK, PCI_DEVICE_ID_SK_FP, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* Terminating entry */
};
@@ -435,13 +435,7 @@ static int skfp_driver_init(struct net_device *dev)
goto fail;
}
read_address(smc, NULL);
- pr_debug(KERN_INFO "HW-Addr: %02x %02x %02x %02x %02x %02x\n",
- smc->hw.fddi_canon_addr.a[0],
- smc->hw.fddi_canon_addr.a[1],
- smc->hw.fddi_canon_addr.a[2],
- smc->hw.fddi_canon_addr.a[3],
- smc->hw.fddi_canon_addr.a[4],
- smc->hw.fddi_canon_addr.a[5]);
+ pr_debug(KERN_INFO "HW-Addr: %pMF\n", smc->hw.fddi_canon_addr.a);
memcpy(dev->dev_addr, smc->hw.fddi_canon_addr.a, 6);
smt_reset_defaults(smc, 0);
@@ -858,8 +852,7 @@ static void skfp_ctl_set_multicast_list(struct net_device *dev)
static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
{
struct s_smc *smc = netdev_priv(dev);
- struct dev_mc_list *dmi; /* ptr to multicast addr entry */
- int i;
+ struct dev_mc_list *dmi;
/* Enable promiscuous mode, if necessary */
if (dev->flags & IFF_PROMISC) {
@@ -878,29 +871,19 @@ static void skfp_ctl_set_multicast_list_wo_lock(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI) {
mac_drv_rx_mode(smc, RX_ENABLE_ALLMULTI);
pr_debug(KERN_INFO "ENABLE ALL MC ADDRESSES\n");
- } else if (dev->mc_count > 0) {
- if (dev->mc_count <= FPMAX_MULTICAST) {
+ } else if (!netdev_mc_empty(dev)) {
+ if (netdev_mc_count(dev) <= FPMAX_MULTICAST) {
/* use exact filtering */
// point to first multicast addr
- dmi = dev->mc_list;
-
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
mac_add_multicast(smc,
(struct fddi_addr *)dmi->dmi_addr,
1);
- pr_debug(KERN_INFO "ENABLE MC ADDRESS:");
- pr_debug(" %02x %02x %02x ",
- dmi->dmi_addr[0],
- dmi->dmi_addr[1],
- dmi->dmi_addr[2]);
- pr_debug("%02x %02x %02x\n",
- dmi->dmi_addr[3],
- dmi->dmi_addr[4],
- dmi->dmi_addr[5]);
- dmi = dmi->next;
- } // for
+ pr_debug(KERN_INFO "ENABLE MC ADDRESS: %pMF\n",
+ dmi->dmi_addr);
+ }
} else { // more MC addresses than HW supports
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 379a3dc0016..d0058e5bb6a 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -23,6 +23,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/module.h>
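Defining pr_fmt() before the first include makes every pr_info()/pr_err()/pr_warning() in this file carry the module name automatically, which is what allows the explicit PFX prefix to be deleted below. Roughly:

	pr_info("debugfs create directory failed\n");
	/* expands to */
	printk(KERN_INFO KBUILD_MODNAME ": " "debugfs create directory failed\n");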
@@ -46,7 +48,6 @@
#define DRV_NAME "skge"
#define DRV_VERSION "1.13"
-#define PFX DRV_NAME " "
#define DEFAULT_TX_RING_SIZE 128
#define DEFAULT_RX_RING_SIZE 512
@@ -70,15 +71,15 @@ MODULE_AUTHOR("Stephen Hemminger <shemminger@linux-foundation.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-static const u32 default_msg
- = NETIF_MSG_DRV| NETIF_MSG_PROBE| NETIF_MSG_LINK
- | NETIF_MSG_IFUP| NETIF_MSG_IFDOWN;
+static const u32 default_msg = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN);
static int debug = -1; /* defaults above */
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
-static const struct pci_device_id skge_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(skge_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
@@ -187,8 +188,8 @@ static void skge_wol_init(struct skge_port *skge)
/* Force to 10/100 skge_reset will re-enable on resume */
gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
- PHY_AN_100FULL | PHY_AN_100HALF |
- PHY_AN_10FULL | PHY_AN_10HALF| PHY_AN_CSMA);
+ (PHY_AN_100FULL | PHY_AN_100HALF |
+ PHY_AN_10FULL | PHY_AN_10HALF | PHY_AN_CSMA));
/* no 1000 HD/FD */
gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, 0);
gm_phy_write(hw, port, PHY_MARV_CTRL,
@@ -257,25 +258,28 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
u32 supported;
if (hw->copper) {
- supported = SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full
- | SUPPORTED_1000baseT_Half
- | SUPPORTED_1000baseT_Full
- | SUPPORTED_Autoneg| SUPPORTED_TP;
+ supported = (SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_1000baseT_Full |
+ SUPPORTED_Autoneg |
+ SUPPORTED_TP);
if (hw->chip_id == CHIP_ID_GENESIS)
- supported &= ~(SUPPORTED_10baseT_Half
- | SUPPORTED_10baseT_Full
- | SUPPORTED_100baseT_Half
- | SUPPORTED_100baseT_Full);
+ supported &= ~(SUPPORTED_10baseT_Half |
+ SUPPORTED_10baseT_Full |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_100baseT_Full);
else if (hw->chip_id == CHIP_ID_YUKON)
supported &= ~SUPPORTED_1000baseT_Half;
} else
- supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
- | SUPPORTED_FIBRE | SUPPORTED_Autoneg;
+ supported = (SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half |
+ SUPPORTED_FIBRE |
+ SUPPORTED_Autoneg);
return supported;
}
@@ -365,7 +369,7 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
}
- return (0);
+ return 0;
}
static void skge_get_drvinfo(struct net_device *dev,
@@ -812,7 +816,7 @@ static int skge_get_eeprom_len(struct net_device *dev)
u32 reg2;
pci_read_config_dword(skge->hw->pdev, PCI_DEV_REG2, &reg2);
- return 1 << ( ((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
+ return 1 << (((reg2 & PCI_VPD_ROM_SZ) >> 14) + 8);
}
static u32 skge_vpd_read(struct pci_dev *pdev, int cap, u16 offset)
@@ -1043,7 +1047,7 @@ static int skge_rx_fill(struct net_device *dev)
skb_reserve(skb, NET_IP_ALIGN);
skge_rx_setup(skge, e, skb, skge->rx_buf_size);
- } while ( (e = e->next) != ring->start);
+ } while ((e = e->next) != ring->start);
ring->to_clean = ring->start;
return 0;
@@ -1051,7 +1055,7 @@ static int skge_rx_fill(struct net_device *dev)
static const char *skge_pause(enum pause_status status)
{
- switch(status) {
+ switch (status) {
case FLOW_STAT_NONE:
return "none";
case FLOW_STAT_REM_SEND:
@@ -1074,13 +1078,11 @@ static void skge_link_up(struct skge_port *skge)
netif_carrier_on(skge->netdev);
netif_wake_queue(skge->netdev);
- if (netif_msg_link(skge)) {
- printk(KERN_INFO PFX
- "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
- skge->netdev->name, skge->speed,
- skge->duplex == DUPLEX_FULL ? "full" : "half",
- skge_pause(skge->flow_status));
- }
+ netif_info(skge, link, skge->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ skge->speed,
+ skge->duplex == DUPLEX_FULL ? "full" : "half",
+ skge_pause(skge->flow_status));
}
static void skge_link_down(struct skge_port *skge)
@@ -1089,8 +1091,7 @@ static void skge_link_down(struct skge_port *skge)
netif_carrier_off(skge->netdev);
netif_stop_queue(skge->netdev);
- if (netif_msg_link(skge))
- printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
+ netif_info(skge, link, skge->netdev, "Link is down\n");
}
@@ -1132,8 +1133,7 @@ static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
u16 v = 0;
if (__xm_phy_read(hw, port, reg, &v))
- printk(KERN_WARNING PFX "%s: phy read timed out\n",
- hw->dev[port]->name);
+ pr_warning("%s: phy read timed out\n", hw->dev[port]->name);
return v;
}
@@ -1255,8 +1255,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
if (lpa & PHY_B_AN_RF) {
- printk(KERN_NOTICE PFX "%s: remote fault\n",
- dev->name);
+ netdev_notice(dev, "remote fault\n");
return;
}
@@ -1271,8 +1270,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
skge->duplex = DUPLEX_HALF;
break;
default:
- printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
- dev->name);
+ netdev_notice(dev, "duplex mismatch\n");
return;
}
@@ -1327,7 +1325,7 @@ static void bcom_phy_init(struct skge_port *skge)
/* Optimize MDIO transfer by suppressing preamble. */
r = xm_read16(hw, port, XM_MMU_CMD);
r |= XM_MMU_NO_PRE;
- xm_write16(hw, port, XM_MMU_CMD,r);
+ xm_write16(hw, port, XM_MMU_CMD, r);
switch (id1) {
case PHY_BCOM_ID1_C0:
@@ -1464,8 +1462,7 @@ static int xm_check_link(struct net_device *dev)
lpa = xm_phy_read(hw, port, PHY_XMAC_AUNE_LP);
if (lpa & PHY_B_AN_RF) {
- printk(KERN_NOTICE PFX "%s: remote fault\n",
- dev->name);
+ netdev_notice(dev, "remote fault\n");
return 0;
}
@@ -1480,8 +1477,7 @@ static int xm_check_link(struct net_device *dev)
skge->duplex = DUPLEX_HALF;
break;
default:
- printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
- dev->name);
+ netdev_notice(dev, "duplex mismatch\n");
return 0;
}
@@ -1519,7 +1515,7 @@ static void xm_link_timer(unsigned long arg)
{
struct skge_port *skge = (struct skge_port *) arg;
struct net_device *dev = skge->netdev;
- struct skge_hw *hw = skge->hw;
+ struct skge_hw *hw = skge->hw;
int port = skge->port;
int i;
unsigned long flags;
@@ -1538,7 +1534,7 @@ static void xm_link_timer(unsigned long arg)
goto link_down;
}
- /* Re-enable interrupt to detect link down */
+ /* Re-enable interrupt to detect link down */
if (xm_check_link(dev)) {
u16 msk = xm_read16(hw, port, XM_IMSK);
msk &= ~XM_IS_INP_ASS;
@@ -1569,7 +1565,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
udelay(1);
}
- printk(KERN_WARNING PFX "%s: genesis reset failed\n", dev->name);
+ netdev_warn(dev, "genesis reset failed\n");
reset_ok:
/* Unreset the XMAC. */
@@ -1595,7 +1591,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
}
- switch(hw->phy_type) {
+ switch (hw->phy_type) {
case SK_PHY_XMAC:
xm_phy_init(skge);
break;
@@ -1702,7 +1698,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
if (jumbo) {
/* Enable frame flushing if jumbo frames used */
- skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH);
+ skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_FLUSH);
} else {
/* enable timeout timers if normal frames */
skge_write16(hw, B3_PA_CTRL,
@@ -1717,7 +1713,7 @@ static void genesis_stop(struct skge_port *skge)
unsigned retries = 1000;
u16 cmd;
- /* Disable Tx and Rx */
+ /* Disable Tx and Rx */
cmd = xm_read16(hw, port, XM_MMU_CMD);
cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
xm_write16(hw, port, XM_MMU_CMD, cmd);
@@ -1792,12 +1788,11 @@ static void genesis_mac_intr(struct skge_hw *hw, int port)
struct skge_port *skge = netdev_priv(dev);
u16 status = xm_read16(hw, port, XM_ISRC);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
if (hw->phy_type == SK_PHY_XMAC && (status & XM_IS_INP_ASS)) {
- xm_link_down(hw, port);
+ xm_link_down(hw, port);
mod_timer(&skge->link_timer, jiffies + 1);
}
@@ -1831,7 +1826,7 @@ static void genesis_link_up(struct skge_port *skge)
xm_write16(hw, port, XM_MMU_CMD, cmd);
mode = xm_read32(hw, port, XM_MODE);
- if (skge->flow_status== FLOW_STAT_SYMMETRIC ||
+ if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
skge->flow_status == FLOW_STAT_LOC_SEND) {
/*
* Configure Pause Frame Generation
@@ -1898,12 +1893,11 @@ static inline void bcom_phy_intr(struct skge_port *skge)
u16 isrc;
isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
- skge->netdev->name, isrc);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x\n", isrc);
if (isrc & PHY_B_IS_PSE)
- printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
+ pr_err("%s: uncorrectable pair swap error\n",
hw->dev[port]->name);
/* Workaround BCom Errata:
@@ -1936,8 +1930,7 @@ static int gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
return 0;
}
- printk(KERN_WARNING PFX "%s: phy write timeout\n",
- hw->dev[port]->name);
+ pr_warning("%s: phy write timeout\n", hw->dev[port]->name);
return -EIO;
}
@@ -1965,8 +1958,7 @@ static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
{
u16 v = 0;
if (__gm_phy_read(hw, port, reg, &v))
- printk(KERN_WARNING PFX "%s: phy read timeout\n",
- hw->dev[port]->name);
+ pr_warning("%s: phy read timeout\n", hw->dev[port]->name);
return v;
}
@@ -2298,9 +2290,8 @@ static void yukon_mac_intr(struct skge_hw *hw, int port)
struct skge_port *skge = netdev_priv(dev);
u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "mac interrupt status 0x%x\n", status);
if (status & GM_IS_RX_FF_OR) {
++dev->stats.rx_fifo_errors;
@@ -2379,9 +2370,8 @@ static void yukon_phy_intr(struct skge_port *skge)
istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
- if (netif_msg_intr(skge))
- printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
- skge->netdev->name, istatus, phystat);
+ netif_printk(skge, intr, KERN_DEBUG, skge->netdev,
+ "phy interrupt status 0x%x 0x%x\n", istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
@@ -2441,8 +2431,7 @@ static void yukon_phy_intr(struct skge_port *skge)
}
return;
failed:
- printk(KERN_ERR PFX "%s: autonegotiation failed (%s)\n",
- skge->netdev->name, reason);
+ pr_err("%s: autonegotiation failed (%s)\n", skge->netdev->name, reason);
/* XXX restart autonegotiation? */
}
@@ -2480,7 +2469,7 @@ static int skge_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
if (!netif_running(dev))
return -ENODEV; /* Phy still in reset */
- switch(cmd) {
+ switch (cmd) {
case SIOCGMIIPHY:
data->phy_id = hw->phy_addr;
@@ -2571,8 +2560,7 @@ static int skge_up(struct net_device *dev)
if (!is_valid_ether_addr(dev->dev_addr))
return -EINVAL;
- if (netif_msg_ifup(skge))
- printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+ netif_info(skge, ifup, skge->netdev, "enabling interface\n");
if (dev->mtu > RX_BUF_SIZE)
skge->rx_buf_size = dev->mtu + ETH_HLEN;
@@ -2670,8 +2658,7 @@ static int skge_down(struct net_device *dev)
if (skge->mem == NULL)
return 0;
- if (netif_msg_ifdown(skge))
- printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
+ netif_info(skge, ifdown, skge->netdev, "disabling interface\n");
netif_tx_disable(dev);
@@ -2781,7 +2768,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
* does. Looks like hardware is wrong?
*/
if (ipip_hdr(skb)->protocol == IPPROTO_UDP &&
- hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
+ hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
control = BMU_TCP_CHECK;
else
control = BMU_UDP_CHECK;
@@ -2793,7 +2780,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
control = BMU_CHECK;
if (!skb_shinfo(skb)->nr_frags) /* single buffer i.e. no fragments */
- control |= BMU_EOF| BMU_IRQ_EOF;
+ control |= BMU_EOF | BMU_IRQ_EOF;
else {
struct skge_tx_desc *tf = td;
@@ -2825,15 +2812,15 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
- if (unlikely(netif_msg_tx_queued(skge)))
- printk(KERN_DEBUG "%s: tx queued, slot %td, len %d\n",
- dev->name, e - skge->tx_ring.start, skb->len);
+ netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
+ "tx queued, slot %td, len %d\n",
+ e - skge->tx_ring.start, skb->len);
skge->tx_ring.to_use = e->next;
smp_wmb();
if (skge_avail(&skge->tx_ring) <= TX_LOW_WATER) {
- pr_debug("%s: transmit queue full\n", dev->name);
+ netdev_dbg(dev, "transmit queue full\n");
netif_stop_queue(dev);
}
@@ -2858,9 +2845,8 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
PCI_DMA_TODEVICE);
if (control & BMU_EOF) {
- if (unlikely(netif_msg_tx_done(skge)))
- printk(KERN_DEBUG PFX "%s: tx done slot %td\n",
- skge->netdev->name, e - skge->tx_ring.start);
+ netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+ "tx done slot %td\n", e - skge->tx_ring.start);
dev_kfree_skb(e->skb);
}
@@ -2885,8 +2871,7 @@ static void skge_tx_timeout(struct net_device *dev)
{
struct skge_port *skge = netdev_priv(dev);
- if (netif_msg_timer(skge))
- printk(KERN_DEBUG PFX "%s: tx timeout\n", dev->name);
+ netif_printk(skge, timer, KERN_DEBUG, skge->netdev, "tx timeout\n");
skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_STOP);
skge_tx_clean(dev);
@@ -2932,8 +2917,7 @@ static void genesis_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- int i, count = dev->mc_count;
- struct dev_mc_list *list = dev->mc_list;
+ struct dev_mc_list *list;
u32 mode;
u8 filter[8];
@@ -2953,7 +2937,7 @@ static void genesis_set_multicast(struct net_device *dev)
skge->flow_status == FLOW_STAT_SYMMETRIC)
genesis_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < count; i++, list = list->next)
+ netdev_for_each_mc_addr(list, dev)
genesis_add_filter(filter, list->dmi_addr);
}
@@ -2972,7 +2956,7 @@ static void yukon_set_multicast(struct net_device *dev)
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
int port = skge->port;
- struct dev_mc_list *list = dev->mc_list;
+ struct dev_mc_list *list;
int rx_pause = (skge->flow_status == FLOW_STAT_REM_SEND ||
skge->flow_status == FLOW_STAT_SYMMETRIC);
u16 reg;
@@ -2987,16 +2971,15 @@ static void yukon_set_multicast(struct net_device *dev)
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI) /* all multicast */
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)/* no multicast */
+ else if (netdev_mc_empty(dev) && !rx_pause)/* no multicast */
reg &= ~GM_RXCR_MCF_ENA;
else {
- int i;
reg |= GM_RXCR_MCF_ENA;
if (rx_pause)
yukon_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ netdev_for_each_mc_addr(list, dev)
yukon_add_filter(filter, list->dmi_addr);
}
@@ -3054,10 +3037,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
struct sk_buff *skb;
u16 len = control & BMU_BBC;
- if (unlikely(netif_msg_rx_status(skge)))
- printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
- dev->name, e - skge->rx_ring.start,
- status, len);
+ netif_printk(skge, rx_status, KERN_DEBUG, skge->netdev,
+ "rx slot %td status 0x%x len %d\n",
+ e - skge->rx_ring.start, status, len);
if (len > skge->rx_buf_size)
goto error;
@@ -3096,7 +3078,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
pci_unmap_len(e, maplen),
PCI_DMA_FROMDEVICE);
skb = e->skb;
- prefetch(skb->data);
+ prefetch(skb->data);
skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
}
@@ -3111,10 +3093,9 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
return skb;
error:
- if (netif_msg_rx_err(skge))
- printk(KERN_DEBUG PFX "%s: rx err, slot %td control 0x%x status 0x%x\n",
- dev->name, e - skge->rx_ring.start,
- control, status);
+ netif_printk(skge, rx_err, KERN_DEBUG, skge->netdev,
+ "rx err, slot %td control 0x%x status 0x%x\n",
+ e - skge->rx_ring.start, control, status);
if (skge->hw->chip_id == CHIP_ID_GENESIS) {
if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
@@ -3574,8 +3555,7 @@ static int skge_reset(struct skge_hw *hw)
hw->ram_offset = 0x80000;
} else
hw->ram_size = t8 * 512;
- }
- else if (t8 == 0)
+ } else if (t8 == 0)
hw->ram_size = 0x20000;
else
hw->ram_size = t8 * 4096;
@@ -3729,7 +3709,7 @@ static int skge_device_event(struct notifier_block *unused,
goto done;
skge = netdev_priv(dev);
- switch(event) {
+ switch (event) {
case NETDEV_CHANGENAME:
if (skge->debugfs) {
d = debugfs_rename(skge_debug, skge->debugfs,
@@ -3737,7 +3717,7 @@ static int skge_device_event(struct notifier_block *unused,
if (d)
skge->debugfs = d;
else {
- pr_info(PFX "%s: rename failed\n", dev->name);
+ netdev_info(dev, "rename failed\n");
debugfs_remove(skge->debugfs);
}
}
@@ -3755,8 +3735,7 @@ static int skge_device_event(struct notifier_block *unused,
skge_debug, dev,
&skge_debug_fops);
if (!d || IS_ERR(d))
- pr_info(PFX "%s: debugfs create failed\n",
- dev->name);
+ netdev_info(dev, "debugfs create failed\n");
else
skge->debugfs = d;
break;
@@ -3777,7 +3756,7 @@ static __init void skge_debug_init(void)
ent = debugfs_create_dir("skge", NULL);
if (!ent || IS_ERR(ent)) {
- pr_info(PFX "debugfs create directory failed\n");
+ pr_info("debugfs create directory failed\n");
return;
}
@@ -3885,9 +3864,7 @@ static void __devinit skge_show_addr(struct net_device *dev)
{
const struct skge_port *skge = netdev_priv(dev);
- if (netif_msg_probe(skge))
- printk(KERN_INFO PFX "%s: addr %pM\n",
- dev->name, dev->dev_addr);
+ netif_info(skge, probe, skge->netdev, "addr %pM\n", dev->dev_addr);
}
static int __devinit skge_probe(struct pci_dev *pdev,
@@ -3937,7 +3914,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
err = -ENOMEM;
/* space for skge@pci:0000:04:00.0 */
- hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:" )
+ hw = kzalloc(sizeof(*hw) + strlen(DRV_NAME "@pci:")
+ strlen(pci_name(pdev)) + 1, GFP_KERNEL);
if (!hw) {
dev_err(&pdev->dev, "cannot allocate hardware struct\n");
@@ -3960,9 +3937,10 @@ static int __devinit skge_probe(struct pci_dev *pdev,
if (err)
goto err_out_iounmap;
- printk(KERN_INFO PFX DRV_VERSION " addr 0x%llx irq %d chip %s rev %d\n",
- (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
- skge_board_name(hw), hw->chip_rev);
+ pr_info("%s addr 0x%llx irq %d chip %s rev %d\n",
+ DRV_VERSION,
+ (unsigned long long)pci_resource_start(pdev, 0), pdev->irq,
+ skge_board_name(hw), hw->chip_rev);
dev = skge_devinit(hw, 0, using_dac);
if (!dev)
@@ -4032,7 +4010,8 @@ static void __devexit skge_remove(struct pci_dev *pdev)
flush_scheduled_work();
- if ((dev1 = hw->dev[1]))
+ dev1 = hw->dev[1];
+ if (dev1)
unregister_netdev(dev1);
dev0 = hw->dev[0];
unregister_netdev(dev0);
@@ -4119,8 +4098,7 @@ static int skge_resume(struct pci_dev *pdev)
err = skge_up(dev);
if (err) {
- printk(KERN_ERR PFX "%s: could not up: %d\n",
- dev->name, err);
+ netdev_err(dev, "could not up: %d\n", err);
dev_close(dev);
goto out;
}
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 67249c3c9f5..653bdd76ef4 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -22,6 +22,8 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/crc32.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -50,8 +52,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
-#define DRV_VERSION "1.26"
-#define PFX DRV_NAME " "
+#define DRV_VERSION "1.27"
/*
* The Yukon II chipset takes 64 bit command blocks (called list elements)
@@ -251,6 +252,8 @@ static void sky2_power_on(struct sky2_hw *hw)
sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON);
+
/* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
reg = sky2_read32(hw, B2_GP_IO);
reg |= GLB_GPIO_STAT_RACE_DIS;
@@ -731,7 +734,6 @@ static void sky2_wol_init(struct sky2_port *sky2)
unsigned port = sky2->port;
enum flow_control save_mode;
u16 ctrl;
- u32 reg1;
/* Bring hardware out of reset */
sky2_write16(hw, B0_CTST, CS_RST_CLR);
@@ -782,14 +784,11 @@ static void sky2_wol_init(struct sky2_port *sky2)
ctrl |= WOL_CTL_DIS_PME_ON_PATTERN|WOL_CTL_DIS_PATTERN_UNIT;
sky2_write16(hw, WOL_REGS(port, WOL_CTRL_STAT), ctrl);
- /* Turn on legacy PCI-Express PME mode */
- reg1 = sky2_pci_read32(hw, PCI_DEV_REG1);
- reg1 |= PCI_Y2_PME_LEGACY;
- sky2_pci_write32(hw, PCI_DEV_REG1, reg1);
+ /* Disable PiG firmware */
+ sky2_write16(hw, B0_CTST, Y2_HW_WOL_OFF);
/* block receiver */
sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
-
}
static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
@@ -800,29 +799,15 @@ static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
hw->chip_rev != CHIP_REV_YU_EX_A0) ||
hw->chip_id >= CHIP_ID_YUKON_FE_P) {
/* Yukon-Extreme B0 and further Extreme devices */
- /* enable Store & Forward mode for TX */
-
- if (dev->mtu <= ETH_DATA_LEN)
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_JUMBO_DIS | TX_STFW_ENA);
-
- else
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
- TX_JUMBO_ENA| TX_STFW_ENA);
- } else {
- if (dev->mtu <= ETH_DATA_LEN)
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
- else {
- /* set Tx GMAC FIFO Almost Empty Threshold */
- sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
- (ECU_JUMBO_WM << 16) | ECU_AE_THR);
-
- sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
+ } else if (dev->mtu > ETH_DATA_LEN) {
+ /* set Tx GMAC FIFO Almost Empty Threshold */
+ sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
+ (ECU_JUMBO_WM << 16) | ECU_AE_THR);
- /* Can't do offload because of lack of store/forward */
- dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | NETIF_F_ALL_CSUM);
- }
- }
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_DIS);
+ } else
+ sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_STFW_ENA);
}
static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
@@ -1065,6 +1050,40 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
return le;
}
+static unsigned sky2_get_rx_threshold(struct sky2_port *sky2)
+{
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+ /* Stopping point for hardware truncation */
+ return (size - 8) / sizeof(u32);
+}
+
+static unsigned sky2_get_rx_data_size(struct sky2_port *sky2)
+{
+ struct rx_ring_info *re;
+ unsigned size;
+
+ /* Space needed for frame data + headers rounded up */
+ size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
+
+ sky2->rx_nfrags = size >> PAGE_SHIFT;
+ BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
+
+ /* Compute residue after pages */
+ size -= sky2->rx_nfrags << PAGE_SHIFT;
+
+ /* Optimize to handle small packets and headers */
+ if (size < copybreak)
+ size = copybreak;
+ if (size < ETH_HLEN)
+ size = ETH_HLEN;
+
+ return size;
+}
+
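These two helpers factor out the sizing math that used to live inline in sky2_rx_start(). Worked example, assuming the default 1500-byte MTU, 4 KiB pages and the driver's default copybreak: size = roundup(1500 + ETH_HLEN + VLAN_HLEN, 8) = roundup(1518, 8) = 1520, so the hardware truncation threshold is (1520 - 8) / 4 = 378 32-bit words, rx_nfrags = 1520 >> PAGE_SHIFT = 0, and the 1520-byte residue is already above both copybreak and ETH_HLEN, so rx_data_size stays 1520.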
/* Build description to hardware for one receive segment */
static void sky2_rx_add(struct sky2_port *sky2, u8 op,
dma_addr_t map, unsigned len)
@@ -1103,18 +1122,39 @@ static int sky2_rx_map_skb(struct pci_dev *pdev, struct rx_ring_info *re,
int i;
re->data_addr = pci_map_single(pdev, skb->data, size, PCI_DMA_FROMDEVICE);
- if (unlikely(pci_dma_mapping_error(pdev, re->data_addr)))
- return -EIO;
+ if (pci_dma_mapping_error(pdev, re->data_addr))
+ goto mapping_error;
pci_unmap_len_set(re, data_size, size);
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- re->frag_addr[i] = pci_map_page(pdev,
- skb_shinfo(skb)->frags[i].page,
- skb_shinfo(skb)->frags[i].page_offset,
- skb_shinfo(skb)->frags[i].size,
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ re->frag_addr[i] = pci_map_page(pdev, frag->page,
+ frag->page_offset,
+ frag->size,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(pdev, re->frag_addr[i]))
+ goto map_page_error;
+ }
return 0;
+
+map_page_error:
+ while (--i >= 0) {
+ pci_unmap_page(pdev, re->frag_addr[i],
+ skb_shinfo(skb)->frags[i].size,
+ PCI_DMA_FROMDEVICE);
+ }
+
+ pci_unmap_single(pdev, re->data_addr, pci_unmap_len(re, data_size),
+ PCI_DMA_FROMDEVICE);
+
+mapping_error:
+ if (net_ratelimit())
+ dev_warn(&pdev->dev, "%s: rx mapping error\n",
+ skb->dev->name);
+ return -EIO;
}
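The new error path unwinds only what was actually mapped: fragments are unmapped in reverse order up to the one that failed, then the linear part is unmapped, so a failing pci_map_page() no longer leaks the earlier mappings.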
static void sky2_rx_unmap_skb(struct pci_dev *pdev, struct rx_ring_info *re)
@@ -1173,8 +1213,7 @@ static void sky2_rx_stop(struct sky2_port *sky2)
== sky2_read8(hw, RB_ADDR(rxq, Q_RL)))
goto stopped;
- printk(KERN_WARNING PFX "%s: receiver stop failed\n",
- sky2->netdev->name);
+ netdev_warn(sky2->netdev, "receiver stop failed\n");
stopped:
sky2_write32(hw, Q_ADDR(rxq, Q_CSR), BMU_RST_SET | BMU_FIFO_RST);
@@ -1324,8 +1363,32 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
sky2_put_idx(sky2->hw, rxq, sky2->rx_put);
}
+static int sky2_alloc_rx_skbs(struct sky2_port *sky2)
+{
+ struct sky2_hw *hw = sky2->hw;
+ unsigned i;
+
+ sky2->rx_data_size = sky2_get_rx_data_size(sky2);
+
+ /* Fill Rx ring */
+ for (i = 0; i < sky2->rx_pending; i++) {
+ struct rx_ring_info *re = sky2->rx_ring + i;
+
+ re->skb = sky2_rx_alloc(sky2);
+ if (!re->skb)
+ return -ENOMEM;
+
+ if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
+ dev_kfree_skb(re->skb);
+ re->skb = NULL;
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
/*
- * Allocate and setup receiver buffer pool.
+ * Setup receiver buffer pool.
* Normal case this ends up creating one list element for skb
* in the receive ring. Worst case if using large MTU and each
* allocation falls on a different 64 bit region, that results
@@ -1333,12 +1396,12 @@ static inline void sky2_rx_update(struct sky2_port *sky2, unsigned rxq)
* One element is used for checksum enable/disable, and one
* extra to avoid wrap.
*/
-static int sky2_rx_start(struct sky2_port *sky2)
+static void sky2_rx_start(struct sky2_port *sky2)
{
struct sky2_hw *hw = sky2->hw;
struct rx_ring_info *re;
unsigned rxq = rxqaddr[sky2->port];
- unsigned i, size, thresh;
+ unsigned i, thresh;
sky2->rx_put = sky2->rx_next = 0;
sky2_qset(hw, rxq);
@@ -1359,40 +1422,9 @@ static int sky2_rx_start(struct sky2_port *sky2)
if (!(hw->flags & SKY2_HW_NEW_LE))
rx_set_checksum(sky2);
- /* Space needed for frame data + headers rounded up */
- size = roundup(sky2->netdev->mtu + ETH_HLEN + VLAN_HLEN, 8);
-
- /* Stopping point for hardware truncation */
- thresh = (size - 8) / sizeof(u32);
-
- sky2->rx_nfrags = size >> PAGE_SHIFT;
- BUG_ON(sky2->rx_nfrags > ARRAY_SIZE(re->frag_addr));
-
- /* Compute residue after pages */
- size -= sky2->rx_nfrags << PAGE_SHIFT;
-
- /* Optimize to handle small packets and headers */
- if (size < copybreak)
- size = copybreak;
- if (size < ETH_HLEN)
- size = ETH_HLEN;
-
- sky2->rx_data_size = size;
-
- /* Fill Rx ring */
+ /* submit Rx ring */
for (i = 0; i < sky2->rx_pending; i++) {
re = sky2->rx_ring + i;
-
- re->skb = sky2_rx_alloc(sky2);
- if (!re->skb)
- goto nomem;
-
- if (sky2_rx_map_skb(hw->pdev, re, sky2->rx_data_size)) {
- dev_kfree_skb(re->skb);
- re->skb = NULL;
- goto nomem;
- }
-
sky2_rx_submit(sky2, re);
}
@@ -1402,6 +1434,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
* the register is limited to 9 bits, so if you do frames > 2052
* you better get the MTU right!
*/
+ thresh = sky2_get_rx_threshold(sky2);
if (thresh > 0x1ff)
sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_TRUNC_OFF);
else {
@@ -1433,13 +1466,6 @@ static int sky2_rx_start(struct sky2_port *sky2)
sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
}
-
-
-
- return 0;
-nomem:
- sky2_rx_clean(sky2);
- return -ENOMEM;
}
static int sky2_alloc_buffers(struct sky2_port *sky2)
@@ -1470,7 +1496,7 @@ static int sky2_alloc_buffers(struct sky2_port *sky2)
if (!sky2->rx_ring)
goto nomem;
- return 0;
+ return sky2_alloc_rx_skbs(sky2);
nomem:
return -ENOMEM;
}
@@ -1479,6 +1505,8 @@ static void sky2_free_buffers(struct sky2_port *sky2)
{
struct sky2_hw *hw = sky2->hw;
+ sky2_rx_clean(sky2);
+
if (sky2->rx_le) {
pci_free_consistent(hw->pdev, RX_LE_BYTES,
sky2->rx_le, sky2->rx_le_map);
@@ -1497,16 +1525,16 @@ static void sky2_free_buffers(struct sky2_port *sky2)
sky2->rx_ring = NULL;
}
-/* Bring up network interface. */
-static int sky2_up(struct net_device *dev)
+static void sky2_hw_up(struct sky2_port *sky2)
{
- struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- u32 imask, ramsize;
- int cap, err;
+ u32 ramsize;
+ int cap;
struct net_device *otherdev = hw->dev[sky2->port^1];
+ tx_init(sky2);
+
/*
* On dual port PCI-X card, there is an problem where status
* can be received out of order due to split transactions
@@ -1518,16 +1546,7 @@ static int sky2_up(struct net_device *dev)
cmd = sky2_pci_read16(hw, cap + PCI_X_CMD);
cmd &= ~PCI_X_CMD_MAX_SPLIT;
sky2_pci_write16(hw, cap + PCI_X_CMD, cmd);
-
- }
-
- netif_carrier_off(dev);
-
- err = sky2_alloc_buffers(sky2);
- if (err)
- goto err_out;
-
- tx_init(sky2);
+ }
sky2_mac_init(hw, port);
@@ -1536,7 +1555,7 @@ static int sky2_up(struct net_device *dev)
if (ramsize > 0) {
u32 rxspace;
- pr_debug(PFX "%s: ram buffer %dK\n", dev->name, ramsize);
+ netdev_dbg(sky2->netdev, "ram buffer %dK\n", ramsize);
if (ramsize < 16)
rxspace = ramsize / 2;
else
@@ -1568,18 +1587,33 @@ static int sky2_up(struct net_device *dev)
sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
#endif
- err = sky2_rx_start(sky2);
+ sky2_rx_start(sky2);
+}
+
+/* Bring up network interface. */
+static int sky2_up(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+ unsigned port = sky2->port;
+ u32 imask;
+ int err;
+
+ netif_carrier_off(dev);
+
+ err = sky2_alloc_buffers(sky2);
if (err)
goto err_out;
+ sky2_hw_up(sky2);
+
/* Enable interrupts from phy/mac for port */
imask = sky2_read32(hw, B0_IMSK);
imask |= portirq_msk[port];
sky2_write32(hw, B0_IMSK, imask);
sky2_read32(hw, B0_IMSK);
- if (netif_msg_ifup(sky2))
- printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
+ netif_info(sky2, ifup, dev, "enabling interface\n");
return 0;
@@ -1662,9 +1696,8 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
goto mapping_error;
slot = sky2->tx_prod;
- if (unlikely(netif_msg_tx_queued(sky2)))
- printk(KERN_DEBUG "%s: tx queued, slot %u, len %d\n",
- dev->name, slot, skb->len);
+ netif_printk(sky2, tx_queued, KERN_DEBUG, dev,
+ "tx queued, slot %u, len %d\n", slot, skb->len);
/* Send high bits if needed */
upper = upper_32_bits(mapping);
@@ -1829,9 +1862,8 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2_tx_unmap(sky2->hw->pdev, re);
if (skb) {
- if (unlikely(netif_msg_tx_done(sky2)))
- printk(KERN_DEBUG "%s: tx done %u\n",
- dev->name, idx);
+ netif_printk(sky2, tx_done, KERN_DEBUG, dev,
+ "tx done %u\n", idx);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
@@ -1845,10 +1877,6 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
sky2->tx_cons = idx;
smp_mb();
-
- /* Wake unless it's detached, and called e.g. from sky2_down() */
- if (tx_avail(sky2) > MAX_SKB_TX_LE + 4 && netif_device_present(dev))
- netif_wake_queue(dev);
}
static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
@@ -1873,21 +1901,11 @@ static void sky2_tx_reset(struct sky2_hw *hw, unsigned port)
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
}
-/* Network shutdown */
-static int sky2_down(struct net_device *dev)
+static void sky2_hw_down(struct sky2_port *sky2)
{
- struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
u16 ctrl;
- u32 imask;
-
- /* Never really got started! */
- if (!sky2->tx_le)
- return 0;
-
- if (netif_msg_ifdown(sky2))
- printk(KERN_INFO PFX "%s: disabling interface\n", dev->name);
/* Force flow control off */
sky2_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
@@ -1920,15 +1938,6 @@ static int sky2_down(struct net_device *dev)
sky2_rx_stop(sky2);
- /* Disable port IRQ */
- imask = sky2_read32(hw, B0_IMSK);
- imask &= ~portirq_msk[port];
- sky2_write32(hw, B0_IMSK, imask);
- sky2_read32(hw, B0_IMSK);
-
- synchronize_irq(hw->pdev->irq);
- napi_synchronize(&hw->napi);
-
spin_lock_bh(&sky2->phy_lock);
sky2_phy_power_down(hw, port);
spin_unlock_bh(&sky2->phy_lock);
@@ -1937,8 +1946,29 @@ static int sky2_down(struct net_device *dev)
/* Free any pending frames stuck in HW queue */
sky2_tx_complete(sky2, sky2->tx_prod);
+}
- sky2_rx_clean(sky2);
+/* Network shutdown */
+static int sky2_down(struct net_device *dev)
+{
+ struct sky2_port *sky2 = netdev_priv(dev);
+ struct sky2_hw *hw = sky2->hw;
+
+ /* Never really got started! */
+ if (!sky2->tx_le)
+ return 0;
+
+ netif_info(sky2, ifdown, dev, "disabling interface\n");
+
+ /* Disable port IRQ */
+ sky2_write32(hw, B0_IMSK,
+ sky2_read32(hw, B0_IMSK) & ~portirq_msk[sky2->port]);
+ sky2_read32(hw, B0_IMSK);
+
+ synchronize_irq(hw->pdev->irq);
+ napi_synchronize(&hw->napi);
+
+ sky2_hw_down(sky2);
sky2_free_buffers(sky2);
@@ -1994,12 +2024,11 @@ static void sky2_link_up(struct sky2_port *sky2)
sky2_write8(hw, SK_REG(port, LNK_LED_REG),
LINKLED_ON | LINKLED_BLINK_OFF | LINKLED_LINKSYNC_OFF);
- if (netif_msg_link(sky2))
- printk(KERN_INFO PFX
- "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
- sky2->netdev->name, sky2->speed,
- sky2->duplex == DUPLEX_FULL ? "full" : "half",
- fc_name[sky2->flow_status]);
+ netif_info(sky2, link, sky2->netdev,
+ "Link is up at %d Mbps, %s duplex, flow control %s\n",
+ sky2->speed,
+ sky2->duplex == DUPLEX_FULL ? "full" : "half",
+ fc_name[sky2->flow_status]);
}
static void sky2_link_down(struct sky2_port *sky2)
@@ -2019,8 +2048,7 @@ static void sky2_link_down(struct sky2_port *sky2)
/* Turn off link LED */
sky2_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
- if (netif_msg_link(sky2))
- printk(KERN_INFO PFX "%s: Link is down.\n", sky2->netdev->name);
+ netif_info(sky2, link, sky2->netdev, "Link is down\n");
sky2_phy_init(hw, port);
}
@@ -2042,13 +2070,12 @@ static int sky2_autoneg_done(struct sky2_port *sky2, u16 aux)
advert = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
lpa = gm_phy_read(hw, port, PHY_MARV_AUNE_LP);
if (lpa & PHY_M_AN_RF) {
- printk(KERN_ERR PFX "%s: remote fault", sky2->netdev->name);
+ netdev_err(sky2->netdev, "remote fault\n");
return -1;
}
if (!(aux & PHY_M_PS_SPDUP_RES)) {
- printk(KERN_ERR PFX "%s: speed/duplex mismatch",
- sky2->netdev->name);
+ netdev_err(sky2->netdev, "speed/duplex mismatch\n");
return -1;
}
@@ -2110,9 +2137,8 @@ static void sky2_phy_intr(struct sky2_hw *hw, unsigned port)
istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
- if (netif_msg_intr(sky2))
- printk(KERN_INFO PFX "%s: phy interrupt status 0x%x 0x%x\n",
- sky2->netdev->name, istatus, phystat);
+ netif_info(sky2, intr, sky2->netdev, "phy interrupt status 0x%x 0x%x\n",
+ istatus, phystat);
if (istatus & PHY_M_IS_AN_COMPL) {
if (sky2_autoneg_done(sky2, phystat) == 0)
@@ -2166,13 +2192,12 @@ static void sky2_tx_timeout(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
- if (netif_msg_timer(sky2))
- printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
+ netif_err(sky2, timer, dev, "tx timeout\n");
- printk(KERN_DEBUG PFX "%s: transmit ring %u .. %u report=%u done=%u\n",
- dev->name, sky2->tx_cons, sky2->tx_prod,
- sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
- sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
+ netdev_printk(KERN_DEBUG, dev, "transmit ring %u .. %u report=%u done=%u\n",
+ sky2->tx_cons, sky2->tx_prod,
+ sky2_read16(hw, sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX),
+ sky2_read16(hw, Q_ADDR(txqaddr[sky2->port], Q_DONE)));
/* can't restart safely under softirq */
schedule_work(&hw->restart_work);
@@ -2187,14 +2212,20 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
u16 ctl, mode;
u32 imask;
+ /* MTU size outside the spec */
if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
return -EINVAL;
+ /* MTU > 1500 on yukon FE and FE+ not allowed */
if (new_mtu > ETH_DATA_LEN &&
(hw->chip_id == CHIP_ID_YUKON_FE ||
hw->chip_id == CHIP_ID_YUKON_FE_P))
return -EINVAL;
+ /* TSO, etc on Yukon Ultra and MTU > 1500 not supported */
+ if (new_mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U)
+ dev->features &= ~(NETIF_F_TSO|NETIF_F_SG|NETIF_F_ALL_CSUM);
+
if (!netif_running(dev)) {
dev->mtu = new_mtu;
return 0;
@@ -2229,7 +2260,11 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
sky2_write8(hw, RB_ADDR(rxqaddr[port], RB_CTRL), RB_ENA_OP_MD);
- err = sky2_rx_start(sky2);
+ err = sky2_alloc_rx_skbs(sky2);
+ if (!err)
+ sky2_rx_start(sky2);
+ else
+ sky2_rx_clean(sky2);
sky2_write32(hw, B0_IMSK, imask);
sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2306,30 +2341,32 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
struct rx_ring_info *re,
unsigned int length)
{
- struct sk_buff *skb, *nskb;
+ struct sk_buff *skb;
+ struct rx_ring_info nre;
unsigned hdr_space = sky2->rx_data_size;
- /* Don't be tricky about reusing pages (yet) */
- nskb = sky2_rx_alloc(sky2);
- if (unlikely(!nskb))
- return NULL;
+ nre.skb = sky2_rx_alloc(sky2);
+ if (unlikely(!nre.skb))
+ goto nobuf;
+
+ if (sky2_rx_map_skb(sky2->hw->pdev, &nre, hdr_space))
+ goto nomap;
skb = re->skb;
sky2_rx_unmap_skb(sky2->hw->pdev, re);
-
prefetch(skb->data);
- re->skb = nskb;
- if (sky2_rx_map_skb(sky2->hw->pdev, re, hdr_space)) {
- dev_kfree_skb(nskb);
- re->skb = skb;
- return NULL;
- }
+ *re = nre;
if (skb_shinfo(skb)->nr_frags)
skb_put_frags(skb, hdr_space, length);
else
skb_put(skb, length);
return skb;
+
+nomap:
+ dev_kfree_skb(nre.skb);
+nobuf:
+ return NULL;
}
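The rewrite builds the replacement buffer in a local rx_ring_info and only commits it with *re = nre after both the allocation and the DMA mapping have succeeded; on any failure the original skb and its mapping stay installed in the ring and the frame is simply not delivered, instead of the old sequence that swapped re->skb before knowing whether the new mapping would work.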
/*
@@ -2350,9 +2387,9 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
count -= VLAN_HLEN;
#endif
- if (unlikely(netif_msg_rx_status(sky2)))
- printk(KERN_DEBUG PFX "%s: rx slot %u status 0x%x len %d\n",
- dev->name, sky2->rx_next, status, length);
+ netif_printk(sky2, rx_status, KERN_DEBUG, dev,
+ "rx slot %u status 0x%x len %d\n",
+ sky2->rx_next, status, length);
sky2->rx_next = (sky2->rx_next + 1) % sky2->rx_pending;
prefetch(sky2->rx_ring + sky2->rx_next);
@@ -2381,6 +2418,9 @@ okay:
skb = receive_copy(sky2, re, length);
else
skb = receive_new(sky2, re, length);
+
+ dev->stats.rx_dropped += (skb == NULL);
+
resubmit:
sky2_rx_submit(sky2, re);
@@ -2390,9 +2430,10 @@ len_error:
/* Truncation of overlength packets
causes PHY length to not match MAC length */
++dev->stats.rx_length_errors;
- if (netif_msg_rx_err(sky2) && net_ratelimit())
- pr_info(PFX "%s: rx length error: status %#x length %d\n",
- dev->name, status, length);
+ if (net_ratelimit())
+ netif_info(sky2, rx_err, dev,
+ "rx length error: status %#x length %d\n",
+ status, length);
goto resubmit;
error:
@@ -2402,9 +2443,9 @@ error:
goto resubmit;
}
- if (netif_msg_rx_err(sky2) && net_ratelimit())
- printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
- dev->name, status, length);
+ if (net_ratelimit())
+ netif_info(sky2, rx_err, dev,
+ "rx error, status 0x%x length %d\n", status, length);
if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
dev->stats.rx_length_errors++;
@@ -2421,8 +2462,13 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
{
struct sky2_port *sky2 = netdev_priv(dev);
- if (netif_running(dev))
+ if (netif_running(dev)) {
sky2_tx_complete(sky2, last);
+
+ /* Wake unless it's detached, and called e.g. from sky2_down() */
+ if (tx_avail(sky2) > MAX_SKB_TX_LE + 4)
+ netif_wake_queue(dev);
+ }
}
static inline void sky2_skb_rx(const struct sky2_port *sky2,
@@ -2458,6 +2504,32 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
}
}
+static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
+{
+ /* If this happens then the driver is assuming the wrong format for this chip type */
+ BUG_ON(sky2->hw->flags & SKY2_HW_NEW_LE);
+
+ /* Both checksum counters are programmed to start at
+ * the same offset, so unless there is a problem they
+ * should match. This failure is an early indication that
+ * hardware receive checksumming won't work.
+ */
+ if (likely((u16)(status >> 16) == (u16)status)) {
+ struct sk_buff *skb = sky2->rx_ring[sky2->rx_next].skb;
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ skb->csum = le16_to_cpu(status);
+ } else {
+ dev_notice(&sky2->hw->pdev->dev,
+ "%s: receive checksum problem (status = %#x)\n",
+ sky2->netdev->name, status);
+
+ /* Disable checksum offload */
+ sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
+ sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR),
+ BMU_DIS_RX_CHKSUM);
+ }
+}
+
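The hardware reports two 16-bit checksum counters packed into one 32-bit status word; since both are programmed to start at the same offset, the halves must match on a healthy part. For example, status 0x2f1a2f1a passes the test, while 0x2f1a0000 does not and causes the driver to clear SKY2_FLAG_RX_CHECKSUM and write BMU_DIS_RX_CHKSUM, turning receive checksum offload off for that port.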
/* Process status response ring */
static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
{
@@ -2492,11 +2564,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
case OP_RXSTAT:
total_packets[port]++;
total_bytes[port] += length;
+
skb = sky2_receive(dev, length, status);
- if (unlikely(!skb)) {
- dev->stats.rx_dropped++;
+ if (!skb)
break;
- }
/* This chip reports checksum status differently */
if (hw->flags & SKY2_HW_NEW_LE) {
@@ -2527,37 +2598,8 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
/* fall through */
#endif
case OP_RXCHKS:
- if (!(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
- break;
-
- /* If this happens then driver assuming wrong format */
- if (unlikely(hw->flags & SKY2_HW_NEW_LE)) {
- if (net_ratelimit())
- printk(KERN_NOTICE "%s: unexpected"
- " checksum status\n",
- dev->name);
- break;
- }
-
- /* Both checksum counters are programmed to start at
- * the same offset, so unless there is a problem they
- * should match. This failure is an early indication that
- * hardware receive checksumming won't work.
- */
- if (likely(status >> 16 == (status & 0xffff))) {
- skb = sky2->rx_ring[sky2->rx_next].skb;
- skb->ip_summed = CHECKSUM_COMPLETE;
- skb->csum = le16_to_cpu(status);
- } else {
- printk(KERN_NOTICE PFX "%s: hardware receive "
- "checksum problem (status = %#x)\n",
- dev->name, status);
- sky2->flags &= ~SKY2_FLAG_RX_CHECKSUM;
-
- sky2_write32(sky2->hw,
- Q_ADDR(rxqaddr[port], Q_CSR),
- BMU_DIS_RX_CHKSUM);
- }
+ if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
+ sky2_rx_checksum(sky2, status);
break;
case OP_TXINDEXLE:
@@ -2571,8 +2613,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
default:
if (net_ratelimit())
- printk(KERN_WARNING PFX
- "unknown status opcode 0x%x\n", opcode);
+ pr_warning("unknown status opcode 0x%x\n", opcode);
}
} while (hw->st_idx != idx);
@@ -2591,41 +2632,37 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
struct net_device *dev = hw->dev[port];
if (net_ratelimit())
- printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
- dev->name, status);
+ netdev_info(dev, "hw error interrupt status 0x%x\n", status);
if (status & Y2_IS_PAR_RD1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: ram data read parity error\n",
- dev->name);
+ netdev_err(dev, "ram data read parity error\n");
/* Clear IRQ */
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
}
if (status & Y2_IS_PAR_WR1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: ram data write parity error\n",
- dev->name);
+ netdev_err(dev, "ram data write parity error\n");
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
}
if (status & Y2_IS_PAR_MAC1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
+ netdev_err(dev, "MAC parity error\n");
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
}
if (status & Y2_IS_PAR_RX1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
+ netdev_err(dev, "RX parity error\n");
sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
}
if (status & Y2_IS_TCP_TXA1) {
if (net_ratelimit())
- printk(KERN_ERR PFX "%s: TCP segmentation error\n",
- dev->name);
+ netdev_err(dev, "TCP segmentation error\n");
sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
}
}
@@ -2683,9 +2720,7 @@ static void sky2_mac_intr(struct sky2_hw *hw, unsigned port)
struct sky2_port *sky2 = netdev_priv(dev);
u8 status = sky2_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
- if (netif_msg_intr(sky2))
- printk(KERN_INFO PFX "%s: mac interrupt status 0x%x\n",
- dev->name, status);
+ netif_info(sky2, intr, dev, "mac interrupt status 0x%x\n", status);
if (status & GM_IS_RX_CO_OV)
gma_read16(hw, port, GM_RX_IRQ_SRC);
@@ -2710,8 +2745,7 @@ static void sky2_le_error(struct sky2_hw *hw, unsigned port, u16 q)
struct net_device *dev = hw->dev[port];
u16 idx = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
- dev_err(&hw->pdev->dev, PFX
- "%s: descriptor error q=%#x get=%u put=%u\n",
+ dev_err(&hw->pdev->dev, "%s: descriptor error q=%#x get=%u put=%u\n",
dev->name, (unsigned) q, (unsigned) idx,
(unsigned) sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX)));
@@ -2736,9 +2770,10 @@ static int sky2_rx_hung(struct net_device *dev)
/* Check if the PCI RX hang */
(fifo_rp == sky2->check.fifo_rp &&
fifo_lev != 0 && fifo_lev >= sky2->check.fifo_lev))) {
- printk(KERN_DEBUG PFX "%s: hung mac %d:%d fifo %d (%d:%d)\n",
- dev->name, mac_lev, mac_rp, fifo_lev, fifo_rp,
- sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
+ netdev_printk(KERN_DEBUG, dev,
+ "hung mac %d:%d fifo %d (%d:%d)\n",
+ mac_lev, mac_rp, fifo_lev,
+ fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
return 1;
} else {
sky2->check.last = dev->last_rx;
@@ -2769,8 +2804,7 @@ static void sky2_watchdog(unsigned long arg)
/* For chips with Rx FIFO, check if stuck */
if ((hw->flags & SKY2_HW_RAM_BUFFER) &&
sky2_rx_hung(dev)) {
- pr_info(PFX "%s: receiver hang detected\n",
- dev->name);
+ netdev_info(dev, "receiver hang detected\n");
schedule_work(&hw->restart_work);
return;
}
@@ -3010,11 +3044,20 @@ static void sky2_reset(struct sky2_hw *hw)
u32 hwe_mask = Y2_HWE_ALL_MASK;
/* disable ASF */
- if (hw->chip_id == CHIP_ID_YUKON_EX) {
+ if (hw->chip_id == CHIP_ID_YUKON_EX
+ || hw->chip_id == CHIP_ID_YUKON_SUPR) {
+ sky2_write32(hw, CPU_WDOG, 0);
status = sky2_read16(hw, HCU_CCSR);
status &= ~(HCU_CCSR_AHB_RST | HCU_CCSR_CPU_RST_MODE |
HCU_CCSR_UC_STATE_MSK);
+ /*
+ * CPU clock divider shouldn't be used because
+ * - ASF firmware may malfunction
+ * - Yukon-Supreme: Parallel FLASH doesn't support divided clocks
+ */
+ status &= ~HCU_CCSR_CPU_CLK_DIVIDE_MSK;
sky2_write16(hw, HCU_CCSR, status);
+ sky2_write32(hw, CPU_WDOG, 0);
} else
sky2_write8(hw, B28_Y2_ASF_STAT_CMD, Y2_ASF_RESET);
sky2_write16(hw, B0_CTST, Y2_ASF_DISABLE);
@@ -3097,7 +3140,7 @@ static void sky2_reset(struct sky2_hw *hw)
/* check if PSMv2 was running before */
reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
if (reg & PCI_EXP_LNKCTL_ASPMC) {
- int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
/* restore the PCIe Link Control register */
sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
}
@@ -3188,7 +3231,9 @@ static void sky2_reset(struct sky2_hw *hw)
static void sky2_detach(struct net_device *dev)
{
if (netif_running(dev)) {
+ netif_tx_lock(dev);
netif_device_detach(dev); /* stop txq */
+ netif_tx_unlock(dev);
sky2_down(dev);
}
}
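Taking netif_tx_lock() around netif_device_detach() here presumably closes a race with a transmit in flight: the queue is marked absent under the same lock the xmit path holds, so sky2_down() cannot start tearing down the rings while sky2_xmit_frame() is still using them.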
@@ -3201,8 +3246,7 @@ static int sky2_reattach(struct net_device *dev)
if (netif_running(dev)) {
err = sky2_up(dev);
if (err) {
- printk(KERN_INFO PFX "%s: could not restart %d\n",
- dev->name, err);
+ netdev_info(dev, "could not restart %d\n", err);
dev_close(dev);
} else {
netif_device_attach(dev);
@@ -3216,48 +3260,53 @@ static int sky2_reattach(struct net_device *dev)
static void sky2_restart(struct work_struct *work)
{
struct sky2_hw *hw = container_of(work, struct sky2_hw, restart_work);
+ u32 imask;
int i;
rtnl_lock();
- for (i = 0; i < hw->ports; i++)
- sky2_detach(hw->dev[i]);
napi_disable(&hw->napi);
+ synchronize_irq(hw->pdev->irq);
+ imask = sky2_read32(hw, B0_IMSK);
sky2_write32(hw, B0_IMSK, 0);
- sky2_reset(hw);
- sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
- napi_enable(&hw->napi);
- for (i = 0; i < hw->ports; i++)
- sky2_reattach(hw->dev[i]);
+ for (i = 0; i < hw->ports; i++) {
+ struct net_device *dev = hw->dev[i];
+ struct sky2_port *sky2 = netdev_priv(dev);
- rtnl_unlock();
-}
+ if (!netif_running(dev))
+ continue;
-static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
-{
- return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
-}
+ netif_carrier_off(dev);
+ netif_tx_disable(dev);
+ sky2_hw_down(sky2);
+ }
-static void sky2_hw_set_wol(struct sky2_hw *hw)
-{
- int wol = 0;
- int i;
+ sky2_reset(hw);
for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
struct sky2_port *sky2 = netdev_priv(dev);
- if (sky2->wol)
- wol = 1;
+ if (!netif_running(dev))
+ continue;
+
+ sky2_hw_up(sky2);
+ netif_wake_queue(dev);
}
- if (hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_write32(hw, B0_CTST, wol ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
+ sky2_write32(hw, B0_IMSK, imask);
+ sky2_read32(hw, B0_IMSK);
+
+ sky2_read32(hw, B0_Y2_SP_LISR);
+ napi_enable(&hw->napi);
- device_set_wakeup_enable(&hw->pdev->dev, wol);
+ rtnl_unlock();
+}
+
+static inline u8 sky2_wol_supported(const struct sky2_hw *hw)
+{
+ return sky2_is_copper(hw) ? (WAKE_PHY | WAKE_MAGIC) : 0;
}
static void sky2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -3278,11 +3327,6 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
return -EOPNOTSUPP;
sky2->wol = wol->wolopts;
-
- sky2_hw_set_wol(hw);
-
- if (!netif_running(dev))
- sky2_wol_init(sky2);
return 0;
}
@@ -3577,7 +3621,7 @@ static void sky2_set_multicast(struct net_device *dev)
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
unsigned port = sky2->port;
- struct dev_mc_list *list = dev->mc_list;
+ struct dev_mc_list *list;
u16 reg;
u8 filter[8];
int rx_pause;
@@ -3593,16 +3637,15 @@ static void sky2_set_multicast(struct net_device *dev)
reg &= ~(GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
else if (dev->flags & IFF_ALLMULTI)
memset(filter, 0xff, sizeof(filter));
- else if (dev->mc_count == 0 && !rx_pause)
+ else if (netdev_mc_empty(dev) && !rx_pause)
reg &= ~GM_RXCR_MCF_ENA;
else {
- int i;
reg |= GM_RXCR_MCF_ENA;
if (rx_pause)
sky2_add_filter(filter, pause_mc_addr);
- for (i = 0; list && i < dev->mc_count; i++, list = list->next)
+ netdev_for_each_mc_addr(list, dev)
sky2_add_filter(filter, list->dmi_addr);
}
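The hunk above shows the conversion repeated throughout this patch: open-coded walks over dev->mc_list/dev->mc_count are replaced by the netdev_mc_empty()/netdev_for_each_mc_addr() helpers. A minimal sketch of the converted walk, assuming the 2.6.33-era dev_mc_list API and the driver's existing sky2_add_filter() helper (example_build_filter itself is illustrative, not a function in the driver):

	static void example_build_filter(struct net_device *dev, u8 filter[8])
	{
		struct dev_mc_list *mc;

		memset(filter, 0, 8);
		/* the helper hides the list head and the element count */
		netdev_for_each_mc_addr(mc, dev)
			sky2_add_filter(filter, mc->dmi_addr);
	}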
@@ -3864,6 +3907,50 @@ static int sky2_get_regs_len(struct net_device *dev)
return 0x4000;
}
+static int sky2_reg_access_ok(struct sky2_hw *hw, unsigned int b)
+{
+ /* This complicated switch statement is to make sure we only
+ * access regions that are unreserved.
+ * Some blocks are only valid on dual port cards.
+ */
+ switch (b) {
+ /* second port */
+ case 5: /* Tx Arbiter 2 */
+ case 9: /* RX2 */
+ case 14 ... 15: /* TX2 */
+ case 17: case 19: /* Ram Buffer 2 */
+ case 22 ... 23: /* Tx Ram Buffer 2 */
+ case 25: /* Rx MAC Fifo 2 */
+ case 27: /* Tx MAC Fifo 2 */
+ case 31: /* GPHY 2 */
+ case 40 ... 47: /* Pattern Ram 2 */
+ case 52: case 54: /* TCP Segmentation 2 */
+ case 112 ... 116: /* GMAC 2 */
+ return hw->ports > 1;
+
+ case 0: /* Control */
+ case 2: /* Mac address */
+ case 4: /* Tx Arbiter 1 */
+ case 7: /* PCI express reg */
+ case 8: /* RX1 */
+ case 12 ... 13: /* TX1 */
+ case 16: case 18:/* Rx Ram Buffer 1 */
+ case 20 ... 21: /* Tx Ram Buffer 1 */
+ case 24: /* Rx MAC Fifo 1 */
+ case 26: /* Tx MAC Fifo 1 */
+ case 28 ... 29: /* Descriptor and status unit */
+ case 30: /* GPHY 1*/
+ case 32 ... 39: /* Pattern Ram 1 */
+ case 48: case 50: /* TCP Segmentation 1 */
+ case 56 ... 60: /* PCI space */
+ case 80 ... 84: /* GMAC 1 */
+ return 1;
+
+ default:
+ return 0;
+ }
+}
+
/*
* Returns copy of control register region
* Note: ethtool_get_regs always provides full size (16k) buffer
@@ -3878,55 +3965,13 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
regs->version = 1;
for (b = 0; b < 128; b++) {
- /* This complicated switch statement is to make sure and
- * only access regions that are unreserved.
- * Some blocks are only valid on dual port cards.
- * and block 3 has some special diagnostic registers that
- * are poison.
- */
- switch (b) {
- case 3:
- /* skip diagnostic ram region */
+ /* skip poisonous diagnostic ram region in block 3 */
+ if (b == 3)
memcpy_fromio(p + 0x10, io + 0x10, 128 - 0x10);
- break;
-
- /* dual port cards only */
- case 5: /* Tx Arbiter 2 */
- case 9: /* RX2 */
- case 14 ... 15: /* TX2 */
- case 17: case 19: /* Ram Buffer 2 */
- case 22 ... 23: /* Tx Ram Buffer 2 */
- case 25: /* Rx MAC Fifo 1 */
- case 27: /* Tx MAC Fifo 2 */
- case 31: /* GPHY 2 */
- case 40 ... 47: /* Pattern Ram 2 */
- case 52: case 54: /* TCP Segmentation 2 */
- case 112 ... 116: /* GMAC 2 */
- if (sky2->hw->ports == 1)
- goto reserved;
- /* fall through */
- case 0: /* Control */
- case 2: /* Mac address */
- case 4: /* Tx Arbiter 1 */
- case 7: /* PCI express reg */
- case 8: /* RX1 */
- case 12 ... 13: /* TX1 */
- case 16: case 18:/* Rx Ram Buffer 1 */
- case 20 ... 21: /* Tx Ram Buffer 1 */
- case 24: /* Rx MAC Fifo 1 */
- case 26: /* Tx MAC Fifo 1 */
- case 28 ... 29: /* Descriptor and status unit */
- case 30: /* GPHY 1*/
- case 32 ... 39: /* Pattern Ram 1 */
- case 48: case 50: /* TCP Segmentation 1 */
- case 56 ... 60: /* PCI space */
- case 80 ... 84: /* GMAC 1 */
+ else if (sky2_reg_access_ok(sky2->hw, b))
memcpy_fromio(p, io, 128);
- break;
- default:
-reserved:
+ else
memset(p, 0, 128);
- }
p += 128;
io += 128;
@@ -3978,7 +4023,7 @@ static int sky2_vpd_wait(const struct sky2_hw *hw, int cap, u16 busy)
while ( (sky2_pci_read16(hw, cap + PCI_VPD_ADDR) & PCI_VPD_ADDR_F) == busy) {
/* Can take up to 10.6 ms for write */
if (time_after(jiffies, start + HZ/4)) {
- dev_err(&hw->pdev->dev, PFX "VPD cycle timed out");
+ dev_err(&hw->pdev->dev, "VPD cycle timed out\n");
return -ETIMEDOUT;
}
mdelay(1);
@@ -4312,8 +4357,7 @@ static int sky2_device_event(struct notifier_block *unused,
case NETDEV_GOING_DOWN:
if (sky2->debugfs) {
- printk(KERN_DEBUG PFX "%s: remove debugfs\n",
- dev->name);
+ netdev_printk(KERN_DEBUG, dev, "remove debugfs\n");
debugfs_remove(sky2->debugfs);
sky2->debugfs = NULL;
}
@@ -4466,9 +4510,7 @@ static void __devinit sky2_show_addr(struct net_device *dev)
{
const struct sky2_port *sky2 = netdev_priv(dev);
- if (netif_msg_probe(sky2))
- printk(KERN_INFO PFX "%s: addr %pM\n",
- dev->name, dev->dev_addr);
+ netif_info(sky2, probe, dev, "addr %pM\n", dev->dev_addr);
}
/* Handle software interrupt used during MSI test */
@@ -4774,7 +4816,6 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
}
-#ifdef CONFIG_PM
static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4799,6 +4840,8 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
wol |= sky2->wol;
}
+ device_set_wakeup_enable(&pdev->dev, wol != 0);
+
sky2_write32(hw, B0_IMSK, 0);
napi_disable(&hw->napi);
sky2_power_aux(hw);
@@ -4811,6 +4854,7 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
+#ifdef CONFIG_PM
static int sky2_resume(struct pci_dev *pdev)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
@@ -4830,10 +4874,11 @@ static int sky2_resume(struct pci_dev *pdev)
pci_enable_wake(pdev, PCI_D0, 0);
/* Re-enable all clocks */
- if (hw->chip_id == CHIP_ID_YUKON_EX ||
- hw->chip_id == CHIP_ID_YUKON_EC_U ||
- hw->chip_id == CHIP_ID_YUKON_FE_P)
- sky2_pci_write32(hw, PCI_DEV_REG3, 0);
+ err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
+ if (err) {
+ dev_err(&pdev->dev, "PCI write config failed\n");
+ goto out;
+ }
sky2_reset(hw);
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
@@ -4859,34 +4904,7 @@ out:
static void sky2_shutdown(struct pci_dev *pdev)
{
- struct sky2_hw *hw = pci_get_drvdata(pdev);
- int i, wol = 0;
-
- if (!hw)
- return;
-
- rtnl_lock();
- del_timer_sync(&hw->watchdog_timer);
-
- for (i = 0; i < hw->ports; i++) {
- struct net_device *dev = hw->dev[i];
- struct sky2_port *sky2 = netdev_priv(dev);
-
- if (sky2->wol) {
- wol = 1;
- sky2_wol_init(sky2);
- }
- }
-
- if (wol)
- sky2_power_aux(hw);
- rtnl_unlock();
-
- pci_enable_wake(pdev, PCI_D3hot, wol);
- pci_enable_wake(pdev, PCI_D3cold, wol);
-
- pci_disable_device(pdev);
- pci_set_power_state(pdev, PCI_D3hot);
+ sky2_suspend(pdev, PMSG_SUSPEND);
}
static struct pci_driver sky2_driver = {
@@ -4903,7 +4921,7 @@ static struct pci_driver sky2_driver = {
static int __init sky2_init_module(void)
{
- pr_info(PFX "driver version " DRV_VERSION "\n");
+ pr_info("driver version " DRV_VERSION "\n");
sky2_debug_init();
return pci_register_driver(&sky2_driver);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 365d79c7d83..a5e182dd981 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -1895,14 +1895,14 @@ enum {
/* TX_GMF_CTRL_T 32 bit Tx GMAC FIFO Control/Test */
enum {
- TX_STFW_DIS = 1<<31,/* Disable Store & Forward (Yukon-EC Ultra) */
- TX_STFW_ENA = 1<<30,/* Enable Store & Forward (Yukon-EC Ultra) */
+ TX_STFW_DIS = 1<<31,/* Disable Store & Forward */
+ TX_STFW_ENA = 1<<30,/* Enable Store & Forward */
TX_VLAN_TAG_ON = 1<<25,/* enable VLAN tagging */
TX_VLAN_TAG_OFF = 1<<24,/* disable VLAN tagging */
- TX_JUMBO_ENA = 1<<23,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
- TX_JUMBO_DIS = 1<<22,/* PCI Jumbo Mode enable (Yukon-EC Ultra) */
+ TX_PCI_JUM_ENA = 1<<23,/* PCI Jumbo Mode enable */
+ TX_PCI_JUM_DIS = 1<<22,/* PCI Jumbo Mode enable */
GMF_WSP_TST_ON = 1<<18,/* Write Shadow Pointer Test On */
GMF_WSP_TST_OFF = 1<<17,/* Write Shadow Pointer Test Off */
@@ -2156,7 +2156,7 @@ struct tx_ring_info {
struct sk_buff *skb;
unsigned long flags;
#define TX_MAP_SINGLE 0x0001
-#define TX_MAP_PAGE 000002
+#define TX_MAP_PAGE 0x0002
DECLARE_PCI_UNMAP_ADDR(mapaddr);
DECLARE_PCI_UNMAP_LEN(maplen);
};
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 44ebbaa7457..9871a2b61f8 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -1323,7 +1323,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(SMC_DEBUG_MISC, "%s: RCR_ALMUL\n", dev->name);
mcr |= MAC_CR_MCPAS_;
}
@@ -1340,8 +1340,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
* the number of the 32 bit register, while the low 5 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
- int i;
+ else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *cur_addr;
/* Set the Hash perfec mode */
@@ -1350,8 +1349,7 @@ static void smc911x_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ netdev_for_each_mc_addr(cur_addr, dev) {
u32 position;
/* do we have a pointer here? */
@@ -2017,10 +2015,8 @@ static int __devinit smc911x_probe(struct net_device *dev)
"set using ifconfig\n", dev->name);
} else {
/* Print the Ethernet address */
- printk("%s: Ethernet addr: ", dev->name);
- for (i = 0; i < 5; i++)
- printk("%2.2x:", dev->dev_addr[i]);
- printk("%2.2x\n", dev->dev_addr[5]);
+ printk("%s: Ethernet addr: %pM\n",
+ dev->name, dev->dev_addr);
}
if (lp->phy_type == 0) {
diff --git a/drivers/net/smc911x.h b/drivers/net/smc911x.h
index 05adb6a666c..3269292efec 100644
--- a/drivers/net/smc911x.h
+++ b/drivers/net/smc911x.h
@@ -42,12 +42,12 @@
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
-#elif defined(CONFIG_ARCH_OMAP34XX)
+#elif defined(CONFIG_ARCH_OMAP3)
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
#define SMC_MEM_RESERVED 1
-#elif defined(CONFIG_ARCH_OMAP24XX)
+#elif defined(CONFIG_ARCH_OMAP2)
#define SMC_USE_16BIT 0
#define SMC_USE_32BIT 1
#define SMC_IRQ_SENSE IRQF_TRIGGER_LOW
diff --git a/drivers/net/smc9194.c b/drivers/net/smc9194.c
index 8371b82323a..f9a960e7fc1 100644
--- a/drivers/net/smc9194.c
+++ b/drivers/net/smc9194.c
@@ -434,18 +434,18 @@ static void smc_shutdown( int ioaddr )
*/
-static void smc_setmulticast( int ioaddr, int count, struct dev_mc_list * addrs ) {
+static void smc_setmulticast(int ioaddr, struct net_device *dev)
+{
int i;
unsigned char multicast_table[ 8 ];
- struct dev_mc_list * cur_addr;
+ struct dev_mc_list *cur_addr;
/* table for flipping the order of 3 bits */
unsigned char invert3[] = { 0, 4, 2, 6, 1, 5, 3, 7 };
/* start with a table of all zeros: reject all */
memset( multicast_table, 0, sizeof( multicast_table ) );
- cur_addr = addrs;
- for ( i = 0; i < count ; i ++, cur_addr = cur_addr->next ) {
+ netdev_for_each_mc_addr(cur_addr, dev) {
int position;
/* do we have a pointer here? */
@@ -1542,7 +1542,7 @@ static void smc_set_multicast_list(struct net_device *dev)
/* We just get all multicast packets even if we only want them
. from one source. This will be changed at some future
. point. */
- else if (dev->mc_count ) {
+ else if (!netdev_mc_empty(dev)) {
/* support hardware multicasting */
/* be sure I get rid of flags I might have set */
@@ -1550,7 +1550,7 @@ static void smc_set_multicast_list(struct net_device *dev)
ioaddr + RCR );
/* NOTE: this has to set the bank, so make sure it is the
last thing called. The bank is set to zero at the top */
- smc_setmulticast( ioaddr, dev->mc_count, dev->mc_list );
+ smc_setmulticast(ioaddr, dev);
}
else {
outw( inw( ioaddr + RCR ) & ~(RCR_PROMISC | RCR_ALMUL),
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index ea4fae79d6e..fc1b5a1a358 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1395,7 +1395,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* I don't need to zero the multicast table, because the flag is
* checked before the table is
*/
- else if (dev->flags & IFF_ALLMULTI || dev->mc_count > 16) {
+ else if (dev->flags & IFF_ALLMULTI || netdev_mc_count(dev) > 16) {
DBG(2, "%s: RCR_ALMUL\n", dev->name);
lp->rcr_cur_mode |= RCR_ALMUL;
}
@@ -1412,8 +1412,7 @@ static void smc_set_multicast_list(struct net_device *dev)
* the number of the 8 bit register, while the low 3 bits are the bit
* within that register.
*/
- else if (dev->mc_count) {
- int i;
+ else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *cur_addr;
/* table for flipping the order of 3 bits */
@@ -1422,13 +1421,9 @@ static void smc_set_multicast_list(struct net_device *dev)
/* start with a table of all zeros: reject all */
memset(multicast_table, 0, sizeof(multicast_table));
- cur_addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
+ netdev_for_each_mc_addr(cur_addr, dev) {
int position;
- /* do we have a pointer here? */
- if (!cur_addr)
- break;
/* make sure this is a multicast address -
shouldn't this be a given if we have it here ? */
if (!(*cur_addr->dmi_addr & 1))
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index 494cd91ea39..4fd1d8b3878 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -770,29 +770,25 @@ static int smsc911x_mii_probe(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
struct phy_device *phydev = NULL;
- int phy_addr;
+ int ret;
/* find the first phy */
- for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
- if (pdata->mii_bus->phy_map[phy_addr]) {
- phydev = pdata->mii_bus->phy_map[phy_addr];
- SMSC_TRACE(PROBE, "PHY %d: addr %d, phy_id 0x%08X",
- phy_addr, phydev->addr, phydev->phy_id);
- break;
- }
- }
-
+ phydev = phy_find_first(pdata->mii_bus);
if (!phydev) {
pr_err("%s: no PHY found\n", dev->name);
return -ENODEV;
}
- phydev = phy_connect(dev, dev_name(&phydev->dev),
- &smsc911x_phy_adjust_link, 0, pdata->config.phy_interface);
+ SMSC_TRACE(PROBE, "PHY: addr %d, phy_id 0x%08X",
+ phydev->addr, phydev->phy_id);
- if (IS_ERR(phydev)) {
+ ret = phy_connect_direct(dev, phydev,
+ &smsc911x_phy_adjust_link, 0,
+ pdata->config.phy_interface);
+
+ if (ret) {
pr_err("%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(phydev);
+ return ret;
}
pr_info("%s: attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
@@ -1383,33 +1379,24 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_HPFILT_);
pdata->hashhi = 0;
pdata->hashlo = 0;
- } else if (dev->mc_count > 0) {
+ } else if (!netdev_mc_empty(dev)) {
/* Enabling specific multicast addresses */
unsigned int hash_high = 0;
unsigned int hash_low = 0;
- unsigned int count = 0;
- struct dev_mc_list *mc_list = dev->mc_list;
+ struct dev_mc_list *mc_list;
pdata->set_bits_mask = MAC_CR_HPFILT_;
pdata->clear_bits_mask = (MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- while (mc_list) {
- count++;
- if ((mc_list->dmi_addrlen) == ETH_ALEN) {
- unsigned int bitnum =
- smsc911x_hash(mc_list->dmi_addr);
- unsigned int mask = 0x01 << (bitnum & 0x1F);
- if (bitnum & 0x20)
- hash_high |= mask;
- else
- hash_low |= mask;
- } else {
- SMSC_WARNING(DRV, "dmi_addrlen != 6");
- }
- mc_list = mc_list->next;
+ netdev_for_each_mc_addr(mc_list, dev) {
+ unsigned int bitnum = smsc911x_hash(mc_list->dmi_addr);
+ unsigned int mask = 0x01 << (bitnum & 0x1F);
+
+ if (bitnum & 0x20)
+ hash_high |= mask;
+ else
+ hash_low |= mask;
}
- if (count != (unsigned int)dev->mc_count)
- SMSC_WARNING(DRV, "mc_count != dev->mc_count");
pdata->hashhi = hash_high;
pdata->hashlo = hash_low;
diff --git a/drivers/net/smsc9420.c b/drivers/net/smsc9420.c
index 12f0f5d74e3..30110a11d73 100644
--- a/drivers/net/smsc9420.c
+++ b/drivers/net/smsc9420.c
@@ -80,7 +80,7 @@ struct smsc9420_pdata {
int last_carrier;
};
-static const struct pci_device_id smsc9420_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(smsc9420_id_table) = {
{ PCI_VENDOR_ID_9420, PCI_DEVICE_ID_9420, PCI_ANY_ID, PCI_ANY_ID, },
{ 0, }
};
@@ -1062,12 +1062,12 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
mac_cr &= (~MAC_CR_PRMS_);
mac_cr |= MAC_CR_MCPAS_;
mac_cr &= (~MAC_CR_HPFILT_);
- } else if (dev->mc_count > 0) {
- struct dev_mc_list *mc_list = dev->mc_list;
+ } else if (!netdev_mc_empty(dev)) {
+ struct dev_mc_list *mc_list;
u32 hash_lo = 0, hash_hi = 0;
smsc_dbg(HW, "Multicast filter enabled");
- while (mc_list) {
+ netdev_for_each_mc_addr(mc_list, dev) {
u32 bit_num = smsc9420_hash(mc_list->dmi_addr);
u32 mask = 1 << (bit_num & 0x1F);
@@ -1076,7 +1076,6 @@ static void smsc9420_set_multicast_list(struct net_device *dev)
else
hash_lo |= mask;
- mc_list = mc_list->next;
}
smsc9420_reg_write(pd, HASHH, hash_hi);
smsc9420_reg_write(pd, HASHL, hash_lo);
diff --git a/drivers/net/sonic.c b/drivers/net/sonic.c
index 9599ce77ef8..287c251075e 100644
--- a/drivers/net/sonic.c
+++ b/drivers/net/sonic.c
@@ -531,7 +531,7 @@ static void sonic_multicast_list(struct net_device *dev)
{
struct sonic_local *lp = netdev_priv(dev);
unsigned int rcr;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
unsigned char *addr;
int i;
@@ -541,19 +541,22 @@ static void sonic_multicast_list(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* set promiscuous mode */
rcr |= SONIC_RCR_PRO;
} else {
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 15)) {
+ if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > 15)) {
rcr |= SONIC_RCR_AMC;
} else {
if (sonic_debug > 2)
- printk("sonic_multicast_list: mc_count %d\n", dev->mc_count);
+ printk("sonic_multicast_list: mc_count %d\n",
+ netdev_mc_count(dev));
sonic_set_cam_enable(dev, 1); /* always enable our own address */
- for (i = 1; i <= dev->mc_count; i++) {
+ i = 1;
+ netdev_for_each_mc_addr(dmi, dev) {
addr = dmi->dmi_addr;
- dmi = dmi->next;
sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
+ i++;
}
SONIC_WRITE(SONIC_CDC, 16);
/* issue Load CAM command */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 218524857bf..2f8a8c32021 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -72,7 +72,7 @@ MODULE_PARM_DESC(tx_descriptors, "number of descriptors used " \
char spider_net_driver_name[] = "spidernet";
-static struct pci_device_id spider_net_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(spider_net_pci_tbl) = {
{ PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SPIDER_NET,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ 0, }
@@ -646,7 +646,7 @@ spider_net_set_multi(struct net_device *netdev)
hash = spider_net_get_multicast_hash(netdev, netdev->broadcast); */
set_bit(0xfd, bitmask);
- for (mc = netdev->mc_list; mc; mc = mc->next) {
+ netdev_for_each_mc_addr(mc, netdev) {
hash = spider_net_get_multicast_hash(netdev, mc->dmi_addr);
set_bit(hash, bitmask);
}
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index f9521136a86..6dfa6989901 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -301,7 +301,7 @@ enum chipset {
CH_6915 = 0,
};
-static struct pci_device_id starfire_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(starfire_pci_tbl) = {
{ 0x9004, 0x6915, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_6915 },
{ 0, }
};
@@ -1796,22 +1796,22 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
rx_mode |= AcceptAll;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
rx_mode |= AcceptBroadcast|AcceptAllMulticast|PerfectFilter;
- } else if (dev->mc_count <= 14) {
+ } else if (netdev_mc_count(dev) <= 14) {
/* Use the 16 element perfect filter, skip first two entries. */
void __iomem *filter_addr = ioaddr + PerfFilterTable + 2 * 16;
__be16 *eaddrs;
- for (i = 2, mclist = dev->mc_list; mclist && i < dev->mc_count + 2;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
eaddrs = (__be16 *)mclist->dmi_addr;
writew(be16_to_cpu(eaddrs[2]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 8;
}
eaddrs = (__be16 *)dev->dev_addr;
+ i = netdev_mc_count(dev) + 2;
while (i++ < 16) {
writew(be16_to_cpu(eaddrs[0]), filter_addr); filter_addr += 4;
writew(be16_to_cpu(eaddrs[1]), filter_addr); filter_addr += 4;
@@ -1825,8 +1825,7 @@ static void set_rx_mode(struct net_device *dev)
__le16 mc_filter[32] __attribute__ ((aligned(sizeof(long)))); /* Multicast hash filter */
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
/* The chip uses the upper 9 CRC bits
as index into the hash table */
int bit_nr = ether_crc_le(ETH_ALEN, mclist->dmi_addr) >> 23;
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index 35eaa5251d7..fb287649a30 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -4,8 +4,9 @@ config STMMAC_ETH
select PHYLIB
depends on NETDEVICES && CPU_SUBTYPE_ST40
help
- This is the driver for the ST MAC 10/100/1000 on-chip Ethernet
- controllers. ST Ethernet IPs are built around a Synopsys IP Core.
+ This is the driver for the Ethernet IPs built around a
+ Synopsys IP Core, fully tested on STMicroelectronics
+ platforms.
if STMMAC_ETH
@@ -32,7 +33,8 @@ config STMMAC_TIMER
default n
help
Use an external timer for mitigating the number of network
- interrupts.
+ interrupts. Currently, for SH architectures, it is possible
+ to use the TMU channel 2 and the SH-RTC device.
choice
prompt "Select Timer device"
diff --git a/drivers/net/stmmac/Makefile b/drivers/net/stmmac/Makefile
index b2d7a5564df..c776af15fe1 100644
--- a/drivers/net/stmmac/Makefile
+++ b/drivers/net/stmmac/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_TIMER) += stmmac_timer.o
-stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
- mac100.o gmac.o $(stmmac-y)
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o \
+ dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100.o $(stmmac-y)
diff --git a/drivers/net/stmmac/common.h b/drivers/net/stmmac/common.h
index e49e5188e88..2a58172e986 100644
--- a/drivers/net/stmmac/common.h
+++ b/drivers/net/stmmac/common.h
@@ -23,132 +23,7 @@
*******************************************************************************/
#include "descs.h"
-#include <linux/io.h>
-
-/* *********************************************
- DMA CRS Control and Status Register Mapping
- * *********************************************/
-#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
-#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
-#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
-#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
-#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
-#define DMA_STATUS 0x00001014 /* Status Register */
-#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
-#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
-#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
-#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
-#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
-
-/* ********************************
- DMA Control register defines
- * ********************************/
-#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
-#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
-
-/* **************************************
- DMA Interrupt Enable register defines
- * **************************************/
-/**** NORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
-#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
-#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
-#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
-
-#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
- DMA_INTR_ENA_TIE)
-
-/**** ABNORMAL INTERRUPT ****/
-#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
-#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
-#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
-#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
-#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
-#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
-#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
-#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
-#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
-
-#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
- DMA_INTR_ENA_UNE)
-
-/* DMA default interrupt mask */
-#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
-
-/* ****************************
- * DMA Status register defines
- * ****************************/
-#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
-#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
-#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int. */
-#define DMA_STATUS_GMI 0x08000000
-#define DMA_STATUS_GLI 0x04000000
-#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
-#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
-#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
-#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
-#define DMA_STATUS_TS_SHIFT 20
-#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
-#define DMA_STATUS_RS_SHIFT 17
-#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
-#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
-#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
-#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
-#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
-#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
-#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
-#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
-#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
-#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
-#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
-#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
-#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
-#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
-#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
-
-/* Other defines */
-#define HASH_TABLE_SIZE 64
-#define PAUSE_TIME 0x200
-
-/* Flow Control defines */
-#define FLOW_OFF 0
-#define FLOW_RX 1
-#define FLOW_TX 2
-#define FLOW_AUTO (FLOW_TX | FLOW_RX)
-
-/* DMA STORE-AND-FORWARD Operation Mode */
-#define SF_DMA_MODE 1
-
-#define HW_CSUM 1
-#define NO_HW_CSUM 0
-
-/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
-#define BUF_SIZE_16KiB 16384
-#define BUF_SIZE_8KiB 8192
-#define BUF_SIZE_4KiB 4096
-#define BUF_SIZE_2KiB 2048
-
-/* Power Down and WOL */
-#define PMT_NOT_SUPPORTED 0
-#define PMT_SUPPORTED 1
-
-/* Common MAC defines */
-#define MAC_CTRL_REG 0x00000000 /* MAC Control */
-#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
-#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
-
-/* MAC Management Counters register */
-#define MMC_CONTROL 0x00000100 /* MMC Control */
-#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
-#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
-#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
-#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
-
-#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
-#define MMC_CONTROL_MAX_FRM_SHIFT 3
-#define MMC_CONTROL_MAX_FRAME 0x7FF
+#include <linux/netdevice.h>
struct stmmac_extra_stats {
/* Transmit errors */
@@ -169,7 +44,7 @@ struct stmmac_extra_stats {
unsigned long rx_toolong;
unsigned long rx_collision;
unsigned long rx_crc;
- unsigned long rx_lenght;
+ unsigned long rx_length;
unsigned long rx_mii;
unsigned long rx_multicast;
unsigned long rx_gmac_overflow;
@@ -198,66 +73,62 @@ struct stmmac_extra_stats {
unsigned long normal_irq_n;
};
-/* GMAC core can compute the checksums in HW. */
-enum rx_frame_status {
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0x200
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+#define HW_CSUM 1
+#define NO_HW_CSUM 0
+enum rx_frame_status { /* IPC status */
good_frame = 0,
discard_frame = 1,
csum_none = 2,
};
-static inline void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
- unsigned int high, unsigned int low)
-{
- unsigned long data;
-
- data = (addr[5] << 8) | addr[4];
- writel(data, ioaddr + high);
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- writel(data, ioaddr + low);
+enum tx_dma_irq_status {
+ tx_hard_error = 1,
+ tx_hard_error_bump_tc = 2,
+ handle_tx_rx = 3,
+};
- return;
-}
+/* GMAC TX FIFO is 8K, Rx FIFO is 16K */
+#define BUF_SIZE_16KiB 16384
+#define BUF_SIZE_8KiB 8192
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
-static inline void stmmac_get_mac_addr(unsigned long ioaddr,
- unsigned char *addr, unsigned int high,
- unsigned int low)
-{
- unsigned int hi_addr, lo_addr;
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
- /* Read the MAC address from the hardware */
- hi_addr = readl(ioaddr + high);
- lo_addr = readl(ioaddr + low);
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_RNABLE_RX 0x00000004 /* Receiver Enable */
- /* Extract the MAC address from the high and low words */
- addr[0] = lo_addr & 0xff;
- addr[1] = (lo_addr >> 8) & 0xff;
- addr[2] = (lo_addr >> 16) & 0xff;
- addr[3] = (lo_addr >> 24) & 0xff;
- addr[4] = hi_addr & 0xff;
- addr[5] = (hi_addr >> 8) & 0xff;
+/* MAC Management Counters register */
+#define MMC_CONTROL 0x00000100 /* MMC Control */
+#define MMC_HIGH_INTR 0x00000104 /* MMC High Interrupt */
+#define MMC_LOW_INTR 0x00000108 /* MMC Low Interrupt */
+#define MMC_HIGH_INTR_MASK 0x0000010c /* MMC High Interrupt Mask */
+#define MMC_LOW_INTR_MASK 0x00000110 /* MMC Low Interrupt Mask */
- return;
-}
+#define MMC_CONTROL_MAX_FRM_MASK 0x0003ff8 /* Maximum Frame Size */
+#define MMC_CONTROL_MAX_FRM_SHIFT 3
+#define MMC_CONTROL_MAX_FRAME 0x7FF
-struct stmmac_ops {
- /* MAC core initialization */
- void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
- /* DMA core initialization */
- int (*dma_init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
- /* Dump MAC registers */
- void (*dump_mac_regs) (unsigned long ioaddr);
- /* Dump DMA registers */
- void (*dump_dma_regs) (unsigned long ioaddr);
- /* Set tx/rx threshold in the csr6 register
- * An invalid value enables the store-and-forward mode */
- void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
- /* To track extra statistic (if supported) */
- void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr);
- /* RX descriptor ring initialization */
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
void (*init_rx_desc) (struct dma_desc *p, unsigned int ring_size,
- int disable_rx_ic);
- /* TX descriptor ring initialization */
+ int disable_rx_ic);
+ /* DMA TX descriptor ring initialization */
void (*init_tx_desc) (struct dma_desc *p, unsigned int ring_size);
/* Invoked by the xmit function to prepare the tx descriptor */
@@ -281,7 +152,6 @@ struct stmmac_ops {
/* Get the buffer size from the descriptor */
int (*get_tx_len) (struct dma_desc *p);
/* Handle extra events on specific interrupts hw dependent */
- void (*host_irq_status) (unsigned long ioaddr);
int (*get_rx_owner) (struct dma_desc *p);
void (*set_rx_owner) (struct dma_desc *p);
/* Get the receive frame size */
@@ -289,6 +159,37 @@ struct stmmac_ops {
/* Return the reception status looking at the RDES1 */
int (*rx_status) (void *data, struct stmmac_extra_stats *x,
struct dma_desc *p);
+};
+
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*init) (unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
+ /* Dump DMA registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Set tx/rx threshold in the csr6 register
+ * An invalid value enables the store-and-forward mode */
+ void (*dma_mode) (unsigned long ioaddr, int txmode, int rxmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ unsigned long ioaddr);
+ void (*enable_dma_transmission) (unsigned long ioaddr);
+ void (*enable_dma_irq) (unsigned long ioaddr);
+ void (*disable_dma_irq) (unsigned long ioaddr);
+ void (*start_tx) (unsigned long ioaddr);
+ void (*stop_tx) (unsigned long ioaddr);
+ void (*start_rx) (unsigned long ioaddr);
+ void (*stop_rx) (unsigned long ioaddr);
+ int (*dma_interrupt) (unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
+};
+
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init) (unsigned long ioaddr) ____cacheline_aligned;
+ /* Dump MAC registers */
+ void (*dump_regs) (unsigned long ioaddr);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*host_irq_status) (unsigned long ioaddr);
/* Multicast filter setting */
void (*set_filter) (struct net_device *dev);
/* Flow control setting */
@@ -298,9 +199,9 @@ struct stmmac_ops {
void (*pmt) (unsigned long ioaddr, unsigned long mode);
/* Set/Get Unicast MAC addresses */
void (*set_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
void (*get_umac_addr) (unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n);
+ unsigned int reg_n);
};
struct mac_link {
@@ -314,17 +215,19 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
-struct hw_cap {
- unsigned int version; /* Core Version register (GMAC) */
- unsigned int pmt; /* Power-Down mode (GMAC) */
+struct mac_device_info {
+ struct stmmac_ops *mac;
+ struct stmmac_desc_ops *desc;
+ struct stmmac_dma_ops *dma;
+ unsigned int pmt; /* support Power-Down */
+ struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
- struct mii_regs mii;
};
-struct mac_device_info {
- struct hw_cap hw;
- struct stmmac_ops *ops;
-};
+struct mac_device_info *dwmac1000_setup(unsigned long addr);
+struct mac_device_info *dwmac100_setup(unsigned long addr);
-struct mac_device_info *gmac_setup(unsigned long addr);
-struct mac_device_info *mac100_setup(unsigned long addr);
+extern void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low);
+extern void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
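The reworked common.h splits the old single stmmac_ops table into per-area tables (mac, desc, dma) reachable from mac_device_info. A rough sketch of the intended call path after the split; the local variable names (pbl, dma_tx_phy, rx_ring, and so on) are placeholders, not code quoted from stmmac_main.c:

	struct mac_device_info *hw = dwmac100_setup(ioaddr);

	hw->mac->core_init(ioaddr);                         /* MAC core registers */
	hw->dma->init(ioaddr, pbl, dma_tx_phy, dma_rx_phy); /* DMA engine         */
	hw->desc->init_rx_desc(rx_ring, rx_size, 0);        /* RX descriptor ring */
	hw->desc->init_tx_desc(tx_ring, tx_size);           /* TX descriptor ring */
	hw->dma->start_rx(ioaddr);
	hw->dma->start_tx(ioaddr);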
diff --git a/drivers/net/stmmac/descs.h b/drivers/net/stmmac/descs.h
index 6d2a0b2f5e5..63a03e26469 100644
--- a/drivers/net/stmmac/descs.h
+++ b/drivers/net/stmmac/descs.h
@@ -1,6 +1,6 @@
/*******************************************************************************
- Header File to describe the DMA descriptors
- Use enhanced descriptors in case of GMAC Cores.
+ Header File to describe the DMA descriptors.
+ Enhanced descriptors are used in case of DWMAC1000 Cores.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/stmmac/mac100.c b/drivers/net/stmmac/dwmac100.c
index 625171b6062..803b0373d84 100644
--- a/drivers/net/stmmac/mac100.c
+++ b/drivers/net/stmmac/dwmac100.c
@@ -26,23 +26,23 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include "common.h"
-#include "mac100.h"
+#include "dwmac100.h"
+#include "dwmac_dma.h"
-#undef MAC100_DEBUG
-/*#define MAC100_DEBUG*/
-#ifdef MAC100_DEBUG
+#undef DWMAC100_DEBUG
+/*#define DWMAC100_DEBUG*/
+#ifdef DWMAC100_DEBUG
#define DBG(fmt, args...) printk(fmt, ## args)
#else
#define DBG(fmt, args...) do { } while (0)
#endif
-static void mac100_core_init(unsigned long ioaddr)
+static void dwmac100_core_init(unsigned long ioaddr)
{
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -54,43 +54,43 @@ static void mac100_core_init(unsigned long ioaddr)
return;
}
-static void mac100_dump_mac_regs(unsigned long ioaddr)
+static void dwmac100_dump_mac_regs(unsigned long ioaddr)
{
pr_info("\t----------------------------------------------\n"
- "\t MAC100 CSR (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
+ "\t DWMAC 100 CSR (base addr = 0x%8x)\n"
+ "\t----------------------------------------------\n",
+ (unsigned int)ioaddr);
pr_info("\tcontrol reg (offset 0x%x): 0x%08x\n", MAC_CONTROL,
- readl(ioaddr + MAC_CONTROL));
+ readl(ioaddr + MAC_CONTROL));
pr_info("\taddr HI (offset 0x%x): 0x%08x\n ", MAC_ADDR_HIGH,
- readl(ioaddr + MAC_ADDR_HIGH));
+ readl(ioaddr + MAC_ADDR_HIGH));
pr_info("\taddr LO (offset 0x%x): 0x%08x\n", MAC_ADDR_LOW,
- readl(ioaddr + MAC_ADDR_LOW));
+ readl(ioaddr + MAC_ADDR_LOW));
pr_info("\tmulticast hash HI (offset 0x%x): 0x%08x\n",
- MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
+ MAC_HASH_HIGH, readl(ioaddr + MAC_HASH_HIGH));
pr_info("\tmulticast hash LO (offset 0x%x): 0x%08x\n",
- MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
+ MAC_HASH_LOW, readl(ioaddr + MAC_HASH_LOW));
pr_info("\tflow control (offset 0x%x): 0x%08x\n",
MAC_FLOW_CTRL, readl(ioaddr + MAC_FLOW_CTRL));
pr_info("\tVLAN1 tag (offset 0x%x): 0x%08x\n", MAC_VLAN1,
- readl(ioaddr + MAC_VLAN1));
+ readl(ioaddr + MAC_VLAN1));
pr_info("\tVLAN2 tag (offset 0x%x): 0x%08x\n", MAC_VLAN2,
- readl(ioaddr + MAC_VLAN2));
+ readl(ioaddr + MAC_VLAN2));
pr_info("\n\tMAC management counter registers\n");
pr_info("\t MMC crtl (offset 0x%x): 0x%08x\n",
- MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
+ MMC_CONTROL, readl(ioaddr + MMC_CONTROL));
pr_info("\t MMC High Interrupt (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
+ MMC_HIGH_INTR, readl(ioaddr + MMC_HIGH_INTR));
pr_info("\t MMC Low Interrupt (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
+ MMC_LOW_INTR, readl(ioaddr + MMC_LOW_INTR));
pr_info("\t MMC High Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
+ MMC_HIGH_INTR_MASK, readl(ioaddr + MMC_HIGH_INTR_MASK));
pr_info("\t MMC Low Interrupt Mask (offset 0x%x): 0x%08x\n",
- MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
+ MMC_LOW_INTR_MASK, readl(ioaddr + MMC_LOW_INTR_MASK));
return;
}
-static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+static int dwmac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
@@ -117,7 +117,7 @@ static int mac100_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
/* Store and Forward capability is not used at all..
* The transmit threshold can be programmed by
* setting the TTC bits in the DMA control register.*/
-static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac100_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -134,11 +134,11 @@ static void mac100_dma_operation_mode(unsigned long ioaddr, int txmode,
return;
}
-static void mac100_dump_dma_regs(unsigned long ioaddr)
+static void dwmac100_dump_dma_regs(unsigned long ioaddr)
{
int i;
- DBG(KERN_DEBUG "MAC100 DMA CSR \n");
+ DBG(KERN_DEBUG "DWMAC 100 DMA CSR \n");
for (i = 0; i < 9; i++)
pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
(DMA_BUS_MODE + i * 4),
@@ -151,8 +151,9 @@ static void mac100_dump_dma_regs(unsigned long ioaddr)
}
/* DMA controller has two counters to track the number of
- the receive missed frames. */
-static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ * the receive missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x,
unsigned long ioaddr)
{
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -181,7 +182,8 @@ static void mac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
return;
}
-static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
@@ -217,7 +219,7 @@ static int mac100_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int mac100_get_tx_len(struct dma_desc *p)
+static int dwmac100_get_tx_len(struct dma_desc *p)
{
return p->des01.tx.buffer1_size;
}
@@ -226,14 +228,15 @@ static int mac100_get_tx_len(struct dma_desc *p)
* and, if required, updates the multicast statistics.
* In case of success, it returns csum_none because the device
* is not able to compute the csum in HW. */
-static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
+static int dwmac100_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
struct dma_desc *p)
{
int ret = csum_none;
struct net_device_stats *stats = (struct net_device_stats *)data;
if (unlikely(p->des01.rx.last_descriptor == 0)) {
- pr_warning("mac100 Error: Oversized Ethernet "
+ pr_warning("dwmac100 Error: Oversized Ethernet "
"frame spanned multiple buffers\n");
stats->rx_length_errors++;
return discard_frame;
@@ -262,7 +265,7 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
ret = discard_frame;
if (unlikely(p->des01.rx.length_error)) {
- x->rx_lenght++;
+ x->rx_length++;
ret = discard_frame;
}
if (unlikely(p->des01.rx.mii_error)) {
@@ -276,24 +279,24 @@ static int mac100_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void mac100_irq_status(unsigned long ioaddr)
+static void dwmac100_irq_status(unsigned long ioaddr)
{
return;
}
-static void mac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+static void dwmac100_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
unsigned int reg_n)
{
stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
}
-static void mac100_set_filter(struct net_device *dev)
+static void dwmac100_set_filter(struct net_device *dev)
{
unsigned long ioaddr = dev->base_addr;
u32 value = readl(ioaddr + MAC_CONTROL);
@@ -302,29 +305,27 @@ static void mac100_set_filter(struct net_device *dev)
value |= MAC_CONTROL_PR;
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
MAC_CONTROL_HP);
- } else if ((dev->mc_count > HASH_TABLE_SIZE)
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
|| (dev->flags & IFF_ALLMULTI)) {
value |= MAC_CONTROL_PM;
value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
writel(0xffffffff, ioaddr + MAC_HASH_LOW);
- } else if (dev->mc_count == 0) { /* no multicast */
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
MAC_CONTROL_HO | MAC_CONTROL_HP);
} else {
- int i;
u32 mc_filter[2];
struct dev_mc_list *mclist;
/* Perfect filter mode for physical address and Hash
filter for multicast */
value |= MAC_CONTROL_HP;
- value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF
- | MAC_CONTROL_HO);
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
/* The upper 6 bits of the calculated CRC are used to
* index the contents of the hash table */
int bit_nr =
@@ -347,7 +348,7 @@ static void mac100_set_filter(struct net_device *dev)
return;
}
-static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+static void dwmac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
unsigned int fc, unsigned int pause_time)
{
unsigned int flow = MAC_FLOW_CTRL_ENABLE;
@@ -359,13 +360,15 @@ static void mac100_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
return;
}
-/* No PMT module supported in our SoC for the Ethernet Controller. */
-static void mac100_pmt(unsigned long ioaddr, unsigned long mode)
+/* No PMT module supported for this Ethernet Controller.
+ * Tested on ST platforms only.
+ */
+static void dwmac100_pmt(unsigned long ioaddr, unsigned long mode)
{
return;
}
-static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -381,7 +384,7 @@ static void mac100_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
for (i = 0; i < ring_size; i++) {
@@ -393,32 +396,32 @@ static void mac100_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int mac100_get_tx_owner(struct dma_desc *p)
+static int dwmac100_get_tx_owner(struct dma_desc *p)
{
return p->des01.tx.own;
}
-static int mac100_get_rx_owner(struct dma_desc *p)
+static int dwmac100_get_rx_owner(struct dma_desc *p)
{
return p->des01.rx.own;
}
-static void mac100_set_tx_owner(struct dma_desc *p)
+static void dwmac100_set_tx_owner(struct dma_desc *p)
{
p->des01.tx.own = 1;
}
-static void mac100_set_rx_owner(struct dma_desc *p)
+static void dwmac100_set_rx_owner(struct dma_desc *p)
{
p->des01.rx.own = 1;
}
-static int mac100_get_tx_ls(struct dma_desc *p)
+static int dwmac100_get_tx_ls(struct dma_desc *p)
{
return p->des01.tx.last_segment;
}
-static void mac100_release_tx_desc(struct dma_desc *p)
+static void dwmac100_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.tx.end_ring;
@@ -444,74 +447,91 @@ static void mac100_release_tx_desc(struct dma_desc *p)
return;
}
-static void mac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac100_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.tx.first_segment = is_fs;
p->des01.tx.buffer1_size = len;
}
-static void mac100_clear_tx_ic(struct dma_desc *p)
+static void dwmac100_clear_tx_ic(struct dma_desc *p)
{
p->des01.tx.interrupt = 0;
}
-static void mac100_close_tx_desc(struct dma_desc *p)
+static void dwmac100_close_tx_desc(struct dma_desc *p)
{
p->des01.tx.last_segment = 1;
p->des01.tx.interrupt = 1;
}
-static int mac100_get_rx_frame_len(struct dma_desc *p)
+static int dwmac100_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.rx.frame_length;
}
-struct stmmac_ops mac100_driver = {
- .core_init = mac100_core_init,
- .dump_mac_regs = mac100_dump_mac_regs,
- .dma_init = mac100_dma_init,
- .dump_dma_regs = mac100_dump_dma_regs,
- .dma_mode = mac100_dma_operation_mode,
- .dma_diagnostic_fr = mac100_dma_diagnostic_fr,
- .tx_status = mac100_get_tx_frame_status,
- .rx_status = mac100_get_rx_frame_status,
- .get_tx_len = mac100_get_tx_len,
- .set_filter = mac100_set_filter,
- .flow_ctrl = mac100_flow_ctrl,
- .pmt = mac100_pmt,
- .init_rx_desc = mac100_init_rx_desc,
- .init_tx_desc = mac100_init_tx_desc,
- .get_tx_owner = mac100_get_tx_owner,
- .get_rx_owner = mac100_get_rx_owner,
- .release_tx_desc = mac100_release_tx_desc,
- .prepare_tx_desc = mac100_prepare_tx_desc,
- .clear_tx_ic = mac100_clear_tx_ic,
- .close_tx_desc = mac100_close_tx_desc,
- .get_tx_ls = mac100_get_tx_ls,
- .set_tx_owner = mac100_set_tx_owner,
- .set_rx_owner = mac100_set_rx_owner,
- .get_rx_frame_len = mac100_get_rx_frame_len,
- .host_irq_status = mac100_irq_status,
- .set_umac_addr = mac100_set_umac_addr,
- .get_umac_addr = mac100_get_umac_addr,
+struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
};
-struct mac_device_info *mac100_setup(unsigned long ioaddr)
+struct stmmac_dma_ops dwmac100_dma_ops = {
+ .init = dwmac100_dma_init,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_mode = dwmac100_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
+
+struct stmmac_desc_ops dwmac100_desc_ops = {
+ .tx_status = dwmac100_get_tx_frame_status,
+ .rx_status = dwmac100_get_rx_frame_status,
+ .get_tx_len = dwmac100_get_tx_len,
+ .init_rx_desc = dwmac100_init_rx_desc,
+ .init_tx_desc = dwmac100_init_tx_desc,
+ .get_tx_owner = dwmac100_get_tx_owner,
+ .get_rx_owner = dwmac100_get_rx_owner,
+ .release_tx_desc = dwmac100_release_tx_desc,
+ .prepare_tx_desc = dwmac100_prepare_tx_desc,
+ .clear_tx_ic = dwmac100_clear_tx_ic,
+ .close_tx_desc = dwmac100_close_tx_desc,
+ .get_tx_ls = dwmac100_get_tx_ls,
+ .set_tx_owner = dwmac100_set_tx_owner,
+ .set_rx_owner = dwmac100_set_rx_owner,
+ .get_rx_frame_len = dwmac100_get_rx_frame_len,
+};
+
+struct mac_device_info *dwmac100_setup(unsigned long ioaddr)
{
struct mac_device_info *mac;
mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
- pr_info("\tMAC 10/100\n");
+ pr_info("\tDWMAC100\n");
+
+ mac->mac = &dwmac100_ops;
+ mac->desc = &dwmac100_desc_ops;
+ mac->dma = &dwmac100_dma_ops;
- mac->ops = &mac100_driver;
- mac->hw.pmt = PMT_NOT_SUPPORTED;
- mac->hw.link.port = MAC_CONTROL_PS;
- mac->hw.link.duplex = MAC_CONTROL_F;
- mac->hw.link.speed = 0;
- mac->hw.mii.addr = MAC_MII_ADDR;
- mac->hw.mii.data = MAC_MII_DATA;
+ mac->pmt = PMT_NOT_SUPPORTED;
+ mac->link.port = MAC_CONTROL_PS;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed = 0;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
return mac;
}
diff --git a/drivers/net/stmmac/mac100.h b/drivers/net/stmmac/dwmac100.h
index 0f8f110d004..0f8f110d004 100644
--- a/drivers/net/stmmac/mac100.h
+++ b/drivers/net/stmmac/dwmac100.h
diff --git a/drivers/net/stmmac/gmac.h b/drivers/net/stmmac/dwmac1000.h
index 2e82d6c9a14..62dca0e384e 100644
--- a/drivers/net/stmmac/gmac.h
+++ b/drivers/net/stmmac/dwmac1000.h
@@ -20,6 +20,9 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
+#include <linux/phy.h>
+#include "common.h"
+
#define GMAC_CONTROL 0x00000000 /* Configuration */
#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
@@ -32,7 +35,7 @@
#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
-enum gmac_irq_status {
+enum dwmac1000_irq_status {
time_stamp_irq = 0x0200,
mmc_rx_csum_offload_irq = 0x0080,
mmc_tx_irq = 0x0040,
@@ -202,3 +205,16 @@ enum rtc_control {
#define GMAC_MMC_RX_INTR 0x104
#define GMAC_MMC_TX_INTR 0x108
#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
+
+#undef DWMAC1000_DEBUG
+/* #define DWMAC1000_DEBUG */
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+#ifdef DWMAC1000_DEBUG
+#define DBG(fmt, args...) printk(fmt, ## args)
+#else
+#define DBG(fmt, args...) do { } while (0)
+#endif
+
+extern struct stmmac_dma_ops dwmac1000_dma_ops;
+extern struct stmmac_desc_ops dwmac1000_desc_ops;
diff --git a/drivers/net/stmmac/dwmac1000_core.c b/drivers/net/stmmac/dwmac1000_core.c
new file mode 100644
index 00000000000..a6538ae4694
--- /dev/null
+++ b/drivers/net/stmmac/dwmac1000_core.c
@@ -0,0 +1,243 @@
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include "dwmac1000.h"
+
+static void dwmac1000_core_init(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+ value |= GMAC_CORE_INIT;
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ /* STBus Bridge Configuration */
+ /*writel(0xc5608, ioaddr + 0x00007000);*/
+
+ /* Freeze MMC counters */
+ writel(0x8, ioaddr + GMAC_MMC_CTRL);
+ /* Mask GMAC interrupts */
+ writel(0x207, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Tag detection without filtering */
+ writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+ return;
+}
+
+static void dwmac1000_dump_regs(unsigned long ioaddr)
+{
+ int i;
+ pr_info("\tDWMAC1000 regs (base addr = 0x%8x)\n", (unsigned int)ioaddr);
+
+ for (i = 0; i < 55; i++) {
+ int offset = i * 4;
+ pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
+ offset, readl(ioaddr + offset));
+ }
+ return;
+}
+
+static void dwmac1000_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int reg_n)
+{
+ stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_set_filter(struct net_device *dev)
+{
+ unsigned long ioaddr = dev->base_addr;
+ unsigned int value = 0;
+
+ DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
+ __func__, netdev_mc_count(dev), netdev_uc_count(dev));
+
+ if (dev->flags & IFF_PROMISC)
+ value = GMAC_FRAME_FILTER_PR;
+ else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
+ } else if (!netdev_mc_empty(dev)) {
+ u32 mc_filter[2];
+ struct dev_mc_list *mclist;
+
+ /* Hash filter for multicast */
+ value = GMAC_FRAME_FILTER_HMC;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(mclist, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+ index the contents of the hash table */
+ int bit_nr =
+ bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register. */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
+ }
+
+ /* Handle multiple unicast addresses (perfect filtering)*/
+ if (netdev_uc_count(dev) > GMAC_MAX_UNICAST_ADDRESSES)
+ /* Switch to promiscuous mode if more than 16 addrs
+ are required */
+ value |= GMAC_FRAME_FILTER_PR;
+ else {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
+ reg++;
+ }
+ }
+
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= GMAC_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + GMAC_FRAME_FILTER);
+
+ DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
+ "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
+ readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
+
+ return;
+}
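The multicast path above hashes each address into a 64-bit table split across the HASH_HIGH/HASH_LOW registers. A minimal sketch of that mapping, using the same kernel helpers as dwmac1000_set_filter(); gmac_hash_one is an illustrative helper name, not part of the patch:

	#include <linux/bitrev.h>
	#include <linux/crc32.h>

	static void gmac_hash_one(const unsigned char *addr, u32 mc_filter[2])
	{
		/* The upper 6 bits of the bit-reversed CRC-32 select one of 64 bits. */
		int bit_nr = bitrev32(~crc32_le(~0, addr, 6)) >> 26;

		/* Bit 5 picks the HIGH/LOW register, bits 0-4 the bit within it. */
		mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
	}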
+
+static void dwmac1000_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time)
+{
+ unsigned int flow = 0;
+
+ DBG(KERN_DEBUG "GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_RFE;
+ }
+ if (fc & FLOW_TX) {
+ DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_TFE;
+ }
+
+ if (duplex) {
+ DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
+ flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ }
+
+ writel(flow, ioaddr + GMAC_FLOW_CTRL);
+ return;
+}
+
+static void dwmac1000_pmt(unsigned long ioaddr, unsigned long mode)
+{
+ unsigned int pmt = 0;
+
+ if (mode == WAKE_MAGIC) {
+ DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ } else if (mode == WAKE_UCAST) {
+ DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
+ pmt |= global_unicast;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+ return;
+}
+
+
+static void dwmac1000_irq_status(unsigned long ioaddr)
+{
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+
+ /* Not used events (e.g. MMC interrupts) are not handled. */
+ if ((intr_status & mmc_tx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_TX_INTR));
+ if (unlikely(intr_status & mmc_rx_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_INTR));
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
+ readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
+ if (unlikely(intr_status & pmt_irq)) {
+ DBG(KERN_DEBUG "GMAC: received Magic frame\n");
+ /* clear the PMT bits 5 and 6 by reading the PMT
+ * status register. */
+ readl(ioaddr + GMAC_PMT);
+ }
+
+ return;
+}
+
+struct stmmac_ops dwmac1000_ops = {
+ .core_init = dwmac1000_core_init,
+ .dump_regs = dwmac1000_dump_regs,
+ .host_irq_status = dwmac1000_irq_status,
+ .set_filter = dwmac1000_set_filter,
+ .flow_ctrl = dwmac1000_flow_ctrl,
+ .pmt = dwmac1000_pmt,
+ .set_umac_addr = dwmac1000_set_umac_addr,
+ .get_umac_addr = dwmac1000_get_umac_addr,
+};
+
+struct mac_device_info *dwmac1000_setup(unsigned long ioaddr)
+{
+ struct mac_device_info *mac;
+ u32 uid = readl(ioaddr + GMAC_VERSION);
+
+ pr_info("\tDWMAC1000 - user ID: 0x%x, Synopsys ID: 0x%x\n",
+ ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
+
+ mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ mac->mac = &dwmac1000_ops;
+ mac->desc = &dwmac1000_desc_ops;
+ mac->dma = &dwmac1000_dma_ops;
+
+ mac->pmt = PMT_SUPPORTED;
+ mac->link.port = GMAC_CONTROL_PS;
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed = GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+
+ return mac;
+}
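dwmac1000_setup() returns a mac_device_info whose mac/desc/dma tables the core driver dereferences instead of the old single ops pointer; the stmmac_main.c hunks later in this patch follow exactly this pattern. A condensed sketch of the call sites, taken from those hunks:

	/* priv->hw is the mac_device_info returned by dwmac1000_setup()
	 * or dwmac100_setup() in stmmac_mac_device_setup(). */
	priv->hw->mac->core_init(ioaddr);
	priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
	priv->hw->dma->start_tx(ioaddr);
	priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);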
diff --git a/drivers/net/stmmac/gmac.c b/drivers/net/stmmac/dwmac1000_dma.c
index 52586ee6895..39d436a2da6 100644
--- a/drivers/net/stmmac/gmac.c
+++ b/drivers/net/stmmac/dwmac1000_dma.c
@@ -3,6 +3,8 @@
DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
developing this code.
+ This contains the functions to handle the dma and descriptors.
+
Copyright (C) 2007-2009 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
@@ -24,41 +26,11 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
-#include <linux/crc32.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-
-#include "stmmac.h"
-#include "gmac.h"
-
-#undef GMAC_DEBUG
-/*#define GMAC_DEBUG*/
-#undef FRAME_FILTER_DEBUG
-/*#define FRAME_FILTER_DEBUG*/
-#ifdef GMAC_DEBUG
-#define DBG(fmt, args...) printk(fmt, ## args)
-#else
-#define DBG(fmt, args...) do { } while (0)
-#endif
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
-static void gmac_dump_regs(unsigned long ioaddr)
-{
- int i;
- pr_info("\t----------------------------------------------\n"
- "\t GMAC registers (base addr = 0x%8x)\n"
- "\t----------------------------------------------\n",
- (unsigned int)ioaddr);
-
- for (i = 0; i < 55; i++) {
- int offset = i * 4;
- pr_info("\tReg No. %d (offset 0x%x): 0x%08x\n", i,
- offset, readl(ioaddr + offset));
- }
- return;
-}
-
-static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
+static int dwmac1000_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx,
+ u32 dma_rx)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
/* DMA SW reset */
@@ -87,7 +59,7 @@ static int gmac_dma_init(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx)
}
/* Transmit FIFO flush operation */
-static void gmac_flush_tx_fifo(unsigned long ioaddr)
+static void dwmac1000_flush_tx_fifo(unsigned long ioaddr)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
@@ -95,7 +67,7 @@ static void gmac_flush_tx_fifo(unsigned long ioaddr)
do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
}
-static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
+static void dwmac1000_dma_operation_mode(unsigned long ioaddr, int txmode,
int rxmode)
{
u32 csr6 = readl(ioaddr + DMA_CONTROL);
@@ -148,13 +120,13 @@ static void gmac_dma_operation_mode(unsigned long ioaddr, int txmode,
}
/* Not yet implemented --- no RMON module */
-static void gmac_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
- unsigned long ioaddr)
+static void dwmac1000_dma_diagnostic_fr(void *data,
+ struct stmmac_extra_stats *x, unsigned long ioaddr)
{
return;
}
-static void gmac_dump_dma_regs(unsigned long ioaddr)
+static void dwmac1000_dump_dma_regs(unsigned long ioaddr)
{
int i;
pr_info(" DMA registers\n");
@@ -169,8 +141,9 @@ static void gmac_dump_dma_regs(unsigned long ioaddr)
return;
}
-static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p, unsigned long ioaddr)
+static int dwmac1000_get_tx_frame_status(void *data,
+ struct stmmac_extra_stats *x,
+ struct dma_desc *p, unsigned long ioaddr)
{
int ret = 0;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -185,7 +158,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.frame_flushed)) {
DBG(KERN_ERR "\tframe_flushed error\n");
x->tx_frame_flushed++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
if (unlikely(p->des01.etx.loss_carrier)) {
@@ -213,7 +186,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.underflow_error)) {
DBG(KERN_ERR "\tunderflow error\n");
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
x->tx_underflow++;
}
@@ -225,7 +198,7 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
if (unlikely(p->des01.etx.payload_error)) {
DBG(KERN_ERR "\tAddr/Payload csum error\n");
x->tx_payload_error++;
- gmac_flush_tx_fifo(ioaddr);
+ dwmac1000_flush_tx_fifo(ioaddr);
}
ret = -1;
@@ -245,19 +218,19 @@ static int gmac_get_tx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static int gmac_get_tx_len(struct dma_desc *p)
+static int dwmac1000_get_tx_len(struct dma_desc *p)
{
return p->des01.etx.buffer1_size;
}
-static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
+static int dwmac1000_coe_rdes0(int ipc_err, int type, int payload_err)
{
int ret = good_frame;
u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
/* bits 5 7 0 | Frame status
* ----------------------------------------------------------
- * 0 0 0 | IEEE 802.3 Type frame (lenght < 1536 octects)
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
* 1 0 0 | IPv4/6 No CSUM errorS.
* 1 0 1 | IPv4/6 CSUM PAYLOAD error
* 1 1 0 | IPv4/6 CSUM IP HR error
@@ -293,8 +266,8 @@ static int gmac_coe_rdes0(int ipc_err, int type, int payload_err)
return ret;
}
-static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
- struct dma_desc *p)
+static int dwmac1000_get_rx_frame_status(void *data,
+ struct stmmac_extra_stats *x, struct dma_desc *p)
{
int ret = good_frame;
struct net_device_stats *stats = (struct net_device_stats *)data;
@@ -339,7 +312,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
* It doesn't match with the information reported into the databook.
* At any rate, we need to understand if the CSUM hw computation is ok
* and report this info to the upper layers. */
- ret = gmac_coe_rdes0(p->des01.erx.ipc_csum_error,
+ ret = dwmac1000_coe_rdes0(p->des01.erx.ipc_csum_error,
p->des01.erx.frame_type, p->des01.erx.payload_csum_error);
if (unlikely(p->des01.erx.dribbling)) {
@@ -358,7 +331,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
}
if (unlikely(p->des01.erx.length_error)) {
DBG(KERN_ERR "GMAC RX: length_error error\n");
- x->rx_lenght++;
+ x->rx_length++;
ret = discard_frame;
}
#ifdef STMMAC_VLAN_TAG_USED
@@ -370,181 +343,7 @@ static int gmac_get_rx_frame_status(void *data, struct stmmac_extra_stats *x,
return ret;
}
-static void gmac_irq_status(unsigned long ioaddr)
-{
- u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
-
- /* Not used events (e.g. MMC interrupts) are not handled. */
- if ((intr_status & mmc_tx_irq))
- DBG(KERN_DEBUG "GMAC: MMC tx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_TX_INTR));
- if (unlikely(intr_status & mmc_rx_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx interrupt: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_INTR));
- if (unlikely(intr_status & mmc_rx_csum_offload_irq))
- DBG(KERN_DEBUG "GMAC: MMC rx csum offload: 0x%08x\n",
- readl(ioaddr + GMAC_MMC_RX_CSUM_OFFLOAD));
- if (unlikely(intr_status & pmt_irq)) {
- DBG(KERN_DEBUG "GMAC: received Magic frame\n");
- /* clear the PMT bits 5 and 6 by reading the PMT
- * status register. */
- readl(ioaddr + GMAC_PMT);
- }
-
- return;
-}
-
-static void gmac_core_init(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + GMAC_CONTROL);
- value |= GMAC_CORE_INIT;
- writel(value, ioaddr + GMAC_CONTROL);
-
- /* STBus Bridge Configuration */
- /*writel(0xc5608, ioaddr + 0x00007000);*/
-
- /* Freeze MMC counters */
- writel(0x8, ioaddr + GMAC_MMC_CTRL);
- /* Mask GMAC interrupts */
- writel(0x207, ioaddr + GMAC_INT_MASK);
-
-#ifdef STMMAC_VLAN_TAG_USED
- /* Tag detection without filtering */
- writel(0x0, ioaddr + GMAC_VLAN_TAG);
-#endif
- return;
-}
-
-static void gmac_set_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_get_umac_addr(unsigned long ioaddr, unsigned char *addr,
- unsigned int reg_n)
-{
- stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
- GMAC_ADDR_LOW(reg_n));
-}
-
-static void gmac_set_filter(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- unsigned int value = 0;
-
- DBG(KERN_INFO "%s: # mcasts %d, # unicast %d\n",
- __func__, dev->mc_count, dev->uc_count);
-
- if (dev->flags & IFF_PROMISC)
- value = GMAC_FRAME_FILTER_PR;
- else if ((dev->mc_count > HASH_TABLE_SIZE)
- || (dev->flags & IFF_ALLMULTI)) {
- value = GMAC_FRAME_FILTER_PM; /* pass all multi */
- writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
- writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
- } else if (dev->mc_count > 0) {
- int i;
- u32 mc_filter[2];
- struct dev_mc_list *mclist;
-
- /* Hash filter for multicast */
- value = GMAC_FRAME_FILTER_HMC;
-
- memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list;
- mclist && i < dev->mc_count; i++, mclist = mclist->next) {
- /* The upper 6 bits of the calculated CRC are used to
- index the contens of the hash table */
- int bit_nr =
- bitrev32(~crc32_le(~0, mclist->dmi_addr, 6)) >> 26;
- /* The most significant bit determines the register to
- * use (H/L) while the other 5 bits determine the bit
- * within the register. */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
- writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
- }
-
- /* Handle multiple unicast addresses (perfect filtering)*/
- if (dev->uc_count > GMAC_MAX_UNICAST_ADDRESSES)
- /* Switch to promiscuous mode is more than 16 addrs
- are required */
- value |= GMAC_FRAME_FILTER_PR;
- else {
- int i;
- struct dev_addr_list *uc_ptr = dev->uc_list;
-
- for (i = 0; i < dev->uc_count; i++) {
- gmac_set_umac_addr(ioaddr, uc_ptr->da_addr,
- i + 1);
-
- DBG(KERN_INFO "\t%d "
- "- Unicast addr %02x:%02x:%02x:%02x:%02x:"
- "%02x\n", i + 1,
- uc_ptr->da_addr[0], uc_ptr->da_addr[1],
- uc_ptr->da_addr[2], uc_ptr->da_addr[3],
- uc_ptr->da_addr[4], uc_ptr->da_addr[5]);
- uc_ptr = uc_ptr->next;
- }
- }
-
-#ifdef FRAME_FILTER_DEBUG
- /* Enable Receive all mode (to debug filtering_fail errors) */
- value |= GMAC_FRAME_FILTER_RA;
-#endif
- writel(value, ioaddr + GMAC_FRAME_FILTER);
-
- DBG(KERN_INFO "\tFrame Filter reg: 0x%08x\n\tHash regs: "
- "HI 0x%08x, LO 0x%08x\n", readl(ioaddr + GMAC_FRAME_FILTER),
- readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
-
- return;
-}
-
-static void gmac_flow_ctrl(unsigned long ioaddr, unsigned int duplex,
- unsigned int fc, unsigned int pause_time)
-{
- unsigned int flow = 0;
-
- DBG(KERN_DEBUG "GMAC Flow-Control:\n");
- if (fc & FLOW_RX) {
- DBG(KERN_DEBUG "\tReceive Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_RFE;
- }
- if (fc & FLOW_TX) {
- DBG(KERN_DEBUG "\tTransmit Flow-Control ON\n");
- flow |= GMAC_FLOW_CTRL_TFE;
- }
-
- if (duplex) {
- DBG(KERN_DEBUG "\tduplex mode: pause time: %d\n", pause_time);
- flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
- }
-
- writel(flow, ioaddr + GMAC_FLOW_CTRL);
- return;
-}
-
-static void gmac_pmt(unsigned long ioaddr, unsigned long mode)
-{
- unsigned int pmt = 0;
-
- if (mode == WAKE_MAGIC) {
- DBG(KERN_DEBUG "GMAC: WOL Magic frame\n");
- pmt |= power_down | magic_pkt_en;
- } else if (mode == WAKE_UCAST) {
- DBG(KERN_DEBUG "GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
- }
-
- writel(pmt, ioaddr + GMAC_PMT);
- return;
-}
-
-static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
+static void dwmac1000_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
int disable_rx_ic)
{
int i;
@@ -562,7 +361,7 @@ static void gmac_init_rx_desc(struct dma_desc *p, unsigned int ring_size,
return;
}
-static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
+static void dwmac1000_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
{
int i;
@@ -576,32 +375,32 @@ static void gmac_init_tx_desc(struct dma_desc *p, unsigned int ring_size)
return;
}
-static int gmac_get_tx_owner(struct dma_desc *p)
+static int dwmac1000_get_tx_owner(struct dma_desc *p)
{
return p->des01.etx.own;
}
-static int gmac_get_rx_owner(struct dma_desc *p)
+static int dwmac1000_get_rx_owner(struct dma_desc *p)
{
return p->des01.erx.own;
}
-static void gmac_set_tx_owner(struct dma_desc *p)
+static void dwmac1000_set_tx_owner(struct dma_desc *p)
{
p->des01.etx.own = 1;
}
-static void gmac_set_rx_owner(struct dma_desc *p)
+static void dwmac1000_set_rx_owner(struct dma_desc *p)
{
p->des01.erx.own = 1;
}
-static int gmac_get_tx_ls(struct dma_desc *p)
+static int dwmac1000_get_tx_ls(struct dma_desc *p)
{
return p->des01.etx.last_segment;
}
-static void gmac_release_tx_desc(struct dma_desc *p)
+static void dwmac1000_release_tx_desc(struct dma_desc *p)
{
int ter = p->des01.etx.end_ring;
@@ -611,7 +410,7 @@ static void gmac_release_tx_desc(struct dma_desc *p)
return;
}
-static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+static void dwmac1000_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
int csum_flag)
{
p->des01.etx.first_segment = is_fs;
@@ -625,69 +424,51 @@ static void gmac_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
p->des01.etx.checksum_insertion = cic_full;
}
-static void gmac_clear_tx_ic(struct dma_desc *p)
+static void dwmac1000_clear_tx_ic(struct dma_desc *p)
{
p->des01.etx.interrupt = 0;
}
-static void gmac_close_tx_desc(struct dma_desc *p)
+static void dwmac1000_close_tx_desc(struct dma_desc *p)
{
p->des01.etx.last_segment = 1;
p->des01.etx.interrupt = 1;
}
-static int gmac_get_rx_frame_len(struct dma_desc *p)
+static int dwmac1000_get_rx_frame_len(struct dma_desc *p)
{
return p->des01.erx.frame_length;
}
-struct stmmac_ops gmac_driver = {
- .core_init = gmac_core_init,
- .dump_mac_regs = gmac_dump_regs,
- .dma_init = gmac_dma_init,
- .dump_dma_regs = gmac_dump_dma_regs,
- .dma_mode = gmac_dma_operation_mode,
- .dma_diagnostic_fr = gmac_dma_diagnostic_fr,
- .tx_status = gmac_get_tx_frame_status,
- .rx_status = gmac_get_rx_frame_status,
- .get_tx_len = gmac_get_tx_len,
- .set_filter = gmac_set_filter,
- .flow_ctrl = gmac_flow_ctrl,
- .pmt = gmac_pmt,
- .init_rx_desc = gmac_init_rx_desc,
- .init_tx_desc = gmac_init_tx_desc,
- .get_tx_owner = gmac_get_tx_owner,
- .get_rx_owner = gmac_get_rx_owner,
- .release_tx_desc = gmac_release_tx_desc,
- .prepare_tx_desc = gmac_prepare_tx_desc,
- .clear_tx_ic = gmac_clear_tx_ic,
- .close_tx_desc = gmac_close_tx_desc,
- .get_tx_ls = gmac_get_tx_ls,
- .set_tx_owner = gmac_set_tx_owner,
- .set_rx_owner = gmac_set_rx_owner,
- .get_rx_frame_len = gmac_get_rx_frame_len,
- .host_irq_status = gmac_irq_status,
- .set_umac_addr = gmac_set_umac_addr,
- .get_umac_addr = gmac_get_umac_addr,
+struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .init = dwmac1000_dma_init,
+ .dump_regs = dwmac1000_dump_dma_regs,
+ .dma_mode = dwmac1000_dma_operation_mode,
+ .dma_diagnostic_fr = dwmac1000_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
};
-struct mac_device_info *gmac_setup(unsigned long ioaddr)
-{
- struct mac_device_info *mac;
- u32 uid = readl(ioaddr + GMAC_VERSION);
-
- pr_info("\tGMAC - user ID: 0x%x, Synopsys ID: 0x%x\n",
- ((uid & 0x0000ff00) >> 8), (uid & 0x000000ff));
-
- mac = kzalloc(sizeof(const struct mac_device_info), GFP_KERNEL);
-
- mac->ops = &gmac_driver;
- mac->hw.pmt = PMT_SUPPORTED;
- mac->hw.link.port = GMAC_CONTROL_PS;
- mac->hw.link.duplex = GMAC_CONTROL_DM;
- mac->hw.link.speed = GMAC_CONTROL_FES;
- mac->hw.mii.addr = GMAC_MII_ADDR;
- mac->hw.mii.data = GMAC_MII_DATA;
-
- return mac;
-}
+struct stmmac_desc_ops dwmac1000_desc_ops = {
+ .tx_status = dwmac1000_get_tx_frame_status,
+ .rx_status = dwmac1000_get_rx_frame_status,
+ .get_tx_len = dwmac1000_get_tx_len,
+ .init_rx_desc = dwmac1000_init_rx_desc,
+ .init_tx_desc = dwmac1000_init_tx_desc,
+ .get_tx_owner = dwmac1000_get_tx_owner,
+ .get_rx_owner = dwmac1000_get_rx_owner,
+ .release_tx_desc = dwmac1000_release_tx_desc,
+ .prepare_tx_desc = dwmac1000_prepare_tx_desc,
+ .clear_tx_ic = dwmac1000_clear_tx_ic,
+ .close_tx_desc = dwmac1000_close_tx_desc,
+ .get_tx_ls = dwmac1000_get_tx_ls,
+ .set_tx_owner = dwmac1000_set_tx_owner,
+ .set_rx_owner = dwmac1000_set_rx_owner,
+ .get_rx_frame_len = dwmac1000_get_rx_frame_len,
+};
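The old monolithic stmmac_ops table is split here: the pure DMA entry points go into stmmac_dma_ops (several of them now shared dwmac_* helpers from dwmac_lib.c), the descriptor handling goes into stmmac_desc_ops, and the MAC-level callbacks moved to dwmac1000_ops in dwmac1000_core.c. A hypothetical sketch of the stmmac_dma_ops declaration, with signatures inferred from the functions assigned above; the real definition lives in common.h, which this diff does not show, and may differ:

	struct stmmac_dma_ops {
		int (*init)(unsigned long ioaddr, int pbl, u32 dma_tx, u32 dma_rx);
		void (*dump_regs)(unsigned long ioaddr);
		void (*dma_mode)(unsigned long ioaddr, int txmode, int rxmode);
		void (*dma_diagnostic_fr)(void *data, struct stmmac_extra_stats *x,
					  unsigned long ioaddr);
		void (*enable_dma_transmission)(unsigned long ioaddr);
		void (*enable_dma_irq)(unsigned long ioaddr);
		void (*disable_dma_irq)(unsigned long ioaddr);
		void (*start_tx)(unsigned long ioaddr);
		void (*stop_tx)(unsigned long ioaddr);
		void (*start_rx)(unsigned long ioaddr);
		void (*stop_rx)(unsigned long ioaddr);
		int (*dma_interrupt)(unsigned long ioaddr,
				     struct stmmac_extra_stats *x);
	};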
diff --git a/drivers/net/stmmac/dwmac_dma.h b/drivers/net/stmmac/dwmac_dma.h
new file mode 100644
index 00000000000..de848d9f606
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_dma.h
@@ -0,0 +1,107 @@
+/*******************************************************************************
+ DWMAC DMA Header file.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+
+extern void dwmac_enable_dma_transmission(unsigned long ioaddr);
+extern void dwmac_enable_dma_irq(unsigned long ioaddr);
+extern void dwmac_disable_dma_irq(unsigned long ioaddr);
+extern void dwmac_dma_start_tx(unsigned long ioaddr);
+extern void dwmac_dma_stop_tx(unsigned long ioaddr);
+extern void dwmac_dma_start_rx(unsigned long ioaddr);
+extern void dwmac_dma_stop_rx(unsigned long ioaddr);
+extern int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x);
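For reference, the interrupt sources unmasked by the default mask defined above work out to:

	DMA_INTR_NORMAL   = NIE | RIE | TIE = 0x00010041
	DMA_INTR_ABNORMAL = AIE | FBE | UNE = 0x0000a020
	DMA_INTR_DEFAULT_MASK               = 0x0001a061

which is exactly the value dwmac_enable_dma_irq() in dwmac_lib.c programs into DMA_INTR_ENA:

	writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);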
diff --git a/drivers/net/stmmac/dwmac_lib.c b/drivers/net/stmmac/dwmac_lib.c
new file mode 100644
index 00000000000..d4adb1eaa44
--- /dev/null
+++ b/drivers/net/stmmac/dwmac_lib.c
@@ -0,0 +1,263 @@
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ This program is free software; you can redistribute it and/or modify it
+ under the terms and conditions of the GNU General Public License,
+ version 2, as published by the Free Software Foundation.
+
+ This program is distributed in the hope it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+ The full GNU General Public License is included in this distribution in
+ the file called "COPYING".
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include "common.h"
+#include "dwmac_dma.h"
+
+#undef DWMAC_DMA_DEBUG
+#ifdef DWMAC_DMA_DEBUG
+#define DBG(klevel, fmt, args...) printk(KERN_##klevel fmt, ## args)
+#else
+#define DBG(klevel, fmt, args...) do { } while (0)
+#endif
+
+/* CSR1 enables the transmit DMA to check for new descriptor */
+void dwmac_enable_dma_transmission(unsigned long ioaddr)
+{
+ writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+}
+
+void dwmac_enable_dma_irq(unsigned long ioaddr)
+{
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_disable_dma_irq(unsigned long ioaddr)
+{
+ writel(0, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_dma_start_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_stop_tx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+ return;
+}
+
+void dwmac_dma_start_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+void dwmac_dma_stop_rx(unsigned long ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+
+ return;
+}
+
+#ifdef DWMAC_DMA_DEBUG
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- TX (Running):Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_info("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_info("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_info("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_info("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_info("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_info("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_info("- RX (Running):Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_info("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_info("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_info("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_info("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_info("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+ return;
+}
+#endif
+
+int dwmac_dma_interrupt(unsigned long ioaddr,
+ struct stmmac_extra_stats *x)
+{
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+ DBG(INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+#ifdef DWMAC_DMA_DEBUG
+ /* It displays the DMA process states (CSR5 register) */
+ show_tx_process_state(intr_status);
+ show_rx_process_state(intr_status);
+#endif
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ DBG(INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ DBG(INFO, "transmit underflow\n");
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT)) {
+ DBG(INFO, "transmit jabber\n");
+ x->tx_jabber_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_OVF)) {
+ DBG(INFO, "recv overflow\n");
+ x->rx_overflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RU)) {
+ DBG(INFO, "receive buffer unavailable\n");
+ x->rx_buf_unav_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RPS)) {
+ DBG(INFO, "receive process stopped\n");
+ x->rx_process_stopped_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_RWT)) {
+ DBG(INFO, "receive watchdog\n");
+ x->rx_watchdog_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ETI)) {
+ DBG(INFO, "transmit early interrupt\n");
+ x->tx_early_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ DBG(INFO, "transmit process stopped\n");
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ DBG(INFO, "fatal bus error\n");
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (intr_status & DMA_STATUS_NIS) {
+ x->normal_irq_n++;
+ if (likely((intr_status & DMA_STATUS_RI) ||
+ (intr_status & (DMA_STATUS_TI))))
+ ret = handle_tx_rx;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_info("%s: unexpected status %08x\n", __func__, intr_status);
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ DBG(INFO, "\n\n");
+ return ret;
+}
+
+
+void stmmac_set_mac_addr(unsigned long ioaddr, u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ writel(data, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+
+ return;
+}
+
+void stmmac_get_mac_addr(unsigned long ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+
+ return;
+}
+
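The two address helpers above pack the 6-byte MAC into a register pair little-endian style. A worked example of the layout they produce (illustrative, not part of the patch), for the address 00:11:22:33:44:55:

	high word = 0x00005544    /* (addr[5] << 8) | addr[4] */
	low  word = 0x33221100    /* addr[3..0], with addr[0] in the LSB */

	/* Typical call, as dwmac1000_set_umac_addr() does for entry reg_n: */
	stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n), GMAC_ADDR_LOW(reg_n));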
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 6d2eae3040e..ba35e6943cf 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,8 @@
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#define DRV_MODULE_VERSION "Oct_09"
+#define DRV_MODULE_VERSION "Jan_2010"
+#include <linux/stmmac.h>
#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
#define STMMAC_VLAN_TAG_USED
@@ -57,7 +58,7 @@ struct stmmac_priv {
int rx_csum;
unsigned int dma_buf_sz;
struct device *device;
- struct mac_device_info *mac_type;
+ struct mac_device_info *hw;
struct stmmac_extra_stats xstats;
struct napi_struct napi;
@@ -69,6 +70,7 @@ struct stmmac_priv {
int phy_mask;
int (*phy_reset) (void *priv);
void (*fix_mac_speed) (void *priv, unsigned int speed);
+ void (*bus_setup)(unsigned long ioaddr);
void *bsp_priv;
int phy_irq;
@@ -93,6 +95,28 @@ struct stmmac_priv {
#endif
};
+#ifdef CONFIG_STM_DRIVERS
+#include <linux/stm/pad.h>
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
+
+ /* Pad routing setup */
+ if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
+ dev_name(&pdev->dev)))) {
+ printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
+ ret = -ENODEV;
+ }
+ return ret;
+}
+#else
+static inline int stmmac_claim_resource(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
extern int stmmac_mdio_unregister(struct net_device *ndev);
extern int stmmac_mdio_register(struct net_device *ndev);
extern void stmmac_set_ethtool_ops(struct net_device *netdev);
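The new bus_setup hook added to stmmac_priv above is filled in from platform data in stmmac_dvr_probe() (see the stmmac_main.c hunk further down) and invoked from stmmac_open() just before the MAC core is initialised. A hypothetical board-support sketch of how a platform could wire it up; my_bus_setup and my_fix_mac_speed are illustrative names, and only plat_stmmacenet_data fields visible in this diff are used:

	static void my_bus_setup(unsigned long ioaddr)
	{
		/* SoC-specific bridge/bus register setup would go here. */
	}

	static void my_fix_mac_speed(void *bsp_priv, unsigned int speed)
	{
		/* Adjust SoC glue (e.g. retiming) for the negotiated speed. */
	}

	static struct plat_stmmacenet_data my_stmmac_pdata = {
		.bus_id        = 0,
		.pbl           = 32,
		.has_gmac      = 1,
		.fix_mac_speed = my_fix_mac_speed,
		.bus_setup     = my_bus_setup,
	};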
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 694ebe6a075..c021eaa3ca6 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -28,6 +28,7 @@
#include <linux/phy.h>
#include "stmmac.h"
+#include "dwmac_dma.h"
#define REG_SPACE_SIZE 0x1054
#define MAC100_ETHTOOL_NAME "st_mac100"
@@ -61,7 +62,7 @@ static const struct stmmac_stats stmmac_gstrings_stats[] = {
STMMAC_STAT(rx_toolong),
STMMAC_STAT(rx_collision),
STMMAC_STAT(rx_crc),
- STMMAC_STAT(rx_lenght),
+ STMMAC_STAT(rx_length),
STMMAC_STAT(rx_mii),
STMMAC_STAT(rx_multicast),
STMMAC_STAT(rx_gmac_overflow),
@@ -268,8 +269,8 @@ stmmac_set_pauseparam(struct net_device *netdev,
}
} else {
unsigned long ioaddr = netdev->base_addr;
- priv->mac_type->ops->flow_ctrl(ioaddr, phy->duplex,
- priv->flow_ctrl, priv->pause);
+ priv->hw->mac->flow_ctrl(ioaddr, phy->duplex,
+ priv->flow_ctrl, priv->pause);
}
spin_unlock(&priv->lock);
return ret;
@@ -283,8 +284,8 @@ static void stmmac_get_ethtool_stats(struct net_device *dev,
int i;
/* Update HW stats if supported */
- priv->mac_type->ops->dma_diagnostic_fr(&dev->stats, &priv->xstats,
- ioaddr);
+ priv->hw->dma->dma_diagnostic_fr(&dev->stats, (void *) &priv->xstats,
+ ioaddr);
for (i = 0; i < STMMAC_STATS_LEN; i++) {
char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 508fba8fa07..a6733612d64 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -32,7 +32,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
@@ -45,7 +44,6 @@
#include <linux/phy.h>
#include <linux/if_vlan.h>
#include <linux/dma-mapping.h>
-#include <linux/stm/soc.h>
#include "stmmac.h"
#define STMMAC_RESOURCE_NAME "stmmaceth"
@@ -226,41 +224,38 @@ static void stmmac_adjust_link(struct net_device *dev)
if (phydev->duplex != priv->oldduplex) {
new_state = 1;
if (!(phydev->duplex))
- ctrl &= ~priv->mac_type->hw.link.duplex;
+ ctrl &= ~priv->hw->link.duplex;
else
- ctrl |= priv->mac_type->hw.link.duplex;
+ ctrl |= priv->hw->link.duplex;
priv->oldduplex = phydev->duplex;
}
/* Flow Control operation */
if (phydev->pause)
- priv->mac_type->ops->flow_ctrl(ioaddr, phydev->duplex,
- fc, pause_time);
+ priv->hw->mac->flow_ctrl(ioaddr, phydev->duplex,
+ fc, pause_time);
if (phydev->speed != priv->speed) {
new_state = 1;
switch (phydev->speed) {
case 1000:
if (likely(priv->is_gmac))
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
break;
case 100:
case 10:
if (priv->is_gmac) {
- ctrl |= priv->mac_type->hw.link.port;
+ ctrl |= priv->hw->link.port;
if (phydev->speed == SPEED_100) {
- ctrl |=
- priv->mac_type->hw.link.
- speed;
+ ctrl |= priv->hw->link.speed;
} else {
- ctrl &=
- ~(priv->mac_type->hw.
- link.speed);
+ ctrl &= ~(priv->hw->link.speed);
}
} else {
- ctrl &= ~priv->mac_type->hw.link.port;
+ ctrl &= ~priv->hw->link.port;
}
- priv->fix_mac_speed(priv->bsp_priv,
- phydev->speed);
+ if (likely(priv->fix_mac_speed))
+ priv->fix_mac_speed(priv->bsp_priv,
+ phydev->speed);
break;
default:
if (netif_msg_link(priv))
@@ -305,8 +300,8 @@ static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
struct phy_device *phydev;
- char phy_id[BUS_ID_SIZE]; /* PHY to connect */
- char bus_id[BUS_ID_SIZE];
+ char phy_id[MII_BUS_ID_SIZE + 3];
+ char bus_id[MII_BUS_ID_SIZE];
priv->oldlink = 0;
priv->speed = 0;
@@ -318,7 +313,8 @@ static int stmmac_init_phy(struct net_device *dev)
}
snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id);
- snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, bus_id, priv->phy_addr);
+ snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->phy_addr);
pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
@@ -508,8 +504,8 @@ static void init_dma_desc_rings(struct net_device *dev)
priv->cur_tx = 0;
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, txsize);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, rxsize, dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, txsize);
if (netif_msg_hw(priv)) {
pr_info("RX descriptor ring:\n");
@@ -544,8 +540,8 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + i;
if (p->des2)
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
- DMA_TO_DEVICE);
+ priv->hw->desc->get_tx_len(p),
+ DMA_TO_DEVICE);
dev_kfree_skb_any(priv->tx_skbuff[i]);
priv->tx_skbuff[i] = NULL;
}
@@ -575,50 +571,6 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
}
/**
- * stmmac_dma_start_tx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA tx process.
- */
-static void stmmac_dma_start_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-static void stmmac_dma_stop_tx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_ST;
- writel(value, ioaddr + DMA_CONTROL);
- return;
-}
-
-/**
- * stmmac_dma_start_rx
- * @ioaddr: device I/O address
- * Description: this function starts the DMA rx process.
- */
-static void stmmac_dma_start_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value |= DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-static void stmmac_dma_stop_rx(unsigned long ioaddr)
-{
- u32 value = readl(ioaddr + DMA_CONTROL);
- value &= ~DMA_CONTROL_SR;
- writel(value, ioaddr + DMA_CONTROL);
-
- return;
-}
-
-/**
* stmmac_dma_operation_mode - HW DMA operation mode
* @priv : pointer to the private device structure.
* Description: it sets the DMA operation mode: tx/rx DMA thresholds
@@ -629,18 +581,18 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
if (!priv->is_gmac) {
/* MAC 10/100 */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc, 0);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc, 0);
priv->tx_coe = NO_HW_CSUM;
} else {
if ((priv->dev->mtu <= ETH_DATA_LEN) && (tx_coe)) {
- priv->mac_type->ops->dma_mode(priv->dev->base_addr,
- SF_DMA_MODE, SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr,
+ SF_DMA_MODE, SF_DMA_MODE);
tc = SF_DMA_MODE;
priv->tx_coe = HW_CSUM;
} else {
/* Checksum computation is performed in software. */
- priv->mac_type->ops->dma_mode(priv->dev->base_addr, tc,
- SF_DMA_MODE);
+ priv->hw->dma->dma_mode(priv->dev->base_addr, tc,
+ SF_DMA_MODE);
priv->tx_coe = NO_HW_CSUM;
}
}
@@ -649,88 +601,6 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
return;
}
-#ifdef STMMAC_DEBUG
-/**
- * show_tx_process_state
- * @status: tx descriptor status field
- * Description: it shows the Transmit Process State for CSR5[22:20]
- */
-static void show_tx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- TX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- TX (Running):Fetching the Tx desc\n");
- break;
- case 2:
- pr_info("- TX (Running): Waiting for end of tx\n");
- break;
- case 3:
- pr_info("- TX (Running): Reading the data "
- "and queuing the data into the Tx buf\n");
- break;
- case 6:
- pr_info("- TX (Suspended): Tx Buff Underflow "
- "or an unavailable Transmit descriptor\n");
- break;
- case 7:
- pr_info("- TX (Running): Closing Tx descriptor\n");
- break;
- default:
- break;
- }
- return;
-}
-
-/**
- * show_rx_process_state
- * @status: rx descriptor status field
- * Description: it shows the Receive Process State for CSR5[19:17]
- */
-static void show_rx_process_state(unsigned int status)
-{
- unsigned int state;
- state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
-
- switch (state) {
- case 0:
- pr_info("- RX (Stopped): Reset or Stop command\n");
- break;
- case 1:
- pr_info("- RX (Running): Fetching the Rx desc\n");
- break;
- case 2:
- pr_info("- RX (Running):Checking for end of pkt\n");
- break;
- case 3:
- pr_info("- RX (Running): Waiting for Rx pkt\n");
- break;
- case 4:
- pr_info("- RX (Suspended): Unavailable Rx buf\n");
- break;
- case 5:
- pr_info("- RX (Running): Closing Rx descriptor\n");
- break;
- case 6:
- pr_info("- RX(Running): Flushing the current frame"
- " from the Rx buf\n");
- break;
- case 7:
- pr_info("- RX (Running): Queuing the Rx frame"
- " from the Rx buf into memory\n");
- break;
- default:
- break;
- }
- return;
-}
-#endif
-
/**
* stmmac_tx:
* @priv: private driver structure
@@ -748,16 +618,16 @@ static void stmmac_tx(struct stmmac_priv *priv)
struct dma_desc *p = priv->dma_tx + entry;
/* Check if the descriptor is owned by the DMA. */
- if (priv->mac_type->ops->get_tx_owner(p))
+ if (priv->hw->desc->get_tx_owner(p))
break;
/* Verify tx error by looking at the last segment */
- last = priv->mac_type->ops->get_tx_ls(p);
+ last = priv->hw->desc->get_tx_ls(p);
if (likely(last)) {
int tx_error =
- priv->mac_type->ops->tx_status(&priv->dev->stats,
- &priv->xstats,
- p, ioaddr);
+ priv->hw->desc->tx_status(&priv->dev->stats,
+ &priv->xstats, p,
+ ioaddr);
if (likely(tx_error == 0)) {
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
@@ -769,7 +639,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
if (likely(p->des2))
dma_unmap_single(priv->device, p->des2,
- priv->mac_type->ops->get_tx_len(p),
+ priv->hw->desc->get_tx_len(p),
DMA_TO_DEVICE);
if (unlikely(p->des3))
p->des3 = 0;
@@ -790,7 +660,7 @@ static void stmmac_tx(struct stmmac_priv *priv)
priv->tx_skbuff[entry] = NULL;
}
- priv->mac_type->ops->release_tx_desc(p);
+ priv->hw->desc->release_tx_desc(p);
entry = (++priv->dirty_tx) % txsize;
}
@@ -814,7 +684,7 @@ static inline void stmmac_enable_irq(struct stmmac_priv *priv)
priv->tm->timer_start(tmrate);
else
#endif
- writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->enable_dma_irq(priv->dev->base_addr);
}
static inline void stmmac_disable_irq(struct stmmac_priv *priv)
@@ -824,7 +694,7 @@ static inline void stmmac_disable_irq(struct stmmac_priv *priv)
priv->tm->timer_stop();
else
#endif
- writel(0, priv->dev->base_addr + DMA_INTR_ENA);
+ priv->hw->dma->disable_dma_irq(priv->dev->base_addr);
}
static int stmmac_has_work(struct stmmac_priv *priv)
@@ -832,7 +702,7 @@ static int stmmac_has_work(struct stmmac_priv *priv)
unsigned int has_work = 0;
int rxret, tx_work = 0;
- rxret = priv->mac_type->ops->get_rx_owner(priv->dma_rx +
+ rxret = priv->hw->desc->get_rx_owner(priv->dma_rx +
(priv->cur_rx % priv->dma_rx_size));
if (priv->dirty_tx != priv->cur_tx)
@@ -883,12 +753,12 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
{
netif_stop_queue(priv->dev);
- stmmac_dma_stop_tx(priv->dev->base_addr);
+ priv->hw->dma->stop_tx(priv->dev->base_addr);
dma_free_tx_skbufs(priv);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
priv->dirty_tx = 0;
priv->cur_tx = 0;
- stmmac_dma_start_tx(priv->dev->base_addr);
+ priv->hw->dma->start_tx(priv->dev->base_addr);
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
@@ -896,95 +766,27 @@ static void stmmac_tx_err(struct stmmac_priv *priv)
return;
}
-/**
- * stmmac_dma_interrupt - Interrupt handler for the driver
- * @dev: net device structure
- * Description: Interrupt handler for the driver (DMA).
- */
-static void stmmac_dma_interrupt(struct net_device *dev)
-{
- unsigned long ioaddr = dev->base_addr;
- struct stmmac_priv *priv = netdev_priv(dev);
- /* read the status register (CSR5) */
- u32 intr_status = readl(ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "%s: [CSR5: 0x%08x]\n", __func__, intr_status);
-#ifdef STMMAC_DEBUG
- /* It displays the DMA transmit process state (CSR5 register) */
- if (netif_msg_tx_done(priv))
- show_tx_process_state(intr_status);
- if (netif_msg_rx_status(priv))
- show_rx_process_state(intr_status);
-#endif
- /* ABNORMAL interrupts */
- if (unlikely(intr_status & DMA_STATUS_AIS)) {
- DBG(intr, INFO, "CSR5[15] DMA ABNORMAL IRQ: ");
- if (unlikely(intr_status & DMA_STATUS_UNF)) {
- DBG(intr, INFO, "transmit underflow\n");
- if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
- /* Try to bump up the threshold */
- tc += 64;
- priv->mac_type->ops->dma_mode(ioaddr, tc,
- SF_DMA_MODE);
- priv->xstats.threshold = tc;
- }
- stmmac_tx_err(priv);
- priv->xstats.tx_undeflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TJT)) {
- DBG(intr, INFO, "transmit jabber\n");
- priv->xstats.tx_jabber_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_OVF)) {
- DBG(intr, INFO, "recv overflow\n");
- priv->xstats.rx_overflow_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RU)) {
- DBG(intr, INFO, "receive buffer unavailable\n");
- priv->xstats.rx_buf_unav_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RPS)) {
- DBG(intr, INFO, "receive process stopped\n");
- priv->xstats.rx_process_stopped_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_RWT)) {
- DBG(intr, INFO, "receive watchdog\n");
- priv->xstats.rx_watchdog_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_ETI)) {
- DBG(intr, INFO, "transmit early interrupt\n");
- priv->xstats.tx_early_irq++;
- }
- if (unlikely(intr_status & DMA_STATUS_TPS)) {
- DBG(intr, INFO, "transmit process stopped\n");
- priv->xstats.tx_process_stopped_irq++;
- stmmac_tx_err(priv);
- }
- if (unlikely(intr_status & DMA_STATUS_FBI)) {
- DBG(intr, INFO, "fatal bus error\n");
- priv->xstats.fatal_bus_error_irq++;
- stmmac_tx_err(priv);
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ unsigned long ioaddr = priv->dev->base_addr;
+ int status;
+
+ status = priv->hw->dma->dma_interrupt(priv->dev->base_addr,
+ &priv->xstats);
+ if (likely(status == handle_tx_rx))
+ _stmmac_schedule(priv);
+
+ else if (unlikely(status == tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
+ tc += 64;
+ priv->hw->dma->dma_mode(ioaddr, tc, SF_DMA_MODE);
+ priv->xstats.threshold = tc;
}
- }
-
- /* TX/RX NORMAL interrupts */
- if (intr_status & DMA_STATUS_NIS) {
- priv->xstats.normal_irq_n++;
- if (likely((intr_status & DMA_STATUS_RI) ||
- (intr_status & (DMA_STATUS_TI))))
- _stmmac_schedule(priv);
- }
-
- /* Optional hardware blocks, interrupts should be disabled */
- if (unlikely(intr_status &
- (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
- pr_info("%s: unexpected status %08x\n", __func__, intr_status);
-
- /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
- writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
-
- DBG(intr, INFO, "\n\n");
+ stmmac_tx_err(priv);
+ } else if (unlikely(status == tx_hard_error))
+ stmmac_tx_err(priv);
return;
}
@@ -1058,17 +860,20 @@ static int stmmac_open(struct net_device *dev)
init_dma_desc_rings(dev);
/* DMA initialization and SW reset */
- if (unlikely(priv->mac_type->ops->dma_init(ioaddr,
- priv->pbl, priv->dma_tx_phy, priv->dma_rx_phy) < 0)) {
+ if (unlikely(priv->hw->dma->init(ioaddr, priv->pbl, priv->dma_tx_phy,
+ priv->dma_rx_phy) < 0)) {
pr_err("%s: DMA initialization failed\n", __func__);
return -1;
}
/* Copy the MAC addr into the HW */
- priv->mac_type->ops->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ priv->hw->mac->set_umac_addr(ioaddr, dev->dev_addr, 0);
+ /* If required, perform hw setup of the bus. */
+ if (priv->bus_setup)
+ priv->bus_setup(ioaddr);
/* Initialize the MAC Core */
- priv->mac_type->ops->core_init(ioaddr);
+ priv->hw->mac->core_init(ioaddr);
priv->shutdown = 0;
@@ -1089,16 +894,16 @@ static int stmmac_open(struct net_device *dev)
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
#endif
/* Dump DMA/MAC registers */
if (netif_msg_hw(priv)) {
- priv->mac_type->ops->dump_mac_regs(ioaddr);
- priv->mac_type->ops->dump_dma_regs(ioaddr);
+ priv->hw->mac->dump_regs(ioaddr);
+ priv->hw->dma->dump_regs(ioaddr);
}
if (priv->phydev)
@@ -1142,8 +947,8 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Release and free the Rx/Tx resources */
free_dma_desc_resources(priv);
@@ -1214,8 +1019,8 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
desc->des2 = dma_map_single(priv->device, skb->data,
BUF_SIZE_8KiB, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, BUF_SIZE_8KiB,
+ csum_insertion);
entry = (++priv->cur_tx) % txsize;
desc = priv->dma_tx + entry;
@@ -1224,16 +1029,16 @@ static unsigned int stmmac_handle_jumbo_frames(struct sk_buff *skb,
skb->data + BUF_SIZE_8KiB,
buf2_size, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 0,
- buf2_size, csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, buf2_size,
+ csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
priv->tx_skbuff[entry] = NULL;
} else {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
desc->des3 = desc->des2 + BUF_SIZE_4KiB;
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
return entry;
}
@@ -1301,8 +1106,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
unsigned int nopaged_len = skb_headlen(skb);
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
- priv->mac_type->ops->prepare_tx_desc(desc, 1, nopaged_len,
- csum_insertion);
+ priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
+ csum_insertion);
}
for (i = 0; i < nfrags; i++) {
@@ -1317,21 +1122,20 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
frag->page_offset,
len, DMA_TO_DEVICE);
priv->tx_skbuff[entry] = NULL;
- priv->mac_type->ops->prepare_tx_desc(desc, 0, len,
- csum_insertion);
- priv->mac_type->ops->set_tx_owner(desc);
+ priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion);
+ priv->hw->desc->set_tx_owner(desc);
}
/* Interrupt on completition only for the latest segment */
- priv->mac_type->ops->close_tx_desc(desc);
+ priv->hw->desc->close_tx_desc(desc);
#ifdef CONFIG_STMMAC_TIMER
/* Clean IC while using timer */
if (likely(priv->tm->enable))
- priv->mac_type->ops->clear_tx_ic(desc);
+ priv->hw->desc->clear_tx_ic(desc);
#endif
/* To avoid raise condition */
- priv->mac_type->ops->set_tx_owner(first);
+ priv->hw->desc->set_tx_owner(first);
priv->cur_tx++;
@@ -1353,8 +1157,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
dev->stats.tx_bytes += skb->len;
- /* CSR1 enables the transmit DMA to check for new descriptor */
- writel(1, dev->base_addr + DMA_XMT_POLL_DEMAND);
+ priv->hw->dma->enable_dma_transmission(dev->base_addr);
return NETDEV_TX_OK;
}
@@ -1391,7 +1194,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
}
RX_DBG(KERN_INFO "\trefill entry #%d\n", entry);
}
- priv->mac_type->ops->set_rx_owner(p + entry);
+ priv->hw->desc->set_rx_owner(p + entry);
}
return;
}
@@ -1412,7 +1215,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
}
#endif
count = 0;
- while (!priv->mac_type->ops->get_rx_owner(p)) {
+ while (!priv->hw->desc->get_rx_owner(p)) {
int status;
if (count >= limit)
@@ -1425,15 +1228,14 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit)
prefetch(p_next);
/* read the status of the incoming frame */
- status = (priv->mac_type->ops->rx_status(&priv->dev->stats,
- &priv->xstats, p));
+ status = (priv->hw->desc->rx_status(&priv->dev->stats,
+ &priv->xstats, p));
if (unlikely(status == discard_frame))
priv->dev->stats.rx_errors++;
else {
struct sk_buff *skb;
/* Length should omit the CRC */
- int frame_len =
- priv->mac_type->ops->get_rx_frame_len(p) - 4;
+ int frame_len = priv->hw->desc->get_rx_frame_len(p) - 4;
#ifdef STMMAC_RX_DEBUG
if (frame_len > ETH_FRAME_LEN)
@@ -1569,7 +1371,7 @@ static void stmmac_multicast_list(struct net_device *dev)
struct stmmac_priv *priv = netdev_priv(dev);
spin_lock(&priv->lock);
- priv->mac_type->ops->set_filter(dev);
+ priv->hw->mac->set_filter(dev);
spin_unlock(&priv->lock);
return;
}
@@ -1623,9 +1425,10 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
if (priv->is_gmac) {
unsigned long ioaddr = dev->base_addr;
/* To handle GMAC own interrupts */
- priv->mac_type->ops->host_irq_status(ioaddr);
+ priv->hw->mac->host_irq_status(ioaddr);
}
- stmmac_dma_interrupt(dev);
+
+ stmmac_dma_interrupt(priv);
return IRQ_HANDLED;
}
@@ -1744,7 +1547,7 @@ static int stmmac_probe(struct net_device *dev)
netif_napi_add(dev, &priv->napi, stmmac_poll, 64);
/* Get the MAC address */
- priv->mac_type->ops->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
+ priv->hw->mac->get_umac_addr(dev->base_addr, dev->dev_addr, 0);
if (!is_valid_ether_addr(dev->dev_addr))
pr_warning("\tno valid MAC address;"
@@ -1779,16 +1582,16 @@ static int stmmac_mac_device_setup(struct net_device *dev)
struct mac_device_info *device;
if (priv->is_gmac)
- device = gmac_setup(ioaddr);
+ device = dwmac1000_setup(ioaddr);
else
- device = mac100_setup(ioaddr);
+ device = dwmac100_setup(ioaddr);
if (!device)
return -ENOMEM;
- priv->mac_type = device;
+ priv->hw = device;
- priv->wolenabled = priv->mac_type->hw.pmt; /* PMT supported */
+ priv->wolenabled = priv->hw->pmt; /* PMT supported */
if (priv->wolenabled == PMT_SUPPORTED)
priv->wolopts = WAKE_MAGIC; /* Magic Frame */
@@ -1797,8 +1600,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
static int stmmacphy_dvr_probe(struct platform_device *pdev)
{
- struct plat_stmmacphy_data *plat_dat;
- plat_dat = (struct plat_stmmacphy_data *)((pdev->dev).platform_data);
+ struct plat_stmmacphy_data *plat_dat = pdev->dev.platform_data;
pr_debug("stmmacphy_dvr_probe: added phy for bus %d\n",
plat_dat->bus_id);
@@ -1830,9 +1632,7 @@ static struct platform_driver stmmacphy_driver = {
static int stmmac_associate_phy(struct device *dev, void *data)
{
struct stmmac_priv *priv = (struct stmmac_priv *)data;
- struct plat_stmmacphy_data *plat_dat;
-
- plat_dat = (struct plat_stmmacphy_data *)(dev->platform_data);
+ struct plat_stmmacphy_data *plat_dat = dev->platform_data;
DBG(probe, DEBUG, "%s: checking phy for bus %d\n", __func__,
plat_dat->bus_id);
@@ -1922,7 +1722,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
priv = netdev_priv(ndev);
priv->device = &(pdev->dev);
priv->dev = ndev;
- plat_dat = (struct plat_stmmacenet_data *)((pdev->dev).platform_data);
+ plat_dat = pdev->dev.platform_data;
priv->bus_id = plat_dat->bus_id;
priv->pbl = plat_dat->pbl; /* TLI */
priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
@@ -1932,6 +1732,11 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
/* Set the I/O base addr */
ndev->base_addr = (unsigned long)addr;
+ /* Verify embedded resource for the platform */
+ ret = stmmac_claim_resource(pdev);
+ if (ret < 0)
+ goto out;
+
/* MAC HW device detection */
ret = stmmac_mac_device_setup(ndev);
if (ret < 0)
@@ -1952,6 +1757,7 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
}
priv->fix_mac_speed = plat_dat->fix_mac_speed;
+ priv->bus_setup = plat_dat->bus_setup;
priv->bsp_priv = plat_dat->bsp_priv;
pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
@@ -1986,12 +1792,13 @@ out:
static int stmmac_dvr_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
struct resource *res;
pr_info("%s:\n\tremoving driver", __func__);
- stmmac_dma_stop_rx(ndev->base_addr);
- stmmac_dma_stop_tx(ndev->base_addr);
+ priv->hw->dma->stop_rx(ndev->base_addr);
+ priv->hw->dma->stop_tx(ndev->base_addr);
stmmac_mac_disable_rx(ndev->base_addr);
stmmac_mac_disable_tx(ndev->base_addr);
@@ -2038,21 +1845,20 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
napi_disable(&priv->napi);
/* Stop TX/RX DMA */
- stmmac_dma_stop_tx(dev->base_addr);
- stmmac_dma_stop_rx(dev->base_addr);
+ priv->hw->dma->stop_tx(dev->base_addr);
+ priv->hw->dma->stop_rx(dev->base_addr);
/* Clear the Rx/Tx descriptors */
- priv->mac_type->ops->init_rx_desc(priv->dma_rx,
- priv->dma_rx_size, dis_ic);
- priv->mac_type->ops->init_tx_desc(priv->dma_tx,
- priv->dma_tx_size);
+ priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
+ dis_ic);
+ priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
stmmac_mac_disable_tx(dev->base_addr);
if (device_may_wakeup(&(pdev->dev))) {
/* Enable Power down mode by programming the PMT regs */
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr,
- priv->wolopts);
+ priv->hw->mac->pmt(dev->base_addr,
+ priv->wolopts);
} else {
stmmac_mac_disable_rx(dev->base_addr);
}
@@ -2093,15 +1899,15 @@ static int stmmac_resume(struct platform_device *pdev)
* from another devices (e.g. serial console). */
if (device_may_wakeup(&(pdev->dev)))
if (priv->wolenabled == PMT_SUPPORTED)
- priv->mac_type->ops->pmt(dev->base_addr, 0);
+ priv->hw->mac->pmt(dev->base_addr, 0);
netif_device_attach(dev);
/* Enable the MAC and DMA */
stmmac_mac_enable_rx(ioaddr);
stmmac_mac_enable_tx(ioaddr);
- stmmac_dma_start_tx(ioaddr);
- stmmac_dma_start_rx(ioaddr);
+ priv->hw->dma->start_tx(ioaddr);
+ priv->hw->dma->start_rx(ioaddr);
#ifdef CONFIG_STMMAC_TIMER
priv->tm->timer_start(tmrate);
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index 8498552a22f..fffe1d037fe 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -24,7 +24,6 @@
Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
-#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/phy.h>
@@ -48,8 +47,8 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
int data;
u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
@@ -80,8 +79,8 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
- unsigned int mii_data = priv->mac_type->hw.mii.data;
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
u16 value =
(((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
@@ -112,7 +111,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
struct net_device *ndev = bus->priv;
struct stmmac_priv *priv = netdev_priv(ndev);
unsigned long ioaddr = ndev->base_addr;
- unsigned int mii_address = priv->mac_type->hw.mii.addr;
+ unsigned int mii_address = priv->hw->mii.addr;
if (priv->phy_reset) {
pr_debug("stmmac_mdio_reset: calling phy_reset\n");
diff --git a/drivers/net/sun3_82586.c b/drivers/net/sun3_82586.c
index b447a871942..2f6a760e5f2 100644
--- a/drivers/net/sun3_82586.c
+++ b/drivers/net/sun3_82586.c
@@ -413,8 +413,8 @@ static int init586(struct net_device *dev)
volatile struct iasetup_cmd_struct *ias_cmd;
volatile struct tdr_cmd_struct *tdr_cmd;
volatile struct mcsetup_cmd_struct *mc_cmd;
- struct dev_mc_list *dmi=dev->mc_list;
- int num_addrs=dev->mc_count;
+ struct dev_mc_list *dmi;
+ int num_addrs=netdev_mc_count(dev);
ptr = (void *) ((char *)p->scb + sizeof(struct scb_struct));
@@ -536,8 +536,10 @@ static int init586(struct net_device *dev)
mc_cmd->cmd_link = 0xffff;
mc_cmd->mc_cnt = swab16(num_addrs * 6);
- for(i=0;i<num_addrs;i++,dmi=dmi->next)
- memcpy((char *) mc_cmd->mc_list[i], dmi->dmi_addr,6);
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev)
+ memcpy((char *) mc_cmd->mc_list[i++],
+ dmi->dmi_addr, ETH_ALEN);
p->scb->cbl_offset = make16(mc_cmd);
p->scb->cmd_cuc = CUC_START;
diff --git a/drivers/net/sun3lance.c b/drivers/net/sun3lance.c
index 0ca4241b4f6..99998862c22 100644
--- a/drivers/net/sun3lance.c
+++ b/drivers/net/sun3lance.c
@@ -917,7 +917,7 @@ static void set_multicast_list( struct net_device *dev )
REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
} else {
short multicast_table[4];
- int num_addrs = dev->mc_count;
+ int num_addrs = netdev_mc_count(dev);
int i;
/* We don't use the multicast table, but rely on upper-layer
* filtering. */
diff --git a/drivers/net/sunbmac.c b/drivers/net/sunbmac.c
index 25e81ebd9cd..a0bd361d5ec 100644
--- a/drivers/net/sunbmac.c
+++ b/drivers/net/sunbmac.c
@@ -999,7 +999,7 @@ static void bigmac_set_multicast(struct net_device *dev)
{
struct bigmac *bp = netdev_priv(dev);
void __iomem *bregs = bp->bregs;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
int i;
u32 tmp, crc;
@@ -1013,7 +1013,7 @@ static void bigmac_set_multicast(struct net_device *dev)
while ((sbus_readl(bregs + BMAC_RXCFG) & BIGMAC_RXCFG_ENABLE) != 0)
udelay(20);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writel(0xffff, bregs + BMAC_HTABLE0);
sbus_writel(0xffff, bregs + BMAC_HTABLE1);
sbus_writel(0xffff, bregs + BMAC_HTABLE2);
@@ -1028,9 +1028,8 @@ static void bigmac_set_multicast(struct net_device *dev)
for (i = 0; i < 4; i++)
hash_table[i] = 0;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index d58e1891ca6..a855934dfc3 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -206,7 +206,7 @@ IVc. Errata
#define USE_IO_OPS 1
#endif
-static const struct pci_device_id sundance_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = {
{ 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 },
{ 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 },
{ 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 },
@@ -1517,19 +1517,18 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
- } else if (dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
int bit;
int index;
int crc;
memset (mc_filter, 0, sizeof (mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
crc = ether_crc_le (ETH_ALEN, mclist->dmi_addr);
for (index=0, bit=0; bit < 6; bit++, crc <<= 1)
if (crc & 0x80000000) index |= 1 << bit;
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index b571a1babab..4344017bfae 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -107,7 +107,7 @@ MODULE_LICENSE("GPL");
#define GEM_MODULE_NAME "gem"
#define PFX GEM_MODULE_NAME ": "
-static struct pci_device_id gem_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
@@ -1837,7 +1837,7 @@ static u32 gem_setup_multicast(struct gem *gp)
int i;
if ((gp->dev->flags & IFF_ALLMULTI) ||
- (gp->dev->mc_count > 256)) {
+ (netdev_mc_count(gp->dev) > 256)) {
for (i=0; i<16; i++)
writel(0xffff, gp->regs + MAC_HASH0 + (i << 2));
rxcfg |= MAC_RXCFG_HFE;
@@ -1846,17 +1846,13 @@ static u32 gem_setup_multicast(struct gem *gp)
} else {
u16 hash_table[16];
u32 crc;
- struct dev_mc_list *dmi = gp->dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
- for (i = 0; i < 16; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < gp->dev->mc_count; i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, gp->dev) {
char *addrs = dmi->dmi_addr;
- dmi = dmi->next;
-
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 6762f1c6ec8..b17dbb11bd6 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -1516,24 +1516,20 @@ static int happy_meal_init(struct happy_meal *hp)
HMD(("htable, "));
if ((hp->dev->flags & IFF_ALLMULTI) ||
- (hp->dev->mc_count > 64)) {
+ (netdev_mc_count(hp->dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
u16 hash_table[4];
- struct dev_mc_list *dmi = hp->dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < hp->dev->mc_count; i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, hp->dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
@@ -2366,14 +2362,13 @@ static void happy_meal_set_multicast(struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
void __iomem *bregs = hp->bigmacregs;
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
spin_lock_irq(&hp->happy_lock);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
@@ -2384,12 +2379,9 @@ static void happy_meal_set_multicast(struct net_device *dev)
} else {
u16 hash_table[4];
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < dev->mc_count; i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
@@ -3211,7 +3203,7 @@ static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
dev_set_drvdata(&pdev->dev, NULL);
}
-static struct pci_device_id happymeal_pci_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
{ } /* Terminating entry */
};
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 64e7d08c878..d7c73f478ef 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1170,9 +1170,8 @@ static int lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void lance_load_multicast(struct net_device *dev)
{
struct lance_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
char *addrs;
- int i;
u32 crc;
u32 val;
@@ -1196,9 +1195,8 @@ static void lance_load_multicast(struct net_device *dev)
return;
/* Add addresses */
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
/* multicast address? */
if (!(*addrs & 1))
diff --git a/drivers/net/sunqe.c b/drivers/net/sunqe.c
index 45c383f285e..be637dce944 100644
--- a/drivers/net/sunqe.c
+++ b/drivers/net/sunqe.c
@@ -627,7 +627,7 @@ static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
static void qe_set_multicast(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
u8 new_mconfig = qep->mconfig;
char *addrs;
int i;
@@ -636,7 +636,7 @@ static void qe_set_multicast(struct net_device *dev)
/* Lock out others. */
netif_stop_queue(dev);
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
qep->mregs + MREGS_IACONFIG);
while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
@@ -650,12 +650,9 @@ static void qe_set_multicast(struct net_device *dev)
u16 hash_table[4];
u8 *hbytes = (unsigned char *) &hash_table[0];
- for (i = 0; i < 4; i++)
- hash_table[i] = 0;
-
- for (i = 0; i < dev->mc_count; i++) {
+ memset(hash_table, 0, sizeof(hash_table));
+ netdev_for_each_mc_addr(dmi, dev) {
addrs = dmi->dmi_addr;
- dmi = dmi->next;
if (!(*addrs & 1))
continue;
diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c
index bc74db0d12f..6b1b7cea7f6 100644
--- a/drivers/net/sunvnet.c
+++ b/drivers/net/sunvnet.c
@@ -765,7 +765,7 @@ static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
struct dev_addr_list *p;
- for (p = dev->mc_list; p; p = p->next) {
+ netdev_for_each_mc_addr(p, dev) {
struct vnet_mcast_entry *m;
m = __vnet_mc_find(vp, p->dmi_addr);
@@ -1062,10 +1062,7 @@ static struct vnet * __devinit vnet_new(const u64 *local_mac)
goto err_out_free_dev;
}
- printk(KERN_INFO "%s: Sun LDOM vnet ", dev->name);
-
- for (i = 0; i < 6; i++)
- printk("%2.2x%c", dev->dev_addr[i], i == 5 ? '\n' : ':');
+ printk(KERN_INFO "%s: Sun LDOM vnet %pM\n", dev->name, dev->dev_addr);
list_add(&vp->list, &vnet_list);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index d71c1976072..49bd84c0d58 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -65,7 +65,7 @@ static const struct {
{ "TOSHIBA TC35815/TX4939" },
};
-static const struct pci_device_id tc35815_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tc35815_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815CF), .driver_data = TC35815CF },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_NWU), .driver_data = TC35815_NWU },
{PCI_DEVICE(PCI_VENDOR_ID_TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_TC35815_TX4939), .driver_data = TC35815_TX4939 },
@@ -402,6 +402,7 @@ struct tc35815_local {
* by this lock as well.
*/
spinlock_t lock;
+ spinlock_t rx_lock;
struct mii_bus *mii_bus;
struct phy_device *phy_dev;
@@ -835,6 +836,7 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
INIT_WORK(&lp->restart_work, tc35815_restart_work);
spin_lock_init(&lp->lock);
+ spin_lock_init(&lp->rx_lock);
lp->pci_dev = pdev;
lp->chiptype = ent->driver_data;
@@ -1186,6 +1188,7 @@ static void tc35815_restart(struct net_device *dev)
printk(KERN_ERR "%s: BMCR reset failed.\n", dev->name);
}
+ spin_lock_bh(&lp->rx_lock);
spin_lock_irq(&lp->lock);
tc35815_chip_reset(dev);
tc35815_clear_queues(dev);
@@ -1193,6 +1196,7 @@ static void tc35815_restart(struct net_device *dev)
/* Reconfigure CAM again since tc35815_chip_init() initialize it. */
tc35815_set_multicast_list(dev);
spin_unlock_irq(&lp->lock);
+ spin_unlock_bh(&lp->rx_lock);
netif_wake_queue(dev);
}
@@ -1211,11 +1215,14 @@ static void tc35815_schedule_restart(struct net_device *dev)
struct tc35815_local *lp = netdev_priv(dev);
struct tc35815_regs __iomem *tr =
(struct tc35815_regs __iomem *)dev->base_addr;
+ unsigned long flags;
/* disable interrupts */
+ spin_lock_irqsave(&lp->lock, flags);
tc_writel(0, &tr->Int_En);
tc_writel(tc_readl(&tr->DMA_Ctl) | DMA_IntMask, &tr->DMA_Ctl);
schedule_work(&lp->restart_work);
+ spin_unlock_irqrestore(&lp->lock, flags);
}
static void tc35815_tx_timeout(struct net_device *dev)
@@ -1436,7 +1443,9 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
if (status & Int_IntMacTx) {
/* Transmit complete. */
lp->lstats.tx_ints++;
+ spin_lock_irq(&lp->lock);
tc35815_txdone(dev);
+ spin_unlock_irq(&lp->lock);
if (ret < 0)
ret = 0;
}
@@ -1649,7 +1658,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
int received = 0, handled;
u32 status;
- spin_lock(&lp->lock);
+ spin_lock(&lp->rx_lock);
status = tc_readl(&tr->Int_Src);
do {
/* BLEx, FDAEx will be cleared later */
@@ -1667,7 +1676,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
}
status = tc_readl(&tr->Int_Src);
} while (status);
- spin_unlock(&lp->lock);
+ spin_unlock(&lp->rx_lock);
if (received < budget) {
napi_complete(napi);
@@ -1940,23 +1949,23 @@ tc35815_set_multicast_list(struct net_device *dev)
/* Enable promiscuous mode */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
} else if ((dev->flags & IFF_ALLMULTI) ||
- dev->mc_count > CAM_ENTRY_MAX - 3) {
+ netdev_mc_count(dev) > CAM_ENTRY_MAX - 3) {
/* CAM 0, 1, 20 are reserved. */
/* Disable promiscuous mode, use normal mode. */
tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc, &tr->CAM_Ctl);
- } else if (dev->mc_count) {
- struct dev_mc_list *cur_addr = dev->mc_list;
+ } else if (!netdev_mc_empty(dev)) {
+ struct dev_mc_list *cur_addr;
int i;
int ena_bits = CAM_Ena_Bit(CAM_ENTRY_SOURCE);
tc_writel(0, &tr->CAM_Ctl);
/* Walk the address list, and load the filter */
- for (i = 0; i < dev->mc_count; i++, cur_addr = cur_addr->next) {
- if (!cur_addr)
- break;
+ i = 0;
+ netdev_for_each_mc_addr(cur_addr, dev) {
/* entry 0,1 is reserved. */
tc35815_set_cam_entry(dev, i + 2, cur_addr->dmi_addr);
ena_bits |= CAM_Ena_Bit(i + 2);
+ i++;
}
tc_writel(ena_bits, &tr->CAM_Ena);
tc_writel(CAM_CompEn | CAM_BroadAcc, &tr->CAM_Ctl);
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 80b404f2b93..0c9780217c8 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -62,9 +62,11 @@
*
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include "tehuti.h"
-static struct pci_device_id __devinitdata bdx_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(bdx_pci_tbl) = {
{0x1FC9, 0x3009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1FC9, 0x3010, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0x1FC9, 0x3014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -105,26 +107,24 @@ static void print_hw_id(struct pci_dev *pdev)
pci_read_config_word(pdev, PCI_LINK_STATUS_REG, &pci_link_status);
pci_read_config_word(pdev, PCI_DEV_CTRL_REG, &pci_ctrl);
- printk(KERN_INFO "tehuti: %s%s\n", BDX_NIC_NAME,
- nic->port_num == 1 ? "" : ", 2-Port");
- printk(KERN_INFO
- "tehuti: srom 0x%x fpga %d build %u lane# %d"
- " max_pl 0x%x mrrs 0x%x\n",
- readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
- readl(nic->regs + FPGA_SEED),
- GET_LINK_STATUS_LANES(pci_link_status),
- GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
+ pr_info("%s%s\n", BDX_NIC_NAME,
+ nic->port_num == 1 ? "" : ", 2-Port");
+ pr_info("srom 0x%x fpga %d build %u lane# %d max_pl 0x%x mrrs 0x%x\n",
+ readl(nic->regs + SROM_VER), readl(nic->regs + FPGA_VER) & 0xFFF,
+ readl(nic->regs + FPGA_SEED),
+ GET_LINK_STATUS_LANES(pci_link_status),
+ GET_DEV_CTRL_MAXPL(pci_ctrl), GET_DEV_CTRL_MRRS(pci_ctrl));
}
static void print_fw_id(struct pci_nic *nic)
{
- printk(KERN_INFO "tehuti: fw 0x%x\n", readl(nic->regs + FW_VER));
+ pr_info("fw 0x%x\n", readl(nic->regs + FW_VER));
}
static void print_eth_id(struct net_device *ndev)
{
- printk(KERN_INFO "%s: %s, Port %c\n", ndev->name, BDX_NIC_NAME,
- (ndev->if_port == 0) ? 'A' : 'B');
+ netdev_info(ndev, "%s, Port %c\n",
+ BDX_NIC_NAME, (ndev->if_port == 0) ? 'A' : 'B');
}
@@ -160,7 +160,7 @@ bdx_fifo_init(struct bdx_priv *priv, struct fifo *f, int fsz_type,
f->va = pci_alloc_consistent(priv->pdev,
memsz + FIFO_EXTRA_SPACE, &f->da);
if (!f->va) {
- ERR("pci_alloc_consistent failed\n");
+ pr_err("pci_alloc_consistent failed\n");
RET(-ENOMEM);
}
f->reg_CFG0 = reg_CFG0;
@@ -204,13 +204,13 @@ static void bdx_link_changed(struct bdx_priv *priv)
if (netif_carrier_ok(priv->ndev)) {
netif_stop_queue(priv->ndev);
netif_carrier_off(priv->ndev);
- ERR("%s: Link Down\n", priv->ndev->name);
+ netdev_err(priv->ndev, "Link Down\n");
}
} else {
if (!netif_carrier_ok(priv->ndev)) {
netif_wake_queue(priv->ndev);
netif_carrier_on(priv->ndev);
- ERR("%s: Link Up\n", priv->ndev->name);
+ netdev_err(priv->ndev, "Link Up\n");
}
}
}
@@ -226,10 +226,10 @@ static void bdx_isr_extra(struct bdx_priv *priv, u32 isr)
bdx_link_changed(priv);
if (isr & IR_PCIE_LINK)
- ERR("%s: PCI-E Link Fault\n", priv->ndev->name);
+ netdev_err(priv->ndev, "PCI-E Link Fault\n");
if (isr & IR_PCIE_TOUT)
- ERR("%s: PCI-E Time Out\n", priv->ndev->name);
+ netdev_err(priv->ndev, "PCI-E Time Out\n");
}
@@ -345,7 +345,7 @@ out:
release_firmware(fw);
if (rc) {
- ERR("%s: firmware loading failed\n", priv->ndev->name);
+ netdev_err(priv->ndev, "firmware loading failed\n");
if (rc == -EIO)
DBG("VPC = 0x%x VIC = 0x%x INIT_STATUS = 0x%x i=%d\n",
READ_REG(priv, regVPC),
@@ -419,9 +419,11 @@ static int bdx_hw_start(struct bdx_priv *priv)
WRITE_REG(priv, regGMAC_RXF_A, GMAC_RX_FILTER_OSEN |
GMAC_RX_FILTER_AM | GMAC_RX_FILTER_AB);
-#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI)?0:IRQF_SHARED)
- if ((rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
- ndev->name, ndev)))
+#define BDX_IRQ_TYPE ((priv->nic->irq_type == IRQ_MSI) ? 0 : IRQF_SHARED)
+
+ rc = request_irq(priv->pdev->irq, bdx_isr_napi, BDX_IRQ_TYPE,
+ ndev->name, ndev);
+ if (rc)
goto err_irq;
bdx_enable_interrupts(priv);
@@ -462,7 +464,7 @@ static int bdx_hw_reset_direct(void __iomem *regs)
readl(regs + regRXD_CFG0_0);
return 0;
}
- ERR("tehuti: HW reset failed\n");
+ pr_err("HW reset failed\n");
return 1; /* failure */
}
@@ -486,7 +488,7 @@ static int bdx_hw_reset(struct bdx_priv *priv)
READ_REG(priv, regRXD_CFG0_0);
return 0;
}
- ERR("tehuti: HW reset failed\n");
+ pr_err("HW reset failed\n");
return 1; /* failure */
}
@@ -510,8 +512,7 @@ static int bdx_sw_reset(struct bdx_priv *priv)
mdelay(10);
}
if (i == 50)
- ERR("%s: SW reset timeout. continuing anyway\n",
- priv->ndev->name);
+ netdev_err(priv->ndev, "SW reset timeout. continuing anyway\n");
/* 6. disable intrs */
WRITE_REG(priv, regRDINTCM0, 0);
@@ -604,18 +605,15 @@ static int bdx_open(struct net_device *ndev)
if (netif_running(ndev))
netif_stop_queue(priv->ndev);
- if ((rc = bdx_tx_init(priv)))
- goto err;
-
- if ((rc = bdx_rx_init(priv)))
- goto err;
-
- if ((rc = bdx_fw_load(priv)))
+ if ((rc = bdx_tx_init(priv)) ||
+ (rc = bdx_rx_init(priv)) ||
+ (rc = bdx_fw_load(priv)))
goto err;
bdx_rx_alloc_skbs(priv, &priv->rxf_fifo0);
- if ((rc = bdx_hw_start(priv)))
+ rc = bdx_hw_start(priv);
+ if (rc)
goto err;
napi_enable(&priv->napi);
@@ -647,7 +645,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
if (cmd != SIOCDEVPRIVATE) {
error = copy_from_user(data, ifr->ifr_data, sizeof(data));
if (error) {
- ERR("cant copy from user\n");
+ pr_err("cant copy from user\n");
RET(error);
}
DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
@@ -708,7 +706,7 @@ static void __bdx_vlan_rx_vid(struct net_device *ndev, uint16_t vid, int enable)
ENTER;
DBG2("vid=%d value=%d\n", (int)vid, enable);
if (unlikely(vid >= 4096)) {
- ERR("tehuti: invalid VID: %u (> 4096)\n", vid);
+ pr_err("invalid VID: %u (> 4096)\n", vid);
RET();
}
reg = regVLAN_0 + (vid / 32) * 4;
@@ -776,8 +774,8 @@ static int bdx_change_mtu(struct net_device *ndev, int new_mtu)
/* enforce minimum frame size */
if (new_mtu < ETH_ZLEN) {
- ERR("%s: %s mtu %d is less then minimal %d\n",
- BDX_DRV_NAME, ndev->name, new_mtu, ETH_ZLEN);
+ netdev_err(ndev, "mtu %d is less then minimal %d\n",
+ new_mtu, ETH_ZLEN);
RET(-EINVAL);
}
@@ -808,7 +806,7 @@ static void bdx_setmulti(struct net_device *ndev)
/* set IMF to accept all multicast frames */
for (i = 0; i < MAC_MCST_HASH_NUM; i++)
WRITE_REG(priv, regRX_MCST_HASH0 + i * 4, ~0);
- } else if (ndev->mc_count) {
+ } else if (!netdev_mc_empty(ndev)) {
u8 hash;
struct dev_mc_list *mclist;
u32 reg, val;
@@ -826,10 +824,8 @@ static void bdx_setmulti(struct net_device *ndev)
/* TBD: sort addresses and write them in ascending order
* into RX_MAC_MCST regs. we skip this phase now and accept ALL
* multicast frames through IMF */
- mclist = ndev->mc_list;
-
/* accept the rest of addresses through IMF */
- for (; mclist; mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, ndev) {
hash = 0;
for (i = 0; i < ETH_ALEN; i++)
hash ^= mclist->dmi_addr[i];
@@ -840,7 +836,7 @@ static void bdx_setmulti(struct net_device *ndev)
}
} else {
- DBG("only own mac %d\n", ndev->mc_count);
+ DBG("only own mac %d\n", netdev_mc_count(ndev));
rxf_val |= GMAC_RX_FILTER_AB;
}
WRITE_REG(priv, regGMAC_RXF_A, rxf_val);
@@ -1028,17 +1024,16 @@ static int bdx_rx_init(struct bdx_priv *priv)
regRXF_CFG0_0, regRXF_CFG1_0,
regRXF_RPTR_0, regRXF_WPTR_0))
goto err_mem;
- if (!
- (priv->rxdb =
- bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
- sizeof(struct rxf_desc))))
+ priv->rxdb = bdx_rxdb_create(priv->rxf_fifo0.m.memsz /
+ sizeof(struct rxf_desc));
+ if (!priv->rxdb)
goto err_mem;
priv->rxf_fifo0.m.pktsz = priv->ndev->mtu + VLAN_ETH_HLEN;
return 0;
err_mem:
- ERR("%s: %s: Rx init failed\n", BDX_DRV_NAME, priv->ndev->name);
+ netdev_err(priv->ndev, "Rx init failed\n");
return -ENOMEM;
}
@@ -1115,8 +1110,9 @@ static void bdx_rx_alloc_skbs(struct bdx_priv *priv, struct rxf_fifo *f)
ENTER;
dno = bdx_rxdb_available(db) - 1;
while (dno > 0) {
- if (!(skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN))) {
- ERR("NO MEM: dev_alloc_skb failed\n");
+ skb = dev_alloc_skb(f->m.pktsz + NET_IP_ALIGN);
+ if (!skb) {
+ pr_err("NO MEM: dev_alloc_skb failed\n");
break;
}
skb->dev = priv->ndev;
@@ -1337,9 +1333,7 @@ static int bdx_rx_receive(struct bdx_priv *priv, struct rxd_fifo *f, int budget)
static void print_rxdd(struct rxd_desc *rxdd, u32 rxd_val1, u16 len,
u16 rxd_vlan)
{
- DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d "
- "pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d "
- "va_lo %d va_hi %d\n",
+ DBG("ERROR: rxdd bc %d rxfq %d to %d type %d err %d rxp %d pkt_id %d vtag %d len %d vlan_id %d cfi %d prio %d va_lo %d va_hi %d\n",
GET_RXD_BC(rxd_val1), GET_RXD_RXFQ(rxd_val1), GET_RXD_TO(rxd_val1),
GET_RXD_TYPE(rxd_val1), GET_RXD_ERR(rxd_val1),
GET_RXD_RXP(rxd_val1), GET_RXD_PKT_ID(rxd_val1),
@@ -1591,7 +1585,7 @@ static int bdx_tx_init(struct bdx_priv *priv)
return 0;
err_mem:
- ERR("tehuti: %s: Tx init failed\n", priv->ndev->name);
+ netdev_err(priv->ndev, "Tx init failed\n");
return -ENOMEM;
}
@@ -1609,7 +1603,7 @@ static inline int bdx_tx_space(struct bdx_priv *priv)
fsize = f->m.rptr - f->m.wptr;
if (fsize <= 0)
fsize = f->m.memsz + fsize;
- return (fsize);
+ return fsize;
}
/* bdx_tx_transmit - send packet to NIC
@@ -1937,8 +1931,9 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
RET(-ENOMEM);
/************** pci *****************/
- if ((err = pci_enable_device(pdev))) /* it trigers interrupt, dunno why. */
- goto err_pci; /* it's not a problem though */
+ err = pci_enable_device(pdev);
+ if (err) /* it triggers interrupt, dunno why. */
+ goto err_pci; /* it's not a problem though */
if (!(err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) &&
!(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))) {
@@ -1946,14 +1941,14 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) ||
(err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))) {
- printk(KERN_ERR "tehuti: No usable DMA configuration"
- ", aborting\n");
+ pr_err("No usable DMA configuration, aborting\n");
goto err_dma;
}
pci_using_dac = 0;
}
- if ((err = pci_request_regions(pdev, BDX_DRV_NAME)))
+ err = pci_request_regions(pdev, BDX_DRV_NAME);
+ if (err)
goto err_dma;
pci_set_master(pdev);
@@ -1961,25 +1956,26 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pciaddr = pci_resource_start(pdev, 0);
if (!pciaddr) {
err = -EIO;
- ERR("tehuti: no MMIO resource\n");
+ pr_err("no MMIO resource\n");
goto err_out_res;
}
- if ((regionSize = pci_resource_len(pdev, 0)) < BDX_REGS_SIZE) {
+ regionSize = pci_resource_len(pdev, 0);
+ if (regionSize < BDX_REGS_SIZE) {
err = -EIO;
- ERR("tehuti: MMIO resource (%x) too small\n", regionSize);
+ pr_err("MMIO resource (%x) too small\n", regionSize);
goto err_out_res;
}
nic->regs = ioremap(pciaddr, regionSize);
if (!nic->regs) {
err = -EIO;
- ERR("tehuti: ioremap failed\n");
+ pr_err("ioremap failed\n");
goto err_out_res;
}
if (pdev->irq < 2) {
err = -EIO;
- ERR("tehuti: invalid irq (%d)\n", pdev->irq);
+ pr_err("invalid irq (%d)\n", pdev->irq);
goto err_out_iomap;
}
pci_set_drvdata(pdev, nic);
@@ -1996,8 +1992,9 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
nic->irq_type = IRQ_INTX;
#ifdef BDX_MSI
if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
- if ((err = pci_enable_msi(pdev)))
- ERR("Tehuti: Can't eneble msi. error is %d\n", err);
+ err = pci_enable_msi(pdev);
+ if (err)
+ pr_err("Can't eneble msi. error is %d\n", err);
else
nic->irq_type = IRQ_MSI;
} else
@@ -2006,9 +2003,10 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/************** netdev **************/
for (port = 0; port < nic->port_num; port++) {
- if (!(ndev = alloc_etherdev(sizeof(struct bdx_priv)))) {
+ ndev = alloc_etherdev(sizeof(struct bdx_priv));
+ if (!ndev) {
err = -ENOMEM;
- printk(KERN_ERR "tehuti: alloc_etherdev failed\n");
+ pr_err("alloc_etherdev failed\n");
goto err_out_iomap;
}
@@ -2075,12 +2073,13 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/*bdx_hw_reset(priv); */
if (bdx_read_mac(priv)) {
- printk(KERN_ERR "tehuti: load MAC address failed\n");
+ pr_err("load MAC address failed\n");
goto err_out_iomap;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
- if ((err = register_netdev(ndev))) {
- printk(KERN_ERR "tehuti: register_netdev failed\n");
+ err = register_netdev(ndev);
+ if (err) {
+ pr_err("register_netdev failed\n");
goto err_out_free;
}
netif_carrier_off(ndev);
@@ -2294,13 +2293,13 @@ bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal)
/* Convert RX fifo size to number of pending packets */
static inline int bdx_rx_fifo_size_to_packets(int rx_size)
{
- return ((FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc));
+ return (FIFO_SIZE * (1 << rx_size)) / sizeof(struct rxf_desc);
}
/* Convert TX fifo size to number of pending packets */
static inline int bdx_tx_fifo_size_to_packets(int tx_size)
{
- return ((FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ);
+ return (FIFO_SIZE * (1 << tx_size)) / BDX_TXF_DESC_SZ;
}
/*
@@ -2392,10 +2391,10 @@ static int bdx_get_sset_count(struct net_device *netdev, int stringset)
case ETH_SS_STATS:
BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
!= sizeof(struct bdx_stats) / sizeof(u64));
- return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
- default:
- return -EINVAL;
+ return (priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0;
}
+
+ return -EINVAL;
}
/*
@@ -2493,10 +2492,8 @@ static struct pci_driver bdx_pci_driver = {
*/
static void __init print_driver_id(void)
{
- printk(KERN_INFO "%s: %s, %s\n", BDX_DRV_NAME, BDX_DRV_DESC,
- BDX_DRV_VERSION);
- printk(KERN_INFO "%s: Options: hw_csum %s\n", BDX_DRV_NAME,
- BDX_MSI_STRING);
+ pr_info("%s, %s\n", BDX_DRV_DESC, BDX_DRV_VERSION);
+ pr_info("Options: hw_csum %s\n", BDX_MSI_STRING);
}
static int __init bdx_module_init(void)
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 124141909e4..a19dcf8b6b5 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -529,28 +529,34 @@ struct txd_desc {
/* Debugging Macros */
-#define ERR(fmt, args...) printk(KERN_ERR fmt, ## args)
-#define DBG2(fmt, args...) \
- printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
+#define DBG2(fmt, args...) \
+ pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
#define BDX_ASSERT(x) BUG_ON(x)
#ifdef DEBUG
-#define ENTER do { \
- printk(KERN_ERR "%s:%-5d: ENTER\n", __func__, __LINE__); \
+#define ENTER \
+do { \
+ pr_err("%s:%-5d: ENTER\n", __func__, __LINE__); \
} while (0)
-#define RET(args...) do { \
- printk(KERN_ERR "%s:%-5d: RETURN\n", __func__, __LINE__); \
-return args; } while (0)
+#define RET(args...) \
+do { \
+ pr_err("%s:%-5d: RETURN\n", __func__, __LINE__); \
+ return args; \
+} while (0)
-#define DBG(fmt, args...) \
- printk(KERN_ERR "%s:%-5d: " fmt, __func__, __LINE__, ## args)
+#define DBG(fmt, args...) \
+ pr_err("%s:%-5d: " fmt, __func__, __LINE__, ## args)
#else
-#define ENTER do { } while (0)
+#define ENTER do { } while (0)
#define RET(args...) return args
-#define DBG(fmt, args...) do { } while (0)
+#define DBG(fmt, args...) \
+do { \
+ if (0) \
+ pr_err(fmt, ##args); \
+} while (0)
#endif
#endif /* _BDX__H */
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 7f82b0238e0..0fa7688ab48 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -67,9 +67,8 @@
#include "tg3.h"
#define DRV_MODULE_NAME "tg3"
-#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.106"
-#define DRV_MODULE_RELDATE "January 12, 2010"
+#define DRV_MODULE_VERSION "3.108"
+#define DRV_MODULE_RELDATE "February 17, 2010"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -158,7 +157,7 @@
#define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
static char version[] __devinitdata =
- DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+ DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
@@ -174,7 +173,7 @@ static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
-static struct pci_device_id tg3_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
@@ -244,6 +243,12 @@ static struct pci_device_id tg3_pci_tbl[] = {
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -636,7 +641,6 @@ static void tg3_disable_ints(struct tg3 *tp)
static void tg3_enable_ints(struct tg3 *tp)
{
int i;
- u32 coal_now = 0;
tp->irq_sync = 0;
wmb();
@@ -644,13 +648,14 @@ static void tg3_enable_ints(struct tg3 *tp)
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
+ tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
- coal_now |= tnapi->coal_now;
+ tp->coal_now |= tnapi->coal_now;
}
/* Force an initial interrupt */
@@ -658,8 +663,9 @@ static void tg3_enable_ints(struct tg3 *tp)
(tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
else
- tw32(HOSTCC_MODE, tp->coalesce_mode |
- HOSTCC_MODE_ENABLE | coal_now);
+ tw32(HOSTCC_MODE, tp->coal_now);
+
+ tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
@@ -948,17 +954,17 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
- case TG3_PHY_ID_BCM50610:
- case TG3_PHY_ID_BCM50610M:
+ case PHY_ID_BCM50610:
+ case PHY_ID_BCM50610M:
val = MAC_PHYCFG2_50610_LED_MODES;
break;
- case TG3_PHY_ID_BCMAC131:
+ case PHY_ID_BCMAC131:
val = MAC_PHYCFG2_AC131_LED_MODES;
break;
- case TG3_PHY_ID_RTL8211C:
+ case PHY_ID_RTL8211C:
val = MAC_PHYCFG2_RTL8211C_LED_MODES;
break;
- case TG3_PHY_ID_RTL8201E:
+ case PHY_ID_RTL8201E:
val = MAC_PHYCFG2_RTL8201E_LED_MODES;
break;
default:
@@ -977,7 +983,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
return;
}
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
+ if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE))
val |= MAC_PHYCFG2_EMODE_MASK_MASK |
MAC_PHYCFG2_FMODE_MASK_MASK |
MAC_PHYCFG2_GMODE_MASK_MASK |
@@ -990,7 +996,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
val = tr32(MAC_PHYCFG1);
val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
+ if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
@@ -1008,7 +1014,7 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
MAC_RGMII_MODE_TX_ENABLE |
MAC_RGMII_MODE_TX_LOWPWR |
MAC_RGMII_MODE_TX_RESET);
- if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
+ if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)) {
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
val |= MAC_RGMII_MODE_RX_INT_B |
MAC_RGMII_MODE_RX_QUALITY |
@@ -1028,6 +1034,17 @@ static void tg3_mdio_start(struct tg3 *tp)
tw32_f(MAC_MI_MODE, tp->mi_mode);
udelay(80);
+ if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_mdio_config_5785(tp);
+}
+
+static int tg3_mdio_init(struct tg3 *tp)
+{
+ int i;
+ u32 reg;
+ struct phy_device *phydev;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
u32 funcnum, is_serdes;
@@ -1047,17 +1064,6 @@ static void tg3_mdio_start(struct tg3 *tp)
} else
tp->phy_addr = TG3_PHY_MII_ADDR;
- if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
- tg3_mdio_config_5785(tp);
-}
-
-static int tg3_mdio_init(struct tg3 *tp)
-{
- int i;
- u32 reg;
- struct phy_device *phydev;
-
tg3_mdio_start(tp);
if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
@@ -1092,8 +1098,7 @@ static int tg3_mdio_init(struct tg3 *tp)
i = mdiobus_register(tp->mdio_bus);
if (i) {
- printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
- tp->dev->name, i);
+ netdev_warn(tp->dev, "mdiobus_reg failed (0x%x)\n", i);
mdiobus_free(tp->mdio_bus);
return i;
}
@@ -1101,35 +1106,35 @@ static int tg3_mdio_init(struct tg3 *tp)
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
if (!phydev || !phydev->drv) {
- printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
+ netdev_warn(tp->dev, "No PHY devices\n");
mdiobus_unregister(tp->mdio_bus);
mdiobus_free(tp->mdio_bus);
return -ENODEV;
}
switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
- case TG3_PHY_ID_BCM57780:
+ case PHY_ID_BCM57780:
phydev->interface = PHY_INTERFACE_MODE_GMII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
break;
- case TG3_PHY_ID_BCM50610:
- case TG3_PHY_ID_BCM50610M:
+ case PHY_ID_BCM50610:
+ case PHY_ID_BCM50610M:
phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
PHY_BRCM_RX_REFCLK_UNUSED |
PHY_BRCM_DIS_TXCRXC_NOENRGY |
PHY_BRCM_AUTO_PWRDWN_ENABLE;
- if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
+ if (tp->tg3_flags3 & TG3_FLG3_RGMII_INBAND_DISABLE)
phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
/* fallthru */
- case TG3_PHY_ID_RTL8211C:
+ case PHY_ID_RTL8211C:
phydev->interface = PHY_INTERFACE_MODE_RGMII;
break;
- case TG3_PHY_ID_RTL8201E:
- case TG3_PHY_ID_BCMAC131:
+ case PHY_ID_RTL8201E:
+ case PHY_ID_BCMAC131:
phydev->interface = PHY_INTERFACE_MODE_MII;
phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
@@ -1245,27 +1250,22 @@ static void tg3_ump_link_report(struct tg3 *tp)
static void tg3_link_report(struct tg3 *tp)
{
if (!netif_carrier_ok(tp->dev)) {
- if (netif_msg_link(tp))
- printk(KERN_INFO PFX "%s: Link is down.\n",
- tp->dev->name);
+ netif_info(tp, link, tp->dev, "Link is down\n");
tg3_ump_link_report(tp);
} else if (netif_msg_link(tp)) {
- printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
- tp->dev->name,
- (tp->link_config.active_speed == SPEED_1000 ?
- 1000 :
- (tp->link_config.active_speed == SPEED_100 ?
- 100 : 10)),
- (tp->link_config.active_duplex == DUPLEX_FULL ?
- "full" : "half"));
-
- printk(KERN_INFO PFX
- "%s: Flow control is %s for TX and %s for RX.\n",
- tp->dev->name,
- (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
- "on" : "off",
- (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
- "on" : "off");
+ netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
+ (tp->link_config.active_speed == SPEED_1000 ?
+ 1000 :
+ (tp->link_config.active_speed == SPEED_100 ?
+ 100 : 10)),
+ (tp->link_config.active_duplex == DUPLEX_FULL ?
+ "full" : "half"));
+
+ netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
+ "on" : "off",
+ (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
+ "on" : "off");
tg3_ump_link_report(tp);
}
}
@@ -1464,7 +1464,7 @@ static int tg3_phy_init(struct tg3 *tp)
phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
phydev->dev_flags, phydev->interface);
if (IS_ERR(phydev)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
+ netdev_err(tp->dev, "Could not attach to PHY\n");
return PTR_ERR(phydev);
}
@@ -1564,7 +1564,9 @@ static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
u32 reg;
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
+ (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
return;
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
@@ -1939,6 +1941,10 @@ static int tg3_phy_reset(struct tg3 *tp)
}
}
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
+ (tp->tg3_flags2 & TG3_FLG2_MII_SERDES))
+ return 0;
+
tg3_phy_apply_otp(tp);
if (tp->tg3_flags3 & TG3_FLG3_PHY_ENABLE_APD)
@@ -1982,7 +1988,7 @@ out:
}
/* Set Extended packet length bit (bit 14) on all chips that */
/* support jumbo frames */
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
/* Cannot do read-modify-write on 5401 */
tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
} else if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
@@ -2019,7 +2025,9 @@ static void tg3_frob_aux_power(struct tg3 *tp)
{
struct tg3 *tp_peer = tp;
- if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
+ /* The GPIOs do something completely different on 57765. */
+ if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
return;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
@@ -2132,7 +2140,7 @@ static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
{
if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
return 1;
- else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
+ else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
if (speed != SPEED_10)
return 1;
} else if (speed == SPEED_10)
@@ -2485,8 +2493,8 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
break;
default:
- printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
- tp->dev->name, state);
+ netdev_err(tp->dev, "Invalid power state (D%d) requested\n",
+ state);
return -EINVAL;
}
@@ -2548,11 +2556,11 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
phy_start_aneg(phydev);
phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
- if (phyid != TG3_PHY_ID_BCMAC131) {
- phyid &= TG3_PHY_OUI_MASK;
- if (phyid == TG3_PHY_OUI_1 ||
- phyid == TG3_PHY_OUI_2 ||
- phyid == TG3_PHY_OUI_3)
+ if (phyid != PHY_ID_BCMAC131) {
+ phyid &= PHY_BCM_OUI_MASK;
+ if (phyid == PHY_BCM_OUI_1 ||
+ phyid == PHY_BCM_OUI_2 ||
+ phyid == PHY_BCM_OUI_3)
do_low_power = true;
}
}
@@ -3062,7 +3070,7 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
if (force_reset)
tg3_phy_reset(tp);
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
tg3_readphy(tp, MII_BMSR, &bmsr);
if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
@@ -3083,7 +3091,8 @@ static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
}
}
- if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
+ if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
+ TG3_PHY_REV_BCM5401_B0 &&
!(bmsr & BMSR_LSTATUS) &&
tp->link_config.active_speed == SPEED_1000) {
err = tg3_phy_reset(tp);
@@ -3238,7 +3247,7 @@ relink:
/* ??? Without this setting Netgear GA302T PHY does not
* ??? send/receive packets...
*/
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
tw32_f(MAC_MI_MODE, tp->mi_mode);
@@ -3953,7 +3962,7 @@ static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
tw32_f(MAC_MODE, tp->mac_mode);
udelay(40);
- if (tp->phy_id == PHY_ID_BCM8002)
+ if (tp->phy_id == TG3_PHY_ID_BCM8002)
tg3_init_bcm8002(tp);
/* Enable link change event even when serdes polling. */
@@ -4326,10 +4335,8 @@ static void tg3_tx_recover(struct tg3 *tp)
BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
tp->write32_tx_mbox == tg3_write_indirect_mbox);
- printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
- "mapped I/O cycles to the network device, attempting to "
- "recover. Please report the problem to the driver maintainer "
- "and include system chipset information.\n", tp->dev->name);
+ netdev_warn(tp->dev, "The system may be re-ordering memory-mapped I/O cycles to the network device, attempting to recover\n"
+ "Please report the problem to the driver maintainer and include system chipset information.\n");
spin_lock(&tp->lock);
tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
@@ -4538,6 +4545,12 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi,
pci_unmap_addr(src_map, mapping));
dest_desc->addr_hi = src_desc->addr_hi;
dest_desc->addr_lo = src_desc->addr_lo;
+
+ /* Ensure that the update to the skb happens after the physical
+ * addresses have been transferred to the new BD location.
+ */
+ smp_wmb();
+
src_map->skb = NULL;
}
@@ -4638,11 +4651,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
if (skb_size < 0)
goto drop_it;
- ri->skb = NULL;
-
pci_unmap_single(tp->pdev, dma_addr, skb_size,
PCI_DMA_FROMDEVICE);
+ /* Ensure that the update to the skb happens
+ * after the usage of the old DMA mapping.
+ */
+ smp_wmb();
+
+ ri->skb = NULL;
+
skb_put(skb, len);
} else {
struct sk_buff *copy_skb;
@@ -4719,7 +4737,7 @@ next_pkt_nopost:
tw32_rx_mbox(tnapi->consmbox, sw_idx);
/* Refill RX ring(s). */
- if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) {
if (work_mask & RXD_OPAQUE_RING_STD) {
tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
@@ -4741,7 +4759,8 @@ next_pkt_nopost:
tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
- napi_schedule(&tp->napi[1].napi);
+ if (tnapi != &tp->napi[1])
+ napi_schedule(&tp->napi[1].napi);
}
return received;
@@ -4773,12 +4792,12 @@ static void tg3_poll_link(struct tg3 *tp)
}
}
-static void tg3_rx_prodring_xfer(struct tg3 *tp,
- struct tg3_rx_prodring_set *dpr,
- struct tg3_rx_prodring_set *spr)
+static int tg3_rx_prodring_xfer(struct tg3 *tp,
+ struct tg3_rx_prodring_set *dpr,
+ struct tg3_rx_prodring_set *spr)
{
u32 si, di, cpycnt, src_prod_idx;
- int i;
+ int i, err = 0;
while (1) {
src_prod_idx = spr->rx_std_prod_idx;
@@ -4801,6 +4820,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
si = spr->rx_std_cons_idx;
di = dpr->rx_std_prod_idx;
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_std_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_std_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
memcpy(&dpr->rx_std_buffers[di],
&spr->rx_std_buffers[si],
cpycnt * sizeof(struct ring_info));
@@ -4841,6 +4877,23 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
si = spr->rx_jmb_cons_idx;
di = dpr->rx_jmb_prod_idx;
+ for (i = di; i < di + cpycnt; i++) {
+ if (dpr->rx_jmb_buffers[i].skb) {
+ cpycnt = i - di;
+ err = -ENOSPC;
+ break;
+ }
+ }
+
+ if (!cpycnt)
+ break;
+
+ /* Ensure that updates to the rx_jmb_buffers ring and the
+ * shadowed hardware producer ring from tg3_recycle_skb() are
+ * ordered correctly WRT the skb check above.
+ */
+ smp_rmb();
+
memcpy(&dpr->rx_jmb_buffers[di],
&spr->rx_jmb_buffers[si],
cpycnt * sizeof(struct ring_info));
@@ -4858,6 +4911,8 @@ static void tg3_rx_prodring_xfer(struct tg3 *tp,
dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
TG3_RX_JUMBO_RING_SIZE;
}
+
+ return err;
}
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
@@ -4879,27 +4934,29 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
work_done += tg3_rx(tnapi, budget - work_done);
if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
- int i;
- u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
- u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+ struct tg3_rx_prodring_set *dpr = &tp->prodring[0];
+ int i, err = 0;
+ u32 std_prod_idx = dpr->rx_std_prod_idx;
+ u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
- for (i = 2; i < tp->irq_cnt; i++)
- tg3_rx_prodring_xfer(tp, tnapi->prodring,
- tp->napi[i].prodring);
+ for (i = 1; i < tp->irq_cnt; i++)
+ err |= tg3_rx_prodring_xfer(tp, dpr,
+ tp->napi[i].prodring);
wmb();
- if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
- u32 mbox = TG3_RX_STD_PROD_IDX_REG;
- tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
- }
+ if (std_prod_idx != dpr->rx_std_prod_idx)
+ tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+ dpr->rx_std_prod_idx);
- if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
- u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
- tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
- }
+ if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
+ tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+ dpr->rx_jmb_prod_idx);
mmiowb();
+
+ if (err)
+ tw32_f(HOSTCC_MODE, tp->coal_now);
}
return work_done;
@@ -5203,8 +5260,7 @@ static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
err = tg3_init_hw(tp, reset_phy);
if (err) {
- printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
- "aborting.\n", tp->dev->name);
+ netdev_err(tp->dev, "Failed to re-initialize device, aborting\n");
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
@@ -5277,10 +5333,10 @@ out:
static void tg3_dump_short_state(struct tg3 *tp)
{
- printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
- tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
- printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
- tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
+ netdev_err(tp->dev, "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
+ tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
+ netdev_err(tp->dev, "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
+ tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
}
static void tg3_tx_timeout(struct net_device *dev)
@@ -5288,8 +5344,7 @@ static void tg3_tx_timeout(struct net_device *dev)
struct tg3 *tp = netdev_priv(dev);
if (netif_msg_tx_err(tp)) {
- printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
- dev->name);
+ netdev_err(dev, "transmit timed out, resetting\n");
tg3_dump_short_state(tp);
}
@@ -5453,8 +5508,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -5657,8 +5711,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
- printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
- "queue awake!\n", dev->name);
+ netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
}
return NETDEV_TX_BUSY;
}
@@ -6005,11 +6058,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
/* Now allocate fresh SKBs for each rx ring. */
for (i = 0; i < tp->rx_pending; i++) {
if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
- printk(KERN_WARNING PFX
- "%s: Using a smaller RX standard ring, "
- "only %d out of %d buffers were allocated "
- "successfully.\n",
- tp->dev->name, i, tp->rx_pending);
+ netdev_warn(tp->dev, "Using a smaller RX standard ring, only %d out of %d buffers were allocated successfully\n",
+ i, tp->rx_pending);
if (i == 0)
goto initfail;
tp->rx_pending = i;
@@ -6022,31 +6072,28 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
memset(tpr->rx_jmb, 0, TG3_RX_JUMBO_RING_BYTES);
- if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
- for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
- struct tg3_rx_buffer_desc *rxd;
+ if (!(tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE))
+ goto done;
- rxd = &tpr->rx_jmb[i].std;
- rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
- rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
- RXD_FLAG_JUMBO;
- rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
- (i << RXD_OPAQUE_INDEX_SHIFT));
- }
+ for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
+ struct tg3_rx_buffer_desc *rxd;
- for (i = 0; i < tp->rx_jumbo_pending; i++) {
- if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
- i) < 0) {
- printk(KERN_WARNING PFX
- "%s: Using a smaller RX jumbo ring, "
- "only %d out of %d buffers were "
- "allocated successfully.\n",
- tp->dev->name, i, tp->rx_jumbo_pending);
- if (i == 0)
- goto initfail;
- tp->rx_jumbo_pending = i;
- break;
- }
+ rxd = &tpr->rx_jmb[i].std;
+ rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
+ rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
+ RXD_FLAG_JUMBO;
+ rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
+ (i << RXD_OPAQUE_INDEX_SHIFT));
+ }
+
+ for (i = 0; i < tp->rx_jumbo_pending; i++) {
+ if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
+ netdev_warn(tp->dev, "Using a smaller RX jumbo ring, only %d out of %d buffers were allocated successfully\n",
+ i, tp->rx_jumbo_pending);
+ if (i == 0)
+ goto initfail;
+ tp->rx_jumbo_pending = i;
+ break;
}
}
@@ -6159,8 +6206,7 @@ static void tg3_free_rings(struct tg3 *tp)
dev_kfree_skb_any(skb);
}
- if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
- tg3_rx_prodring_free(tp, &tp->prodring[j]);
+ tg3_rx_prodring_free(tp, &tp->prodring[j]);
}
}
@@ -6196,9 +6242,10 @@ static int tg3_init_rings(struct tg3 *tp)
if (tnapi->rx_rcb)
memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
- if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
- tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
+ if (tg3_rx_prodring_alloc(tp, &tp->prodring[i])) {
+ tg3_free_rings(tp);
return -ENOMEM;
+ }
}
return 0;
@@ -6245,7 +6292,7 @@ static void tg3_free_consistent(struct tg3 *tp)
tp->hw_stats = NULL;
}
- for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
+ for (i = 0; i < tp->irq_cnt; i++)
tg3_rx_prodring_fini(tp, &tp->prodring[i]);
}
@@ -6257,7 +6304,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
{
int i;
- for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
+ for (i = 0; i < tp->irq_cnt; i++) {
if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
goto err_out;
}
@@ -6322,10 +6369,7 @@ static int tg3_alloc_consistent(struct tg3 *tp)
break;
}
- if (tp->irq_cnt == 1)
- tnapi->prodring = &tp->prodring[0];
- else if (i)
- tnapi->prodring = &tp->prodring[i - 1];
+ tnapi->prodring = &tp->prodring[i];
/*
* If multivector RSS is enabled, vector 0 does not handle
@@ -6389,8 +6433,7 @@ static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int
}
if (i == MAX_WAIT_CNT && !silent) {
- printk(KERN_ERR PFX "tg3_stop_block timed out, "
- "ofs=%lx enable_bit=%x\n",
+ pr_err("tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
ofs, enable_bit);
return -ENODEV;
}
@@ -6437,9 +6480,8 @@ static int tg3_abort_hw(struct tg3 *tp, int silent)
break;
}
if (i >= MAX_WAIT_CNT) {
- printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
- "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
- tp->dev->name, tr32(MAC_TX_MODE));
+ netdev_err(tp->dev, "%s timed out, TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
+ __func__, tr32(MAC_TX_MODE));
err |= -ENODEV;
}
@@ -6660,8 +6702,14 @@ static int tg3_poll_fw(struct tg3 *tp)
!(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
- printk(KERN_INFO PFX "%s: No firmware running.\n",
- tp->dev->name);
+ netdev_info(tp->dev, "No firmware running\n");
+ }
+
+ if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
+ /* The 57765 A0 needs a little more
+ * time to do some important work.
+ */
+ mdelay(10);
}
return 0;
@@ -7082,10 +7130,8 @@ static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
}
if (i >= 10000) {
- printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
- "and %s CPU\n",
- tp->dev->name,
- (offset == RX_CPU_BASE ? "RX" : "TX"));
+ netdev_err(tp->dev, "%s timed out, %s CPU\n",
+ __func__, offset == RX_CPU_BASE ? "RX" : "TX");
return -ENODEV;
}
@@ -7110,9 +7156,8 @@ static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_b
if (cpu_base == TX_CPU_BASE &&
(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
- "TX cpu firmware on %s which is 5705.\n",
- tp->dev->name);
+ netdev_err(tp->dev, "%s: Trying to load TX cpu firmware which is 5705\n",
+ __func__);
return -EINVAL;
}
@@ -7191,10 +7236,8 @@ static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
- "to set RX CPU PC, is %08x should be %08x\n",
- tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
- info.fw_base);
+ netdev_err(tp->dev, "tg3_load_firmware fails to set RX CPU PC, is %08x should be %08x\n",
+ tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
return -ENODEV;
}
tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
@@ -7257,10 +7300,8 @@ static int tg3_load_tso_firmware(struct tg3 *tp)
udelay(1000);
}
if (i >= 5) {
- printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
- "to set CPU PC, is %08x should be %08x\n",
- tp->dev->name, tr32(cpu_base + CPU_PC),
- info.fw_base);
+ netdev_err(tp->dev, "%s fails to set CPU PC, is %08x should be %08x\n",
+ __func__, tr32(cpu_base + CPU_PC), info.fw_base);
return -ENODEV;
}
tw32(cpu_base + CPU_STATE, 0xffffffff);
@@ -7439,10 +7480,13 @@ static void tg3_rings_reset(struct tg3 *tp)
for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
tp->napi[i].tx_prod = 0;
tp->napi[i].tx_cons = 0;
- tw32_mailbox(tp->napi[i].prodmbox, 0);
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ tw32_mailbox(tp->napi[i].prodmbox, 0);
tw32_rx_mbox(tp->napi[i].consmbox, 0);
tw32_mailbox_f(tp->napi[i].int_mbox, 1);
}
+ if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS))
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
} else {
tp->napi[0].tx_prod = 0;
tp->napi[0].tx_cons = 0;
@@ -7528,8 +7572,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tg3_abort_hw(tp, 1);
}
- if (reset_phy &&
- !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
+ if (reset_phy)
tg3_phy_reset(tp);
err = tg3_chip_reset(tp);
@@ -7574,6 +7617,20 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
}
+ if (tp->tg3_flags3 & TG3_FLG3_L1PLLPD_EN) {
+ u32 grc_mode = tr32(GRC_MODE);
+
+ /* Access the lower 1K of PL PCIE block registers. */
+ val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
+ tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
+
+ val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
+ tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
+ val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
+
+ tw32(GRC_MODE, grc_mode);
+ }
+
/* This works around an issue with Athlon chipsets on
* B3 tigon3 silicon. This bit has no effect on any
* other revision. But do not set this on PCI Express
@@ -7705,8 +7762,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
udelay(10);
}
if (i >= 2000) {
- printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
- tp->dev->name);
+ netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
return -ENODEV;
}
@@ -7772,7 +7828,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
(RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
BDINFO_FLAGS_USE_EXT_RECV);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
NIC_SRAM_RX_JUMBO_BUFFER_DESC);
} else {
@@ -7834,6 +7890,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
RDMAC_MODE_LNGREAD_ENAB);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
+
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
@@ -8143,7 +8202,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
/* Prevent chip from dropping frames when flow control
* is enabled.
*/
- tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ val = 1;
+ else
+ val = 2;
+ tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
@@ -8562,10 +8625,8 @@ static int tg3_test_msi(struct tg3 *tp)
return err;
/* MSI test failed, go back to INTx mode */
- printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
- "switching to INTx mode. Please report this failure to "
- "the PCI maintainer and include system chipset information.\n",
- tp->dev->name);
+ netdev_warn(tp->dev, "No interrupt was generated using MSI, switching to INTx mode\n"
+ "Please report this failure to the PCI maintainer and include system chipset information\n");
free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
@@ -8598,8 +8659,8 @@ static int tg3_request_firmware(struct tg3 *tp)
const __be32 *fw_data;
if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
- printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
- tp->dev->name, tp->fw_needed);
+ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+ tp->fw_needed);
return -ENOENT;
}
@@ -8612,8 +8673,8 @@ static int tg3_request_firmware(struct tg3 *tp)
tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
if (tp->fw_len < (tp->fw->size - 12)) {
- printk(KERN_ERR "%s: bogus length %d in \"%s\"\n",
- tp->dev->name, tp->fw_len, tp->fw_needed);
+ netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
+ tp->fw_len, tp->fw_needed);
release_firmware(tp->fw);
tp->fw = NULL;
return -EINVAL;
@@ -8651,9 +8712,8 @@ static bool tg3_enable_msix(struct tg3 *tp)
return false;
if (pci_enable_msix(tp->pdev, msix_ent, rc))
return false;
- printk(KERN_NOTICE
- "%s: Requested %d MSI-X vectors, received %d\n",
- tp->dev->name, tp->irq_cnt, rc);
+ netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
+ tp->irq_cnt, rc);
tp->irq_cnt = rc;
}
@@ -8678,8 +8738,7 @@ static void tg3_ints_init(struct tg3 *tp)
/* All MSI supporting chips should support tagged
* status. Assert that this is the case.
*/
- printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
- "Not using MSI.\n", tp->dev->name);
+ netdev_warn(tp->dev, "MSI without TAGGED? Not using MSI\n");
goto defcfg;
}
@@ -8724,12 +8783,10 @@ static int tg3_open(struct net_device *dev)
if (err)
return err;
} else if (err) {
- printk(KERN_WARNING "%s: TSO capability disabled.\n",
- tp->dev->name);
+ netdev_warn(tp->dev, "TSO capability disabled\n");
tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
} else if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
- printk(KERN_NOTICE "%s: TSO capability restored.\n",
- tp->dev->name);
+ netdev_notice(tp->dev, "TSO capability restored\n");
tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
}
}
@@ -9395,21 +9452,18 @@ static void __tg3_set_rx_mode(struct net_device *dev)
} else if (dev->flags & IFF_ALLMULTI) {
/* Accept all multicast. */
tg3_set_multi (tp, 1);
- } else if (dev->mc_count < 1) {
+ } else if (netdev_mc_empty(dev)) {
/* Reject all multicast. */
tg3_set_multi (tp, 0);
} else {
/* Accept one or more multicast(s). */
struct dev_mc_list *mclist;
- unsigned int i;
u32 mc_filter[4] = { 0, };
u32 regidx;
u32 bit;
u32 crc;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, dev) {
crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
bit = ~crc & 0x7f;
regidx = (bit & 0x60) >> 5;
@@ -10001,56 +10055,66 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
int err = 0;
if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
- if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
- return -EAGAIN;
+ u32 newadv;
+ struct phy_device *phydev;
- if (epause->autoneg) {
- u32 newadv;
- struct phy_device *phydev;
+ phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
- phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
+ if (!(phydev->supported & SUPPORTED_Pause) ||
+ (!(phydev->supported & SUPPORTED_Asym_Pause) &&
+ ((epause->rx_pause && !epause->tx_pause) ||
+ (!epause->rx_pause && epause->tx_pause))))
+ return -EINVAL;
- if (epause->rx_pause) {
- if (epause->tx_pause)
- newadv = ADVERTISED_Pause;
- else
- newadv = ADVERTISED_Pause |
- ADVERTISED_Asym_Pause;
- } else if (epause->tx_pause) {
- newadv = ADVERTISED_Asym_Pause;
+ tp->link_config.flowctrl = 0;
+ if (epause->rx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_RX;
+
+ if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Pause;
} else
- newadv = 0;
-
- if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
- u32 oldadv = phydev->advertising &
- (ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- if (oldadv != newadv) {
- phydev->advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- phydev->advertising |= newadv;
- err = phy_start_aneg(phydev);
+ newadv = ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause;
+ } else if (epause->tx_pause) {
+ tp->link_config.flowctrl |= FLOW_CTRL_TX;
+ newadv = ADVERTISED_Asym_Pause;
+ } else
+ newadv = 0;
+
+ if (epause->autoneg)
+ tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
+
+ if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
+ u32 oldadv = phydev->advertising &
+ (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
+ if (oldadv != newadv) {
+ phydev->advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ phydev->advertising |= newadv;
+ if (phydev->autoneg) {
+ /*
+ * Always renegotiate the link to
+ * inform our link partner of our
+ * flow control settings, even if the
+ * flow control is forced. Let
+ * tg3_adjust_link() do the final
+ * flow control setup.
+ */
+ return phy_start_aneg(phydev);
}
- } else {
- tp->link_config.advertising &=
- ~(ADVERTISED_Pause |
- ADVERTISED_Asym_Pause);
- tp->link_config.advertising |= newadv;
}
- } else {
- if (epause->rx_pause)
- tp->link_config.flowctrl |= FLOW_CTRL_RX;
- else
- tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
-
- if (epause->tx_pause)
- tp->link_config.flowctrl |= FLOW_CTRL_TX;
- else
- tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
- if (netif_running(dev))
+ if (!epause->autoneg)
tg3_setup_flow_control(tp, 0, 0);
+ } else {
+ tp->link_config.orig_advertising &=
+ ~(ADVERTISED_Pause |
+ ADVERTISED_Asym_Pause);
+ tp->link_config.orig_advertising |= newadv;
}
} else {
int irq_sync = 0;
@@ -10584,8 +10648,7 @@ static int tg3_test_registers(struct tg3 *tp)
out:
if (netif_msg_hw(tp))
- printk(KERN_ERR PFX "Register test failed at offset %x\n",
- offset);
+ pr_err("Register test failed at offset %x\n", offset);
tw32(offset, save_val);
return -EIO;
}
@@ -10640,12 +10703,27 @@ static int tg3_test_memory(struct tg3 *tp)
{ 0x00008000, 0x01000},
{ 0x00010000, 0x01000},
{ 0xffffffff, 0x00000}
+ }, mem_tbl_5717[] = {
+ { 0x00000200, 0x00008},
+ { 0x00010000, 0x0a000},
+ { 0x00020000, 0x13c00},
+ { 0xffffffff, 0x00000}
+ }, mem_tbl_57765[] = {
+ { 0x00000200, 0x00008},
+ { 0x00004000, 0x00800},
+ { 0x00006000, 0x09800},
+ { 0x00010000, 0x0a000},
+ { 0xffffffff, 0x00000}
};
struct mem_entry *mem_tbl;
int err = 0;
int i;
- if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+ mem_tbl = mem_tbl_5717;
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ mem_tbl = mem_tbl_57765;
+ else if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
mem_tbl = mem_tbl_5755;
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
mem_tbl = mem_tbl_5906;
@@ -10678,12 +10756,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
struct tg3_napi *tnapi, *rnapi;
struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+ tnapi = &tp->napi[0];
+ rnapi = &tp->napi[0];
if (tp->irq_cnt > 1) {
- tnapi = &tp->napi[1];
rnapi = &tp->napi[1];
- } else {
- tnapi = &tp->napi[0];
- rnapi = &tp->napi[0];
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_TSS)
+ tnapi = &tp->napi[1];
}
coal_now = tnapi->coal_now | rnapi->coal_now;
@@ -10720,8 +10798,12 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
- tg3_writephy(tp, MII_TG3_FET_PTEST, 0x1800);
+ tg3_writephy(tp, MII_TG3_FET_PTEST,
+ MII_TG3_FET_PTEST_FRC_TX_LINK |
+ MII_TG3_FET_PTEST_FRC_TX_LOCK);
+ /* The write needs to be flushed for the AC131 */
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
+ tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
mac_mode |= MAC_MODE_PORT_MODE_MII;
} else
mac_mode |= MAC_MODE_PORT_MODE_GMII;
@@ -10733,9 +10815,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
tw32_f(MAC_RX_MODE, tp->rx_mode);
}
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
+ u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
+ if (masked_phy_id == TG3_PHY_ID_BCM5401)
mac_mode &= ~MAC_MODE_LINK_POLARITY;
- else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
+ else if (masked_phy_id == TG3_PHY_ID_BCM5411)
mac_mode |= MAC_MODE_LINK_POLARITY;
tg3_writephy(tp, MII_TG3_EXT_CTRL,
MII_TG3_EXT_CTRL_LNK3_LED_MODE);
@@ -11692,8 +11775,8 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_NVRAM;
if (tg3_nvram_lock(tp)) {
- printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
- "tg3_nvram_init failed.\n", tp->dev->name);
+ netdev_warn(tp->dev, "Cannot get nvram lock, %s failed\n",
+ __func__);
return;
}
tg3_enable_nvram_access(tp);
@@ -11991,45 +12074,71 @@ struct subsys_tbl_ent {
u32 phy_id;
};
-static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
+static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
/* Broadcom boards. */
- { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
- { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
- { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
- { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
- { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
- { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
- { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
- { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
- { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
- { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
- { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
+ { TG3PCI_SUBVENDOR_ID_BROADCOM,
+ TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
/* 3com boards. */
- { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
- { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
- { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
- { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
- { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_3COM,
+ TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
/* DELL boards. */
- { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
- { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
- { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
- { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
+ { TG3PCI_SUBVENDOR_ID_DELL,
+ TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
/* Compaq boards. */
- { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
- { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
- { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
- { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
- { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
+ { TG3PCI_SUBVENDOR_ID_COMPAQ,
+ TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
/* IBM boards. */
- { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
+ { TG3PCI_SUBVENDOR_ID_IBM,
+ TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
-static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
+static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
{
int i;
@@ -12070,7 +12179,7 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
val = tr32(MEMARB_MODE);
tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
- tp->phy_id = PHY_ID_INVALID;
+ tp->phy_id = TG3_PHY_ID_INVALID;
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
/* Assume an onboard device and WOL capable by default. */
@@ -12244,8 +12353,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
}
- if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
- tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
+ if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
+ tp->tg3_flags3 |= TG3_FLG3_RGMII_INBAND_DISABLE;
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
@@ -12321,7 +12430,7 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
err = 0;
if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
- hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
+ hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
} else {
/* Now read the physical PHY_ID from the chip and verify
* that it is sane. If it doesn't look good, we fall back
@@ -12335,17 +12444,17 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
- hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
+ hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
}
- if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
+ if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
tp->phy_id = hw_phy_id;
- if (hw_phy_id_masked == PHY_ID_BCM8002)
+ if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
else
tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
} else {
- if (tp->phy_id != PHY_ID_INVALID) {
+ if (tp->phy_id != TG3_PHY_ID_INVALID) {
/* Do nothing, phy ID already set up in
* tg3_get_eeprom_hw_cfg().
*/
@@ -12355,13 +12464,13 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
/* No eeprom signature? Try the hardcoded
* subsys device table.
*/
- p = lookup_by_subsys(tp);
+ p = tg3_lookup_by_subsys(tp);
if (!p)
return -ENODEV;
tp->phy_id = p->phy_id;
if (!tp->phy_id ||
- tp->phy_id == PHY_ID_BCM8002)
+ tp->phy_id == TG3_PHY_ID_BCM8002)
tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
}
}
@@ -12413,13 +12522,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
}
skip_phy_reset:
- if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
+ if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
err = tg3_init_5401phy_dsp(tp);
if (err)
return err;
- }
- if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
err = tg3_init_5401phy_dsp(tp);
}
@@ -12440,7 +12547,8 @@ skip_phy_reset:
static void __devinit tg3_read_partno(struct tg3 *tp)
{
unsigned char vpd_data[TG3_NVM_VPD_LEN]; /* in little-endian format */
- unsigned int i;
+ unsigned int block_end, rosize, len;
+ int i = 0;
u32 magic;
if ((tp->tg3_flags3 & TG3_FLG3_NO_NVRAM) ||
@@ -12462,7 +12570,7 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
}
} else {
ssize_t cnt;
- unsigned int pos = 0, i = 0;
+ unsigned int pos = 0;
for (; pos < TG3_NVM_VPD_LEN && i < 3; i++, pos += cnt) {
cnt = pci_read_vpd(tp->pdev, pos,
@@ -12477,51 +12585,33 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
goto out_not_found;
}
- /* Now parse and find the part number. */
- for (i = 0; i < TG3_NVM_VPD_LEN - 2; ) {
- unsigned char val = vpd_data[i];
- unsigned int block_end;
-
- if (val == 0x82 || val == 0x91) {
- i = (i + 3 +
- (vpd_data[i + 1] +
- (vpd_data[i + 2] << 8)));
- continue;
- }
-
- if (val != 0x90)
- goto out_not_found;
+ i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
+ PCI_VPD_LRDT_RO_DATA);
+ if (i < 0)
+ goto out_not_found;
- block_end = (i + 3 +
- (vpd_data[i + 1] +
- (vpd_data[i + 2] << 8)));
- i += 3;
+ rosize = pci_vpd_lrdt_size(&vpd_data[i]);
+ block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
+ i += PCI_VPD_LRDT_TAG_SIZE;
- if (block_end > TG3_NVM_VPD_LEN)
- goto out_not_found;
+ if (block_end > TG3_NVM_VPD_LEN)
+ goto out_not_found;
- while (i < (block_end - 2)) {
- if (vpd_data[i + 0] == 'P' &&
- vpd_data[i + 1] == 'N') {
- int partno_len = vpd_data[i + 2];
+ i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
+ PCI_VPD_RO_KEYWORD_PARTNO);
+ if (i < 0)
+ goto out_not_found;
- i += 3;
- if (partno_len > TG3_BPN_SIZE ||
- (partno_len + i) > TG3_NVM_VPD_LEN)
- goto out_not_found;
+ len = pci_vpd_info_field_size(&vpd_data[i]);
- memcpy(tp->board_part_number,
- &vpd_data[i], partno_len);
+ i += PCI_VPD_INFO_FLD_HDR_SIZE;
+ if (len > TG3_BPN_SIZE ||
+ (len + i) > TG3_NVM_VPD_LEN)
+ goto out_not_found;
- /* Success. */
- return;
- }
- i += 3 + vpd_data[i + 2];
- }
+ memcpy(tp->board_part_number, &vpd_data[i], len);
- /* Part number not found. */
- goto out_not_found;
- }
+ return;
out_not_found:
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
@@ -12538,8 +12628,24 @@ out_not_found:
else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 &&
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
strcpy(tp->board_part_number, "BCM57788");
- else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
+ strcpy(tp->board_part_number, "BCM57761");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
strcpy(tp->board_part_number, "BCM57765");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
+ strcpy(tp->board_part_number, "BCM57781");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
+ strcpy(tp->board_part_number, "BCM57785");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
+ strcpy(tp->board_part_number, "BCM57791");
+ else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
+ strcpy(tp->board_part_number, "BCM57795");
else
strcpy(tp->board_part_number, "none");
}
@@ -12642,6 +12748,12 @@ static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
case TG3_EEPROM_SB_REVISION_3:
offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
break;
+ case TG3_EEPROM_SB_REVISION_4:
+ offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
+ break;
+ case TG3_EEPROM_SB_REVISION_5:
+ offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
+ break;
default:
return;
}
@@ -13102,6 +13214,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
tp->tg3_flags3 |= TG3_FLG3_CLKREQ_BUG;
+ } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
+ tp->tg3_flags3 |= TG3_FLG3_L1PLLPD_EN;
}
} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
@@ -13109,8 +13223,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
if (!tp->pcix_cap) {
- printk(KERN_ERR PFX "Cannot find PCI-X "
- "capability, aborting.\n");
+ pr_err("Cannot find PCI-X capability, aborting\n");
return -EIO;
}
@@ -13290,7 +13403,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
@@ -13306,8 +13420,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
/* Force the chip into D0. */
err = tg3_set_power_state(tp, PCI_D0);
if (err) {
- printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
- pci_name(tp->pdev));
+ pr_err("(%s) transition to D0 failed\n", pci_name(tp->pdev));
return err;
}
@@ -13474,12 +13587,14 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
+ tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
(tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET))
tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
err = tg3_phy_probe(tp);
if (err) {
- printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
+ pr_err("(%s) phy probe failed, err %d\n",
pci_name(tp->pdev), err);
/* ... but do not return immediately ... */
tg3_mdio_fini(tp);
@@ -13989,7 +14104,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Send the buffer to the chip. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
if (ret) {
- printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
+ pr_err("tg3_test_dma() Write the buffer failed %d\n",
+ ret);
break;
}
@@ -13999,7 +14115,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
u32 val;
tg3_read_mem(tp, 0x2100 + (i*4), &val);
if (le32_to_cpu(val) != p[i]) {
- printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
+ pr_err(" tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n",
+ val, i);
/* ret = -ENODEV here? */
}
p[i] = 0;
@@ -14008,7 +14125,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
/* Now read it back. */
ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
if (ret) {
- printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
+ pr_err("tg3_test_dma() Read the buffer failed %d\n",
+ ret);
break;
}
@@ -14025,7 +14143,8 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
break;
} else {
- printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
+ pr_err("tg3_test_dma() buffer corrupted on read back! (%d != %d)\n",
+ p[i], i);
ret = -ENODEV;
goto out;
}
@@ -14086,9 +14205,22 @@ static void __devinit tg3_init_link_config(struct tg3 *tp)
static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
{
- if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
- GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765) {
+ if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
+ tp->bufmgr_config.mbuf_read_dma_low_water =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water =
+ DEFAULT_MB_MACRX_LOW_WATER_57765;
+ tp->bufmgr_config.mbuf_high_water =
+ DEFAULT_MB_HIGH_WATER_57765;
+
+ tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
+ DEFAULT_MB_RDMA_LOW_WATER_5705;
+ tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
+ DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
+ tp->bufmgr_config.mbuf_high_water_jumbo =
+ DEFAULT_MB_HIGH_WATER_JUMBO_57765;
+ } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
tp->bufmgr_config.mbuf_read_dma_low_water =
DEFAULT_MB_RDMA_LOW_WATER_5705;
tp->bufmgr_config.mbuf_mac_rx_low_water =
@@ -14130,26 +14262,28 @@ static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
static char * __devinit tg3_phy_string(struct tg3 *tp)
{
- switch (tp->phy_id & PHY_ID_MASK) {
- case PHY_ID_BCM5400: return "5400";
- case PHY_ID_BCM5401: return "5401";
- case PHY_ID_BCM5411: return "5411";
- case PHY_ID_BCM5701: return "5701";
- case PHY_ID_BCM5703: return "5703";
- case PHY_ID_BCM5704: return "5704";
- case PHY_ID_BCM5705: return "5705";
- case PHY_ID_BCM5750: return "5750";
- case PHY_ID_BCM5752: return "5752";
- case PHY_ID_BCM5714: return "5714";
- case PHY_ID_BCM5780: return "5780";
- case PHY_ID_BCM5755: return "5755";
- case PHY_ID_BCM5787: return "5787";
- case PHY_ID_BCM5784: return "5784";
- case PHY_ID_BCM5756: return "5722/5756";
- case PHY_ID_BCM5906: return "5906";
- case PHY_ID_BCM5761: return "5761";
- case PHY_ID_BCM5717: return "5717";
- case PHY_ID_BCM8002: return "8002/serdes";
+ switch (tp->phy_id & TG3_PHY_ID_MASK) {
+ case TG3_PHY_ID_BCM5400: return "5400";
+ case TG3_PHY_ID_BCM5401: return "5401";
+ case TG3_PHY_ID_BCM5411: return "5411";
+ case TG3_PHY_ID_BCM5701: return "5701";
+ case TG3_PHY_ID_BCM5703: return "5703";
+ case TG3_PHY_ID_BCM5704: return "5704";
+ case TG3_PHY_ID_BCM5705: return "5705";
+ case TG3_PHY_ID_BCM5750: return "5750";
+ case TG3_PHY_ID_BCM5752: return "5752";
+ case TG3_PHY_ID_BCM5714: return "5714";
+ case TG3_PHY_ID_BCM5780: return "5780";
+ case TG3_PHY_ID_BCM5755: return "5755";
+ case TG3_PHY_ID_BCM5787: return "5787";
+ case TG3_PHY_ID_BCM5784: return "5784";
+ case TG3_PHY_ID_BCM5756: return "5722/5756";
+ case TG3_PHY_ID_BCM5906: return "5906";
+ case TG3_PHY_ID_BCM5761: return "5761";
+ case TG3_PHY_ID_BCM5718C: return "5718C";
+ case TG3_PHY_ID_BCM5718S: return "5718S";
+ case TG3_PHY_ID_BCM57765: return "57765";
+ case TG3_PHY_ID_BCM8002: return "8002/serdes";
case 0: return "serdes";
default: return "unknown";
}
@@ -14291,7 +14425,6 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
static int __devinit tg3_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
- static int tg3_version_printed = 0;
struct net_device *dev;
struct tg3 *tp;
int i, err, pm_cap;
@@ -14299,20 +14432,17 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
char str[40];
u64 dma_mask, persist_dma_mask;
- if (tg3_version_printed++ == 0)
- printk(KERN_INFO "%s", version);
+ printk_once(KERN_INFO "%s\n", version);
err = pci_enable_device(pdev);
if (err) {
- printk(KERN_ERR PFX "Cannot enable PCI device, "
- "aborting.\n");
+ pr_err("Cannot enable PCI device, aborting\n");
return err;
}
err = pci_request_regions(pdev, DRV_MODULE_NAME);
if (err) {
- printk(KERN_ERR PFX "Cannot obtain PCI resources, "
- "aborting.\n");
+ pr_err("Cannot obtain PCI resources, aborting\n");
goto err_out_disable_pdev;
}
@@ -14321,15 +14451,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
/* Find power-management capability. */
pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
if (pm_cap == 0) {
- printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
- "aborting.\n");
+ pr_err("Cannot find PowerManagement capability, aborting\n");
err = -EIO;
goto err_out_free_res;
}
dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
- printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+ pr_err("Etherdev alloc failed, aborting\n");
err = -ENOMEM;
goto err_out_free_res;
}
@@ -14379,8 +14508,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
tp->regs = pci_ioremap_bar(pdev, BAR_0);
if (!tp->regs) {
- printk(KERN_ERR PFX "Cannot map device registers, "
- "aborting.\n");
+ netdev_err(dev, "Cannot map device registers, aborting\n");
err = -ENOMEM;
goto err_out_free_dev;
}
@@ -14396,8 +14524,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_invariants(tp);
if (err) {
- printk(KERN_ERR PFX "Problem fetching invariants of chip, "
- "aborting.\n");
+ netdev_err(dev, "Problem fetching invariants of chip, aborting\n");
goto err_out_iounmap;
}
@@ -14432,8 +14559,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = pci_set_consistent_dma_mask(pdev,
persist_dma_mask);
if (err < 0) {
- printk(KERN_ERR PFX "Unable to obtain 64 bit "
- "DMA for consistent allocations\n");
+ netdev_err(dev, "Unable to obtain 64 bit DMA for consistent allocations\n");
goto err_out_iounmap;
}
}
@@ -14441,8 +14567,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
if (err || dma_mask == DMA_BIT_MASK(32)) {
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- printk(KERN_ERR PFX "No usable DMA configuration, "
- "aborting.\n");
+ netdev_err(dev, "No usable DMA configuration, aborting\n");
goto err_out_iounmap;
}
}
@@ -14491,16 +14616,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_get_device_address(tp);
if (err) {
- printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
- "aborting.\n");
+ netdev_err(dev, "Could not obtain valid ethernet address, aborting\n");
goto err_out_iounmap;
}
if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
if (!tp->aperegs) {
- printk(KERN_ERR PFX "Cannot map APE registers, "
- "aborting.\n");
+ netdev_err(dev, "Cannot map APE registers, aborting\n");
err = -ENOMEM;
goto err_out_iounmap;
}
@@ -14524,7 +14647,7 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = tg3_test_dma(tp);
if (err) {
- printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
+ netdev_err(dev, "DMA engine test failed, aborting\n");
goto err_out_apeunmap;
}
@@ -14585,45 +14708,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
err = register_netdev(dev);
if (err) {
- printk(KERN_ERR PFX "Cannot register net device, "
- "aborting.\n");
+ netdev_err(dev, "Cannot register net device, aborting\n");
goto err_out_apeunmap;
}
- printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
- dev->name,
- tp->board_part_number,
- tp->pci_chip_rev_id,
- tg3_bus_string(tp, str),
- dev->dev_addr);
+ netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
+ tp->board_part_number,
+ tp->pci_chip_rev_id,
+ tg3_bus_string(tp, str),
+ dev->dev_addr);
if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
struct phy_device *phydev;
phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
- printk(KERN_INFO
- "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
- tp->dev->name, phydev->drv->name,
- dev_name(&phydev->dev));
+ netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
+ phydev->drv->name, dev_name(&phydev->dev));
} else
- printk(KERN_INFO
- "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
- tp->dev->name, tg3_phy_string(tp),
- ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
- ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
- "10/100/1000Base-T")),
- (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
-
- printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
- dev->name,
- (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
- (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
- (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
- (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
- (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
- printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
- dev->name, tp->dma_rwctrl,
- (pdev->dma_mask == DMA_BIT_MASK(32)) ? 32 :
- (((u64) pdev->dma_mask == DMA_BIT_MASK(40)) ? 40 : 64));
+ netdev_info(dev, "attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
+ tg3_phy_string(tp),
+ ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
+ ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
+ "10/100/1000Base-T")),
+ (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
+
+ netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
+ (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
+ (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
+ (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
+ (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
+ (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
+ netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
+ tp->dma_rwctrl,
+ pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
+ ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
return 0;
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 8a167912902..574a1cc4d35 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -56,7 +56,39 @@
#define TG3PCI_DEVICE_TIGON3_57765 0x16b4
#define TG3PCI_DEVICE_TIGON3_57791 0x16b2
#define TG3PCI_DEVICE_TIGON3_57795 0x16b6
-/* 0x04 --> 0x64 unused */
+/* 0x04 --> 0x2c unused */
+#define TG3PCI_SUBVENDOR_ID_BROADCOM PCI_VENDOR_ID_BROADCOM
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6 0x1644
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5 0x0001
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6 0x0002
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9 0x0003
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1 0x0005
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8 0x0006
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7 0x0007
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10 0x0008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12 0x8008
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1 0x0009
+#define TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2 0x8009
+#define TG3PCI_SUBVENDOR_ID_3COM PCI_VENDOR_ID_3COM
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996T 0x1000
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996BT 0x1006
+#define TG3PCI_SUBDEVICE_ID_3COM_3C996SX 0x1004
+#define TG3PCI_SUBDEVICE_ID_3COM_3C1000T 0x1007
+#define TG3PCI_SUBDEVICE_ID_3COM_3C940BR01 0x1008
+#define TG3PCI_SUBVENDOR_ID_DELL PCI_VENDOR_ID_DELL
+#define TG3PCI_SUBDEVICE_ID_DELL_VIPER 0x00d1
+#define TG3PCI_SUBDEVICE_ID_DELL_JAGUAR 0x0106
+#define TG3PCI_SUBDEVICE_ID_DELL_MERLOT 0x0109
+#define TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT 0x010a
+#define TG3PCI_SUBVENDOR_ID_COMPAQ PCI_VENDOR_ID_COMPAQ
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE 0x007c
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2 0x009a
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING 0x007d
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780 0x0085
+#define TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2 0x0099
+#define TG3PCI_SUBVENDOR_ID_IBM PCI_VENDOR_ID_IBM
+#define TG3PCI_SUBDEVICE_ID_IBM_5703SAX2 0x0281
+/* 0x30 --> 0x64 unused */
#define TG3PCI_MSI_DATA 0x00000064
/* 0x66 --> 0x68 unused */
#define TG3PCI_MISC_HOST_CTRL 0x00000068
@@ -110,6 +142,7 @@
#define CHIPREV_ID_57780_A0 0x57780000
#define CHIPREV_ID_57780_A1 0x57780001
#define CHIPREV_ID_5717_A0 0x05717000
+#define CHIPREV_ID_57765_A0 0x57785000
#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
#define ASIC_REV_5700 0x07
#define ASIC_REV_5701 0x00
@@ -1206,14 +1239,18 @@
#define DEFAULT_MB_MACRX_LOW_WATER 0x00000020
#define DEFAULT_MB_MACRX_LOW_WATER_5705 0x00000010
#define DEFAULT_MB_MACRX_LOW_WATER_5906 0x00000004
+#define DEFAULT_MB_MACRX_LOW_WATER_57765 0x0000002a
#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO 0x00000098
#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780 0x0000004b
+#define DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765 0x0000007e
#define BUFMGR_MB_HIGH_WATER 0x00004418
#define DEFAULT_MB_HIGH_WATER 0x00000060
#define DEFAULT_MB_HIGH_WATER_5705 0x00000060
#define DEFAULT_MB_HIGH_WATER_5906 0x00000010
+#define DEFAULT_MB_HIGH_WATER_57765 0x000000a0
#define DEFAULT_MB_HIGH_WATER_JUMBO 0x0000017c
#define DEFAULT_MB_HIGH_WATER_JUMBO_5780 0x00000096
+#define DEFAULT_MB_HIGH_WATER_JUMBO_57765 0x000000ea
#define BUFMGR_RX_MB_ALLOC_REQ 0x0000441c
#define BUFMGR_MB_ALLOC_BIT 0x10000000
#define BUFMGR_RX_MB_ALLOC_RESP 0x00004420
@@ -1253,6 +1290,7 @@
#define RDMAC_MODE_MBUF_SBD_CRPT_ENAB 0x00002000
#define RDMAC_MODE_FIFO_SIZE_128 0x00020000
#define RDMAC_MODE_FIFO_LONG_BURST 0x00030000
+#define RDMAC_MODE_MULT_DMA_RD_DIS 0x01000000
#define RDMAC_MODE_IPV4_LSO_EN 0x08000000
#define RDMAC_MODE_IPV6_LSO_EN 0x10000000
#define RDMAC_STATUS 0x00004804
@@ -1543,6 +1581,8 @@
#define GRC_MODE_HOST_SENDBDS 0x00020000
#define GRC_MODE_NO_TX_PHDR_CSUM 0x00100000
#define GRC_MODE_NVRAM_WR_ENABLE 0x00200000
+#define GRC_MODE_PCIE_TL_SEL 0x00000000
+#define GRC_MODE_PCIE_PL_SEL 0x00400000
#define GRC_MODE_NO_RX_PHDR_CSUM 0x00800000
#define GRC_MODE_IRQ_ON_TX_CPU_ATTN 0x01000000
#define GRC_MODE_IRQ_ON_RX_CPU_ATTN 0x02000000
@@ -1550,7 +1590,13 @@
#define GRC_MODE_IRQ_ON_DMA_ATTN 0x08000000
#define GRC_MODE_IRQ_ON_FLOW_ATTN 0x10000000
#define GRC_MODE_4X_NIC_SEND_RINGS 0x20000000
+#define GRC_MODE_PCIE_DL_SEL 0x20000000
#define GRC_MODE_MCAST_FRM_ENABLE 0x40000000
+#define GRC_MODE_PCIE_HI_1K_EN 0x80000000
+#define GRC_MODE_PCIE_PORT_MASK (GRC_MODE_PCIE_TL_SEL | \
+ GRC_MODE_PCIE_PL_SEL | \
+ GRC_MODE_PCIE_DL_SEL | \
+ GRC_MODE_PCIE_HI_1K_EN)
#define GRC_MISC_CFG 0x00006804
#define GRC_MISC_CFG_CORECLK_RESET 0x00000001
#define GRC_MISC_CFG_PRESCALAR_MASK 0x000000fe
@@ -1804,6 +1850,11 @@
/* 0x7e74 --> 0x8000 unused */
+/* Alternate PCIE definitions */
+#define TG3_PCIE_TLDLPL_PORT 0x00007c00
+#define TG3_PCIE_PL_LO_PHYCTL1 0x00000004
+#define TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN 0x00001000
+
/* OTP bit definitions */
#define TG3_OTP_AGCTGT_MASK 0x000000e0
#define TG3_OTP_AGCTGT_SHIFT 1
@@ -1845,6 +1896,8 @@
#define TG3_EEPROM_SB_REVISION_0 0x00000000
#define TG3_EEPROM_SB_REVISION_2 0x00020000
#define TG3_EEPROM_SB_REVISION_3 0x00030000
+#define TG3_EEPROM_SB_REVISION_4 0x00040000
+#define TG3_EEPROM_SB_REVISION_5 0x00050000
#define TG3_EEPROM_MAGIC_HW 0xabcd
#define TG3_EEPROM_MAGIC_HW_MSK 0xffff
@@ -1862,6 +1915,8 @@
#define TG3_EEPROM_SB_F1R2_EDH_OFF 0x14
#define TG3_EEPROM_SB_F1R2_MBA_OFF 0x10
#define TG3_EEPROM_SB_F1R3_EDH_OFF 0x18
+#define TG3_EEPROM_SB_F1R4_EDH_OFF 0x1c
+#define TG3_EEPROM_SB_F1R5_EDH_OFF 0x20
#define TG3_EEPROM_SB_EDH_MAJ_MASK 0x00000700
#define TG3_EEPROM_SB_EDH_MAJ_SHFT 8
#define TG3_EEPROM_SB_EDH_MIN_MASK 0x000000ff
@@ -1956,7 +2011,7 @@
#define NIC_SRAM_DATA_CFG_4 0x00000d60
#define NIC_SRAM_GMII_MODE 0x00000002
-#define NIC_SRAM_RGMII_STD_IBND_DISABLE 0x00000004
+#define NIC_SRAM_RGMII_INBAND_DISABLE 0x00000004
#define NIC_SRAM_RGMII_EXT_IBND_RX_EN 0x00000008
#define NIC_SRAM_RGMII_EXT_IBND_TX_EN 0x00000010
@@ -2093,6 +2148,9 @@
/* Fast Ethernet Transceiver definitions */
#define MII_TG3_FET_PTEST 0x17
+#define MII_TG3_FET_PTEST_FRC_TX_LINK 0x1000
+#define MII_TG3_FET_PTEST_FRC_TX_LOCK 0x0800
+
#define MII_TG3_FET_TEST 0x1f
#define MII_TG3_FET_SHADOW_EN 0x0080
@@ -2682,6 +2740,7 @@ struct tg3 {
struct net_device *dev;
struct pci_dev *pdev;
+ u32 coal_now;
u32 msg_enable;
/* begin "tx thread" cacheline section */
@@ -2700,7 +2759,7 @@ struct tg3 {
struct vlan_group *vlgrp;
#endif
- struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1];
+ struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS];
/* begin "everything else" cacheline(s) section */
@@ -2798,7 +2857,7 @@ struct tg3 {
#define TG3_FLG3_USE_PHYLIB 0x00000010
#define TG3_FLG3_MDIOBUS_INITED 0x00000020
#define TG3_FLG3_PHY_CONNECTED 0x00000080
-#define TG3_FLG3_RGMII_STD_IBND_DISABLE 0x00000100
+#define TG3_FLG3_RGMII_INBAND_DISABLE 0x00000100
#define TG3_FLG3_RGMII_EXT_IBND_RX_EN 0x00000200
#define TG3_FLG3_RGMII_EXT_IBND_TX_EN 0x00000400
#define TG3_FLG3_CLKREQ_BUG 0x00000800
@@ -2812,6 +2871,7 @@ struct tg3 {
#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
#define TG3_FLG3_USE_JUMBO_BDFLAG 0x00400000
+#define TG3_FLG3_L1PLLPD_EN 0x00800000
struct timer_list timer;
u16 timer_counter;
@@ -2861,42 +2921,50 @@ struct tg3 {
/* PHY info */
u32 phy_id;
-#define PHY_ID_MASK 0xfffffff0
-#define PHY_ID_BCM5400 0x60008040
-#define PHY_ID_BCM5401 0x60008050
-#define PHY_ID_BCM5411 0x60008070
-#define PHY_ID_BCM5701 0x60008110
-#define PHY_ID_BCM5703 0x60008160
-#define PHY_ID_BCM5704 0x60008190
-#define PHY_ID_BCM5705 0x600081a0
-#define PHY_ID_BCM5750 0x60008180
-#define PHY_ID_BCM5752 0x60008100
-#define PHY_ID_BCM5714 0x60008340
-#define PHY_ID_BCM5780 0x60008350
-#define PHY_ID_BCM5755 0xbc050cc0
-#define PHY_ID_BCM5787 0xbc050ce0
-#define PHY_ID_BCM5756 0xbc050ed0
-#define PHY_ID_BCM5784 0xbc050fa0
-#define PHY_ID_BCM5761 0xbc050fd0
-#define PHY_ID_BCM5717 0x5c0d8a00
-#define PHY_ID_BCM5906 0xdc00ac40
-#define PHY_ID_BCM8002 0x60010140
-#define PHY_ID_INVALID 0xffffffff
-#define PHY_ID_REV_MASK 0x0000000f
-#define PHY_REV_BCM5401_B0 0x1
-#define PHY_REV_BCM5401_B2 0x3
-#define PHY_REV_BCM5401_C0 0x6
-#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
-#define TG3_PHY_ID_BCM50610 0x143bd60
-#define TG3_PHY_ID_BCM50610M 0x143bd70
-#define TG3_PHY_ID_BCMAC131 0x143bc70
-#define TG3_PHY_ID_RTL8211C 0x001cc910
-#define TG3_PHY_ID_RTL8201E 0x00008200
-#define TG3_PHY_ID_BCM57780 0x03625d90
-#define TG3_PHY_OUI_MASK 0xfffffc00
-#define TG3_PHY_OUI_1 0x00206000
-#define TG3_PHY_OUI_2 0x0143bc00
-#define TG3_PHY_OUI_3 0x03625c00
+#define TG3_PHY_ID_MASK 0xfffffff0
+#define TG3_PHY_ID_BCM5400 0x60008040
+#define TG3_PHY_ID_BCM5401 0x60008050
+#define TG3_PHY_ID_BCM5411 0x60008070
+#define TG3_PHY_ID_BCM5701 0x60008110
+#define TG3_PHY_ID_BCM5703 0x60008160
+#define TG3_PHY_ID_BCM5704 0x60008190
+#define TG3_PHY_ID_BCM5705 0x600081a0
+#define TG3_PHY_ID_BCM5750 0x60008180
+#define TG3_PHY_ID_BCM5752 0x60008100
+#define TG3_PHY_ID_BCM5714 0x60008340
+#define TG3_PHY_ID_BCM5780 0x60008350
+#define TG3_PHY_ID_BCM5755 0xbc050cc0
+#define TG3_PHY_ID_BCM5787 0xbc050ce0
+#define TG3_PHY_ID_BCM5756 0xbc050ed0
+#define TG3_PHY_ID_BCM5784 0xbc050fa0
+#define TG3_PHY_ID_BCM5761 0xbc050fd0
+#define TG3_PHY_ID_BCM5718C 0x5c0d8a00
+#define TG3_PHY_ID_BCM5718S 0xbc050ff0
+#define TG3_PHY_ID_BCM57765 0x5c0d8a40
+#define TG3_PHY_ID_BCM5906 0xdc00ac40
+#define TG3_PHY_ID_BCM8002 0x60010140
+#define TG3_PHY_ID_INVALID 0xffffffff
+
+#define PHY_ID_RTL8211C 0x001cc910
+#define PHY_ID_RTL8201E 0x00008200
+
+#define TG3_PHY_ID_REV_MASK 0x0000000f
+#define TG3_PHY_REV_BCM5401_B0 0x1
+
+ /* This macro assumes the passed PHY ID is
+ * already masked with TG3_PHY_ID_MASK.
+ */
+#define TG3_KNOWN_PHY_ID(X) \
+ ((X) == TG3_PHY_ID_BCM5400 || (X) == TG3_PHY_ID_BCM5401 || \
+ (X) == TG3_PHY_ID_BCM5411 || (X) == TG3_PHY_ID_BCM5701 || \
+ (X) == TG3_PHY_ID_BCM5703 || (X) == TG3_PHY_ID_BCM5704 || \
+ (X) == TG3_PHY_ID_BCM5705 || (X) == TG3_PHY_ID_BCM5750 || \
+ (X) == TG3_PHY_ID_BCM5752 || (X) == TG3_PHY_ID_BCM5714 || \
+ (X) == TG3_PHY_ID_BCM5780 || (X) == TG3_PHY_ID_BCM5787 || \
+ (X) == TG3_PHY_ID_BCM5755 || (X) == TG3_PHY_ID_BCM5756 || \
+ (X) == TG3_PHY_ID_BCM5906 || (X) == TG3_PHY_ID_BCM5761 || \
+ (X) == TG3_PHY_ID_BCM5718C || (X) == TG3_PHY_ID_BCM5718S || \
+ (X) == TG3_PHY_ID_BCM57765 || (X) == TG3_PHY_ID_BCM8002)
u32 led_ctrl;
u32 phy_otp;
@@ -2909,20 +2977,6 @@ struct tg3 {
u32 pci_clock_ctrl;
struct pci_dev *pdev_peer;
- /* This macro assumes the passed PHY ID is already masked
- * with PHY_ID_MASK.
- */
-#define KNOWN_PHY_ID(X) \
- ((X) == PHY_ID_BCM5400 || (X) == PHY_ID_BCM5401 || \
- (X) == PHY_ID_BCM5411 || (X) == PHY_ID_BCM5701 || \
- (X) == PHY_ID_BCM5703 || (X) == PHY_ID_BCM5704 || \
- (X) == PHY_ID_BCM5705 || (X) == PHY_ID_BCM5750 || \
- (X) == PHY_ID_BCM5752 || (X) == PHY_ID_BCM5714 || \
- (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
- (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
- (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
- (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
-
struct tg3_hw_stats *hw_stats;
dma_addr_t stats_mapping;
struct work_struct reset_task;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index fabaeffb315..390540c101c 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -254,7 +254,7 @@ static struct board {
{ "Compaq NetFlex-3/E", TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
};
-static struct pci_device_id tlan_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tlan_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
@@ -338,7 +338,7 @@ static int TLan_PhyInternalService( struct net_device * );
static int TLan_PhyDp83840aCheck( struct net_device * );
*/
-static int TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
+static bool TLan_MiiReadReg( struct net_device *, u16, u16, u16 * );
static void TLan_MiiSendData( u16, u32, unsigned );
static void TLan_MiiSync( u16 );
static void TLan_MiiWriteReg( struct net_device *, u16, u16, u16 );
@@ -1314,7 +1314,7 @@ static struct net_device_stats *TLan_GetStats( struct net_device *dev )
static void TLan_SetMulticastList( struct net_device *dev )
{
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
u32 hash1 = 0;
u32 hash2 = 0;
int i;
@@ -1335,7 +1335,8 @@ static void TLan_SetMulticastList( struct net_device *dev )
TLan_DioWrite32( dev->base_addr, TLAN_HASH_1, 0xFFFFFFFF );
TLan_DioWrite32( dev->base_addr, TLAN_HASH_2, 0xFFFFFFFF );
} else {
- for ( i = 0; i < dev->mc_count; i++ ) {
+ i = 0;
+ netdev_for_each_mc_addr(dmi, dev) {
if ( i < 3 ) {
TLan_SetMac( dev, i + 1,
(char *) &dmi->dmi_addr );
@@ -1346,7 +1347,7 @@ static void TLan_SetMulticastList( struct net_device *dev )
else
hash2 |= ( 1 << ( offset - 32 ) );
}
- dmi = dmi->next;
+ i++;
}
for ( ; i < 3; i++ )
TLan_SetMac( dev, i + 1, NULL );
@@ -2204,7 +2205,7 @@ TLan_ResetAdapter( struct net_device *dev )
u32 data;
u8 data8;
- priv->tlanFullDuplex = FALSE;
+ priv->tlanFullDuplex = false;
priv->phyOnline=0;
netif_carrier_off(dev);
@@ -2259,7 +2260,7 @@ TLan_ResetAdapter( struct net_device *dev )
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x0a );
} else if ( priv->duplex == TLAN_DUPLEX_FULL ) {
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x00 );
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
} else {
TLan_DioWrite8( dev->base_addr, TLAN_ACOMMIT, 0x08 );
}
@@ -2651,14 +2652,14 @@ static void TLan_PhyStartLink( struct net_device *dev )
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0000);
} else if ( priv->speed == TLAN_SPEED_10 &&
priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x0100);
} else if ( priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_HALF) {
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2000);
} else if ( priv->speed == TLAN_SPEED_100 &&
priv->duplex == TLAN_DUPLEX_FULL) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
TLan_MiiWriteReg( dev, phy, MII_GEN_CTL, 0x2100);
} else {
@@ -2695,7 +2696,7 @@ static void TLan_PhyStartLink( struct net_device *dev )
tctl &= ~TLAN_TC_AUISEL;
if ( priv->duplex == TLAN_DUPLEX_FULL ) {
control |= MII_GC_DUPLEX;
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
}
if ( priv->speed == TLAN_SPEED_100 ) {
control |= MII_GC_SPEEDSEL;
@@ -2750,9 +2751,9 @@ static void TLan_PhyFinishAutoNeg( struct net_device *dev )
TLan_MiiReadReg( dev, phy, MII_AN_LPA, &an_lpa );
mode = an_adv & an_lpa & 0x03E0;
if ( mode & 0x0100 ) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
} else if ( ! ( mode & 0x0080 ) && ( mode & 0x0040 ) ) {
- priv->tlanFullDuplex = TRUE;
+ priv->tlanFullDuplex = true;
}
if ( ( ! ( mode & 0x0180 ) ) &&
@@ -2855,8 +2856,8 @@ void TLan_PhyMonitor( struct net_device *dev )
* TLan_MiiReadReg
*
* Returns:
- * 0 if ack received ok
- * 1 otherwise.
+ * false if ack received ok
+ * true if no ack received or other error
*
* Parms:
* dev The device structure containing
@@ -2875,17 +2876,17 @@ void TLan_PhyMonitor( struct net_device *dev )
*
**************************************************************/
-static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
+static bool TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
{
u8 nack;
u16 sio, tmp;
u32 i;
- int err;
+ bool err;
int minten;
TLanPrivateInfo *priv = netdev_priv(dev);
unsigned long flags = 0;
- err = FALSE;
+ err = false;
outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
@@ -2918,7 +2919,7 @@ static int TLan_MiiReadReg( struct net_device *dev, u16 phy, u16 reg, u16 *val )
TLan_SetBit(TLAN_NET_SIO_MCLK, sio);
}
tmp = 0xffff;
- err = TRUE;
+ err = true;
} else { /* ACK, so read data */
for (tmp = 0, i = 0x8000; i; i >>= 1) {
TLan_ClearBit(TLAN_NET_SIO_MCLK, sio);
diff --git a/drivers/net/tlan.h b/drivers/net/tlan.h
index 4b82f283e98..d13ff12d750 100644
--- a/drivers/net/tlan.h
+++ b/drivers/net/tlan.h
@@ -31,9 +31,6 @@
*
****************************************************************/
-#define FALSE 0
-#define TRUE 1
-
#define TLAN_MIN_FRAME_SIZE 64
#define TLAN_MAX_FRAME_SIZE 1600
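
The tlan.c/tlan.h changes above drop the driver-private TRUE/FALSE macros in favour of the bool type that <linux/types.h> already provides. A minimal sketch of that conversion pattern, using a hypothetical helper rather than the real TLan_MiiReadReg internals:

#include <linux/types.h>	/* bool, true, false */

/*
 * Hypothetical status helper following the same convention documented
 * for TLan_MiiReadReg above: returns false when the ACK was seen,
 * true on error.
 */
static bool example_read_reg(u16 reg, u16 *val)
{
	bool err = false;

	if (reg > 31) {			/* nonexistent register */
		*val = 0xffff;
		err = true;
	} else {
		*val = 0;		/* a real driver clocks the bits in here */
	}
	return err;
}
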
diff --git a/drivers/net/tokenring/3c359.c b/drivers/net/tokenring/3c359.c
index cf552d1d962..0fb930feea4 100644
--- a/drivers/net/tokenring/3c359.c
+++ b/drivers/net/tokenring/3c359.c
@@ -117,7 +117,7 @@ MODULE_PARM_DESC(message_level, "3c359: Level of reported messages") ;
* will be stuck with 1555 lines of hex #'s in the code.
*/
-static struct pci_device_id xl_pci_tbl[] =
+static DEFINE_PCI_DEVICE_TABLE(xl_pci_tbl) =
{
{PCI_VENDOR_ID_3COM,PCI_DEVICE_ID_3COM_3C359, PCI_ANY_ID, PCI_ANY_ID, },
{ } /* terminate list */
@@ -1390,10 +1390,9 @@ static int xl_close(struct net_device *dev)
static void xl_set_rx_mode(struct net_device *dev)
{
struct xl_private *xl_priv = netdev_priv(dev);
- struct dev_mc_list *dmi ;
+ struct dev_mc_list *dmi;
unsigned char dev_mc_address[4] ;
u16 options ;
- int i ;
if (dev->flags & IFF_PROMISC)
options = 0x0004 ;
@@ -1408,7 +1407,7 @@ static void xl_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
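
The 3c359.c hunk above, like the other token-ring conversions that follow, replaces the hand-rolled dev->mc_list / dev->mc_count walk with the netdev_for_each_mc_addr() iterator. A minimal sketch of the pattern, assuming the dev_mc_list/dmi_addr representation these drivers still use in this kernel generation:

#include <linux/netdevice.h>
#include <linux/string.h>

/*
 * Build a token-ring style 4-byte functional address by OR-ing bytes
 * 2..5 of every multicast address configured on the interface.
 */
static void example_build_functional_addr(struct net_device *dev,
					  unsigned char fa[4])
{
	struct dev_mc_list *dmi;

	memset(fa, 0, 4);
	netdev_for_each_mc_addr(dmi, dev) {
		fa[0] |= dmi->dmi_addr[2];
		fa[1] |= dmi->dmi_addr[3];
		fa[2] |= dmi->dmi_addr[4];
		fa[3] |= dmi->dmi_addr[5];
	}
}
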
diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
index b9db1b5a58a..515f122777a 100644
--- a/drivers/net/tokenring/abyss.c
+++ b/drivers/net/tokenring/abyss.c
@@ -45,7 +45,7 @@ static char version[] __devinitdata =
#define ABYSS_IO_EXTENT 64
-static struct pci_device_id abyss_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(abyss_pci_tbl) = {
{ PCI_VENDOR_ID_MADGE, PCI_DEVICE_ID_MADGE_MK2,
PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_TOKEN_RING << 8, 0x00ffffff, },
{ } /* Terminating entry */
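
abyss.c and the other ID tables in this patch switch to DEFINE_PCI_DEVICE_TABLE(). In this kernel generation the macro roughly declares the array as a const struct pci_device_id table placed with the init-time data; the exact annotation is an assumption here. A usage sketch with made-up IDs:

#include <linux/pci.h>
#include <linux/module.h>

/* Hypothetical vendor/device IDs, for illustration only. */
#define EX_VENDOR_ID	0x1234
#define EX_DEVICE_ID	0x5678

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
	{ PCI_DEVICE(EX_VENDOR_ID, EX_DEVICE_ID) },
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
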
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 66272f2a075..1a0967246e2 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -995,13 +995,11 @@ static void tok_set_multicast_list(struct net_device *dev)
/*BMS ifconfig tr down or hot unplug a PCMCIA card ??hownowbrowncow*/
if (/*BMSHELPdev->start == 0 ||*/ ti->open_status != OPEN) return;
address[0] = address[1] = address[2] = address[3] = 0;
- mclist = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++) {
+ netdev_for_each_mc_addr(mclist, dev) {
address[0] |= mclist->dmi_addr[2];
address[1] |= mclist->dmi_addr[3];
address[2] |= mclist->dmi_addr[4];
address[3] |= mclist->dmi_addr[5];
- mclist = mclist->next;
}
SET_PAGE(ti->srb_page);
for (i = 0; i < sizeof(struct srb_set_funct_addr); i++)
diff --git a/drivers/net/tokenring/lanstreamer.c b/drivers/net/tokenring/lanstreamer.c
index d6ccd59c7d0..dd028fee9dc 100644
--- a/drivers/net/tokenring/lanstreamer.c
+++ b/drivers/net/tokenring/lanstreamer.c
@@ -146,7 +146,7 @@
static char version[] = "LanStreamer.c v0.4.0 03/08/01 - Mike Sullivan\n"
" v0.5.3 11/13/02 - Kent Yoder";
-static struct pci_device_id streamer_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(streamer_pci_tbl) = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_TR, PCI_ANY_ID, PCI_ANY_ID,},
{} /* terminating entry */
};
@@ -1268,7 +1268,6 @@ static void streamer_set_rx_mode(struct net_device *dev)
__u8 options = 0;
struct dev_mc_list *dmi;
unsigned char dev_mc_address[5];
- int i;
writel(streamer_priv->srb, streamer_mmio + LAPA);
options = streamer_priv->streamer_copy_all_options;
@@ -1303,8 +1302,7 @@ static void streamer_set_rx_mode(struct net_device *dev)
writel(streamer_priv->srb,streamer_mmio+LAPA);
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next)
- {
+ netdev_for_each_mc_addr(dmi, dev) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/olympic.c b/drivers/net/tokenring/olympic.c
index df32025c513..3a25e0434ae 100644
--- a/drivers/net/tokenring/olympic.c
+++ b/drivers/net/tokenring/olympic.c
@@ -172,7 +172,7 @@ module_param_array(message_level, int, NULL, 0) ;
static int network_monitor[OLYMPIC_MAX_ADAPTERS] = {0,};
module_param_array(network_monitor, int, NULL, 0);
-static struct pci_device_id olympic_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(olympic_pci_tbl) = {
{PCI_VENDOR_ID_IBM,PCI_DEVICE_ID_IBM_TR_WAKE,PCI_ANY_ID,PCI_ANY_ID,},
{ } /* Terminating Entry */
};
@@ -1139,9 +1139,8 @@ static void olympic_set_rx_mode(struct net_device *dev)
u8 __iomem *olympic_mmio = olympic_priv->olympic_mmio ;
u8 options = 0;
u8 __iomem *srb;
- struct dev_mc_list *dmi ;
+ struct dev_mc_list *dmi;
unsigned char dev_mc_address[4] ;
- int i ;
writel(olympic_priv->srb,olympic_mmio+LAPA);
srb=olympic_priv->olympic_lap + (olympic_priv->srb & (~0xf800));
@@ -1178,7 +1177,7 @@ static void olympic_set_rx_mode(struct net_device *dev)
dev_mc_address[0] = dev_mc_address[1] = dev_mc_address[2] = dev_mc_address[3] = 0 ;
- for (i=0,dmi=dev->mc_list;i < dev->mc_count; i++,dmi = dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
dev_mc_address[0] |= dmi->dmi_addr[2] ;
dev_mc_address[1] |= dmi->dmi_addr[3] ;
dev_mc_address[2] |= dmi->dmi_addr[4] ;
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index e3c42f5ac4a..21a01753312 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1212,10 +1212,9 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
}
else
{
- int i;
- struct dev_mc_list *mclist = dev->mc_list;
- for (i=0; i< dev->mc_count; i++)
- {
+ struct dev_mc_list *mclist;
+
+ netdev_for_each_mc_addr(mclist, dev) {
((char *)(&tp->ocpl.FunctAddr))[0] |=
mclist->dmi_addr[2];
((char *)(&tp->ocpl.FunctAddr))[1] |=
@@ -1224,7 +1223,6 @@ static void tms380tr_set_multicast_list(struct net_device *dev)
mclist->dmi_addr[4];
((char *)(&tp->ocpl.FunctAddr))[3] |=
mclist->dmi_addr[5];
- mclist = mclist->next;
}
}
tms380tr_exec_cmd(dev, OC_SET_FUNCT_ADDR);
diff --git a/drivers/net/tokenring/tmspci.c b/drivers/net/tokenring/tmspci.c
index f92fe86fdca..d4c7c0c0a3d 100644
--- a/drivers/net/tokenring/tmspci.c
+++ b/drivers/net/tokenring/tmspci.c
@@ -57,7 +57,7 @@ static struct card_info card_info_table[] = {
{ {0x03, 0x01}, "3Com Token Link Velocity"},
};
-static struct pci_device_id tmspci_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tmspci_pci_tbl) = {
{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_TR, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ PCI_VENDOR_ID_TCONRAD, PCI_DEVICE_ID_TCONRAD_TOKENRING, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index a69c4a48bab..647cdd1d4e2 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -1184,29 +1184,19 @@ static void tsi108_set_rx_mode(struct net_device *dev)
rxcfg &= ~(TSI108_EC_RXCFG_UFE | TSI108_EC_RXCFG_MFE);
- if (dev->flags & IFF_ALLMULTI || dev->mc_count) {
+ if (dev->flags & IFF_ALLMULTI || !netdev_mc_empty(dev)) {
int i;
- struct dev_mc_list *mc = dev->mc_list;
+ struct dev_mc_list *mc;
rxcfg |= TSI108_EC_RXCFG_MFE | TSI108_EC_RXCFG_MC_HASH;
memset(data->mc_hash, 0, sizeof(data->mc_hash));
- while (mc) {
+ netdev_for_each_mc_addr(mc, dev) {
u32 hash, crc;
- if (mc->dmi_addrlen == 6) {
- crc = ether_crc(6, mc->dmi_addr);
- hash = crc >> 23;
-
- __set_bit(hash, &data->mc_hash[0]);
- } else {
- printk(KERN_ERR
- "%s: got multicast address of length %d instead of 6.\n",
- dev->name,
- mc->dmi_addrlen);
- }
-
- mc = mc->next;
+ crc = ether_crc(6, mc->dmi_addr);
+ hash = crc >> 23;
+ __set_bit(hash, &data->mc_hash[0]);
}
TSI_WRITE(TSI108_EC_HASHADDR,
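
The tsi108 conversion above also switches the emptiness test to netdev_mc_empty() and drops the dmi_addrlen check, on the assumption that every entry on an Ethernet device's multicast list is a full 6-byte address. A compressed sketch of the remaining CRC-hash fill, with a hypothetical 512-bit hash table:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>

/*
 * Set one bit per multicast address in a hypothetical 512-bit hash
 * table, indexed by the top nine bits of the Ethernet CRC (the same
 * crc >> 23 hash the tsi108 code uses).
 */
static void example_fill_mc_hash(struct net_device *dev,
				 unsigned long hash[BITS_TO_LONGS(512)])
{
	struct dev_mc_list *mc;

	bitmap_zero(hash, 512);
	netdev_for_each_mc_addr(mc, dev) {
		u32 crc = ether_crc(ETH_ALEN, mc->dmi_addr);

		__set_bit(crc >> 23, hash);
	}
}
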
diff --git a/drivers/net/tulip/21142.c b/drivers/net/tulip/21142.c
index 9f6742fad6c..007d8e75666 100644
--- a/drivers/net/tulip/21142.c
+++ b/drivers/net/tulip/21142.c
@@ -43,8 +43,8 @@ void t21142_media_task(struct work_struct *work)
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 2)
- printk(KERN_INFO"%s: 21143 negotiation status %8.8x, %s.\n",
- dev->name, csr12, medianame[dev->if_port]);
+ dev_info(&dev->dev, "21143 negotiation status %08x, %s\n",
+ csr12, medianame[dev->if_port]);
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
if (tulip_check_duplex(dev) < 0) {
netif_carrier_off(dev);
@@ -56,23 +56,26 @@ void t21142_media_task(struct work_struct *work)
} else if (tp->nwayset) {
/* Don't screw up a negotiated session! */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: Using NWay-set %s media, csr12 %8.8x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Using NWay-set %s media, csr12 %08x\n",
+ medianame[dev->if_port], csr12);
} else if (tp->medialock) {
;
} else if (dev->if_port == 3) {
if (csr12 & 2) { /* No 100mbps link beat, revert to 10mbps. */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: No 21143 100baseTx link beat, %8.8x, "
- "trying NWay.\n", dev->name, csr12);
+ dev_info(&dev->dev,
+ "No 21143 100baseTx link beat, %08x, trying NWay\n",
+ csr12);
t21142_start_nway(dev);
next_tick = 3*HZ;
}
} else if ((csr12 & 0x7000) != 0x5000) {
/* Negotiation failed. Search media types. */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 negotiation failed, status %8.8x.\n",
- dev->name, csr12);
+ dev_info(&dev->dev,
+ "21143 negotiation failed, status %08x\n",
+ csr12);
if (!(csr12 & 4)) { /* 10mbps link beat good. */
new_csr6 = 0x82420000;
dev->if_port = 0;
@@ -90,8 +93,8 @@ void t21142_media_task(struct work_struct *work)
iowrite32(1, ioaddr + CSR13);
}
if (tulip_debug > 1)
- printk(KERN_INFO"%s: Testing new 21143 media %s.\n",
- dev->name, medianame[dev->if_port]);
+ dev_info(&dev->dev, "Testing new 21143 media %s\n",
+ medianame[dev->if_port]);
if (new_csr6 != (tp->csr6 & ~0x00D5)) {
tp->csr6 &= 0x00D5;
tp->csr6 |= new_csr6;
@@ -119,8 +122,8 @@ void t21142_start_nway(struct net_device *dev)
tp->nway = tp->mediasense = 1;
tp->nwayset = tp->lpar = 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n",
- dev->name, csr14);
+ printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%08x\n",
+ dev->name, csr14);
iowrite32(0x0001, ioaddr + CSR13);
udelay(100);
iowrite32(csr14, ioaddr + CSR14);
@@ -147,8 +150,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
if ((csr14 & 0x80) && (csr12 & 0x7000) != 0x5000)
csr12 |= 6;
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 link status interrupt %8.8x, CSR5 %x, "
- "%8.8x.\n", dev->name, csr12, csr5, csr14);
+ dev_info(&dev->dev,
+ "21143 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, csr14);
/* If NWay finished and we have a negotiated partner capability. */
if (tp->nway && !tp->nwayset && (csr12 & 0x7000) == 0x5000) {
@@ -171,14 +175,15 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1) {
if (tp->nwayset)
- printk(KERN_INFO "%s: Switching to %s based on link "
- "negotiation %4.4x & %4.4x = %4.4x.\n",
- dev->name, medianame[dev->if_port], tp->sym_advertise,
- tp->lpar, negotiated);
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
else
- printk(KERN_INFO "%s: Autonegotiation failed, using %s,"
- " link beat status %4.4x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
}
if (tp->mtable) {
@@ -201,14 +206,14 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
#if 0 /* Restart shouldn't be needed. */
iowrite32(tp->csr6 | RxOn, ioaddr + CSR6);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %8.8x.\n",
- dev->name, ioread32(ioaddr + CSR5));
+ printk(KERN_DEBUG "%s: Restarting Tx and Rx, CSR5 is %08x\n",
+ dev->name, ioread32(ioaddr + CSR5));
#endif
tulip_start_rxtx(tp);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 %8.8x.\n",
- dev->name, tp->csr6, ioread32(ioaddr + CSR6),
- ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
+ dev->name, tp->csr6, ioread32(ioaddr + CSR6),
+ ioread32(ioaddr + CSR12));
} else if ((tp->nwayset && (csr5 & 0x08000000) &&
(dev->if_port == 3 || dev->if_port == 5) &&
(csr12 & 2) == 2) ||
@@ -220,9 +225,9 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
add_timer(&tp->timer);
} else if (dev->if_port == 3 || dev->if_port == 5) {
if (tulip_debug > 1)
- printk(KERN_INFO"%s: 21143 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 2) ? "failed" : "good");
+ dev_info(&dev->dev, "21143 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
if ((csr12 & 2) && ! tp->medialock) {
del_timer_sync(&tp->timer);
t21142_start_nway(dev);
@@ -232,21 +237,18 @@ void t21142_lnk_change(struct net_device *dev, int csr5)
iowrite32(csr14 & ~0x080, ioaddr + CSR14);
} else if (dev->if_port == 0 || dev->if_port == 4) {
if ((csr12 & 4) == 0)
- printk(KERN_INFO"%s: 21143 10baseT link beat good.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 10baseT link beat good\n");
} else if (!(csr12 & 4)) { /* 10mbps link beat good. */
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 10mbps sensed media.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 10mbps sensed media\n");
dev->if_port = 0;
} else if (tp->nwayset) {
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 using NWay-set %s, csr6 %8.8x.\n",
- dev->name, medianame[dev->if_port], tp->csr6);
+ dev_info(&dev->dev, "21143 using NWay-set %s, csr6 %08x\n",
+ medianame[dev->if_port], tp->csr6);
} else { /* 100mbps link beat good. */
if (tulip_debug)
- printk(KERN_INFO"%s: 21143 100baseTx sensed media.\n",
- dev->name);
+ dev_info(&dev->dev, "21143 100baseTx sensed media\n");
dev->if_port = 3;
tp->csr6 = 0x838E0000 | (tp->csr6 & 0x20ff);
iowrite32(0x0003FF7F, ioaddr + CSR14);
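
Most of the 21142.c churn above is a straight conversion from printk(KERN_INFO "%s: ...", dev->name, ...) to dev_info(&dev->dev, ...), letting the driver core supply the device identity. A minimal before/after sketch; the message text is illustrative only:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/netdevice.h>

static void example_report_status(struct net_device *dev, u32 csr12)
{
	/* Old style: the interface name is formatted by hand. */
	printk(KERN_INFO "%s: negotiation status %08x\n", dev->name, csr12);

	/* New style: dev_info() prefixes the message with the device name. */
	dev_info(&dev->dev, "negotiation status %08x\n", csr12);
}
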
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index d4255d44cb7..cb429723b2c 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -337,7 +337,7 @@ static void de21041_media_timer (unsigned long data);
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media);
-static struct pci_device_id de_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(de_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_PLUS,
@@ -382,9 +382,9 @@ static void de_rx_err_acct (struct de_private *de, unsigned rx_tail,
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (netif_msg_rx_err(de))
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- de->dev->name, status);
+ dev_warn(&de->dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
de->net_stats.rx_length_errors++;
}
} else if (status & RxError) {
@@ -487,7 +487,7 @@ rx_next:
}
if (!rx_work)
- printk(KERN_WARNING "%s: rx work limit reached\n", de->dev->name);
+ dev_warn(&de->dev->dev, "rx work limit reached\n");
de->rx_tail = rx_tail;
}
@@ -504,7 +504,8 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
if (netif_msg_intr(de))
printk(KERN_DEBUG "%s: intr, status %08x mode %08x desc %u/%u/%u\n",
- dev->name, status, dr32(MacMode), de->rx_tail, de->tx_head, de->tx_tail);
+ dev->name, status, dr32(MacMode),
+ de->rx_tail, de->tx_head, de->tx_tail);
dw32(MacStatus, status);
@@ -529,8 +530,9 @@ static irqreturn_t de_interrupt (int irq, void *dev_instance)
pci_read_config_word(de->pdev, PCI_STATUS, &pci_status);
pci_write_config_word(de->pdev, PCI_STATUS, pci_status);
- printk(KERN_ERR "%s: PCI bus error, status=%08x, PCI status=%04x\n",
- dev->name, status, pci_status);
+ dev_err(&de->dev->dev,
+ "PCI bus error, status=%08x, PCI status=%04x\n",
+ status, pci_status);
}
return IRQ_HANDLED;
@@ -582,7 +584,8 @@ static void de_tx (struct de_private *de)
de->net_stats.tx_packets++;
de->net_stats.tx_bytes += skb->len;
if (netif_msg_tx_done(de))
- printk(KERN_DEBUG "%s: tx done, slot %d\n", de->dev->name, tx_tail);
+ printk(KERN_DEBUG "%s: tx done, slot %d\n",
+ de->dev->name, tx_tail);
}
dev_kfree_skb_irq(skb);
}
@@ -674,18 +677,17 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
set_bit_le(index, hash_table);
+ }
- for (i = 0; i < 32; i++) {
- *setup_frm++ = hash_table[i];
- *setup_frm++ = hash_table[i];
- }
- setup_frm = &de->setup_frame[13*6];
+ for (i = 0; i < 32; i++) {
+ *setup_frm++ = hash_table[i];
+ *setup_frm++ = hash_table[i];
}
+ setup_frm = &de->setup_frame[13*6];
/* Fill the final entry with our physical address. */
eaddrs = (u16 *)dev->dev_addr;
@@ -698,20 +700,18 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct de_private *de = netdev_priv(dev);
struct dev_mc_list *mclist;
- int i;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
}
/* Fill the unused entries with the broadcast address. */
- memset(setup_frm, 0xff, (15-i)*12);
+ memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
setup_frm = &de->setup_frame[15*6];
/* Fill the final entry with our physical address. */
@@ -738,7 +738,7 @@ static void __de_set_rx_mode (struct net_device *dev)
goto out;
}
- if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ if ((netdev_mc_count(dev) > 1000) || (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
macmode |= AcceptAllMulticast;
goto out;
@@ -746,7 +746,7 @@ static void __de_set_rx_mode (struct net_device *dev)
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) /* Must use a multicast hash table. */
build_setup_frame_hash (de->setup_frame, dev);
else
build_setup_frame_perfect (de->setup_frame, dev);
@@ -870,7 +870,7 @@ static void de_stop_rxtx (struct de_private *de)
udelay(100);
}
- printk(KERN_WARNING "%s: timeout expired stopping DMA\n", de->dev->name);
+ dev_warn(&de->dev->dev, "timeout expired stopping DMA\n");
}
static inline void de_start_rxtx (struct de_private *de)
@@ -905,8 +905,8 @@ static void de_link_up(struct de_private *de)
if (!netif_carrier_ok(de->dev)) {
netif_carrier_on(de->dev);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link up, media %s\n",
- de->dev->name, media_name[de->media_type]);
+ dev_info(&de->dev->dev, "link up, media %s\n",
+ media_name[de->media_type]);
}
}
@@ -915,7 +915,7 @@ static void de_link_down(struct de_private *de)
if (netif_carrier_ok(de->dev)) {
netif_carrier_off(de->dev);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link down\n", de->dev->name);
+ dev_info(&de->dev->dev, "link down\n");
}
}
@@ -925,7 +925,8 @@ static void de_set_media (struct de_private *de)
u32 macmode = dr32(MacMode);
if (de_is_running(de))
- printk(KERN_WARNING "%s: chip is running while changing media!\n", de->dev->name);
+ dev_warn(&de->dev->dev,
+ "chip is running while changing media!\n");
if (de->de21040)
dw32(CSR11, FULL_DUPLEX_MAGIC);
@@ -945,15 +946,15 @@ static void de_set_media (struct de_private *de)
macmode &= ~FullDuplex;
if (netif_msg_link(de)) {
- printk(KERN_INFO
- "%s: set link %s\n"
- "%s: mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n"
- "%s: set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
- de->dev->name, media_name[media],
- de->dev->name, dr32(MacMode), dr32(SIAStatus),
- dr32(CSR13), dr32(CSR14), dr32(CSR15),
- de->dev->name, macmode, de->media[media].csr13,
- de->media[media].csr14, de->media[media].csr15);
+ dev_info(&de->dev->dev, "set link %s\n", media_name[media]);
+ dev_info(&de->dev->dev, "mode 0x%x, sia 0x%x,0x%x,0x%x,0x%x\n",
+ dr32(MacMode), dr32(SIAStatus),
+ dr32(CSR13), dr32(CSR14), dr32(CSR15));
+
+ dev_info(&de->dev->dev,
+ "set mode 0x%x, set sia 0x%x,0x%x,0x%x\n",
+ macmode, de->media[media].csr13,
+ de->media[media].csr14, de->media[media].csr15);
}
if (macmode != dr32(MacMode))
dw32(MacMode, macmode);
@@ -992,9 +993,8 @@ static void de21040_media_timer (unsigned long data)
de_link_up(de);
else
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: %s link ok, status %x\n",
- dev->name, media_name[de->media_type],
- status);
+ dev_info(&dev->dev, "%s link ok, status %x\n",
+ media_name[de->media_type], status);
return;
}
@@ -1022,8 +1022,8 @@ no_link_yet:
add_timer(&de->media_timer);
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
- dev->name, media_name[de->media_type], status);
+ dev_info(&dev->dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static unsigned int de_ok_to_advertise (struct de_private *de, u32 new_media)
@@ -1079,9 +1079,10 @@ static void de21041_media_timer (unsigned long data)
de_link_up(de);
else
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: %s link ok, mode %x status %x\n",
- dev->name, media_name[de->media_type],
- dr32(MacMode), status);
+ dev_info(&dev->dev,
+ "%s link ok, mode %x status %x\n",
+ media_name[de->media_type],
+ dr32(MacMode), status);
return;
}
@@ -1150,8 +1151,8 @@ no_link_yet:
add_timer(&de->media_timer);
if (netif_msg_timer(de))
- printk(KERN_INFO "%s: no link, trying media %s, status %x\n",
- dev->name, media_name[de->media_type], status);
+ dev_info(&dev->dev, "no link, trying media %s, status %x\n",
+ media_name[de->media_type], status);
}
static void de_media_interrupt (struct de_private *de, u32 status)
@@ -1378,8 +1379,7 @@ static int de_open (struct net_device *dev)
rc = de_alloc_rings(de);
if (rc) {
- printk(KERN_ERR "%s: ring allocation failure, err=%d\n",
- dev->name, rc);
+ dev_err(&dev->dev, "ring allocation failure, err=%d\n", rc);
return rc;
}
@@ -1387,15 +1387,14 @@ static int de_open (struct net_device *dev)
rc = request_irq(dev->irq, de_interrupt, IRQF_SHARED, dev->name, dev);
if (rc) {
- printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
- dev->name, dev->irq, rc);
+ dev_err(&dev->dev, "IRQ %d request failure, err=%d\n",
+ dev->irq, rc);
goto err_out_free;
}
rc = de_init_hw(de);
if (rc) {
- printk(KERN_ERR "%s: h/w init failure, err=%d\n",
- dev->name, rc);
+ dev_err(&dev->dev, "h/w init failure, err=%d\n", rc);
goto err_out_free_irq;
}
@@ -1666,8 +1665,8 @@ static int de_nway_reset(struct net_device *dev)
status = dr32(SIAStatus);
dw32(SIAStatus, (status & ~NWayState) | NWayRestart);
if (netif_msg_link(de))
- printk(KERN_INFO "%s: link nway restart, status %x,%x\n",
- de->dev->name, status, dr32(SIAStatus));
+ dev_info(&de->dev->dev, "link nway restart, status %x,%x\n",
+ status, dr32(SIAStatus));
return 0;
}
@@ -1711,7 +1710,7 @@ static void __devinit de21040_get_mac_address (struct de_private *de)
de->dev->dev_addr[i] = value;
udelay(1);
if (boguscnt <= 0)
- printk(KERN_WARNING PFX "timeout reading 21040 MAC address byte %u\n", i);
+ pr_warning(PFX "timeout reading 21040 MAC address byte %u\n", i);
}
}
@@ -1830,9 +1829,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
}
if (netif_msg_probe(de))
- printk(KERN_INFO "de%d: SROM leaf offset %u, default media %s\n",
- de->board_idx, ofs,
- media_name[de->media_type]);
+ pr_info("de%d: SROM leaf offset %u, default media %s\n",
+ de->board_idx, ofs, media_name[de->media_type]);
/* init SIA register values to defaults */
for (i = 0; i < DE_MAX_MEDIA; i++) {
@@ -1879,9 +1877,9 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
de->media[idx].type = idx;
if (netif_msg_probe(de))
- printk(KERN_INFO "de%d: media block #%u: %s",
- de->board_idx, i,
- media_name[de->media[idx].type]);
+ pr_info("de%d: media block #%u: %s",
+ de->board_idx, i,
+ media_name[de->media[idx].type]);
bufp += sizeof (ib->opts);
@@ -1893,13 +1891,13 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
sizeof(ib->csr15);
if (netif_msg_probe(de))
- printk(" (%x,%x,%x)\n",
- de->media[idx].csr13,
- de->media[idx].csr14,
- de->media[idx].csr15);
+ pr_cont(" (%x,%x,%x)\n",
+ de->media[idx].csr13,
+ de->media[idx].csr14,
+ de->media[idx].csr15);
} else if (netif_msg_probe(de))
- printk("\n");
+ pr_cont("\n");
if (bufp > ((void *)&ee_data[DE_EEPROM_SIZE - 3]))
break;
@@ -2005,7 +2003,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* check for invalid IRQ value */
if (pdev->irq < 2) {
rc = -EIO;
- printk(KERN_ERR PFX "invalid irq (%d) for pci dev %s\n",
+ pr_err(PFX "invalid irq (%d) for pci dev %s\n",
pdev->irq, pci_name(pdev));
goto err_out_res;
}
@@ -2016,14 +2014,14 @@ static int __devinit de_init_one (struct pci_dev *pdev,
pciaddr = pci_resource_start(pdev, 1);
if (!pciaddr) {
rc = -EIO;
- printk(KERN_ERR PFX "no MMIO resource for pci dev %s\n",
- pci_name(pdev));
+ pr_err(PFX "no MMIO resource for pci dev %s\n", pci_name(pdev));
goto err_out_res;
}
if (pci_resource_len(pdev, 1) < DE_REGS_SIZE) {
rc = -EIO;
- printk(KERN_ERR PFX "MMIO resource (%llx) too small on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1), pci_name(pdev));
+ pr_err(PFX "MMIO resource (%llx) too small on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pci_name(pdev));
goto err_out_res;
}
@@ -2031,9 +2029,9 @@ static int __devinit de_init_one (struct pci_dev *pdev,
regs = ioremap_nocache(pciaddr, DE_REGS_SIZE);
if (!regs) {
rc = -EIO;
- printk(KERN_ERR PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
- (unsigned long long)pci_resource_len(pdev, 1),
- pciaddr, pci_name(pdev));
+ pr_err(PFX "Cannot map PCI MMIO (%llx@%lx) on pci dev %s\n",
+ (unsigned long long)pci_resource_len(pdev, 1),
+ pciaddr, pci_name(pdev));
goto err_out_res;
}
dev->base_addr = (unsigned long) regs;
@@ -2044,8 +2042,7 @@ static int __devinit de_init_one (struct pci_dev *pdev,
/* make sure hardware is not running */
rc = de_reset_mac(de);
if (rc) {
- printk(KERN_ERR PFX "Cannot reset MAC, pci dev %s\n",
- pci_name(pdev));
+ pr_err(PFX "Cannot reset MAC, pci dev %s\n", pci_name(pdev));
goto err_out_iomap;
}
@@ -2065,12 +2062,11 @@ static int __devinit de_init_one (struct pci_dev *pdev,
goto err_out_iomap;
/* print info about board and interface just registered */
- printk (KERN_INFO "%s: %s at 0x%lx, %pM, IRQ %d\n",
- dev->name,
- de->de21040 ? "21040" : "21041",
- dev->base_addr,
- dev->dev_addr,
- dev->irq);
+ dev_info(&dev->dev, "%s at 0x%lx, %pM, IRQ %d\n",
+ de->de21040 ? "21040" : "21041",
+ dev->base_addr,
+ dev->dev_addr,
+ dev->irq);
pci_set_drvdata(pdev, dev);
@@ -2158,8 +2154,7 @@ static int de_resume (struct pci_dev *pdev)
if (!netif_running(dev))
goto out_attach;
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR "%s: pci_enable_device failed in resume\n",
- dev->name);
+ dev_err(&dev->dev, "pci_enable_device failed in resume\n");
goto out;
}
de_init_hw(de);
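
In de2104x.c, build_setup_frame_perfect() above loses its loop counter: the addresses are copied with netdev_for_each_mc_addr() and the number of unused perfect-filter slots comes from netdev_mc_count(). A sketch of that fill, assuming (as the caller there guarantees) at most 14 multicast addresses and the 16-slot Tulip setup-frame layout:

#include <linux/netdevice.h>
#include <linux/string.h>

/*
 * Copy each multicast address into one 12-byte perfect-filter slot and
 * pad the unused slots (up to slot 14) with the broadcast address.
 * The caller must ensure netdev_mc_count(dev) <= 14.
 */
static void example_fill_perfect_filter(struct net_device *dev, u16 *setup_frm)
{
	struct dev_mc_list *mclist;
	u16 *eaddrs;

	netdev_for_each_mc_addr(mclist, dev) {
		eaddrs = (u16 *)mclist->dmi_addr;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
		*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
	}
	memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
}
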
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index a8349b7200b..c4ecb9a9540 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -1951,9 +1951,9 @@ static void
SetMulticastFilter(struct net_device *dev)
{
struct de4x5_private *lp = netdev_priv(dev);
- struct dev_mc_list *dmi=dev->mc_list;
+ struct dev_mc_list *dmi;
u_long iobase = dev->base_addr;
- int i, j, bit, byte;
+ int i, bit, byte;
u16 hashcode;
u32 omr, crc;
char *pa;
@@ -1963,12 +1963,11 @@ SetMulticastFilter(struct net_device *dev)
omr &= ~(OMR_PR | OMR_PM);
pa = build_setup_frame(dev, ALL); /* Build the basic frame */
- if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 14)) {
+ if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 14)) {
omr |= OMR_PM; /* Pass all multicasts */
} else if (lp->setup_f == HASH_PERF) { /* Hash Filtering */
- for (i=0;i<dev->mc_count;i++) { /* for each address in the list */
- addrs=dmi->dmi_addr;
- dmi=dmi->next;
+ netdev_for_each_mc_addr(dmi, dev) {
+ addrs = dmi->dmi_addr;
if ((*addrs & 0x01) == 1) { /* multicast address? */
crc = ether_crc_le(ETH_ALEN, addrs);
hashcode = crc & HASH_BITS; /* hashcode is 9 LSb of CRC */
@@ -1984,9 +1983,8 @@ SetMulticastFilter(struct net_device *dev)
}
}
} else { /* Perfect filtering */
- for (j=0; j<dev->mc_count; j++) {
- addrs=dmi->dmi_addr;
- dmi=dmi->next;
+ netdev_for_each_mc_addr(dmi, dev) {
+ addrs = dmi->dmi_addr;
for (i=0; i<ETH_ALEN; i++) {
*(pa + (i&1)) = *addrs++;
if (i & 0x01) pa += 4;
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 6f44ebf5891..95b38d803e9 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -61,6 +61,8 @@
Test and make sure PCI latency is now correct for all cases.
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "dmfe"
#define DRV_VERSION "1.36.4"
#define DRV_RELDATE "2002-01-17"
@@ -149,16 +151,17 @@
#define DMFE_TX_TIMEOUT ((3*HZ)/2) /* tx packet time-out time 1.5 s */
#define DMFE_TX_KICK (HZ/2) /* tx packet Kick-out time 0.5 s */
-#define DMFE_DBUG(dbug_now, msg, value) \
- do { \
- if (dmfe_debug || (dbug_now)) \
- printk(KERN_ERR DRV_NAME ": %s %lx\n",\
- (msg), (long) (value)); \
+#define DMFE_DBUG(dbug_now, msg, value) \
+ do { \
+ if (dmfe_debug || (dbug_now)) \
+ pr_err("%s %lx\n", \
+ (msg), (long) (value)); \
} while (0)
-#define SHOW_MEDIA_TYPE(mode) \
- printk (KERN_INFO DRV_NAME ": Change Speed to %sMhz %s duplex\n" , \
- (mode & 1) ? "100":"10", (mode & 4) ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ pr_info("Change Speed to %sMhz %s duplex\n" , \
+ (mode & 1) ? "100":"10", \
+ (mode & 4) ? "full":"half");
/* CR9 definition: SROM/MII */
@@ -327,8 +330,8 @@ static void poll_dmfe (struct net_device *dev);
static void dmfe_descriptor_init(struct dmfe_board_info *, unsigned long);
static void allocate_rx_buffer(struct dmfe_board_info *);
static void update_cr6(u32, unsigned long);
-static void send_filter_frame(struct DEVICE * ,int);
-static void dm9132_id_table(struct DEVICE * ,int);
+static void send_filter_frame(struct DEVICE *);
+static void dm9132_id_table(struct DEVICE *);
static u16 phy_read(unsigned long, u8, u8, u32);
static void phy_write(unsigned long, u8, u8, u16, u32);
static void phy_write_1bit(unsigned long, u32);
@@ -391,8 +394,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
struct device_node *dp = pci_device_to_OF_node(pdev);
if (dp && of_get_property(dp, "local-mac-address", NULL)) {
- printk(KERN_INFO DRV_NAME
- ": skipping on-board DM910x (use tulip)\n");
+ pr_info("skipping on-board DM910x (use tulip)\n");
return -ENODEV;
}
}
@@ -405,8 +407,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING DRV_NAME
- ": 32-bit PCI DMA not available.\n");
+ pr_warning("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -417,13 +418,13 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
goto err_out_free;
if (!pci_resource_start(pdev, 0)) {
- printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+ pr_err("I/O base is zero\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_resource_len(pdev, 0) < (CHK_IO_SIZE(pdev)) ) {
- printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+ pr_err("Allocated I/O size too small\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -438,7 +439,7 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
#endif
if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ pr_err("Failed to request PCI regions\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -497,12 +498,9 @@ static int __devinit dmfe_init_one (struct pci_dev *pdev,
if (err)
goto err_out_free_buf;
- printk(KERN_INFO "%s: Davicom DM%04lx at pci%s, %pM, irq %d.\n",
- dev->name,
- ent->driver_data >> 16,
- pci_name(pdev),
- dev->dev_addr,
- dev->irq);
+ dev_info(&dev->dev, "Davicom DM%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16,
+ pci_name(pdev), dev->dev_addr, dev->irq);
pci_set_master(pdev);
@@ -660,9 +658,9 @@ static void dmfe_init_dm910x(struct DEVICE *dev)
/* Send setup frame */
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev); /* DM9102/DM9102A */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
@@ -696,7 +694,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
- printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+ pr_err("big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -706,8 +704,7 @@ static netdev_tx_t dmfe_start_xmit(struct sk_buff *skb,
/* No Tx resource check, it never happens normally */
if (db->tx_queue_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n",
- db->tx_queue_cnt);
+ pr_err("No Tx resource %ld\n", db->tx_queue_cnt);
return NETDEV_TX_BUSY;
}
@@ -779,12 +776,11 @@ static int dmfe_stop(struct DEVICE *dev)
#if 0
/* show statistic counter */
- printk(DRV_NAME ": FU:%lx EC:%lx LC:%lx NC:%lx"
- " LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
- db->tx_fifo_underrun, db->tx_excessive_collision,
- db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
- db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
- db->reset_fatal, db->reset_TXtimeout);
+ printk("FU:%lx EC:%lx LC:%lx NC:%lx LOC:%lx TXJT:%lx RESET:%lx RCR8:%lx FAL:%lx TT:%lx\n",
+ db->tx_fifo_underrun, db->tx_excessive_collision,
+ db->tx_late_collision, db->tx_no_carrier, db->tx_loss_carrier,
+ db->tx_jabber_timeout, db->reset_count, db->reset_cr8,
+ db->reset_fatal, db->reset_TXtimeout);
#endif
return 0;
@@ -885,7 +881,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
txptr = db->tx_remove_ptr;
while(db->tx_packet_cnt) {
tdes0 = le32_to_cpu(txptr->tdes0);
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ pr_debug("tdes0=%x\n", tdes0);
if (tdes0 & 0x80000000)
break;
@@ -895,7 +891,7 @@ static void dmfe_free_tx_pkt(struct DEVICE *dev, struct dmfe_board_info * db)
/* Transmit statistic counter */
if ( tdes0 != 0x7fffffff ) {
- /* printk(DRV_NAME ": tdes0=%x\n", tdes0); */
+ pr_debug("tdes0=%x\n", tdes0);
dev->stats.collisions += (tdes0 >> 3) & 0xf;
dev->stats.tx_bytes += le32_to_cpu(txptr->tdes1) & 0x7ff;
if (tdes0 & TDES0_ERR_MASK) {
@@ -992,7 +988,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
/* error summary bit check */
if (rdes0 & 0x8000) {
/* This is a error packet */
- //printk(DRV_NAME ": rdes0: %lx\n", rdes0);
+ pr_debug("rdes0: %x\n", rdes0);
dev->stats.rx_errors++;
if (rdes0 & 1)
dev->stats.rx_fifo_errors++;
@@ -1056,6 +1052,7 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
unsigned long flags;
+ int mc_count = netdev_mc_count(dev);
DMFE_DBUG(0, "dmfe_set_filter_mode()", 0);
spin_lock_irqsave(&db->lock, flags);
@@ -1068,19 +1065,19 @@ static void dmfe_set_filter_mode(struct DEVICE * dev)
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > DMFE_MAX_MULTICAST) {
- DMFE_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI || mc_count > DMFE_MAX_MULTICAST) {
+ DMFE_DBUG(0, "Pass all multicast address", mc_count);
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- DMFE_DBUG(0, "Set multicast address", dev->mc_count);
+ DMFE_DBUG(0, "Set multicast address", mc_count);
if (db->chip_id == PCI_DM9132_ID)
- dm9132_id_table(dev, dev->mc_count); /* DM9132 */
+ dm9132_id_table(dev); /* DM9132 */
else
- send_filter_frame(dev, dev->mc_count); /* DM9102/DM9102A */
+ send_filter_frame(dev); /* DM9102/DM9102A */
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1191,8 +1188,7 @@ static void dmfe_timer(unsigned long data)
if ( time_after(jiffies, dev->trans_start + DMFE_TX_TIMEOUT) ) {
db->reset_TXtimeout++;
db->wait_reset = 1;
- printk(KERN_WARNING "%s: Tx timeout - resetting\n",
- dev->name);
+ dev_warn(&dev->dev, "Tx timeout - resetting\n");
}
}
@@ -1456,7 +1452,7 @@ static void update_cr6(u32 cr6_data, unsigned long ioaddr)
* This setup frame initializes the DM910X address filter mode
*/
-static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
+static void dm9132_id_table(struct DEVICE *dev)
{
struct dev_mc_list *mcptr;
u16 * addrptr;
@@ -1476,15 +1472,14 @@ static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
ioaddr += 4;
/* Clear Hash Table */
- for (i = 0; i < 4; i++)
- hash_table[i] = 0x0;
+ memset(hash_table, 0, sizeof(hash_table));
/* broadcast address */
hash_table[3] = 0x8000;
/* the multicast address in Hash Table : 64 bits */
- for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
- hash_val = cal_CRC( (char *) mcptr->dmi_addr, 6, 0) & 0x3f;
+ netdev_for_each_mc_addr(mcptr, dev) {
+ hash_val = cal_CRC((char *) mcptr->dmi_addr, 6, 0) & 0x3f;
hash_table[hash_val / 16] |= (u16) 1 << (hash_val % 16);
}
@@ -1499,7 +1494,7 @@ static void dm9132_id_table(struct DEVICE *dev, int mc_cnt)
* This setup frame initializes the DM910X address filter mode
*/
-static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
+static void send_filter_frame(struct DEVICE *dev)
{
struct dmfe_board_info *db = netdev_priv(dev);
struct dev_mc_list *mcptr;
@@ -1525,14 +1520,14 @@ static void send_filter_frame(struct DEVICE *dev, int mc_cnt)
*suptr++ = 0xffff;
/* fit the multicast address */
- for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
addrptr = (u16 *) mcptr->dmi_addr;
*suptr++ = addrptr[0];
*suptr++ = addrptr[1];
*suptr++ = addrptr[2];
}
- for (; i<14; i++) {
+ for (i = netdev_mc_count(dev); i < 14; i++) {
*suptr++ = 0xffff;
*suptr++ = 0xffff;
*suptr++ = 0xffff;
@@ -1646,7 +1641,7 @@ static u8 dmfe_sense_speed(struct dmfe_board_info * db)
else /* DM9102/DM9102A */
phy_mode = phy_read(db->ioaddr,
db->phy_addr, 17, db->chip_id) & 0xf000;
- /* printk(DRV_NAME ": Phy_mode %x ",phy_mode); */
+ pr_debug("Phy_mode %x\n", phy_mode);
switch (phy_mode) {
case 0x1000: db->op_mode = DMFE_10MHF; break;
case 0x2000: db->op_mode = DMFE_10MFD; break;
@@ -2089,7 +2084,7 @@ static void dmfe_HPNA_remote_cmd_chk(struct dmfe_board_info * db)
-static struct pci_device_id dmfe_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dmfe_pci_tbl) = {
{ 0x1282, 0x9132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9132_ID },
{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9102_ID },
{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_DM9100_ID },
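
dmfe.c additionally defines pr_fmt() so the pr_err()/pr_info()/pr_debug() calls that replace the old KERN_* printks are automatically prefixed with the module name. The define has to appear before the first include that uses the pr_* helpers; a minimal sketch:

/* Must come before the first #include so the pr_* helpers pick it up. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void example_complain(int err)
{
	/* With the pr_fmt above this prints, e.g., "<module>: request failed: -5". */
	pr_err("request failed: %d\n", err);
}
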
diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
index 889f57aae89..93f4e8309f8 100644
--- a/drivers/net/tulip/eeprom.c
+++ b/drivers/net/tulip/eeprom.c
@@ -161,15 +161,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
if (ee_data[0] == 0xff) {
if (last_mediatable) {
controller_index++;
- printk(KERN_INFO "%s: Controller %d of multiport board.\n",
- dev->name, controller_index);
+ dev_info(&dev->dev,
+ "Controller %d of multiport board\n",
+ controller_index);
tp->mtable = last_mediatable;
ee_data = last_ee_data;
goto subsequent_board;
} else
- printk(KERN_INFO "%s: Missing EEPROM, this interface may "
- "not work correctly!\n",
- dev->name);
+ dev_info(&dev->dev,
+ "Missing EEPROM, this interface may not work correctly!\n");
return;
}
/* Do a fix-up based on the vendor half of the station address prefix. */
@@ -181,16 +181,15 @@ void __devinit tulip_parse_eeprom(struct net_device *dev)
i++; /* An Accton EN1207, not an outlaw Maxtech. */
memcpy(ee_data + 26, eeprom_fixups[i].newtable,
sizeof(eeprom_fixups[i].newtable));
- printk(KERN_INFO "%s: Old format EEPROM on '%s' board. Using"
- " substitute media control info.\n",
- dev->name, eeprom_fixups[i].name);
+ dev_info(&dev->dev,
+ "Old format EEPROM on '%s' board. Using substitute media control info\n",
+ eeprom_fixups[i].name);
break;
}
}
if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
- printk(KERN_INFO "%s: Old style EEPROM with no media selection "
- "information.\n",
- dev->name);
+ dev_info(&dev->dev,
+ "Old style EEPROM with no media selection information\n");
return;
}
}
@@ -218,7 +217,8 @@ subsequent_board:
/* there is no phy information, don't even try to build mtable */
if (count == 0) {
if (tulip_debug > 0)
- printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", dev->name);
+ dev_warn(&dev->dev,
+ "no phy info, aborting mtable build\n");
return;
}
@@ -234,8 +234,8 @@ subsequent_board:
mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
mtable->csr15dir = mtable->csr15val = 0;
- printk(KERN_INFO "%s: EEPROM default media type %s.\n", dev->name,
- media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+ dev_info(&dev->dev, "EEPROM default media type %s\n",
+ media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
for (i = 0; i < count; i++) {
struct medialeaf *leaf = &mtable->mleaf[i];
@@ -298,16 +298,17 @@ subsequent_board:
}
if (tulip_debug > 1 && leaf->media == 11) {
unsigned char *bp = leaf->leafdata;
- printk(KERN_INFO "%s: MII interface PHY %d, setup/reset "
- "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
- dev->name, bp[0], bp[1], bp[2 + bp[1]*2],
- bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+ dev_info(&dev->dev,
+ "MII interface PHY %d, setup/reset sequences %d/%d long, capabilities %02x %02x\n",
+ bp[0], bp[1], bp[2 + bp[1]*2],
+ bp[5 + bp[2 + bp[1]*2]*2],
+ bp[4 + bp[2 + bp[1]*2]*2]);
}
- printk(KERN_INFO "%s: Index #%d - Media %s (#%d) described "
- "by a %s (%d) block.\n",
- dev->name, i, medianame[leaf->media & 15], leaf->media,
- leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
- leaf->type);
+ dev_info(&dev->dev,
+ "Index #%d - Media %s (#%d) described by a %s (%d) block\n",
+ i, medianame[leaf->media & 15], leaf->media,
+ leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+ leaf->type);
}
if (new_advertise)
tp->sym_advertise = new_advertise;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index 2e8e8ee893c..1faf7a4d720 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -125,12 +125,12 @@ int tulip_poll(struct napi_struct *napi, int budget)
#endif
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
- tp->rx_ring[entry].status);
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
do {
if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
- printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
+ printk(KERN_DEBUG " In tulip_poll(), hardware disappeared\n");
break;
}
/* Acknowledge current RX interrupt sources. */
@@ -146,7 +146,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
break;
if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
dev->name, entry, status);
if (++work_done >= budget)
@@ -177,15 +177,15 @@ int tulip_poll(struct napi_struct *napi, int budget)
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- dev->name, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
tp->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
@@ -226,12 +226,11 @@ int tulip_poll(struct napi_struct *napi, int budget)
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
- "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
- dev->name,
- le32_to_cpu(tp->rx_ring[entry].buffer1),
- (unsigned long long)tp->rx_buffers[entry].mapping,
- skb->head, temp);
+ dev_err(&dev->dev,
+ "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %08llx %p / %p\n",
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (unsigned long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
}
#endif
@@ -365,16 +364,16 @@ static int tulip_rx(struct net_device *dev)
int received = 0;
if (tulip_debug > 4)
- printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
- tp->rx_ring[entry].status);
+ printk(KERN_DEBUG " In tulip_rx(), entry %d %08x\n",
+ entry, tp->rx_ring[entry].status);
/* If we own the next entry, it is a new packet. Send it up. */
while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
s32 status = le32_to_cpu(tp->rx_ring[entry].status);
short pkt_len;
if (tulip_debug > 5)
- printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
- dev->name, entry, status);
+ printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %08x\n",
+ dev->name, entry, status);
if (--rx_work_limit < 0)
break;
@@ -402,16 +401,16 @@ static int tulip_rx(struct net_device *dev)
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Oversized Ethernet frame "
- "spanned multiple buffers, status %8.8x!\n",
- dev->name, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, status %08x!\n",
+ status);
tp->stats.rx_length_errors++;
}
} else {
/* There was a fatal error. */
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
+ dev->name, status);
tp->stats.rx_errors++; /* end of a packet.*/
if (pkt_len > 1518 ||
(status & RxDescRunt))
@@ -450,12 +449,11 @@ static int tulip_rx(struct net_device *dev)
#ifndef final_version
if (tp->rx_buffers[entry].mapping !=
le32_to_cpu(tp->rx_ring[entry].buffer1)) {
- printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
- "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
- dev->name,
- le32_to_cpu(tp->rx_ring[entry].buffer1),
- (long long)tp->rx_buffers[entry].mapping,
- skb->head, temp);
+ dev_err(&dev->dev,
+ "Internal fault: The skbuff addresses do not match in tulip_rx: %08x vs. %Lx %p / %p\n",
+ le32_to_cpu(tp->rx_ring[entry].buffer1),
+ (long long)tp->rx_buffers[entry].mapping,
+ skb->head, temp);
}
#endif
@@ -569,7 +567,7 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#endif /* CONFIG_TULIP_NAPI */
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
+ printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x\n",
dev->name, csr5, ioread32(ioaddr + CSR5));
@@ -601,8 +599,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
/* There was an major error, log it. */
#ifndef final_version
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
+ dev->name, status);
#endif
tp->stats.tx_errors++;
if (status & 0x4104) tp->stats.tx_aborted_errors++;
@@ -631,8 +629,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
#ifndef final_version
if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
- dev->name, dirty_tx, tp->cur_tx);
+ dev_err(&dev->dev,
+ "Out-of-sync dirty pointer, %d vs. %d\n",
+ dirty_tx, tp->cur_tx);
dirty_tx += TX_RING_SIZE;
}
#endif
@@ -643,9 +642,10 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
tp->dirty_tx = dirty_tx;
if (csr5 & TxDied) {
if (tulip_debug > 2)
- printk(KERN_WARNING "%s: The transmitter stopped."
- " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
- dev->name, csr5, ioread32(ioaddr + CSR6), tp->csr6);
+ dev_warn(&dev->dev,
+ "The transmitter stopped. CSR5 is %x, CSR6 %x, new CSR6 %x\n",
+ csr5, ioread32(ioaddr + CSR6),
+ tp->csr6);
tulip_restart_rxtx(tp);
}
spin_unlock(&tp->lock);
@@ -696,8 +696,9 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
* to the 21142/3 docs that is).
* -- rmk
*/
- printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
- dev->name, tp->nir, error);
+ dev_err(&dev->dev,
+ "(%lu) System Error occurred (%d)\n",
+ tp->nir, error);
}
/* Clear all error sources, included undocumented ones! */
iowrite32(0x0800f7ba, ioaddr + CSR5);
@@ -706,16 +707,17 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
if (csr5 & TimerInt) {
if (tulip_debug > 2)
- printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
- dev->name, csr5);
+ dev_err(&dev->dev,
+ "Re-enabling interrupts, %08x\n",
+ csr5);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
tp->ttimer = 0;
oi++;
}
if (tx > maxtx || rx > maxrx || oi > maxoi) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Too much work during an interrupt, "
- "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
+ dev_warn(&dev->dev, "Too much work during an interrupt, csr5=0x%08x. (%lu) (%d,%d,%d)\n",
+ csr5, tp->nir, tx, rx, oi);
/* Acknowledge all interrupt sources. */
iowrite32(0x8001ffff, ioaddr + CSR5);
@@ -764,14 +766,18 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
entry = tp->dirty_rx % RX_RING_SIZE;
if (tp->rx_buffers[entry].skb == NULL) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n",
+ tp->nir, tp->cur_rx, tp->ttimer, rx);
if (tp->chip_id == LC82C168) {
iowrite32(0x00, ioaddr + CSR7);
mod_timer(&tp->timer, RUN_AT(HZ/50));
} else {
if (tp->ttimer == 0 || (ioread32(ioaddr + CSR11) & 0xffff) == 0) {
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
+ dev_warn(&dev->dev,
+ "in rx suspend mode: (%lu) set timer\n",
+ tp->nir);
iowrite32(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
ioaddr + CSR7);
iowrite32(TimerInt, ioaddr + CSR5);
@@ -787,8 +793,8 @@ irqreturn_t tulip_interrupt(int irq, void *dev_instance)
}
if (tulip_debug > 4)
- printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
- dev->name, ioread32(ioaddr + CSR5));
+ printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#04x\n",
+ dev->name, ioread32(ioaddr + CSR5));
return IRQ_HANDLED;
}
diff --git a/drivers/net/tulip/media.c b/drivers/net/tulip/media.c
index d8fda83705b..68b170ae4d1 100644
--- a/drivers/net/tulip/media.c
+++ b/drivers/net/tulip/media.c
@@ -182,9 +182,8 @@ void tulip_select_media(struct net_device *dev, int startup)
switch (mleaf->type) {
case 0: /* 21140 non-MII xcvr. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
- " with control setting %2.2x.\n",
- dev->name, p[1]);
+ printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver with control setting %02x\n",
+ dev->name, p[1]);
dev->if_port = p[0];
if (startup)
iowrite32(mtable->csr12dir | 0x100, ioaddr + CSR12);
@@ -205,15 +204,15 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
- dev->name);
+ printk(KERN_DEBUG "%s: Resetting the transceiver\n",
+ dev->name);
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
- "%4.4x/%4.4x.\n",
- dev->name, medianame[dev->if_port], setup[0], setup[1]);
+ printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control %04x/%04x\n",
+ dev->name, medianame[dev->if_port],
+ setup[0], setup[1]);
if (p[0] & 0x40) { /* SIA (CSR13-15) setup values are provided. */
csr13val = setup[0];
csr14val = setup[1];
@@ -240,8 +239,8 @@ void tulip_select_media(struct net_device *dev, int startup)
if (startup) iowrite32(csr13val, ioaddr + CSR13);
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Setting CSR15 to %8.8x/%8.8x.\n",
- dev->name, csr15dir, csr15val);
+ printk(KERN_DEBUG "%s: Setting CSR15 to %08x/%08x\n",
+ dev->name, csr15dir, csr15val);
if (mleaf->type == 4)
new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
else
@@ -317,8 +316,9 @@ void tulip_select_media(struct net_device *dev, int startup)
if (tp->mii_advertise == 0)
tp->mii_advertise = tp->advertising[phy_num];
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Advertising %4.4x on MII %d.\n",
- dev->name, tp->mii_advertise, tp->phys[phy_num]);
+ printk(KERN_DEBUG "%s: Advertising %04x on MII %d\n",
+ dev->name, tp->mii_advertise,
+ tp->phys[phy_num]);
tulip_mdio_write(dev, tp->phys[phy_num], 4, tp->mii_advertise);
}
break;
@@ -335,8 +335,8 @@ void tulip_select_media(struct net_device *dev, int startup)
struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
unsigned char *rst = rleaf->leafdata;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
- dev->name);
+ printk(KERN_DEBUG "%s: Resetting the transceiver\n",
+ dev->name);
for (i = 0; i < rst[0]; i++)
iowrite32(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
}
@@ -344,20 +344,20 @@ void tulip_select_media(struct net_device *dev, int startup)
break;
}
default:
- printk(KERN_DEBUG "%s: Invalid media table selection %d.\n",
- dev->name, mleaf->type);
+ printk(KERN_DEBUG "%s: Invalid media table selection %d\n",
+ dev->name, mleaf->type);
new_csr6 = 0x020E0000;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
- dev->name, medianame[dev->if_port],
+ printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %02x\n",
+ dev->name, medianame[dev->if_port],
ioread32(ioaddr + CSR12) & 0xff);
} else if (tp->chip_id == LC82C168) {
if (startup && ! tp->medialock)
dev->if_port = tp->mii_cnt ? 11 : 0;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
- dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
+ printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s\n",
+ dev->name, ioread32(ioaddr + 0xB8), medianame[dev->if_port]);
if (tp->mii_cnt) {
new_csr6 = 0x810C0000;
iowrite32(0x0001, ioaddr + CSR15);
@@ -388,10 +388,9 @@ void tulip_select_media(struct net_device *dev, int startup)
} else
new_csr6 = 0x03860000;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No media description table, assuming "
- "%s transceiver, CSR12 %2.2x.\n",
- dev->name, medianame[dev->if_port],
- ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: No media description table, assuming %s transceiver, CSR12 %02x\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR12));
}
tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
@@ -415,16 +414,17 @@ int tulip_check_duplex(struct net_device *dev)
bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
lpa = tulip_mdio_read(dev, tp->phys[0], MII_LPA);
if (tulip_debug > 1)
- printk(KERN_INFO "%s: MII status %4.4x, Link partner report "
- "%4.4x.\n", dev->name, bmsr, lpa);
+ dev_info(&dev->dev, "MII status %04x, Link partner report %04x\n",
+ bmsr, lpa);
if (bmsr == 0xffff)
return -2;
if ((bmsr & BMSR_LSTATUS) == 0) {
int new_bmsr = tulip_mdio_read(dev, tp->phys[0], MII_BMSR);
if ((new_bmsr & BMSR_LSTATUS) == 0) {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: No link beat on the MII interface,"
- " status %4.4x.\n", dev->name, new_bmsr);
+ dev_info(&dev->dev,
+ "No link beat on the MII interface, status %04x\n",
+ new_bmsr);
return -1;
}
}
@@ -443,10 +443,10 @@ int tulip_check_duplex(struct net_device *dev)
tulip_restart_rxtx(tp);
if (tulip_debug > 0)
- printk(KERN_INFO "%s: Setting %s-duplex based on MII"
- "#%d link partner capability of %4.4x.\n",
- dev->name, tp->full_duplex ? "full" : "half",
- tp->phys[0], lpa);
+ dev_info(&dev->dev,
+ "Setting %s-duplex based on MII#%d link partner capability of %04x\n",
+ tp->full_duplex ? "full" : "half",
+ tp->phys[0], lpa);
return 1;
}
@@ -501,15 +501,13 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
tp->phys[phy_idx++] = phy;
- printk (KERN_INFO "tulip%d: MII transceiver #%d "
- "config %4.4x status %4.4x advertising %4.4x.\n",
+ pr_info("tulip%d: MII transceiver #%d config %04x status %04x advertising %04x\n",
board_idx, phy, mii_reg0, mii_status, mii_advert);
/* Fixup for DLink with miswired PHY. */
if (mii_advert != to_advert) {
- printk (KERN_DEBUG "tulip%d: Advertising %4.4x on PHY %d,"
- " previously advertising %4.4x.\n",
- board_idx, to_advert, phy, mii_advert);
+ printk(KERN_DEBUG "tulip%d: Advertising %04x on PHY %d, previously advertising %04x\n",
+ board_idx, to_advert, phy, mii_advert);
tulip_mdio_write (dev, phy, 4, to_advert);
}
@@ -554,7 +552,7 @@ void __devinit tulip_find_mii (struct net_device *dev, int board_idx)
}
tp->mii_cnt = phy_idx;
if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
- printk (KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n",
+ pr_info("tulip%d: ***WARNING***: No MII transceiver found!\n",
board_idx);
tp->phys[0] = 1;
}
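
The media.c hunks above are almost entirely mechanical: "%4.4x" becomes "%04x", "%8.8x" becomes "%08x", and the trailing period is dropped from each message. For the unsigned register values being logged, precision-based zero padding and the 0 flag with a field width print exactly the same characters, so the console output is unchanged. A standalone userspace sketch (not part of the patch) demonstrating the equivalence:

#include <stdio.h>

int main(void)
{
	unsigned int samples[] = { 0x0, 0x7f, 0x1234, 0xdeadbeef };
	size_t i;

	/* each pair of columns prints identically */
	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%4.4x = %04x    %8.8x = %08x\n",
		       samples[i], samples[i], samples[i], samples[i]);
	return 0;
}
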
diff --git a/drivers/net/tulip/pnic.c b/drivers/net/tulip/pnic.c
index d3253ed09df..966efa1a27d 100644
--- a/drivers/net/tulip/pnic.c
+++ b/drivers/net/tulip/pnic.c
@@ -40,8 +40,8 @@ void pnic_do_nway(struct net_device *dev)
new_csr6 |= 0x00000200;
}
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
- dev->name, phy_reg, medianame[dev->if_port]);
+ printk(KERN_DEBUG "%s: PNIC autonegotiated status %08x, %s\n",
+ dev->name, phy_reg, medianame[dev->if_port]);
if (tp->csr6 != new_csr6) {
tp->csr6 = new_csr6;
/* Restart Tx */
@@ -58,8 +58,8 @@ void pnic_lnk_change(struct net_device *dev, int csr5)
int phy_reg = ioread32(ioaddr + 0xB8);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC link changed state %8.8x, CSR5 %8.8x.\n",
- dev->name, phy_reg, csr5);
+ printk(KERN_DEBUG "%s: PNIC link changed state %08x, CSR5 %08x\n",
+ dev->name, phy_reg, csr5);
if (ioread32(ioaddr + CSR5) & TPLnkFail) {
iowrite32((ioread32(ioaddr + CSR7) & ~TPLnkFail) | TPLnkPass, ioaddr + CSR7);
/* If we use an external MII, then we mustn't use the
@@ -114,9 +114,8 @@ void pnic_timer(unsigned long data)
int csr5 = ioread32(ioaddr + CSR5);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: PNIC timer PHY status %8.8x, %s "
- "CSR5 %8.8x.\n",
- dev->name, phy_reg, medianame[dev->if_port], csr5);
+ printk(KERN_DEBUG "%s: PNIC timer PHY status %08x, %s CSR5 %08x\n",
+ dev->name, phy_reg, medianame[dev->if_port], csr5);
if (phy_reg & 0x04000000) { /* Remote link fault */
iowrite32(0x0201F078, ioaddr + 0xB8);
next_tick = 1*HZ;
@@ -126,10 +125,11 @@ void pnic_timer(unsigned long data)
next_tick = 60*HZ;
} else if (csr5 & TPLnkFail) { /* 100baseTx link beat */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %4.4x, "
- "CSR5 %8.8x, PHY %3.3x.\n",
- dev->name, medianame[dev->if_port], csr12,
- ioread32(ioaddr + CSR5), ioread32(ioaddr + 0xB8));
+ printk(KERN_DEBUG "%s: %s link beat failed, CSR12 %04x, CSR5 %08x, PHY %03x\n",
+ dev->name, medianame[dev->if_port],
+ csr12,
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + 0xB8));
next_tick = 3*HZ;
if (tp->medialock) {
} else if (tp->nwayset && (dev->if_port & 1)) {
@@ -151,10 +151,11 @@ void pnic_timer(unsigned long data)
tulip_restart_rxtx(tp);
dev->trans_start = jiffies;
if (tulip_debug > 1)
- printk(KERN_INFO "%s: Changing PNIC configuration to %s "
- "%s-duplex, CSR6 %8.8x.\n",
- dev->name, medianame[dev->if_port],
- tp->full_duplex ? "full" : "half", new_csr6);
+ dev_info(&dev->dev,
+ "Changing PNIC configuration to %s %s-duplex, CSR6 %08x\n",
+ medianame[dev->if_port],
+ tp->full_duplex ? "full" : "half",
+ new_csr6);
}
}
}
@@ -162,7 +163,7 @@ too_good_connection:
mod_timer(&tp->timer, RUN_AT(next_tick));
if(!ioread32(ioaddr + CSR7)) {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: sw timer wakeup.\n", dev->name);
+ dev_info(&dev->dev, "sw timer wakeup\n");
disable_irq(dev->irq);
tulip_refill_rx(dev);
enable_irq(dev->irq);
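
Several of the pnic.c messages above switch from printk(KERN_INFO "%s: ...", dev->name, ...) to dev_info(&dev->dev, ...). dev_info() takes the struct device embedded in the net_device, so the driver core supplies the message prefix and the hand-rolled "%s: " / dev->name pair drops out of every call site. A minimal kernel-context sketch of the two styles (illustrative only; the helper name is made up and this will not build outside a kernel tree):

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/device.h>

static void report_wakeup(struct net_device *dev)
{
	/* old style: the caller pastes the interface name into the format */
	printk(KERN_INFO "%s: sw timer wakeup\n", dev->name);

	/* new style: the prefix (driver name + bus id) comes from the device core */
	dev_info(&dev->dev, "sw timer wakeup\n");
}
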
diff --git a/drivers/net/tulip/pnic2.c b/drivers/net/tulip/pnic2.c
index d8418694bf4..b8197666021 100644
--- a/drivers/net/tulip/pnic2.c
+++ b/drivers/net/tulip/pnic2.c
@@ -87,8 +87,8 @@ void pnic2_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 3)
- printk(KERN_INFO"%s: PNIC2 negotiation status %8.8x.\n",
- dev->name,ioread32(ioaddr + CSR12));
+ dev_info(&dev->dev, "PNIC2 negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
if (next_tick) {
mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -125,8 +125,8 @@ void pnic2_start_nway(struct net_device *dev)
csr14 |= 0x00001184;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, "
- "csr14=%8.8x.\n", dev->name, csr14);
+ printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, csr14=%08x\n",
+ dev->name, csr14);
/* tell pnic2_lnk_change we are doing an nway negotiation */
dev->if_port = 0;
@@ -137,8 +137,8 @@ void pnic2_start_nway(struct net_device *dev)
tp->csr6 = ioread32(ioaddr + CSR6);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: On Entry to Nway, "
- "csr6=%8.8x.\n", dev->name, tp->csr6);
+ printk(KERN_DEBUG "%s: On Entry to Nway, csr6=%08x\n",
+ dev->name, tp->csr6);
/* mask off any bits not to touch
* comment at top of file explains mask value
@@ -181,9 +181,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
int csr12 = ioread32(ioaddr + CSR12);
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 link status interrupt %8.8x, "
- " CSR5 %x, %8.8x.\n", dev->name, csr12,
- csr5, ioread32(ioaddr + CSR14));
+ dev_info(&dev->dev,
+ "PNIC2 link status interrupt %08x, CSR5 %x, %08x\n",
+ csr12, csr5, ioread32(ioaddr + CSR14));
/* If NWay finished and we have a negotiated partner capability.
* check bits 14:12 for bit pattern 101 - all is good
@@ -215,9 +215,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
else if (negotiated & 0x0020) dev->if_port = 0;
else {
if (tulip_debug > 1)
- printk(KERN_INFO "%s: funny autonegotiate result "
- "csr12 %8.8x advertising %4.4x\n",
- dev->name, csr12, tp->sym_advertise);
+ dev_info(&dev->dev,
+ "funny autonegotiate result csr12 %08x advertising %04x\n",
+ csr12, tp->sym_advertise);
tp->nwayset = 0;
/* so check if 100baseTx link state is okay */
if ((csr12 & 2) == 0 && (tp->sym_advertise & 0x0180))
@@ -231,10 +231,11 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1) {
if (tp->nwayset)
- printk(KERN_INFO "%s: Switching to %s based on link "
- "negotiation %4.4x & %4.4x = %4.4x.\n",
- dev->name, medianame[dev->if_port],
- tp->sym_advertise, tp->lpar, negotiated);
+ dev_info(&dev->dev,
+ "Switching to %s based on link negotiation %04x & %04x = %04x\n",
+ medianame[dev->if_port],
+ tp->sym_advertise, tp->lpar,
+ negotiated);
}
/* remember to turn off bit 7 - autonegotiate
@@ -270,9 +271,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
iowrite32(1, ioaddr + CSR13);
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Setting CSR6 %8.8x/%x CSR12 "
- "%8.8x.\n", dev->name, tp->csr6,
- ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
+ printk(KERN_DEBUG "%s: Setting CSR6 %08x/%x CSR12 %08x\n",
+ dev->name, tp->csr6,
+ ioread32(ioaddr + CSR6), ioread32(ioaddr + CSR12));
/* now the following actually writes out the
* new csr6 values
@@ -282,9 +283,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
return;
} else {
- printk(KERN_INFO "%s: Autonegotiation failed, "
- "using %s, link beat status %4.4x.\n",
- dev->name, medianame[dev->if_port], csr12);
+ dev_info(&dev->dev,
+ "Autonegotiation failed, using %s, link beat status %04x\n",
+ medianame[dev->if_port], csr12);
/* remember to turn off bit 7 - autonegotiate
* enable so we don't forget
@@ -339,9 +340,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* we are at 100mb and a potential link change occurred */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 2) ? "failed" : "good");
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 2) ? "failed" : "good");
/* check 100 link beat */
@@ -364,9 +365,9 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
/* we are at 10mb and a potential link change occurred */
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 %s link beat %s.\n",
- dev->name, medianame[dev->if_port],
- (csr12 & 4) ? "failed" : "good");
+ dev_info(&dev->dev, "PNIC2 %s link beat %s\n",
+ medianame[dev->if_port],
+ (csr12 & 4) ? "failed" : "good");
tp->nway = 0;
@@ -385,7 +386,7 @@ void pnic2_lnk_change(struct net_device *dev, int csr5)
if (tulip_debug > 1)
- printk(KERN_INFO"%s: PNIC2 Link Change Default?\n",dev->name);
+ dev_info(&dev->dev, "PNIC2 Link Change Default?\n");
/* if all else fails default to trying 10baseT-HD */
dev->if_port = 0;
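
Many of the pnic2.c conversions above also fold format strings that were split across several source lines into a single literal. Adjacent C string literals are concatenated by the compiler, which is why the old multi-line forms worked; keeping the whole message on one line simply makes it possible to grep the source for text seen in the log. A trivial userspace illustration (the values are arbitrary):

#include <stdio.h>

int main(void)
{
	/* the two adjacent literals below are one string after compilation */
	printf("PNIC2 link status interrupt %08x, "
	       "CSR5 %x, %08x\n", 0x1234u, 5u, 0xabcdu);
	return 0;
}
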
diff --git a/drivers/net/tulip/timer.c b/drivers/net/tulip/timer.c
index a0e08422308..36c2725ec88 100644
--- a/drivers/net/tulip/timer.c
+++ b/drivers/net/tulip/timer.c
@@ -28,11 +28,11 @@ void tulip_media_task(struct work_struct *work)
unsigned long flags;
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Media selection tick, %s, status %8.8x mode"
- " %8.8x SIA %8.8x %8.8x %8.8x %8.8x.\n",
- dev->name, medianame[dev->if_port], ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6), csr12, ioread32(ioaddr + CSR13),
- ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ printk(KERN_DEBUG "%s: Media selection tick, %s, status %08x mode %08x SIA %08x %08x %08x %08x\n",
+ dev->name, medianame[dev->if_port],
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR6),
+ csr12, ioread32(ioaddr + CSR13),
+ ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
}
switch (tp->chip_id) {
case DC21140:
@@ -48,9 +48,9 @@ void tulip_media_task(struct work_struct *work)
Assume this is a generic MII or SYM transceiver. */
next_tick = 60*HZ;
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: network media monitor CSR6 %8.8x "
- "CSR12 0x%2.2x.\n",
- dev->name, ioread32(ioaddr + CSR6), csr12 & 0xff);
+ printk(KERN_DEBUG "%s: network media monitor CSR6 %08x CSR12 0x%02x\n",
+ dev->name,
+ ioread32(ioaddr + CSR6), csr12 & 0xff);
break;
}
mleaf = &tp->mtable->mleaf[tp->cur_index];
@@ -62,9 +62,8 @@ void tulip_media_task(struct work_struct *work)
s8 bitnum = p[offset];
if (p[offset+1] & 0x80) {
if (tulip_debug > 1)
- printk(KERN_DEBUG"%s: Transceiver monitor tick "
- "CSR12=%#2.2x, no media sense.\n",
- dev->name, csr12);
+ printk(KERN_DEBUG "%s: Transceiver monitor tick CSR12=%#02x, no media sense\n",
+ dev->name, csr12);
if (mleaf->type == 4) {
if (mleaf->media == 3 && (csr12 & 0x02))
goto select_next_media;
@@ -72,16 +71,16 @@ void tulip_media_task(struct work_struct *work)
break;
}
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#2.2x"
- " bit %d is %d, expecting %d.\n",
- dev->name, csr12, (bitnum >> 1) & 7,
- (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
- (bitnum >= 0));
+ printk(KERN_DEBUG "%s: Transceiver monitor tick: CSR12=%#02x bit %d is %d, expecting %d\n",
+ dev->name, csr12, (bitnum >> 1) & 7,
+ (csr12 & (1 << ((bitnum >> 1) & 7))) != 0,
+ (bitnum >= 0));
/* Check that the specified bit has the proper value. */
if ((bitnum < 0) !=
((csr12 & (1 << ((bitnum >> 1) & 7))) != 0)) {
if (tulip_debug > 2)
- printk(KERN_DEBUG "%s: Link beat detected for %s.\n", dev->name,
+ printk(KERN_DEBUG "%s: Link beat detected for %s\n",
+ dev->name,
medianame[mleaf->media & MEDIA_MASK]);
if ((p[2] & 0x61) == 0x01) /* Bogus Znyx board. */
goto actually_mii;
@@ -100,9 +99,9 @@ void tulip_media_task(struct work_struct *work)
if (tulip_media_cap[dev->if_port] & MediaIsFD)
goto select_next_media; /* Skip FD entries. */
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: No link beat on media %s,"
- " trying transceiver type %s.\n",
- dev->name, medianame[mleaf->media & MEDIA_MASK],
+ printk(KERN_DEBUG "%s: No link beat on media %s, trying transceiver type %s\n",
+ dev->name,
+ medianame[mleaf->media & MEDIA_MASK],
medianame[tp->mtable->mleaf[tp->cur_index].media]);
tulip_select_media(dev, 0);
/* Restart the transmit process. */
@@ -151,8 +150,8 @@ void mxic_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 3) {
- printk(KERN_INFO"%s: MXIC negotiation status %8.8x.\n", dev->name,
- ioread32(ioaddr + CSR12));
+ dev_info(&dev->dev, "MXIC negotiation status %08x\n",
+ ioread32(ioaddr + CSR12));
}
if (next_tick) {
mod_timer(&tp->timer, RUN_AT(next_tick));
@@ -167,11 +166,10 @@ void comet_timer(unsigned long data)
int next_tick = 60*HZ;
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: Comet link status %4.4x partner capability "
- "%4.4x.\n",
- dev->name,
- tulip_mdio_read(dev, tp->phys[0], 1),
- tulip_mdio_read(dev, tp->phys[0], 5));
+ printk(KERN_DEBUG "%s: Comet link status %04x partner capability %04x\n",
+ dev->name,
+ tulip_mdio_read(dev, tp->phys[0], 1),
+ tulip_mdio_read(dev, tp->phys[0], 5));
/* mod_timer synchronizes us with potential add_timer calls
* from interrupts.
*/
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 20696b5d60a..7f544ef2f5f 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -41,7 +41,6 @@
static char version[] __devinitdata =
"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
-
/* A few user-configurable values. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
@@ -211,7 +210,7 @@ struct tulip_chip_table tulip_tbl[] = {
};
-static struct pci_device_id tulip_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(tulip_pci_tbl) = {
{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
@@ -326,7 +325,8 @@ static void tulip_up(struct net_device *dev)
udelay(100);
if (tulip_debug > 1)
- printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", dev->name, dev->irq);
+ printk(KERN_DEBUG "%s: tulip_up(), irq==%d\n",
+ dev->name, dev->irq);
iowrite32(tp->rx_ring_dma, ioaddr + CSR3);
iowrite32(tp->tx_ring_dma, ioaddr + CSR4);
@@ -387,8 +387,9 @@ static void tulip_up(struct net_device *dev)
(dev->if_port == 12 ? 0 : dev->if_port);
for (i = 0; i < tp->mtable->leafcount; i++)
if (tp->mtable->mleaf[i].media == looking_for) {
- printk(KERN_INFO "%s: Using user-specified media %s.\n",
- dev->name, medianame[dev->if_port]);
+ dev_info(&dev->dev,
+ "Using user-specified media %s\n",
+ medianame[dev->if_port]);
goto media_picked;
}
}
@@ -396,8 +397,9 @@ static void tulip_up(struct net_device *dev)
int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
for (i = 0; i < tp->mtable->leafcount; i++)
if (tp->mtable->mleaf[i].media == looking_for) {
- printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
- dev->name, medianame[looking_for]);
+ dev_info(&dev->dev,
+ "Using EEPROM-set media %s\n",
+ medianame[looking_for]);
goto media_picked;
}
}
@@ -424,9 +426,10 @@ media_picked:
if (tp->mii_cnt) {
tulip_select_media(dev, 1);
if (tulip_debug > 1)
- printk(KERN_INFO "%s: Using MII transceiver %d, status "
- "%4.4x.\n",
- dev->name, tp->phys[0], tulip_mdio_read(dev, tp->phys[0], 1));
+ dev_info(&dev->dev,
+ "Using MII transceiver %d, status %04x\n",
+ tp->phys[0],
+ tulip_mdio_read(dev, tp->phys[0], 1));
iowrite32(csr6_mask_defstate, ioaddr + CSR6);
tp->csr6 = csr6_mask_hdcap;
dev->if_port = 11;
@@ -490,9 +493,10 @@ media_picked:
iowrite32(0, ioaddr + CSR2); /* Rx poll demand */
if (tulip_debug > 2) {
- printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
- dev->name, ioread32(ioaddr + CSR0), ioread32(ioaddr + CSR5),
- ioread32(ioaddr + CSR6));
+ printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %08x, CSR5 %08x CSR6 %08x\n",
+ dev->name, ioread32(ioaddr + CSR0),
+ ioread32(ioaddr + CSR5),
+ ioread32(ioaddr + CSR6));
}
/* Set the timer to switch to check for link beat and perhaps switch
@@ -540,27 +544,30 @@ static void tulip_tx_timeout(struct net_device *dev)
if (tulip_media_cap[dev->if_port] & MediaIsMII) {
/* Do nothing -- the media monitor should handle this. */
if (tulip_debug > 1)
- printk(KERN_WARNING "%s: Transmit timeout using MII device.\n",
- dev->name);
+ dev_warn(&dev->dev,
+ "Transmit timeout using MII device\n");
} else if (tp->chip_id == DC21140 || tp->chip_id == DC21142 ||
tp->chip_id == MX98713 || tp->chip_id == COMPEX9881 ||
tp->chip_id == DM910X) {
- printk(KERN_WARNING "%s: 21140 transmit timed out, status %8.8x, "
- "SIA %8.8x %8.8x %8.8x %8.8x, resetting...\n",
- dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
- ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14), ioread32(ioaddr + CSR15));
+ dev_warn(&dev->dev,
+ "21140 transmit timed out, status %08x, SIA %08x %08x %08x %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12),
+ ioread32(ioaddr + CSR13), ioread32(ioaddr + CSR14),
+ ioread32(ioaddr + CSR15));
tp->timeout_recovery = 1;
schedule_work(&tp->media_work);
goto out_unlock;
} else if (tp->chip_id == PNIC2) {
- printk(KERN_WARNING "%s: PNIC2 transmit timed out, status %8.8x, "
- "CSR6/7 %8.8x / %8.8x CSR12 %8.8x, resetting...\n",
- dev->name, (int)ioread32(ioaddr + CSR5), (int)ioread32(ioaddr + CSR6),
- (int)ioread32(ioaddr + CSR7), (int)ioread32(ioaddr + CSR12));
+ dev_warn(&dev->dev,
+ "PNIC2 transmit timed out, status %08x, CSR6/7 %08x / %08x CSR12 %08x, resetting...\n",
+ (int)ioread32(ioaddr + CSR5),
+ (int)ioread32(ioaddr + CSR6),
+ (int)ioread32(ioaddr + CSR7),
+ (int)ioread32(ioaddr + CSR12));
} else {
- printk(KERN_WARNING "%s: Transmit timed out, status %8.8x, CSR12 "
- "%8.8x, resetting...\n",
- dev->name, ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
+ dev_warn(&dev->dev,
+ "Transmit timed out, status %08x, CSR12 %08x, resetting...\n",
+ ioread32(ioaddr + CSR5), ioread32(ioaddr + CSR12));
dev->if_port = 0;
}
@@ -570,26 +577,26 @@ static void tulip_tx_timeout(struct net_device *dev)
for (i = 0; i < RX_RING_SIZE; i++) {
u8 *buf = (u8 *)(tp->rx_ring[i].buffer1);
int j;
- printk(KERN_DEBUG "%2d: %8.8x %8.8x %8.8x %8.8x "
- "%2.2x %2.2x %2.2x.\n",
- i, (unsigned int)tp->rx_ring[i].status,
- (unsigned int)tp->rx_ring[i].length,
- (unsigned int)tp->rx_ring[i].buffer1,
- (unsigned int)tp->rx_ring[i].buffer2,
- buf[0], buf[1], buf[2]);
+ printk(KERN_DEBUG
+ "%2d: %08x %08x %08x %08x %02x %02x %02x\n",
+ i,
+ (unsigned int)tp->rx_ring[i].status,
+ (unsigned int)tp->rx_ring[i].length,
+ (unsigned int)tp->rx_ring[i].buffer1,
+ (unsigned int)tp->rx_ring[i].buffer2,
+ buf[0], buf[1], buf[2]);
for (j = 0; buf[j] != 0xee && j < 1600; j++)
if (j < 100)
- printk(KERN_CONT " %2.2x", buf[j]);
- printk(KERN_CONT " j=%d.\n", j);
+ pr_cont(" %02x", buf[j]);
+ pr_cont(" j=%d\n", j);
}
- printk(KERN_DEBUG " Rx ring %8.8x: ", (int)tp->rx_ring);
+ printk(KERN_DEBUG " Rx ring %08x: ", (int)tp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x",
- (unsigned int)tp->rx_ring[i].status);
- printk(KERN_DEBUG " Tx ring %8.8x: ", (int)tp->tx_ring);
+ pr_cont(" %08x", (unsigned int)tp->rx_ring[i].status);
+ printk(KERN_DEBUG " Tx ring %08x: ", (int)tp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x", (unsigned int)tp->tx_ring[i].status);
- printk(KERN_CONT "\n");
+ pr_cont(" %08x", (unsigned int)tp->tx_ring[i].status);
+ pr_cont("\n");
}
#endif
@@ -832,8 +839,9 @@ static int tulip_close (struct net_device *dev)
tulip_down (dev);
if (tulip_debug > 1)
- printk (KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
- dev->name, ioread32 (ioaddr + CSR5));
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "Shutting down ethercard, status was %02x\n",
+ ioread32 (ioaddr + CSR5));
free_irq (dev->irq, dev);
@@ -989,12 +997,10 @@ static void build_setup_frame_hash(u16 *setup_frm, struct net_device *dev)
memset(hash_table, 0, sizeof(hash_table));
set_bit_le(255, hash_table); /* Broadcast entry */
/* This should work on big-endian machines as well. */
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int index = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff;
set_bit_le(index, hash_table);
-
}
for (i = 0; i < 32; i++) {
*setup_frm++ = hash_table[i];
@@ -1013,20 +1019,18 @@ static void build_setup_frame_perfect(u16 *setup_frm, struct net_device *dev)
{
struct tulip_private *tp = netdev_priv(dev);
struct dev_mc_list *mclist;
- int i;
u16 *eaddrs;
/* We have <= 14 addresses so we can use the wonderful
16 address perfect filtering of the Tulip. */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
eaddrs = (u16 *)mclist->dmi_addr;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
*setup_frm++ = *eaddrs; *setup_frm++ = *eaddrs++;
}
/* Fill the unused entries with the broadcast address. */
- memset(setup_frm, 0xff, (15-i)*12);
+ memset(setup_frm, 0xff, (15 - netdev_mc_count(dev)) * 12);
setup_frm = &tp->setup_frame[15*6];
/* Fill the final entry with our physical address. */
@@ -1049,7 +1053,8 @@ static void set_rx_mode(struct net_device *dev)
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
tp->csr6 |= AcceptAllMulticast | AcceptAllPhys;
csr6 |= AcceptAllMulticast | AcceptAllPhys;
- } else if ((dev->mc_count > 1000) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 1000) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well -- accept all multicasts. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
@@ -1057,15 +1062,14 @@ static void set_rx_mode(struct net_device *dev)
/* Some work-alikes have only a 64-entry hash filter table. */
/* Should verify correctness on big-endian/__powerpc__ */
struct dev_mc_list *mclist;
- int i;
- if (dev->mc_count > 64) { /* Arbitrary non-effective limit. */
+ if (netdev_mc_count(dev) > 64) {
+ /* Arbitrary non-effective limit. */
tp->csr6 |= AcceptAllMulticast;
csr6 |= AcceptAllMulticast;
} else {
u32 mc_filter[2] = {0, 0}; /* Multicast hash filter */
int filterbit;
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
if (tp->flags & COMET_MAC_ADDR)
filterbit = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
else
@@ -1073,10 +1077,10 @@ static void set_rx_mode(struct net_device *dev)
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
if (tulip_debug > 2)
- printk(KERN_INFO "%s: Added filter for %pM"
- " %8.8x bit %d.\n",
- dev->name, mclist->dmi_addr,
- ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
+ dev_info(&dev->dev,
+ "Added filter for %pM %08x bit %d\n",
+ mclist->dmi_addr,
+ ether_crc(ETH_ALEN, mclist->dmi_addr), filterbit);
}
if (mc_filter[0] == tp->mc_filter[0] &&
mc_filter[1] == tp->mc_filter[1])
@@ -1099,7 +1103,8 @@ static void set_rx_mode(struct net_device *dev)
/* Note that only the low-address shortword of setup_frame is valid!
The values are doubled for big-endian architectures. */
- if (dev->mc_count > 14) { /* Must use a multicast hash table. */
+ if (netdev_mc_count(dev) > 14) {
+ /* Must use a multicast hash table. */
build_setup_frame_hash(tp->setup_frame, dev);
tx_flags = 0x08400000 | 192;
} else {
@@ -1288,9 +1293,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
unsigned int force_csr0 = 0;
#ifndef MODULE
- static int did_version; /* Already printed version info. */
- if (tulip_debug > 0 && did_version++ == 0)
- printk (KERN_INFO "%s", version);
+ if (tulip_debug > 0)
+ printk_once(KERN_INFO "%s", version);
#endif
board_idx++;
@@ -1301,7 +1305,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
*/
if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
- printk (KERN_ERR PFX "skipping LMC card.\n");
+ pr_err(PFX "skipping LMC card\n");
return -ENODEV;
}
@@ -1317,15 +1321,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (pdev->vendor == 0x1282 && pdev->device == 0x9100 &&
pdev->revision < 0x30) {
- printk(KERN_INFO PFX
- "skipping early DM9100 with Crc bug (use dmfe)\n");
+ pr_info(PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
return -ENODEV;
}
dp = pci_device_to_OF_node(pdev);
if (!(dp && of_get_property(dp, "local-mac-address", NULL))) {
- printk(KERN_INFO PFX
- "skipping DM910x expansion card (use dmfe)\n");
+ pr_info(PFX "skipping DM910x expansion card (use dmfe)\n");
return -ENODEV;
}
}
@@ -1372,9 +1374,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
i = pci_enable_device(pdev);
if (i) {
- printk (KERN_ERR PFX
- "Cannot enable tulip board #%d, aborting\n",
- board_idx);
+ pr_err(PFX "Cannot enable tulip board #%d, aborting\n",
+ board_idx);
return i;
}
@@ -1383,22 +1384,22 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
/* alloc_etherdev ensures aligned and zeroed private structures */
dev = alloc_etherdev (sizeof (*tp));
if (!dev) {
- printk (KERN_ERR PFX "ether device alloc failed, aborting\n");
+ pr_err(PFX "ether device alloc failed, aborting\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
- printk (KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, "
- "aborting\n", pci_name(pdev),
- (unsigned long long)pci_resource_len (pdev, 0),
- (unsigned long long)pci_resource_start (pdev, 0));
+ pr_err(PFX "%s: I/O region (0x%llx@0x%llx) too small, aborting\n",
+ pci_name(pdev),
+ (unsigned long long)pci_resource_len (pdev, 0),
+ (unsigned long long)pci_resource_start (pdev, 0));
goto err_out_free_netdev;
}
/* grab all resources from both PIO and MMIO regions, as we
* don't want anyone else messing around with our hardware */
- if (pci_request_regions (pdev, "tulip"))
+ if (pci_request_regions (pdev, DRV_NAME))
goto err_out_free_netdev;
ioaddr = pci_iomap(pdev, TULIP_BAR, tulip_tbl[chip_idx].io_size);
@@ -1611,8 +1612,8 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (dev->mem_start & MEDIA_MASK)
tp->default_port = dev->mem_start & MEDIA_MASK;
if (tp->default_port) {
- printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
- board_idx, medianame[tp->default_port & MEDIA_MASK]);
+ pr_info(DRV_NAME "%d: Transceiver selection forced to %s\n",
+ board_idx, medianame[tp->default_port & MEDIA_MASK]);
tp->medialock = 1;
if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
tp->full_duplex = 1;
@@ -1627,7 +1628,7 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
}
if (tp->flags & HAS_MEDIA_TABLE) {
- sprintf(dev->name, "tulip%d", board_idx); /* hack */
+ sprintf(dev->name, DRV_NAME "%d", board_idx); /* hack */
tulip_parse_eeprom(dev);
strcpy(dev->name, "eth%d"); /* un-hack */
}
@@ -1663,20 +1664,18 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
if (register_netdev(dev))
goto err_out_free_ring;
- printk(KERN_INFO "%s: %s rev %d at "
+ pci_set_drvdata(pdev, dev);
+
+ dev_info(&dev->dev,
#ifdef CONFIG_TULIP_MMIO
- "MMIO"
+ "%s rev %d at MMIO %#llx,%s %pM, IRQ %d\n",
#else
- "Port"
+ "%s rev %d at Port %#llx,%s %pM, IRQ %d\n",
#endif
- " %#llx,", dev->name, chip_name, pdev->revision,
- (unsigned long long) pci_resource_start(pdev, TULIP_BAR));
- pci_set_drvdata(pdev, dev);
-
- if (eeprom_missing)
- printk(" EEPROM not present,");
- printk(" %pM", dev->dev_addr);
- printk(", IRQ %d.\n", irq);
+ chip_name, pdev->revision,
+ (unsigned long long)pci_resource_start(pdev, TULIP_BAR),
+ eeprom_missing ? " EEPROM not present," : "",
+ dev->dev_addr, irq);
if (tp->chip_id == PNIC2)
tp->link_change = pnic2_lnk_change;
@@ -1799,12 +1798,12 @@ static int tulip_resume(struct pci_dev *pdev)
return 0;
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR "tulip: pci_enable_device failed in resume\n");
+ pr_err(PFX "pci_enable_device failed in resume\n");
return retval;
}
if ((retval = request_irq(dev->irq, tulip_interrupt, IRQF_SHARED, dev->name, dev))) {
- printk (KERN_ERR "tulip: request_irq failed in resume\n");
+ pr_err(PFX "request_irq failed in resume\n");
return retval;
}
@@ -1874,7 +1873,7 @@ static struct pci_driver tulip_driver = {
static int __init tulip_init (void)
{
#ifdef MODULE
- printk (KERN_INFO "%s", version);
+ pr_info("%s", version);
#endif
/* copy module parms into globals */
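
Besides the logging changes, the tulip_core.c hunks above replace the open-coded dev->mc_list / dev->mc_count walks with the netdev_for_each_mc_addr() iterator and the netdev_mc_count() accessor, which hide the multicast list representation from the driver. A rough sketch of the new pattern (the function name and the 64-entry cutoff are illustrative, not the exact tulip filter layout):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/string.h>

static void build_mc_hash(struct net_device *dev, u32 mc_filter[2])
{
	struct dev_mc_list *mclist;
	int bit;

	memset(mc_filter, 0, 2 * sizeof(u32));
	if (netdev_mc_count(dev) > 64) {	/* too many entries: accept all */
		memset(mc_filter, 0xff, 2 * sizeof(u32));
		return;
	}
	netdev_for_each_mc_addr(mclist, dev) {	/* replaces the manual list walk */
		bit = ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f;
		mc_filter[bit >> 5] |= 1 << (bit & 31);
	}
}
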
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index fa019cabc35..0ab05af237e 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -12,6 +12,8 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "uli526x"
#define DRV_VERSION "0.9.3"
#define DRV_RELDATE "2005-7-29"
@@ -82,9 +84,16 @@
#define ULI526X_TX_TIMEOUT ((16*HZ)/2) /* tx packet time-out time 8 s */
#define ULI526X_TX_KICK (4*HZ/2) /* tx packet Kick-out time 2 s */
-#define ULI526X_DBUG(dbug_now, msg, value) if (uli526x_debug || (dbug_now)) printk(KERN_ERR DRV_NAME ": %s %lx\n", (msg), (long) (value))
+#define ULI526X_DBUG(dbug_now, msg, value) \
+do { \
+ if (uli526x_debug || (dbug_now)) \
+ pr_err("%s %lx\n", (msg), (long) (value)); \
+} while (0)
-#define SHOW_MEDIA_TYPE(mode) printk(KERN_ERR DRV_NAME ": Change Speed to %sMhz %s duplex\n",mode & 1 ?"100":"10", mode & 4 ? "full":"half");
+#define SHOW_MEDIA_TYPE(mode) \
+ pr_err("Change Speed to %sMhz %s duplex\n", \
+ mode & 1 ? "100" : "10", \
+ mode & 4 ? "full" : "half");
/* CR9 definition: SROM/MII */
@@ -284,7 +293,7 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
SET_NETDEV_DEV(dev, &pdev->dev);
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING DRV_NAME ": 32-bit PCI DMA not available.\n");
+ pr_warning("32-bit PCI DMA not available\n");
err = -ENODEV;
goto err_out_free;
}
@@ -295,19 +304,19 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
goto err_out_free;
if (!pci_resource_start(pdev, 0)) {
- printk(KERN_ERR DRV_NAME ": I/O base is zero\n");
+ pr_err("I/O base is zero\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_resource_len(pdev, 0) < (ULI526X_IO_SIZE) ) {
- printk(KERN_ERR DRV_NAME ": Allocated I/O size too small\n");
+ pr_err("Allocated I/O size too small\n");
err = -ENODEV;
goto err_out_disable;
}
if (pci_request_regions(pdev, DRV_NAME)) {
- printk(KERN_ERR DRV_NAME ": Failed to request PCI regions\n");
+ pr_err("Failed to request PCI regions\n");
err = -ENODEV;
goto err_out_disable;
}
@@ -382,9 +391,9 @@ static int __devinit uli526x_init_one (struct pci_dev *pdev,
if (err)
goto err_out_res;
- printk(KERN_INFO "%s: ULi M%04lx at pci%s, %pM, irq %d.\n",
- dev->name,ent->driver_data >> 16,pci_name(pdev),
- dev->dev_addr, dev->irq);
+ dev_info(&dev->dev, "ULi M%04lx at pci%s, %pM, irq %d\n",
+ ent->driver_data >> 16, pci_name(pdev),
+ dev->dev_addr, dev->irq);
pci_set_master(pdev);
@@ -516,7 +525,7 @@ static void uli526x_init(struct net_device *dev)
}
}
if(phy_tmp == 32)
- printk(KERN_WARNING "Can not find the phy address!!!");
+ pr_warning("Can not find the phy address!!!");
/* Parse SROM and media mode */
db->media_mode = uli526x_media_mode;
@@ -548,7 +557,7 @@ static void uli526x_init(struct net_device *dev)
update_cr6(db->cr6_data, ioaddr);
/* Send setup frame */
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
/* Init CR7, interrupt active bit */
db->cr7_data = CR7_DEFAULT;
@@ -582,7 +591,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* Too large packet check */
if (skb->len > MAX_PACKET_SIZE) {
- printk(KERN_ERR DRV_NAME ": big packet = %d\n", (u16)skb->len);
+ pr_err("big packet = %d\n", (u16)skb->len);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -592,7 +601,7 @@ static netdev_tx_t uli526x_start_xmit(struct sk_buff *skb,
/* No Tx resource check, it never happens normally */
if (db->tx_packet_cnt >= TX_FREE_DESC_CNT) {
spin_unlock_irqrestore(&db->lock, flags);
- printk(KERN_ERR DRV_NAME ": No Tx resource %ld\n", db->tx_packet_cnt);
+ pr_err("No Tx resource %ld\n", db->tx_packet_cnt);
return NETDEV_TX_BUSY;
}
@@ -897,16 +906,18 @@ static void uli526x_set_filter_mode(struct net_device * dev)
return;
}
- if (dev->flags & IFF_ALLMULTI || dev->mc_count > ULI5261_MAX_MULTICAST) {
- ULI526X_DBUG(0, "Pass all multicast address", dev->mc_count);
+ if (dev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(dev) > ULI5261_MAX_MULTICAST) {
+ ULI526X_DBUG(0, "Pass all multicast address",
+ netdev_mc_count(dev));
db->cr6_data &= ~(CR6_PM | CR6_PBF);
db->cr6_data |= CR6_PAM;
spin_unlock_irqrestore(&db->lock, flags);
return;
}
- ULI526X_DBUG(0, "Set multicast address", dev->mc_count);
- send_filter_frame(dev, dev->mc_count); /* M5261/M5263 */
+ ULI526X_DBUG(0, "Set multicast address", netdev_mc_count(dev));
+ send_filter_frame(dev, netdev_mc_count(dev)); /* M5261/M5263 */
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1058,7 +1069,7 @@ static void uli526x_timer(unsigned long data)
/* Link Failed */
ULI526X_DBUG(0, "Link Failed", tmp_cr12);
netif_carrier_off(dev);
- printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+ pr_info("%s NIC Link is Down\n",dev->name);
db->link_failed = 1;
/* For Force 10/100M Half/Full mode: Enable Auto-Nego mode */
@@ -1090,11 +1101,11 @@ static void uli526x_timer(unsigned long data)
}
if(db->op_mode==ULI526X_10MFD || db->op_mode==ULI526X_100MFD)
{
- printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
+ pr_info("%s NIC Link is Up %d Mbps Full duplex\n",dev->name,TmpSpeed);
}
else
{
- printk(KERN_INFO "uli526x: %s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
+ pr_info("%s NIC Link is Up %d Mbps Half duplex\n",dev->name,TmpSpeed);
}
netif_carrier_on(dev);
}
@@ -1104,7 +1115,7 @@ static void uli526x_timer(unsigned long data)
{
if(db->init==1)
{
- printk(KERN_INFO "uli526x: %s NIC Link is Down\n",dev->name);
+ pr_info("%s NIC Link is Down\n",dev->name);
netif_carrier_off(dev);
}
}
@@ -1230,8 +1241,7 @@ static int uli526x_resume(struct pci_dev *pdev)
err = pci_set_power_state(pdev, PCI_D0);
if (err) {
- printk(KERN_WARNING "%s: Could not put device into D0\n",
- dev->name);
+ dev_warn(&dev->dev, "Could not put device into D0\n");
return err;
}
@@ -1405,14 +1415,14 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
*suptr++ = 0xffff << FLT_SHIFT;
/* fit the multicast address */
- for (mcptr = dev->mc_list, i = 0; i < mc_cnt; i++, mcptr = mcptr->next) {
+ netdev_for_each_mc_addr(mcptr, dev) {
addrptr = (u16 *) mcptr->dmi_addr;
*suptr++ = addrptr[0] << FLT_SHIFT;
*suptr++ = addrptr[1] << FLT_SHIFT;
*suptr++ = addrptr[2] << FLT_SHIFT;
}
- for (; i<14; i++) {
+ for (i = netdev_mc_count(dev); i < 14; i++) {
*suptr++ = 0xffff << FLT_SHIFT;
*suptr++ = 0xffff << FLT_SHIFT;
*suptr++ = 0xffff << FLT_SHIFT;
@@ -1432,7 +1442,7 @@ static void send_filter_frame(struct net_device *dev, int mc_cnt)
update_cr6(db->cr6_data, dev->base_addr);
dev->trans_start = jiffies;
} else
- printk(KERN_ERR DRV_NAME ": No Tx resource - Send_filter_frame!\n");
+ pr_err("No Tx resource - Send_filter_frame!\n");
}
@@ -1783,7 +1793,7 @@ static u16 phy_read_1bit(unsigned long ioaddr, u32 chip_id)
}
-static struct pci_device_id uli526x_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(uli526x_pci_tbl) = {
{ 0x10B9, 0x5261, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5261_ID },
{ 0x10B9, 0x5263, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PCI_ULI5263_ID },
{ 0, }
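
uli526x.c now defines pr_fmt() before its includes, so every pr_err()/pr_info()/pr_warning() call introduced above is automatically prefixed with the module name instead of repeating the DRV_NAME literal in each format string. A minimal sketch of the mechanism (the function is hypothetical; KBUILD_MODNAME is provided by the kernel build system):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must precede the includes */

#include <linux/kernel.h>

static void example(void)
{
	/* expands to printk(KERN_ERR KBUILD_MODNAME ": I/O base is zero\n") */
	pr_err("I/O base is zero\n");
}
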
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 869a7a0005f..304f43866c4 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -218,7 +218,7 @@ enum chip_capability_flags {
CanHaveMII=1, HasBrokenTx=2, AlwaysFDX=4, FDXOnNoMII=8,
};
-static const struct pci_device_id w840_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(w840_pci_tbl) = {
{ 0x1050, 0x0840, PCI_ANY_ID, 0x8153, 0, 0, 0 },
{ 0x1050, 0x0840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ 0x11f6, 0x2011, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
@@ -376,8 +376,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
irq = pdev->irq;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "Winbond-840: Device %s disabled due to DMA limitations.\n",
- pci_name(pdev));
+ pr_warning("Winbond-840: Device %s disabled due to DMA limitations\n",
+ pci_name(pdev));
return -EIO;
}
dev = alloc_etherdev(sizeof(*np));
@@ -422,8 +422,9 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
if (option & 0x200)
np->mii_if.full_duplex = 1;
if (option & 15)
- printk(KERN_INFO "%s: ignoring user supplied media type %d",
- dev->name, option & 15);
+ dev_info(&dev->dev,
+ "ignoring user supplied media type %d",
+ option & 15);
}
if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
np->mii_if.full_duplex = 1;
@@ -440,9 +441,8 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
if (i)
goto err_out_cleardev;
- printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n",
- dev->name, pci_id_tbl[chip_idx].name, ioaddr,
- dev->dev_addr, irq);
+ dev_info(&dev->dev, "%s at %p, %pM, IRQ %d\n",
+ pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq);
if (np->drv_flags & CanHaveMII) {
int phy, phy_idx = 0;
@@ -453,16 +453,17 @@ static int __devinit w840_probe1 (struct pci_dev *pdev,
np->mii_if.advertising = mdio_read(dev, phy, MII_ADVERTISE);
np->mii = (mdio_read(dev, phy, MII_PHYSID1) << 16)+
mdio_read(dev, phy, MII_PHYSID2);
- printk(KERN_INFO "%s: MII PHY %8.8xh found at address %d, status "
- "0x%4.4x advertising %4.4x.\n",
- dev->name, np->mii, phy, mii_status, np->mii_if.advertising);
+ dev_info(&dev->dev,
+ "MII PHY %08xh found at address %d, status 0x%04x advertising %04x\n",
+ np->mii, phy, mii_status,
+ np->mii_if.advertising);
}
}
np->mii_cnt = phy_idx;
np->mii_if.phy_id = np->phys[0];
if (phy_idx == 0) {
- printk(KERN_WARNING "%s: MII PHY not found -- this device may "
- "not operate correctly.\n", dev->name);
+ dev_warn(&dev->dev,
+ "MII PHY not found -- this device may not operate correctly\n");
}
}
@@ -644,8 +645,8 @@ static int netdev_open(struct net_device *dev)
goto out_err;
if (debug > 1)
- printk(KERN_DEBUG "%s: w89c840_open() irq %d.\n",
- dev->name, dev->irq);
+ printk(KERN_DEBUG "%s: w89c840_open() irq %d\n",
+ dev->name, dev->irq);
if((i=alloc_ringdesc(dev)))
goto out_err;
@@ -657,7 +658,7 @@ static int netdev_open(struct net_device *dev)
netif_start_queue(dev);
if (debug > 2)
- printk(KERN_DEBUG "%s: Done netdev_open().\n", dev->name);
+ printk(KERN_DEBUG "%s: Done netdev_open()\n", dev->name);
/* Set the timer to check for link beat. */
init_timer(&np->timer);
@@ -688,16 +689,18 @@ static int update_link(struct net_device *dev)
if (!(mii_reg & 0x4)) {
if (netif_carrier_ok(dev)) {
if (debug)
- printk(KERN_INFO "%s: MII #%d reports no link. Disabling watchdog.\n",
- dev->name, np->phys[0]);
+ dev_info(&dev->dev,
+ "MII #%d reports no link. Disabling watchdog\n",
+ np->phys[0]);
netif_carrier_off(dev);
}
return np->csr6;
}
if (!netif_carrier_ok(dev)) {
if (debug)
- printk(KERN_INFO "%s: MII #%d link is back. Enabling watchdog.\n",
- dev->name, np->phys[0]);
+ dev_info(&dev->dev,
+ "MII #%d link is back. Enabling watchdog\n",
+ np->phys[0]);
netif_carrier_on(dev);
}
@@ -729,9 +732,10 @@ static int update_link(struct net_device *dev)
if (fasteth)
result |= 0x20000000;
if (result != np->csr6 && debug)
- printk(KERN_INFO "%s: Setting %dMBit-%s-duplex based on MII#%d\n",
- dev->name, fasteth ? 100 : 10,
- duplex ? "full" : "half", np->phys[0]);
+ dev_info(&dev->dev,
+ "Setting %dMBit-%s-duplex based on MII#%d\n",
+ fasteth ? 100 : 10, duplex ? "full" : "half",
+ np->phys[0]);
return result;
}
@@ -763,8 +767,8 @@ static inline void update_csr6(struct net_device *dev, int new)
limit--;
if(!limit) {
- printk(KERN_INFO "%s: couldn't stop rxtx, IntrStatus %xh.\n",
- dev->name, csr5);
+ dev_info(&dev->dev,
+ "couldn't stop rxtx, IntrStatus %xh\n", csr5);
break;
}
udelay(1);
@@ -783,10 +787,9 @@ static void netdev_timer(unsigned long data)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x "
- "config %8.8x.\n",
- dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Media selection timer tick, status %08x config %08x\n",
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
spin_lock_irq(&np->lock);
update_csr6(dev, update_link(dev));
spin_unlock_irq(&np->lock);
@@ -899,8 +902,8 @@ static void init_registers(struct net_device *dev)
/* When not a module we can work around broken '486 PCI boards. */
if (boot_cpu_data.x86 <= 4) {
i |= 0x4800;
- printk(KERN_INFO "%s: This is a 386/486 PCI system, setting cache "
- "alignment to 8 longwords.\n", dev->name);
+ dev_info(&dev->dev,
+ "This is a 386/486 PCI system, setting cache alignment to 8 longwords\n");
} else {
i |= 0xE000;
}
@@ -931,22 +934,23 @@ static void tx_timeout(struct net_device *dev)
struct netdev_private *np = netdev_priv(dev);
void __iomem *ioaddr = np->base_addr;
- printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
- " resetting...\n", dev->name, ioread32(ioaddr + IntrStatus));
+ dev_warn(&dev->dev, "Transmit timed out, status %08x, resetting...\n",
+ ioread32(ioaddr + IntrStatus));
{
int i;
printk(KERN_DEBUG " Rx ring %p: ", np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
- printk(KERN_DEBUG" Tx ring %p: ", np->tx_ring);
+ printk(KERN_CONT " %08x", (unsigned int)np->rx_ring[i].status);
+ printk(KERN_CONT "\n");
+ printk(KERN_DEBUG " Tx ring %p: ", np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(" %8.8x", np->tx_ring[i].status);
- printk("\n");
+ printk(KERN_CONT " %08x", np->tx_ring[i].status);
+ printk(KERN_CONT "\n");
}
- printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d.\n",
- np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
- printk(KERN_DEBUG "Tx Descriptor addr %xh.\n",ioread32(ioaddr+0x4C));
+ printk(KERN_DEBUG "Tx cur %d Tx dirty %d Tx Full %d, q bytes %d\n",
+ np->cur_tx, np->dirty_tx, np->tx_full, np->tx_q_bytes);
+ printk(KERN_DEBUG "Tx Descriptor addr %xh\n", ioread32(ioaddr+0x4C));
disable_irq(dev->irq);
spin_lock_irq(&np->lock);
@@ -1055,8 +1059,8 @@ static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (debug > 4) {
- printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
- dev->name, np->cur_tx, entry);
+ printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d\n",
+ dev->name, np->cur_tx, entry);
}
return NETDEV_TX_OK;
}
@@ -1073,8 +1077,8 @@ static void netdev_tx_done(struct net_device *dev)
if (tx_status & 0x8000) { /* There was an error, log it. */
#ifndef final_version
if (debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
- dev->name, tx_status);
+ printk(KERN_DEBUG "%s: Transmit error, Tx status %08x\n",
+ dev->name, tx_status);
#endif
np->stats.tx_errors++;
if (tx_status & 0x0104) np->stats.tx_aborted_errors++;
@@ -1086,8 +1090,8 @@ static void netdev_tx_done(struct net_device *dev)
} else {
#ifndef final_version
if (debug > 3)
- printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %8.8x.\n",
- dev->name, entry, tx_status);
+ printk(KERN_DEBUG "%s: Transmit slot %d ok, Tx status %08x\n",
+ dev->name, entry, tx_status);
#endif
np->stats.tx_bytes += np->tx_skbuff[entry]->len;
np->stats.collisions += (tx_status >> 3) & 15;
@@ -1130,8 +1134,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
iowrite32(intr_status & 0x001ffff, ioaddr + IntrStatus);
if (debug > 4)
- printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
- dev->name, intr_status);
+ printk(KERN_DEBUG "%s: Interrupt, status %04x\n",
+ dev->name, intr_status);
if ((intr_status & (NormalIntr|AbnormalIntr)) == 0)
break;
@@ -1156,8 +1160,9 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
netdev_error(dev, intr_status);
if (--work_limit < 0) {
- printk(KERN_WARNING "%s: Too much work at interrupt, "
- "status=0x%4.4x.\n", dev->name, intr_status);
+ dev_warn(&dev->dev,
+ "Too much work at interrupt, status=0x%04x\n",
+ intr_status);
/* Set the timer to re-enable the other interrupts after
10*82usec ticks. */
spin_lock(&np->lock);
@@ -1171,8 +1176,8 @@ static irqreturn_t intr_handler(int irq, void *dev_instance)
} while (1);
if (debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, ioread32(ioaddr + IntrStatus));
+ printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x\n",
+ dev->name, ioread32(ioaddr + IntrStatus));
return IRQ_RETVAL(handled);
}
@@ -1185,8 +1190,8 @@ static int netdev_rx(struct net_device *dev)
int work_limit = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
if (debug > 4) {
- printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
- entry, np->rx_ring[entry].status);
+ printk(KERN_DEBUG " In netdev_rx(), entry %d status %04x\n",
+ entry, np->rx_ring[entry].status);
}
/* If EOP is set on the next entry, it's a new packet. Send it up. */
@@ -1195,24 +1200,24 @@ static int netdev_rx(struct net_device *dev)
s32 status = desc->status;
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n",
- status);
+ printk(KERN_DEBUG " netdev_rx() status was %08x\n",
+ status);
if (status < 0)
break;
if ((status & 0x38008300) != 0x0300) {
if ((status & 0x38000300) != 0x0300) {
/* Ignore earlier buffers. */
if ((status & 0xffff) != 0x7fff) {
- printk(KERN_WARNING "%s: Oversized Ethernet frame spanned "
- "multiple buffers, entry %#x status %4.4x!\n",
- dev->name, np->cur_rx, status);
+ dev_warn(&dev->dev,
+ "Oversized Ethernet frame spanned multiple buffers, entry %#x status %04x!\n",
+ np->cur_rx, status);
np->stats.rx_length_errors++;
}
} else if (status & 0x8000) {
/* There was a fatal error. */
if (debug > 2)
- printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
- dev->name, status);
+ printk(KERN_DEBUG "%s: Receive error, Rx status %08x\n",
+ dev->name, status);
np->stats.rx_errors++; /* end of a packet.*/
if (status & 0x0890) np->stats.rx_length_errors++;
if (status & 0x004C) np->stats.rx_frame_errors++;
@@ -1225,8 +1230,8 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version
if (debug > 4)
- printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d"
- " status %x.\n", pkt_len, status);
+ printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d status %x\n",
+ pkt_len, status);
#endif
/* Check if the packet is long enough to accept without copying
to a minimally-sized skbuff. */
@@ -1251,11 +1256,10 @@ static int netdev_rx(struct net_device *dev)
#ifndef final_version /* Remove after testing. */
/* You will want this info for the initial debug. */
if (debug > 5)
- printk(KERN_DEBUG " Rx data %pM %pM"
- " %2.2x%2.2x %d.%d.%d.%d.\n",
+ printk(KERN_DEBUG " Rx data %pM %pM %02x%02x %pI4\n",
&skb->data[0], &skb->data[6],
skb->data[12], skb->data[13],
- skb->data[14], skb->data[15], skb->data[16], skb->data[17]);
+ &skb->data[14]);
#endif
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
@@ -1293,8 +1297,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
void __iomem *ioaddr = np->base_addr;
if (debug > 2)
- printk(KERN_DEBUG "%s: Abnormal event, %8.8x.\n",
- dev->name, intr_status);
+ printk(KERN_DEBUG "%s: Abnormal event, %08x\n",
+ dev->name, intr_status);
if (intr_status == 0xffffffff)
return;
spin_lock(&np->lock);
@@ -1314,8 +1318,8 @@ static void netdev_error(struct net_device *dev, int intr_status)
new = 127; /* load full packet before starting */
new = (np->csr6 & ~(0x7F << 14)) | (new<<14);
#endif
- printk(KERN_DEBUG "%s: Tx underflow, new csr6 %8.8x.\n",
- dev->name, new);
+ printk(KERN_DEBUG "%s: Tx underflow, new csr6 %08x\n",
+ dev->name, new);
update_csr6(dev, new);
}
if (intr_status & RxDied) { /* Missed a Rx frame. */
@@ -1357,17 +1361,16 @@ static u32 __set_rx_mode(struct net_device *dev)
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | RxAcceptAllPhys
| AcceptMyPhys;
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
memset(mc_filter, 0xff, sizeof(mc_filter));
rx_mode = RxAcceptBroadcast | AcceptMulticast | AcceptMyPhys;
} else {
struct dev_mc_list *mclist;
- int i;
+
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int filterbit = (ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26) ^ 0x3F;
filterbit &= 0x3f;
mc_filter[filterbit >> 5] |= 1 << (filterbit & 31);
@@ -1487,11 +1490,13 @@ static int netdev_close(struct net_device *dev)
netif_stop_queue(dev);
if (debug > 1) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was %8.8x "
- "Config %8.8x.\n", dev->name, ioread32(ioaddr + IntrStatus),
- ioread32(ioaddr + NetworkConfig));
- printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
- dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
+ printk(KERN_DEBUG "%s: Shutting down ethercard, status was %08x Config %08x\n",
+ dev->name, ioread32(ioaddr + IntrStatus),
+ ioread32(ioaddr + NetworkConfig));
+ printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d\n",
+ dev->name,
+ np->cur_tx, np->dirty_tx,
+ np->cur_rx, np->dirty_rx);
}
/* Stop the chip's Tx and Rx processes. */
@@ -1512,18 +1517,16 @@ static int netdev_close(struct net_device *dev)
if (debug > 2) {
int i;
- printk(KERN_DEBUG" Tx ring at %8.8x:\n",
- (int)np->tx_ring);
+ printk(KERN_DEBUG" Tx ring at %08x:\n", (int)np->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x.\n",
- i, np->tx_ring[i].length,
- np->tx_ring[i].status, np->tx_ring[i].buffer1);
- printk(KERN_DEBUG " Rx ring %8.8x:\n",
- (int)np->rx_ring);
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->tx_ring[i].length,
+ np->tx_ring[i].status, np->tx_ring[i].buffer1);
+ printk(KERN_DEBUG " Rx ring %08x:\n", (int)np->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++) {
- printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
- i, np->rx_ring[i].length,
- np->rx_ring[i].status, np->rx_ring[i].buffer1);
+ printk(KERN_DEBUG " #%d desc. %04x %04x %08x\n",
+ i, np->rx_ring[i].length,
+ np->rx_ring[i].status, np->rx_ring[i].buffer1);
}
}
#endif /* __i386__ debugging only */
@@ -1622,9 +1625,8 @@ static int w840_resume (struct pci_dev *pdev)
goto out; /* device not suspended */
if (netif_running(dev)) {
if ((retval = pci_enable_device(pdev))) {
- printk (KERN_ERR
- "%s: pci_enable_device failed in resume\n",
- dev->name);
+ dev_err(&dev->dev,
+ "pci_enable_device failed in resume\n");
goto out;
}
spin_lock_irq(&np->lock);
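
The winbond-840 tx_timeout() dump above also repairs its continuation lines: every fragment that extends the current console line is now printed with KERN_CONT (or the pr_cont() shorthand), so the register dump is no longer split into separate default-loglevel records. A small kernel-context sketch of the idiom (function and array are illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

static void dump_ring_status(const u32 *status, int n)
{
	int i;

	printk(KERN_DEBUG "Tx ring:");
	for (i = 0; i < n; i++)
		pr_cont(" %08x", status[i]);	/* same as printk(KERN_CONT ...) */
	pr_cont("\n");				/* terminate the logical line */
}
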
diff --git a/drivers/net/tulip/xircom_cb.c b/drivers/net/tulip/xircom_cb.c
index 9924c4c7e2d..acfeeb98056 100644
--- a/drivers/net/tulip/xircom_cb.c
+++ b/drivers/net/tulip/xircom_cb.c
@@ -14,6 +14,8 @@
* $Id: xircom_cb.c,v 1.33 2001/03/19 14:02:07 arjanv Exp $
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
@@ -144,7 +146,7 @@ static int link_status(struct xircom_private *card);
-static struct pci_device_id xircom_pci_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(xircom_pci_table) = {
{0x115D, 0x0003, PCI_ANY_ID, PCI_ANY_ID,},
{0,},
};
@@ -234,7 +236,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
pci_write_config_word (pdev, PCI_STATUS,tmp16);
if (!request_region(pci_resource_start(pdev, 0), 128, "xircom_cb")) {
- printk(KERN_ERR "xircom_probe: failed to allocate io-region\n");
+ pr_err("%s: failed to allocate io-region\n", __func__);
return -ENODEV;
}
@@ -245,7 +247,7 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
*/
dev = alloc_etherdev(sizeof(struct xircom_private));
if (!dev) {
- printk(KERN_ERR "xircom_probe: failed to allocate etherdev\n");
+ pr_err("%s: failed to allocate etherdev\n", __func__);
goto device_fail;
}
private = netdev_priv(dev);
@@ -253,12 +255,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
/* Allocate the send/receive buffers */
private->rx_buffer = pci_alloc_consistent(pdev,8192,&private->rx_dma_handle);
if (private->rx_buffer == NULL) {
- printk(KERN_ERR "xircom_probe: no memory for rx buffer \n");
+ pr_err("%s: no memory for rx buffer\n", __func__);
goto rx_buf_fail;
}
private->tx_buffer = pci_alloc_consistent(pdev,8192,&private->tx_dma_handle);
if (private->tx_buffer == NULL) {
- printk(KERN_ERR "xircom_probe: no memory for tx buffer \n");
+ pr_err("%s: no memory for tx buffer\n", __func__);
goto tx_buf_fail;
}
@@ -281,11 +283,12 @@ static int __devinit xircom_probe(struct pci_dev *pdev, const struct pci_device_
pci_set_drvdata(pdev, dev);
if (register_netdev(dev)) {
- printk(KERN_ERR "xircom_probe: netdevice registration failed.\n");
+ pr_err("%s: netdevice registration failed\n", __func__);
goto reg_fail;
}
- printk(KERN_INFO "%s: Xircom cardbus revision %i at irq %i \n", dev->name, pdev->revision, pdev->irq);
+ dev_info(&dev->dev, "Xircom cardbus revision %i at irq %i\n",
+ pdev->revision, pdev->irq);
/* start the transmitter to get a heartbeat */
/* TODO: send 2 dummy packets here */
transceiver_voodoo(private);
@@ -347,8 +350,10 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
#ifdef DEBUG
print_binary(status);
- printk("tx status 0x%08x 0x%08x \n",card->tx_buffer[0],card->tx_buffer[4]);
- printk("rx status 0x%08x 0x%08x \n",card->rx_buffer[0],card->rx_buffer[4]);
+ printk("tx status 0x%08x 0x%08x \n",
+ card->tx_buffer[0], card->tx_buffer[4]);
+ printk("rx status 0x%08x 0x%08x \n",
+ card->rx_buffer[0], card->rx_buffer[4]);
#endif
/* Handle shared irq and hotplug */
if (status == 0 || status == 0xffffffff) {
@@ -358,9 +363,9 @@ static irqreturn_t xircom_interrupt(int irq, void *dev_instance)
if (link_status_changed(card)) {
int newlink;
- printk(KERN_DEBUG "xircom_cb: Link status has changed \n");
+ printk(KERN_DEBUG "xircom_cb: Link status has changed\n");
newlink = link_status(card);
- printk(KERN_INFO "xircom_cb: Link is %i mbit \n",newlink);
+ dev_info(&dev->dev, "Link is %i mbit\n", newlink);
if (newlink)
netif_carrier_on(dev);
else
@@ -457,7 +462,8 @@ static int xircom_open(struct net_device *dev)
struct xircom_private *xp = netdev_priv(dev);
int retval;
enter("xircom_open");
- printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq);
+ pr_info("xircom cardbus adaptor found, registering as %s, using irq %i \n",
+ dev->name, dev->irq);
retval = request_irq(dev->irq, xircom_interrupt, IRQF_SHARED, dev->name, dev);
if (retval) {
leave("xircom_open - No IRQ");
@@ -770,7 +776,7 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ pr_err("Receiver failed to deactivate\n");
}
/* enable the receiver */
@@ -787,7 +793,7 @@ static void activate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to re-activate\n");
+ pr_err("Receiver failed to re-activate\n");
}
leave("activate_receiver");
@@ -818,7 +824,7 @@ static void deactivate_receiver(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Receiver failed to deactivate\n");
+ pr_err("Receiver failed to deactivate\n");
}
@@ -861,7 +867,7 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ pr_err("Transmitter failed to deactivate\n");
}
/* enable the transmitter */
@@ -878,7 +884,7 @@ static void activate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to re-activate\n");
+ pr_err("Transmitter failed to re-activate\n");
}
leave("activate_transmitter");
@@ -909,7 +915,7 @@ static void deactivate_transmitter(struct xircom_private *card)
udelay(50);
counter--;
if (counter <= 0)
- printk(KERN_ERR "xircom_cb: Transmitter failed to deactivate\n");
+ pr_err("Transmitter failed to deactivate\n");
}
@@ -1184,7 +1190,7 @@ static void investigate_read_descriptor(struct net_device *dev,struct xircom_pri
struct sk_buff *skb;
if (pkt_len > 1518) {
- printk(KERN_ERR "xircom_cb: Packet length %i is bogus \n",pkt_len);
+ pr_err("Packet length %i is bogus\n", pkt_len);
pkt_len = 1518;
}
@@ -1222,7 +1228,7 @@ static void investigate_write_descriptor(struct net_device *dev, struct xircom_p
status = le32_to_cpu(card->tx_buffer[4*descnr]);
#if 0
if (status & 0x8000) { /* Major error */
- printk(KERN_ERR "Major transmit error status %x \n", status);
+ pr_err("Major transmit error status %x\n", status);
card->tx_buffer[4*descnr] = 0;
netif_wake_queue (dev);
}
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 2834a01bae2..ce1efa4c0b0 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -61,6 +61,7 @@
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
+#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
@@ -144,6 +145,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
err = 0;
tfile->tun = tun;
tun->tfile = tfile;
+ tun->socket.file = file;
dev_hold(tun->dev);
sock_hold(tun->socket.sk);
atomic_inc(&tfile->count);
@@ -158,6 +160,7 @@ static void __tun_detach(struct tun_struct *tun)
/* Detach from net device */
netif_tx_lock_bh(tun->dev);
tun->tfile = NULL;
+ tun->socket.file = NULL;
netif_tx_unlock_bh(tun->dev);
/* Drop read queue */
@@ -364,6 +367,10 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
if (!check_filter(&tun->txflt, skb))
goto drop;
+ if (tun->socket.sk->sk_filter &&
+ sk_filter(tun->socket.sk, skb))
+ goto drop;
+
if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
if (!(tun->flags & TUN_ONE_QUEUE)) {
/* Normal queueing mode. */
@@ -387,7 +394,8 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
/* Notify and wake up reader process */
if (tun->flags & TUN_FASYNC)
kill_fasync(&tun->fasync, SIGIO, POLL_IN);
- wake_up_interruptible(&tun->socket.wait);
+ wake_up_interruptible_poll(&tun->socket.wait, POLLIN |
+ POLLRDNORM | POLLRDBAND);
return NETDEV_TX_OK;
drop:
@@ -743,7 +751,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
len = min_t(int, skb->len, len);
skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
- total += len;
+ total += skb->len;
tun->dev->stats.tx_packets++;
tun->dev->stats.tx_bytes += len;
@@ -751,34 +759,23 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
return total;
}
-static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
- unsigned long count, loff_t pos)
+static ssize_t tun_do_read(struct tun_struct *tun,
+ struct kiocb *iocb, const struct iovec *iv,
+ ssize_t len, int noblock)
{
- struct file *file = iocb->ki_filp;
- struct tun_file *tfile = file->private_data;
- struct tun_struct *tun = __tun_get(tfile);
DECLARE_WAITQUEUE(wait, current);
struct sk_buff *skb;
- ssize_t len, ret = 0;
-
- if (!tun)
- return -EBADFD;
+ ssize_t ret = 0;
DBG(KERN_INFO "%s: tun_chr_read\n", tun->dev->name);
- len = iov_length(iv, count);
- if (len < 0) {
- ret = -EINVAL;
- goto out;
- }
-
add_wait_queue(&tun->socket.wait, &wait);
while (len) {
current->state = TASK_INTERRUPTIBLE;
/* Read frames from the queue */
if (!(skb=skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
- if (file->f_flags & O_NONBLOCK) {
+ if (noblock) {
ret = -EAGAIN;
break;
}
@@ -805,6 +802,27 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
current->state = TASK_RUNNING;
remove_wait_queue(&tun->socket.wait, &wait);
+ return ret;
+}
+
+static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
+ unsigned long count, loff_t pos)
+{
+ struct file *file = iocb->ki_filp;
+ struct tun_file *tfile = file->private_data;
+ struct tun_struct *tun = __tun_get(tfile);
+ ssize_t len, ret;
+
+ if (!tun)
+ return -EBADFD;
+ len = iov_length(iv, count);
+ if (len < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
+ ret = min_t(ssize_t, ret, len);
out:
tun_put(tun);
return ret;
@@ -847,7 +865,8 @@ static void tun_sock_write_space(struct sock *sk)
return;
if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
- wake_up_interruptible_sync(sk->sk_sleep);
+ wake_up_interruptible_sync_poll(sk->sk_sleep, POLLOUT |
+ POLLWRNORM | POLLWRBAND);
tun = tun_sk(sk)->tun;
kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
@@ -858,6 +877,37 @@ static void tun_sock_destruct(struct sock *sk)
free_netdev(tun_sk(sk)->tun->dev);
}
+static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
+{
+ struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ return tun_get_user(tun, m->msg_iov, total_len,
+ m->msg_flags & MSG_DONTWAIT);
+}
+
+static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len,
+ int flags)
+{
+ struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
+ int ret;
+ if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
+ return -EINVAL;
+ ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
+ flags & MSG_DONTWAIT);
+ if (ret > total_len) {
+ m->msg_flags |= MSG_TRUNC;
+ ret = flags & MSG_TRUNC ? ret : total_len;
+ }
+ return ret;
+}
+
+/* Ops structure to mimic raw sockets with tun */
+static const struct proto_ops tun_socket_ops = {
+ .sendmsg = tun_sendmsg,
+ .recvmsg = tun_recvmsg,
+};
+
static struct proto tun_proto = {
.name = "tun",
.owner = THIS_MODULE,
@@ -986,6 +1036,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
goto err_free_dev;
init_waitqueue_head(&tun->socket.wait);
+ tun->socket.ops = &tun_socket_ops;
sock_init_data(&tun->socket, sk);
sk->sk_write_space = tun_sock_write_space;
sk->sk_sndbuf = INT_MAX;
@@ -1116,6 +1167,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
struct tun_file *tfile = file->private_data;
struct tun_struct *tun;
void __user* argp = (void __user*)arg;
+ struct sock_fprog fprog;
struct ifreq ifr;
int sndbuf;
int ret;
@@ -1263,6 +1315,26 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
tun->socket.sk->sk_sndbuf = sndbuf;
break;
+ case TUNATTACHFILTER:
+ /* Can be set only for TAPs */
+ ret = -EINVAL;
+ if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ break;
+ ret = -EFAULT;
+ if (copy_from_user(&fprog, argp, sizeof(fprog)))
+ break;
+
+ ret = sk_attach_filter(&fprog, tun->socket.sk);
+ break;
+
+ case TUNDETACHFILTER:
+ /* Can be set only for TAPs */
+ ret = -EINVAL;
+ if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
+ break;
+ ret = sk_detach_filter(tun->socket.sk);
+ break;
+
default:
ret = -EINVAL;
break;
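The two new ioctls expose the socket-filter machinery on the tap's internal socket. As a rough illustration only (not part of this patch), userspace holding a tap fd already configured through TUNSETIFF could attach and later remove a trivial accept-all classic BPF program; the fd name and the one-instruction program below are made up for the example:

    /* illustrative only: needs <sys/ioctl.h>, <linux/if_tun.h>, <linux/filter.h> */
    struct sock_filter accept_all[] = {
        { 0x06, 0, 0, 0x0000ffff },     /* BPF_RET | BPF_K: accept up to 64k */
    };
    struct sock_fprog prog = { .len = 1, .filter = accept_all };

    if (ioctl(tap_fd, TUNATTACHFILTER, &prog) < 0)
        perror("TUNATTACHFILTER");
    /* ... traffic now passes through the sk_filter() check in tun_net_xmit() ... */
    if (ioctl(tap_fd, TUNDETACHFILTER) < 0)
        perror("TUNDETACHFILTER");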
@@ -1525,6 +1597,23 @@ static void tun_cleanup(void)
rtnl_link_unregister(&tun_link_ops);
}
+/* Get an underlying socket object from tun file. Returns error unless file is
+ * attached to a device. The returned object works like a packet socket, it
+ * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
+ * holding a reference to the file for as long as the socket is in use. */
+struct socket *tun_get_socket(struct file *file)
+{
+ struct tun_struct *tun;
+ if (file->f_op != &tun_fops)
+ return ERR_PTR(-EINVAL);
+ tun = tun_get(file);
+ if (!tun)
+ return ERR_PTR(-EBADFD);
+ tun_put(tun);
+ return &tun->socket;
+}
+EXPORT_SYMBOL_GPL(tun_get_socket);
+
module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
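tun_get_socket() is exported so that another kernel module can push frames through the tap queue with the ordinary socket message calls. A minimal sketch of such a consumer, assuming a tap fd handed in from userspace; the fget()/kernel_sendmsg() plumbing is illustrative and not taken from this patch:

    /* hedged sketch of an in-kernel caller; tap_fd, frame and frame_len
     * are assumed to come from the caller, not from this patch */
    struct file *filp = fget(tap_fd);
    struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
    struct kvec vec = { .iov_base = frame, .iov_len = frame_len };
    struct socket *sock;
    int err;

    if (!filp)
        return -EBADF;
    sock = tun_get_socket(filp);
    if (IS_ERR(sock)) {
        fput(filp);
        return PTR_ERR(sock);
    }
    err = kernel_sendmsg(sock, &msg, &vec, 1, frame_len); /* ends up in tun_sendmsg() */
    fput(filp);  /* keep the file held for as long as the socket is in use */
    return err;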
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 39f1fc650be..e3ddcb8f29d 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -98,14 +98,10 @@ static const int multicast_filter_limit = 32;
#define TX_TIMEOUT (2*HZ)
#define PKT_BUF_SZ 1536
-
-#define DRV_MODULE_NAME "typhoon"
-#define DRV_MODULE_VERSION "1.5.9"
-#define DRV_MODULE_RELDATE "Mar 2, 2009"
-#define PFX DRV_MODULE_NAME ": "
-#define ERR_PFX KERN_ERR PFX
#define FIRMWARE_NAME "3com/typhoon.bin"
+#define pr_fmt(fmt) KBUILD_MODNAME " " fmt
+
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -132,14 +128,12 @@ static const int multicast_filter_limit = 32;
#include <linux/in6.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
+#include <generated/utsrelease.h>
#include "typhoon.h"
-static char version[] __devinitdata =
- "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
-
MODULE_AUTHOR("David Dillow <dave@thedillows.org>");
-MODULE_VERSION(DRV_MODULE_VERSION);
+MODULE_VERSION(UTS_RELEASE);
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(FIRMWARE_NAME);
MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
@@ -161,8 +155,8 @@ module_param(use_mmio, int, 0);
#endif
struct typhoon_card_info {
- char *name;
- int capabilities;
+ const char *name;
+ const int capabilities;
};
#define TYPHOON_CRYPTO_NONE 0x00
@@ -215,7 +209,7 @@ static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
* bit 8 indicates if this is a (0) copper or (1) fiber card
* bits 12-16 indicate card type: (0) client and (1) server
*/
-static struct pci_device_id typhoon_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(typhoon_pci_tbl) = {
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
@@ -299,7 +293,6 @@ struct typhoon {
struct basic_ring respRing;
struct net_device_stats stats;
struct net_device_stats stats_saved;
- const char * name;
struct typhoon_shared * shared;
dma_addr_t shared_dma;
__le16 xcvr_select;
@@ -534,13 +527,13 @@ typhoon_process_response(struct typhoon *tp, int resp_size,
} else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
typhoon_hello(tp);
} else {
- printk(KERN_ERR "%s: dumping unexpected response "
- "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
- tp->name, le16_to_cpu(resp->cmd),
- resp->numDesc, resp->flags,
- le16_to_cpu(resp->parm1),
- le32_to_cpu(resp->parm2),
- le32_to_cpu(resp->parm3));
+ netdev_err(tp->dev,
+ "dumping unexpected response 0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
+ le16_to_cpu(resp->cmd),
+ resp->numDesc, resp->flags,
+ le16_to_cpu(resp->parm1),
+ le32_to_cpu(resp->parm2),
+ le32_to_cpu(resp->parm3));
}
cleanup:
@@ -606,9 +599,8 @@ typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
freeResp = typhoon_num_free_resp(tp);
if(freeCmd < num_cmd || freeResp < num_resp) {
- printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
- "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
- freeResp, num_resp);
+ netdev_err(tp->dev, "no descs for cmd, had (needed) %d (%d) cmd, %d (%d) resp\n",
+ freeCmd, num_cmd, freeResp, num_resp);
err = -ENOMEM;
goto out;
}
@@ -733,7 +725,7 @@ typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
spin_unlock_bh(&tp->state_lock);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0)
- printk("%s: vlan offload error %d\n", tp->name, -err);
+ netdev_err(tp->dev, "vlan offload error %d\n", -err);
spin_lock_bh(&tp->state_lock);
}
@@ -924,17 +916,15 @@ typhoon_set_rx_mode(struct net_device *dev)
filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(dev->flags & IFF_PROMISC) {
filter |= TYPHOON_RX_FILTER_PROMISCOUS;
- } else if((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
filter |= TYPHOON_RX_FILTER_ALL_MCAST;
- } else if(dev->mc_count) {
+ } else if (!netdev_mc_empty(dev)) {
struct dev_mc_list *mclist;
- int i;
memset(mc_filter, 0, sizeof(mc_filter));
- for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}
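The same conversion recurs in ucc_geth.c, asix.c, catc.c and dm9601.c below: the open-coded walk over dev->mc_list/dev->mc_count becomes netdev_for_each_mc_addr(). A minimal sketch of the new idiom, with the 64-bit hash filter named only for illustration:

    /* hedged sketch of the netdev_for_each_mc_addr() idiom; 'hash' is illustrative */
    struct dev_mc_list *mclist;
    u8 hash[8];

    memset(hash, 0, sizeof(hash));
    netdev_for_each_mc_addr(mclist, dev) {
        int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;  /* top 6 CRC bits */
        hash[bit >> 3] |= 1 << (bit & 7);
    }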
@@ -1020,7 +1010,7 @@ typhoon_get_stats(struct net_device *dev)
return saved;
if(typhoon_do_get_stats(tp) < 0) {
- printk(KERN_ERR "%s: error getting stats\n", dev->name);
+ netdev_err(dev, "error getting stats\n");
return saved;
}
@@ -1062,8 +1052,8 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
}
}
- strcpy(info->driver, DRV_MODULE_NAME);
- strcpy(info->version, DRV_MODULE_VERSION);
+ strcpy(info->driver, KBUILD_MODNAME);
+ strcpy(info->version, UTS_RELEASE);
strcpy(info->bus_info, pci_name(pci_dev));
}
@@ -1365,8 +1355,8 @@ typhoon_request_firmware(struct typhoon *tp)
err = request_firmware(&typhoon_fw, FIRMWARE_NAME, &tp->pdev->dev);
if (err) {
- printk(KERN_ERR "%s: Failed to load firmware \"%s\"\n",
- tp->name, FIRMWARE_NAME);
+ netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
+ FIRMWARE_NAME);
return err;
}
@@ -1401,7 +1391,7 @@ typhoon_request_firmware(struct typhoon *tp)
return 0;
invalid_fw:
- printk(KERN_ERR "%s: Invalid firmware image\n", tp->name);
+ netdev_err(tp->dev, "Invalid firmware image\n");
release_firmware(typhoon_fw);
typhoon_fw = NULL;
return -EINVAL;
@@ -1438,7 +1428,7 @@ typhoon_download_firmware(struct typhoon *tp)
err = -ENOMEM;
dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
if(!dpage) {
- printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
+ netdev_err(tp->dev, "no DMA mem for firmware\n");
goto err_out;
}
@@ -1451,7 +1441,7 @@ typhoon_download_firmware(struct typhoon *tp)
err = -ETIMEDOUT;
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
- printk(KERN_ERR "%s: card ready timeout\n", tp->name);
+ netdev_err(tp->dev, "card ready timeout\n");
goto err_out_irq;
}
@@ -1491,8 +1481,7 @@ typhoon_download_firmware(struct typhoon *tp)
if(typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
- printk(KERN_ERR "%s: segment ready timeout\n",
- tp->name);
+ netdev_err(tp->dev, "segment ready timeout\n");
goto err_out_irq;
}
@@ -1502,8 +1491,8 @@ typhoon_download_firmware(struct typhoon *tp)
* the checksum, we can do this once, at the end.
*/
csum = csum_fold(csum_partial_copy_nocheck(image_data,
- dpage, len,
- 0));
+ dpage, len,
+ 0));
iowrite32(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
iowrite32(le16_to_cpu((__force __le16)csum),
@@ -1514,7 +1503,7 @@ typhoon_download_firmware(struct typhoon *tp)
iowrite32(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
typhoon_post_pci_writes(ioaddr);
iowrite32(TYPHOON_BOOTCMD_SEG_AVAILABLE,
- ioaddr + TYPHOON_REG_COMMAND);
+ ioaddr + TYPHOON_REG_COMMAND);
image_data += len;
load_addr += len;
@@ -1525,15 +1514,15 @@ typhoon_download_firmware(struct typhoon *tp)
if(typhoon_wait_interrupt(ioaddr) < 0 ||
ioread32(ioaddr + TYPHOON_REG_STATUS) !=
TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
- printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
+ netdev_err(tp->dev, "final segment ready timeout\n");
goto err_out_irq;
}
iowrite32(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
- printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
- tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
+ netdev_err(tp->dev, "boot ready timeout, status 0x%0x\n",
+ ioread32(ioaddr + TYPHOON_REG_STATUS));
goto err_out_irq;
}
@@ -1555,7 +1544,7 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
void __iomem *ioaddr = tp->ioaddr;
if(typhoon_wait_status(ioaddr, initial_status) < 0) {
- printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
+ netdev_err(tp->dev, "boot ready timeout\n");
goto out_timeout;
}
@@ -1566,8 +1555,8 @@ typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
ioaddr + TYPHOON_REG_COMMAND);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
- printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
- tp->name, ioread32(ioaddr + TYPHOON_REG_STATUS));
+ netdev_err(tp->dev, "boot finish timeout (status 0x%x)\n",
+ ioread32(ioaddr + TYPHOON_REG_STATUS));
goto out_timeout;
}
@@ -1866,8 +1855,7 @@ typhoon_interrupt(int irq, void *dev_instance)
typhoon_post_pci_writes(ioaddr);
__napi_schedule(&tp->napi);
} else {
- printk(KERN_ERR "%s: Error, poll already scheduled\n",
- dev->name);
+ netdev_err(dev, "Error, poll already scheduled\n");
}
return IRQ_HANDLED;
}
@@ -1900,16 +1888,15 @@ typhoon_sleep(struct typhoon *tp, pci_power_t state, __le16 events)
xp_cmd.parm1 = events;
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0) {
- printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
- tp->name, err);
+ netdev_err(tp->dev, "typhoon_sleep(): wake events cmd err %d\n",
+ err);
return err;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(err < 0) {
- printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
- tp->name, err);
+ netdev_err(tp->dev, "typhoon_sleep(): sleep cmd err %d\n", err);
return err;
}
@@ -1960,12 +1947,12 @@ typhoon_start_runtime(struct typhoon *tp)
err = typhoon_download_firmware(tp);
if(err < 0) {
- printk("%s: cannot load runtime on 3XP\n", tp->name);
+ netdev_err(tp->dev, "cannot load runtime on 3XP\n");
goto error_out;
}
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
- printk("%s: cannot boot 3XP\n", tp->name);
+ netdev_err(tp->dev, "cannot boot 3XP\n");
err = -EIO;
goto error_out;
}
@@ -2069,9 +2056,7 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
}
if(i == TYPHOON_WAIT_TIMEOUT)
- printk(KERN_ERR
- "%s: halt timed out waiting for Tx to complete\n",
- tp->name);
+ netdev_err(tp->dev, "halt timed out waiting for Tx to complete\n");
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
@@ -2088,11 +2073,10 @@ typhoon_stop_runtime(struct typhoon *tp, int wait_type)
typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
- printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
- tp->name);
+ netdev_err(tp->dev, "timed out waiting for 3XP to halt\n");
if(typhoon_reset(ioaddr, wait_type) < 0) {
- printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
+ netdev_err(tp->dev, "unable to reset 3XP\n");
return -ETIMEDOUT;
}
@@ -2111,8 +2095,7 @@ typhoon_tx_timeout(struct net_device *dev)
struct typhoon *tp = netdev_priv(dev);
if(typhoon_reset(tp->ioaddr, WaitNoSleep) < 0) {
- printk(KERN_WARNING "%s: could not reset in tx timeout\n",
- dev->name);
+ netdev_warn(dev, "could not reset in tx timeout\n");
goto truely_dead;
}
@@ -2121,8 +2104,7 @@ typhoon_tx_timeout(struct net_device *dev)
typhoon_free_rx_rings(tp);
if(typhoon_start_runtime(tp) < 0) {
- printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
- dev->name);
+ netdev_err(dev, "could not start runtime in tx timeout\n");
goto truely_dead;
}
@@ -2147,7 +2129,7 @@ typhoon_open(struct net_device *dev)
err = typhoon_wakeup(tp, WaitSleep);
if(err < 0) {
- printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
+ netdev_err(dev, "unable to wakeup device\n");
goto out_sleep;
}
@@ -2172,14 +2154,13 @@ out_irq:
out_sleep:
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
- printk(KERN_ERR "%s: unable to reboot into sleep img\n",
- dev->name);
+ netdev_err(dev, "unable to reboot into sleep img\n");
typhoon_reset(tp->ioaddr, NoWait);
goto out;
}
if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
- printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);
+ netdev_err(dev, "unable to go back to sleep\n");
out:
return err;
@@ -2194,7 +2175,7 @@ typhoon_close(struct net_device *dev)
napi_disable(&tp->napi);
if(typhoon_stop_runtime(tp, WaitSleep) < 0)
- printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
+ netdev_err(dev, "unable to stop runtime\n");
/* Make sure there is no irq handler running on a different CPU. */
free_irq(dev->irq, dev);
@@ -2203,10 +2184,10 @@ typhoon_close(struct net_device *dev)
typhoon_init_rings(tp);
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
- printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
+ netdev_err(dev, "unable to boot sleep image\n");
if(typhoon_sleep(tp, PCI_D3hot, 0) < 0)
- printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
+ netdev_err(dev, "unable to put card to sleep\n");
return 0;
}
@@ -2224,14 +2205,12 @@ typhoon_resume(struct pci_dev *pdev)
return 0;
if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
- printk(KERN_ERR "%s: critical: could not wake up in resume\n",
- dev->name);
+ netdev_err(dev, "critical: could not wake up in resume\n");
goto reset;
}
if(typhoon_start_runtime(tp) < 0) {
- printk(KERN_ERR "%s: critical: could not start runtime in "
- "resume\n", dev->name);
+ netdev_err(dev, "critical: could not start runtime in resume\n");
goto reset;
}
@@ -2258,8 +2237,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
spin_lock_bh(&tp->state_lock);
if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
spin_unlock_bh(&tp->state_lock);
- printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
- dev->name);
+ netdev_err(dev, "cannot do WAKE_MAGIC with VLANS\n");
return -EBUSY;
}
spin_unlock_bh(&tp->state_lock);
@@ -2267,7 +2245,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
netif_device_detach(dev);
if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
- printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
+ netdev_err(dev, "unable to stop runtime\n");
goto need_resume;
}
@@ -2275,7 +2253,7 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
typhoon_init_rings(tp);
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
- printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
+ netdev_err(dev, "unable to boot sleep image\n");
goto need_resume;
}
@@ -2283,21 +2261,19 @@ typhoon_suspend(struct pci_dev *pdev, pm_message_t state)
xp_cmd.parm1 = cpu_to_le16(ntohs(*(__be16 *)&dev->dev_addr[0]));
xp_cmd.parm2 = cpu_to_le32(ntohl(*(__be32 *)&dev->dev_addr[2]));
if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
- printk(KERN_ERR "%s: unable to set mac address in suspend\n",
- dev->name);
+ netdev_err(dev, "unable to set mac address in suspend\n");
goto need_resume;
}
INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
- printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
- dev->name);
+ netdev_err(dev, "unable to set rx filter in suspend\n");
goto need_resume;
}
if(typhoon_sleep(tp, pci_choose_state(pdev, state), tp->wol_events) < 0) {
- printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
+ netdev_err(dev, "unable to put card to sleep\n");
goto need_resume;
}
@@ -2351,7 +2327,7 @@ out_unmap:
out:
if(!mode)
- printk(KERN_INFO PFX "falling back to port IO\n");
+ pr_info("%s: falling back to port IO\n", pci_name(pdev));
return mode;
}
@@ -2371,7 +2347,6 @@ static const struct net_device_ops typhoon_netdev_ops = {
static int __devinit
typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- static int did_version = 0;
struct net_device *dev;
struct typhoon *tp;
int card_id = (int) ent->driver_data;
@@ -2381,14 +2356,11 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct cmd_desc xp_cmd;
struct resp_desc xp_resp[3];
int err = 0;
-
- if(!did_version++)
- printk(KERN_INFO "%s", version);
+ const char *err_msg;
dev = alloc_etherdev(sizeof(*tp));
if(dev == NULL) {
- printk(ERR_PFX "%s: unable to alloc new net device\n",
- pci_name(pdev));
+ err_msg = "unable to alloc new net device";
err = -ENOMEM;
goto error_out;
}
@@ -2396,57 +2368,48 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = pci_enable_device(pdev);
if(err < 0) {
- printk(ERR_PFX "%s: unable to enable device\n",
- pci_name(pdev));
+ err_msg = "unable to enable device";
goto error_out_dev;
}
err = pci_set_mwi(pdev);
if(err < 0) {
- printk(ERR_PFX "%s: unable to set MWI\n", pci_name(pdev));
+ err_msg = "unable to set MWI";
goto error_out_disable;
}
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if(err < 0) {
- printk(ERR_PFX "%s: No usable DMA configuration\n",
- pci_name(pdev));
+ err_msg = "No usable DMA configuration";
goto error_out_mwi;
}
/* sanity checks on IO and MMIO BARs
*/
if(!(pci_resource_flags(pdev, 0) & IORESOURCE_IO)) {
- printk(ERR_PFX
- "%s: region #1 not a PCI IO resource, aborting\n",
- pci_name(pdev));
+ err_msg = "region #1 not a PCI IO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
if(pci_resource_len(pdev, 0) < 128) {
- printk(ERR_PFX "%s: Invalid PCI IO region size, aborting\n",
- pci_name(pdev));
+ err_msg = "Invalid PCI IO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
- printk(ERR_PFX
- "%s: region #1 not a PCI MMIO resource, aborting\n",
- pci_name(pdev));
+ err_msg = "region #1 not a PCI MMIO resource, aborting";
err = -ENODEV;
goto error_out_mwi;
}
if(pci_resource_len(pdev, 1) < 128) {
- printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
- pci_name(pdev));
+ err_msg = "Invalid PCI MMIO region size, aborting";
err = -ENODEV;
goto error_out_mwi;
}
- err = pci_request_regions(pdev, "typhoon");
+ err = pci_request_regions(pdev, KBUILD_MODNAME);
if(err < 0) {
- printk(ERR_PFX "%s: could not request regions\n",
- pci_name(pdev));
+ err_msg = "could not request regions";
goto error_out_mwi;
}
@@ -2457,8 +2420,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
ioaddr = pci_iomap(pdev, use_mmio, 128);
if (!ioaddr) {
- printk(ERR_PFX "%s: cannot remap registers, aborting\n",
- pci_name(pdev));
+ err_msg = "cannot remap registers, aborting";
err = -EIO;
goto error_out_regions;
}
@@ -2468,8 +2430,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
&shared_dma);
if(!shared) {
- printk(ERR_PFX "%s: could not allocate DMA memory\n",
- pci_name(pdev));
+ err_msg = "could not allocate DMA memory";
err = -ENOMEM;
goto error_out_remap;
}
@@ -2492,7 +2453,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* 5) Put the card to sleep.
*/
if (typhoon_reset(ioaddr, WaitSleep) < 0) {
- printk(ERR_PFX "%s: could not reset 3XP\n", pci_name(pdev));
+ err_msg = "could not reset 3XP";
err = -EIO;
goto error_out_dma;
}
@@ -2504,26 +2465,18 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
pci_save_state(pdev);
- /* dev->name is not valid until we register, but we need to
- * use some common routines to initialize the card. So that those
- * routines print the right name, we keep our oun pointer to the name
- */
- tp->name = pci_name(pdev);
-
typhoon_init_interface(tp);
typhoon_init_rings(tp);
if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
- printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
- pci_name(pdev));
+ err_msg = "cannot boot 3XP sleep image";
err = -EIO;
goto error_out_reset;
}
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
- printk(ERR_PFX "%s: cannot read MAC address\n",
- pci_name(pdev));
+ err_msg = "cannot read MAC address";
err = -EIO;
goto error_out_reset;
}
@@ -2532,8 +2485,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
if(!is_valid_ether_addr(dev->dev_addr)) {
- printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
- "aborting\n", pci_name(pdev));
+ err_msg = "Could not obtain valid ethernet address, aborting";
goto error_out_reset;
}
@@ -2542,8 +2494,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
- printk(ERR_PFX "%s: Could not get Sleep Image version\n",
- pci_name(pdev));
+ err_msg = "Could not get Sleep Image version";
goto error_out_reset;
}
@@ -2560,8 +2511,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
- printk(ERR_PFX "%s: cannot put adapter to sleep\n",
- pci_name(pdev));
+ err_msg = "cannot put adapter to sleep";
err = -EIO;
goto error_out_reset;
}
@@ -2580,19 +2530,18 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
dev->features |= NETIF_F_TSO;
- if(register_netdev(dev) < 0)
+ if(register_netdev(dev) < 0) {
+ err_msg = "unable to register netdev";
goto error_out_reset;
-
- /* fixup our local name */
- tp->name = dev->name;
+ }
pci_set_drvdata(pdev, dev);
- printk(KERN_INFO "%s: %s at %s 0x%llx, %pM\n",
- dev->name, typhoon_card_info[card_id].name,
- use_mmio ? "MMIO" : "IO",
- (unsigned long long)pci_resource_start(pdev, use_mmio),
- dev->dev_addr);
+ netdev_info(dev, "%s at %s 0x%llx, %pM\n",
+ typhoon_card_info[card_id].name,
+ use_mmio ? "MMIO" : "IO",
+ (unsigned long long)pci_resource_start(pdev, use_mmio),
+ dev->dev_addr);
/* xp_resp still contains the response to the READ_VERSIONS command.
* For debugging, let the user know what version he has.
@@ -2602,23 +2551,20 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
* of version is Month/Day of build.
*/
u16 monthday = le32_to_cpu(xp_resp[0].parm2) & 0xffff;
- printk(KERN_INFO "%s: Typhoon 1.0 Sleep Image built "
- "%02u/%02u/2000\n", dev->name, monthday >> 8,
- monthday & 0xff);
+ netdev_info(dev, "Typhoon 1.0 Sleep Image built %02u/%02u/2000\n",
+ monthday >> 8, monthday & 0xff);
} else if(xp_resp[0].numDesc == 2) {
/* This is the Typhoon 1.1+ type Sleep Image
*/
u32 sleep_ver = le32_to_cpu(xp_resp[0].parm2);
u8 *ver_string = (u8 *) &xp_resp[1];
ver_string[25] = 0;
- printk(KERN_INFO "%s: Typhoon 1.1+ Sleep Image version "
- "%02x.%03x.%03x %s\n", dev->name, sleep_ver >> 24,
- (sleep_ver >> 12) & 0xfff, sleep_ver & 0xfff,
- ver_string);
+ netdev_info(dev, "Typhoon 1.1+ Sleep Image version %02x.%03x.%03x %s\n",
+ sleep_ver >> 24, (sleep_ver >> 12) & 0xfff,
+ sleep_ver & 0xfff, ver_string);
} else {
- printk(KERN_WARNING "%s: Unknown Sleep Image version "
- "(%u:%04x)\n", dev->name, xp_resp[0].numDesc,
- le32_to_cpu(xp_resp[0].parm2));
+ netdev_warn(dev, "Unknown Sleep Image version (%u:%04x)\n",
+ xp_resp[0].numDesc, le32_to_cpu(xp_resp[0].parm2));
}
return 0;
@@ -2640,6 +2586,7 @@ error_out_disable:
error_out_dev:
free_netdev(dev);
error_out:
+ pr_err("%s: %s\n", pci_name(pdev), err_msg);
return err;
}
@@ -2664,7 +2611,7 @@ typhoon_remove_one(struct pci_dev *pdev)
}
static struct pci_driver typhoon_driver = {
- .name = DRV_MODULE_NAME,
+ .name = KBUILD_MODNAME,
.id_table = typhoon_pci_tbl,
.probe = typhoon_init_one,
.remove = __devexit_p(typhoon_remove_one),
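The probe rewrite above funnels every failure through a single pr_err() at error_out instead of repeating pci_name(pdev) in each branch. Stripped down to its shape (a hedged sketch, not the full function):

    /* hedged sketch of the consolidated error reporting in typhoon_init_one() */
    const char *err_msg;
    int err;

    err = pci_enable_device(pdev);
    if (err < 0) {
        err_msg = "unable to enable device";
        goto error_out;
    }
    /* ... every later failure sets err_msg and jumps the same way ... */
    return 0;

error_out:
    pr_err("%s: %s\n", pci_name(pdev), err_msg);
    return err;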
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index eb8fe7e16c6..23a97518bc1 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -37,6 +37,7 @@
#include <asm/qe.h>
#include <asm/ucc.h>
#include <asm/ucc_fast.h>
+#include <asm/machdep.h>
#include "ucc_geth.h"
#include "fsl_pq_mdio.h"
@@ -1334,7 +1335,7 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
struct ucc_geth __iomem *ug_regs;
struct ucc_fast __iomem *uf_regs;
int ret_val;
- u32 upsmr, maccfg2, tbiBaseAddress;
+ u32 upsmr, maccfg2;
u16 value;
ugeth_vdbg("%s: IN", __func__);
@@ -1389,14 +1390,20 @@ static int adjust_enet_interface(struct ucc_geth_private *ugeth)
/* Note that this depends on proper setting in utbipar register. */
if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
(ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
- tbiBaseAddress = in_be32(&ug_regs->utbipar);
- tbiBaseAddress &= UTBIPAR_PHY_ADDRESS_MASK;
- tbiBaseAddress >>= UTBIPAR_PHY_ADDRESS_SHIFT;
- value = ugeth->phydev->bus->read(ugeth->phydev->bus,
- (u8) tbiBaseAddress, ENET_TBI_MII_CR);
+ struct ucc_geth_info *ug_info = ugeth->ug_info;
+ struct phy_device *tbiphy;
+
+ if (!ug_info->tbi_node)
+ ugeth_warn("TBI mode requires that the device "
+ "tree specify a tbi-handle\n");
+
+ tbiphy = of_phy_find_device(ug_info->tbi_node);
+ if (!tbiphy)
+ ugeth_warn("Could not get TBI device\n");
+
+ value = phy_read(tbiphy, ENET_TBI_MII_CR);
value &= ~0x1000; /* Turn off autonegotiation */
- ugeth->phydev->bus->write(ugeth->phydev->bus,
- (u8) tbiBaseAddress, ENET_TBI_MII_CR, value);
+ phy_write(tbiphy, ENET_TBI_MII_CR, value);
}
init_check_frame_length_mode(ug_info->lengthCheckRx, &ug_regs->maccfg2);
@@ -1995,7 +2002,6 @@ static void ucc_geth_set_multi(struct net_device *dev)
struct dev_mc_list *dmi;
struct ucc_fast __iomem *uf_regs;
struct ucc_geth_82xx_address_filtering_pram __iomem *p_82xx_addr_filt;
- int i;
ugeth = netdev_priv(dev);
@@ -2022,10 +2028,7 @@ static void ucc_geth_set_multi(struct net_device *dev)
out_be32(&p_82xx_addr_filt->gaddr_h, 0x0);
out_be32(&p_82xx_addr_filt->gaddr_l, 0x0);
- dmi = dev->mc_list;
-
- for (i = 0; i < dev->mc_count; i++, dmi = dmi->next) {
-
+ netdev_for_each_mc_addr(dmi, dev) {
/* Only support group multicast for now.
*/
if (!(dmi->dmi_addr[0] & 1))
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index a516185cbc9..20e34608fa4 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -184,8 +184,8 @@ static int asix_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
void *buf;
int err = -ENOMEM;
- devdbg(dev,"asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d",
- cmd, value, index, size);
+ netdev_dbg(dev->net, "asix_read_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
buf = kmalloc(size, GFP_KERNEL);
if (!buf)
@@ -217,8 +217,8 @@ static int asix_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
void *buf = NULL;
int err = -ENOMEM;
- devdbg(dev,"asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d",
- cmd, value, index, size);
+ netdev_dbg(dev->net, "asix_write_cmd() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
if (data) {
buf = kmalloc(size, GFP_KERNEL);
@@ -264,15 +264,15 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
int status;
struct urb *urb;
- devdbg(dev,"asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d",
- cmd, value, index, size);
+ netdev_dbg(dev->net, "asix_write_cmd_async() cmd=0x%02x value=0x%04x index=0x%04x size=%d\n",
+ cmd, value, index, size);
if ((urb = usb_alloc_urb(0, GFP_ATOMIC)) == NULL) {
- deverr(dev, "Error allocating URB in write_cmd_async!");
+ netdev_err(dev->net, "Error allocating URB in write_cmd_async!\n");
return;
}
if ((req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC)) == NULL) {
- deverr(dev, "Failed to allocate memory for control request");
+ netdev_err(dev->net, "Failed to allocate memory for control request\n");
usb_free_urb(urb);
return;
}
@@ -289,8 +289,8 @@ asix_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
asix_async_cmd_callback, req);
if((status = usb_submit_urb(urb, GFP_ATOMIC)) < 0) {
- deverr(dev, "Error submitting the control message: status=%d",
- status);
+ netdev_err(dev->net, "Error submitting the control message: status=%d\n",
+ status);
kfree(req);
usb_free_urb(urb);
}
@@ -314,7 +314,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
while (skb->len > 0) {
if ((short)(header & 0x0000ffff) !=
~((short)((header & 0xffff0000) >> 16))) {
- deverr(dev,"asix_rx_fixup() Bad Header Length");
+ netdev_err(dev->net, "asix_rx_fixup() Bad Header Length\n");
}
/* get the packet length */
size = (u16) (header & 0x0000ffff);
@@ -322,7 +322,8 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
if ((skb->len) - ((size + 1) & 0xfffe) == 0)
return 2;
if (size > ETH_FRAME_LEN) {
- deverr(dev,"asix_rx_fixup() Bad RX Length %d", size);
+ netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
+ size);
return 0;
}
ax_skb = skb_clone(skb, GFP_ATOMIC);
@@ -348,7 +349,8 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
if (skb->len < 0) {
- deverr(dev,"asix_rx_fixup() Bad SKB Length %d", skb->len);
+ netdev_err(dev->net, "asix_rx_fixup() Bad SKB Length %d\n",
+ skb->len);
return 0;
}
return 1;
@@ -409,7 +411,7 @@ static void asix_status(struct usbnet *dev, struct urb *urb)
usbnet_defer_kevent (dev, EVENT_LINK_RESET );
} else
netif_carrier_off(dev->net);
- devdbg(dev, "Link Status is: %d", link);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
}
}
@@ -418,7 +420,7 @@ static inline int asix_set_sw_mii(struct usbnet *dev)
int ret;
ret = asix_write_cmd(dev, AX_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to enable software MII access");
+ netdev_err(dev->net, "Failed to enable software MII access\n");
return ret;
}
@@ -427,7 +429,7 @@ static inline int asix_set_hw_mii(struct usbnet *dev)
int ret;
ret = asix_write_cmd(dev, AX_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to enable hardware MII access");
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
return ret;
}
@@ -436,13 +438,14 @@ static inline int asix_get_phy_addr(struct usbnet *dev)
u8 buf[2];
int ret = asix_read_cmd(dev, AX_CMD_READ_PHY_ID, 0, 0, 2, buf);
- devdbg(dev, "asix_get_phy_addr()");
+ netdev_dbg(dev->net, "asix_get_phy_addr()\n");
if (ret < 0) {
- deverr(dev, "Error reading PHYID register: %02x", ret);
+ netdev_err(dev->net, "Error reading PHYID register: %02x\n", ret);
goto out;
}
- devdbg(dev, "asix_get_phy_addr() returning 0x%04x", *((__le16 *)buf));
+ netdev_dbg(dev->net, "asix_get_phy_addr() returning 0x%04x\n",
+ *((__le16 *)buf));
ret = buf[1];
out:
@@ -455,7 +458,7 @@ static int asix_sw_reset(struct usbnet *dev, u8 flags)
ret = asix_write_cmd(dev, AX_CMD_SW_RESET, flags, 0, 0, NULL);
if (ret < 0)
- deverr(dev,"Failed to send software reset: %02x", ret);
+ netdev_err(dev->net, "Failed to send software reset: %02x\n", ret);
return ret;
}
@@ -466,7 +469,7 @@ static u16 asix_read_rx_ctl(struct usbnet *dev)
int ret = asix_read_cmd(dev, AX_CMD_READ_RX_CTL, 0, 0, 2, &v);
if (ret < 0) {
- deverr(dev, "Error reading RX_CTL register: %02x", ret);
+ netdev_err(dev->net, "Error reading RX_CTL register: %02x\n", ret);
goto out;
}
ret = le16_to_cpu(v);
@@ -478,11 +481,11 @@ static int asix_write_rx_ctl(struct usbnet *dev, u16 mode)
{
int ret;
- devdbg(dev,"asix_write_rx_ctl() - mode = 0x%04x", mode);
+ netdev_dbg(dev->net, "asix_write_rx_ctl() - mode = 0x%04x\n", mode);
ret = asix_write_cmd(dev, AX_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to write RX_CTL mode to 0x%04x: %02x",
- mode, ret);
+ netdev_err(dev->net, "Failed to write RX_CTL mode to 0x%04x: %02x\n",
+ mode, ret);
return ret;
}
@@ -493,7 +496,8 @@ static u16 asix_read_medium_status(struct usbnet *dev)
int ret = asix_read_cmd(dev, AX_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
if (ret < 0) {
- deverr(dev, "Error reading Medium Status register: %02x", ret);
+ netdev_err(dev->net, "Error reading Medium Status register: %02x\n",
+ ret);
goto out;
}
ret = le16_to_cpu(v);
@@ -505,11 +509,11 @@ static int asix_write_medium_mode(struct usbnet *dev, u16 mode)
{
int ret;
- devdbg(dev,"asix_write_medium_mode() - mode = 0x%04x", mode);
+ netdev_dbg(dev->net, "asix_write_medium_mode() - mode = 0x%04x\n", mode);
ret = asix_write_cmd(dev, AX_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to write Medium Mode mode to 0x%04x: %02x",
- mode, ret);
+ netdev_err(dev->net, "Failed to write Medium Mode mode to 0x%04x: %02x\n",
+ mode, ret);
return ret;
}
@@ -518,11 +522,11 @@ static int asix_write_gpio(struct usbnet *dev, u16 value, int sleep)
{
int ret;
- devdbg(dev,"asix_write_gpio() - value = 0x%04x", value);
+ netdev_dbg(dev->net, "asix_write_gpio() - value = 0x%04x\n", value);
ret = asix_write_cmd(dev, AX_CMD_WRITE_GPIOS, value, 0, 0, NULL);
if (ret < 0)
- deverr(dev, "Failed to write GPIO value 0x%04x: %02x",
- value, ret);
+ netdev_err(dev->net, "Failed to write GPIO value 0x%04x: %02x\n",
+ value, ret);
if (sleep)
msleep(sleep);
@@ -542,29 +546,27 @@ static void asix_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= AX_RX_CTL_PRO;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= AX_RX_CTL_AMALL;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list = net->mc_list;
+ struct dev_mc_list *mc_list;
u32 crc_bits;
- int i;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, net) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
- mc_list = mc_list->next;
}
asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
@@ -588,7 +590,8 @@ static int asix_mdio_read(struct net_device *netdev, int phy_id, int loc)
asix_set_hw_mii(dev);
mutex_unlock(&dev->phy_mutex);
- devdbg(dev, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x", phy_id, loc, le16_to_cpu(res));
+ netdev_dbg(dev->net, "asix_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+ phy_id, loc, le16_to_cpu(res));
return le16_to_cpu(res);
}
@@ -599,7 +602,8 @@ asix_mdio_write(struct net_device *netdev, int phy_id, int loc, int val)
struct usbnet *dev = netdev_priv(netdev);
__le16 res = cpu_to_le16(val);
- devdbg(dev, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x", phy_id, loc, val);
+ netdev_dbg(dev->net, "asix_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
+ phy_id, loc, val);
mutex_lock(&dev->phy_mutex);
asix_set_sw_mii(dev);
asix_write_cmd(dev, AX_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
@@ -754,29 +758,27 @@ static void ax88172_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x01;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > AX_MAX_MCAST) {
+ netdev_mc_count(net) > AX_MAX_MCAST) {
rx_ctl |= 0x02;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list = net->mc_list;
+ struct dev_mc_list *mc_list;
u32 crc_bits;
- int i;
memset(data->multi_filter, 0, AX_MCAST_FILTER_SIZE);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, net) {
crc_bits =
ether_crc(ETH_ALEN,
mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |=
1 << (crc_bits & 7);
- mc_list = mc_list->next;
}
asix_write_cmd_async(dev, AX_CMD_WRITE_MULTI_FILTER, 0, 0,
@@ -800,7 +802,8 @@ static int ax88172_link_reset(struct usbnet *dev)
if (ecmd.duplex != DUPLEX_FULL)
mode |= ~AX88172_MEDIUM_FD;
- devdbg(dev, "ax88172_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode);
+ netdev_dbg(dev->net, "ax88172_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
+ ecmd.speed, ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
@@ -902,7 +905,8 @@ static int ax88772_link_reset(struct usbnet *dev)
if (ecmd.duplex != DUPLEX_FULL)
mode &= ~AX_MEDIUM_FD;
- devdbg(dev, "ax88772_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode);
+ netdev_dbg(dev->net, "ax88772_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
+ ecmd.speed, ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
@@ -1059,10 +1063,10 @@ static int marvell_phy_init(struct usbnet *dev)
struct asix_data *data = (struct asix_data *)&dev->data;
u16 reg;
- devdbg(dev,"marvell_phy_init()");
+ netdev_dbg(dev->net, "marvell_phy_init()\n");
reg = asix_mdio_read(dev->net, dev->mii.phy_id, MII_MARVELL_STATUS);
- devdbg(dev,"MII_MARVELL_STATUS = 0x%04x", reg);
+ netdev_dbg(dev->net, "MII_MARVELL_STATUS = 0x%04x\n", reg);
asix_mdio_write(dev->net, dev->mii.phy_id, MII_MARVELL_CTRL,
MARVELL_CTRL_RXDELAY | MARVELL_CTRL_TXDELAY);
@@ -1070,7 +1074,7 @@ static int marvell_phy_init(struct usbnet *dev)
if (data->ledmode) {
reg = asix_mdio_read(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL);
- devdbg(dev,"MII_MARVELL_LED_CTRL (1) = 0x%04x", reg);
+ netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (1) = 0x%04x\n", reg);
reg &= 0xf8ff;
reg |= (1 + 0x0100);
@@ -1079,7 +1083,7 @@ static int marvell_phy_init(struct usbnet *dev)
reg = asix_mdio_read(dev->net, dev->mii.phy_id,
MII_MARVELL_LED_CTRL);
- devdbg(dev,"MII_MARVELL_LED_CTRL (2) = 0x%04x", reg);
+ netdev_dbg(dev->net, "MII_MARVELL_LED_CTRL (2) = 0x%04x\n", reg);
reg &= 0xfc0f;
}
@@ -1090,7 +1094,7 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
{
u16 reg = asix_mdio_read(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL);
- devdbg(dev, "marvell_led_status() read 0x%04x", reg);
+ netdev_dbg(dev->net, "marvell_led_status() read 0x%04x\n", reg);
/* Clear out the center LED bits - 0x03F0 */
reg &= 0xfc0f;
@@ -1106,7 +1110,7 @@ static int marvell_led_status(struct usbnet *dev, u16 speed)
reg |= 0x02f0;
}
- devdbg(dev, "marvell_led_status() writing 0x%04x", reg);
+ netdev_dbg(dev->net, "marvell_led_status() writing 0x%04x\n", reg);
asix_mdio_write(dev->net, dev->mii.phy_id, MARVELL_LED_MANUAL, reg);
return 0;
@@ -1118,7 +1122,7 @@ static int ax88178_link_reset(struct usbnet *dev)
struct ethtool_cmd ecmd;
struct asix_data *data = (struct asix_data *)&dev->data;
- devdbg(dev,"ax88178_link_reset()");
+ netdev_dbg(dev->net, "ax88178_link_reset()\n");
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
@@ -1138,7 +1142,8 @@ static int ax88178_link_reset(struct usbnet *dev)
else
mode &= ~AX_MEDIUM_FD;
- devdbg(dev, "ax88178_link_reset() speed: %d duplex: %d setting mode to 0x%04x", ecmd.speed, ecmd.duplex, mode);
+ netdev_dbg(dev->net, "ax88178_link_reset() speed: %d duplex: %d setting mode to 0x%04x\n",
+ ecmd.speed, ecmd.duplex, mode);
asix_write_medium_mode(dev, mode);
@@ -1188,7 +1193,7 @@ static int ax88178_change_mtu(struct net_device *net, int new_mtu)
struct usbnet *dev = netdev_priv(net);
int ll_mtu = new_mtu + net->hard_header_len + 4;
- devdbg(dev, "ax88178_change_mtu() new_mtu=%d", new_mtu);
+ netdev_dbg(dev->net, "ax88178_change_mtu() new_mtu=%d\n", new_mtu);
if (new_mtu <= 0 || ll_mtu > 16384)
return -EINVAL;
diff --git a/drivers/net/usb/catc.c b/drivers/net/usb/catc.c
index 22b87e64a81..96f1ebe0d34 100644
--- a/drivers/net/usb/catc.c
+++ b/drivers/net/usb/catc.c
@@ -632,7 +632,6 @@ static void catc_set_multicast_list(struct net_device *netdev)
struct dev_mc_list *mc;
u8 broadcast[6];
u8 rx = RxEnable | RxPolarity | RxMultiCast;
- int i;
memset(broadcast, 0xff, 6);
memset(catc->multicast, 0, 64);
@@ -648,7 +647,7 @@ static void catc_set_multicast_list(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI) {
memset(catc->multicast, 0xff, 64);
} else {
- for (i = 0, mc = netdev->mc_list; mc && i < netdev->mc_count; i++, mc = mc->next) {
+ netdev_for_each_mc_addr(mc, netdev) {
u32 crc = ether_crc_le(6, mc->dmi_addr);
if (!catc->is_f5u011) {
catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7);
@@ -897,11 +896,9 @@ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id
f5u011_rxmode(catc, catc->rxmode);
}
dbg("Init done.");
- printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, ",
+ printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n",
netdev->name, (catc->is_f5u011) ? "Belkin F5U011" : "CATC EL1210A NetMate",
- usbdev->bus->bus_name, usbdev->devpath);
- for (i = 0; i < 5; i++) printk("%2.2x:", netdev->dev_addr[i]);
- printk("%2.2x.\n", netdev->dev_addr[i]);
+ usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr);
usb_set_intfdata(intf, catc);
SET_NETDEV_DEV(netdev, &intf->dev);
diff --git a/drivers/net/usb/cdc_eem.c b/drivers/net/usb/cdc_eem.c
index c337ffc3304..a4a85a6ed86 100644
--- a/drivers/net/usb/cdc_eem.c
+++ b/drivers/net/usb/cdc_eem.c
@@ -73,7 +73,7 @@ static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb)
usb_free_urb(urb);
fail:
dev_kfree_skb(skb);
- devwarn(dev, "link cmd failure\n");
+ netdev_warn(dev->net, "link cmd failure\n");
return;
}
}
@@ -212,7 +212,8 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* b15: 1 (EEM command)
*/
if (header & BIT(14)) {
- devdbg(dev, "reserved command %04x\n", header);
+ netdev_dbg(dev->net, "reserved command %04x\n",
+ header);
continue;
}
@@ -255,8 +256,9 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
case 1: /* Echo response */
case 5: /* Tickle */
default: /* reserved */
- devwarn(dev, "unexpected link command %d\n",
- bmEEMCmd);
+ netdev_warn(dev->net,
+ "unexpected link command %d\n",
+ bmEEMCmd);
continue;
}
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 5f3b9eaeb04..c8cdb7f30ad 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -339,10 +339,10 @@ EXPORT_SYMBOL_GPL(usbnet_cdc_unbind);
static void dumpspeed(struct usbnet *dev, __le32 *speeds)
{
- if (netif_msg_timer(dev))
- devinfo(dev, "link speeds: %u kbps up, %u kbps down",
- __le32_to_cpu(speeds[0]) / 1000,
- __le32_to_cpu(speeds[1]) / 1000);
+ netif_info(dev, timer, dev->net,
+ "link speeds: %u kbps up, %u kbps down\n",
+ __le32_to_cpu(speeds[0]) / 1000,
+ __le32_to_cpu(speeds[1]) / 1000);
}
static void cdc_status(struct usbnet *dev, struct urb *urb)
@@ -361,18 +361,16 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
event = urb->transfer_buffer;
switch (event->bNotificationType) {
case USB_CDC_NOTIFY_NETWORK_CONNECTION:
- if (netif_msg_timer(dev))
- devdbg(dev, "CDC: carrier %s",
- event->wValue ? "on" : "off");
+ netif_dbg(dev, timer, dev->net, "CDC: carrier %s\n",
+ event->wValue ? "on" : "off");
if (event->wValue)
netif_carrier_on(dev->net);
else
netif_carrier_off(dev->net);
break;
case USB_CDC_NOTIFY_SPEED_CHANGE: /* tx/rx rates */
- if (netif_msg_timer(dev))
- devdbg(dev, "CDC: speed change (len %d)",
- urb->actual_length);
+ netif_dbg(dev, timer, dev->net, "CDC: speed change (len %d)\n",
+ urb->actual_length);
if (urb->actual_length != (sizeof *event + 8))
set_bit(EVENT_STS_SPLIT, &dev->flags);
else
@@ -382,8 +380,8 @@ static void cdc_status(struct usbnet *dev, struct urb *urb)
* but there are no standard formats for the response data.
*/
default:
- deverr(dev, "CDC: unexpected notification %02x!",
- event->bNotificationType);
+ netdev_err(dev->net, "CDC: unexpected notification %02x!\n",
+ event->bNotificationType);
break;
}
}
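The devdbg()/devinfo() calls wrapped in a netif_msg_timer(dev) test collapse into netif_dbg()/netif_info(), which carry the same msg_enable check inside the macro, so the logging behaviour should be unchanged. Roughly, as a hedged sketch of the expansion rather than the exact macro:

    /* approximately what netif_dbg(dev, timer, dev->net, ...) expands to */
    if (netif_msg_timer(dev))       /* tests dev->msg_enable & NETIF_MSG_TIMER */
        netdev_dbg(dev->net, "CDC: carrier %s\n",
                   event->wValue ? "on" : "off");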
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index 3d406f9b2f2..269339769f4 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -58,7 +58,7 @@ static int dm_read(struct usbnet *dev, u8 reg, u16 length, void *data)
void *buf;
int err = -ENOMEM;
- devdbg(dev, "dm_read() reg=0x%02x length=%d", reg, length);
+ netdev_dbg(dev->net, "dm_read() reg=0x%02x length=%d\n", reg, length);
buf = kmalloc(length, GFP_KERNEL);
if (!buf)
@@ -89,7 +89,7 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
void *buf = NULL;
int err = -ENOMEM;
- devdbg(dev, "dm_write() reg=0x%02x, length=%d", reg, length);
+ netdev_dbg(dev->net, "dm_write() reg=0x%02x, length=%d\n", reg, length);
if (data) {
buf = kmalloc(length, GFP_KERNEL);
@@ -112,7 +112,8 @@ static int dm_write(struct usbnet *dev, u8 reg, u16 length, void *data)
static int dm_write_reg(struct usbnet *dev, u8 reg, u8 value)
{
- devdbg(dev, "dm_write_reg() reg=0x%02x, value=0x%02x", reg, value);
+ netdev_dbg(dev->net, "dm_write_reg() reg=0x%02x, value=0x%02x\n",
+ reg, value);
return usb_control_msg(dev->udev,
usb_sndctrlpipe(dev->udev, 0),
DM_WRITE_REG,
@@ -142,13 +143,13 @@ static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- deverr(dev, "Error allocating URB in dm_write_async_helper!");
+ netdev_err(dev->net, "Error allocating URB in dm_write_async_helper!\n");
return;
}
req = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
if (!req) {
- deverr(dev, "Failed to allocate memory for control request");
+ netdev_err(dev->net, "Failed to allocate memory for control request\n");
usb_free_urb(urb);
return;
}
@@ -166,8 +167,8 @@ static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0) {
- deverr(dev, "Error submitting the control message: status=%d",
- status);
+ netdev_err(dev->net, "Error submitting the control message: status=%d\n",
+ status);
kfree(req);
usb_free_urb(urb);
}
@@ -175,15 +176,15 @@ static void dm_write_async_helper(struct usbnet *dev, u8 reg, u8 value,
static void dm_write_async(struct usbnet *dev, u8 reg, u16 length, void *data)
{
- devdbg(dev, "dm_write_async() reg=0x%02x length=%d", reg, length);
+ netdev_dbg(dev->net, "dm_write_async() reg=0x%02x length=%d\n", reg, length);
dm_write_async_helper(dev, reg, 0, length, data);
}
static void dm_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
{
- devdbg(dev, "dm_write_reg_async() reg=0x%02x value=0x%02x",
- reg, value);
+ netdev_dbg(dev->net, "dm_write_reg_async() reg=0x%02x value=0x%02x\n",
+ reg, value);
dm_write_async_helper(dev, reg, value, 0, NULL);
}
@@ -211,7 +212,7 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *valu
}
if (i == DM_TIMEOUT) {
- deverr(dev, "%s read timed out!", phy ? "phy" : "eeprom");
+ netdev_err(dev->net, "%s read timed out!\n", phy ? "phy" : "eeprom");
ret = -EIO;
goto out;
}
@@ -219,8 +220,8 @@ static int dm_read_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 *valu
dm_write_reg(dev, DM_SHARED_CTRL, 0x0);
ret = dm_read(dev, DM_SHARED_DATA, 2, value);
- devdbg(dev, "read shared %d 0x%02x returned 0x%04x, %d",
- phy, reg, *value, ret);
+ netdev_dbg(dev->net, "read shared %d 0x%02x returned 0x%04x, %d\n",
+ phy, reg, *value, ret);
out:
mutex_unlock(&dev->phy_mutex);
@@ -254,7 +255,7 @@ static int dm_write_shared_word(struct usbnet *dev, int phy, u8 reg, __le16 valu
}
if (i == DM_TIMEOUT) {
- deverr(dev, "%s write timed out!", phy ? "phy" : "eeprom");
+ netdev_err(dev->net, "%s write timed out!\n", phy ? "phy" : "eeprom");
ret = -EIO;
goto out;
}
@@ -304,15 +305,15 @@ static int dm9601_mdio_read(struct net_device *netdev, int phy_id, int loc)
__le16 res;
if (phy_id) {
- devdbg(dev, "Only internal phy supported");
+ netdev_dbg(dev->net, "Only internal phy supported\n");
return 0;
}
dm_read_shared_word(dev, 1, loc, &res);
- devdbg(dev,
- "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x",
- phy_id, loc, le16_to_cpu(res));
+ netdev_dbg(dev->net,
+ "dm9601_mdio_read() phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n",
+ phy_id, loc, le16_to_cpu(res));
return le16_to_cpu(res);
}
@@ -324,12 +325,12 @@ static void dm9601_mdio_write(struct net_device *netdev, int phy_id, int loc,
__le16 res = cpu_to_le16(val);
if (phy_id) {
- devdbg(dev, "Only internal phy supported");
+ netdev_dbg(dev->net, "Only internal phy supported\n");
return;
}
- devdbg(dev,"dm9601_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x",
- phy_id, loc, val);
+ netdev_dbg(dev->net, "dm9601_mdio_write() phy_id=0x%02x, loc=0x%02x, val=0x%04x\n",
+ phy_id, loc, val);
dm_write_shared_word(dev, 1, loc, res);
}
@@ -381,13 +382,13 @@ static void dm9601_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
rx_ctl |= 0x02;
- } else if (net->flags & IFF_ALLMULTI || net->mc_count > DM_MAX_MCAST) {
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > DM_MAX_MCAST) {
rx_ctl |= 0x04;
- } else if (net->mc_count) {
- struct dev_mc_list *mc_list = net->mc_list;
- int i;
+ } else if (!netdev_mc_empty(net)) {
+ struct dev_mc_list *mc_list;
- for (i = 0; i < net->mc_count; i++, mc_list = mc_list->next) {
+ netdev_for_each_mc_addr(mc_list, net) {
u32 crc = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
hashes[crc >> 3] |= 1 << (crc & 0x7);
}
@@ -592,7 +593,7 @@ static void dm9601_status(struct usbnet *dev, struct urb *urb)
}
else
netif_carrier_off(dev->net);
- devdbg(dev, "Link Status is: %d", link);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
}
}
@@ -603,8 +604,8 @@ static int dm9601_link_reset(struct usbnet *dev)
mii_check_media(&dev->mii, 1, 1);
mii_ethtool_gset(&dev->mii, &ecmd);
- devdbg(dev, "link_reset() speed: %d duplex: %d",
- ecmd.speed, ecmd.duplex);
+ netdev_dbg(dev->net, "link_reset() speed: %d duplex: %d\n",
+ ecmd.speed, ecmd.duplex);
return 0;
}
diff --git a/drivers/net/usb/int51x1.c b/drivers/net/usb/int51x1.c
index 55cf7081de1..3c228df5706 100644
--- a/drivers/net/usb/int51x1.c
+++ b/drivers/net/usb/int51x1.c
@@ -51,7 +51,7 @@ static int int51x1_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
int len;
if (!(pskb_may_pull(skb, INT51X1_HEADER_SIZE))) {
- deverr(dev, "unexpected tiny rx frame");
+ netdev_err(dev->net, "unexpected tiny rx frame\n");
return 0;
}
@@ -138,25 +138,25 @@ static void int51x1_set_multicast(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
/* do not expect to see traffic of other PLCs */
filter |= PACKET_TYPE_PROMISCUOUS;
- devinfo(dev, "promiscuous mode enabled");
- } else if (netdev->mc_count ||
+ netdev_info(dev->net, "promiscuous mode enabled\n");
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
filter |= PACKET_TYPE_ALL_MULTICAST;
- devdbg(dev, "receive all multicast enabled");
+ netdev_dbg(dev->net, "receive all multicast enabled\n");
} else {
/* ~PROMISCUOUS, ~MULTICAST */
- devdbg(dev, "receive own packets only");
+ netdev_dbg(dev->net, "receive own packets only\n");
}
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- devwarn(dev, "Error allocating URB");
+ netdev_warn(dev->net, "Error allocating URB\n");
return;
}
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req) {
- devwarn(dev, "Error allocating control msg");
+ netdev_warn(dev->net, "Error allocating control msg\n");
goto out;
}
@@ -173,7 +173,8 @@ static void int51x1_set_multicast(struct net_device *netdev)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0) {
- devwarn(dev, "Error submitting control msg, sts=%d", status);
+ netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
+ status);
goto out1;
}
return;
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index f1d64ef67ef..52671ea043a 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -881,7 +881,7 @@ static void kaweth_set_rx_mode(struct net_device *net)
if (net->flags & IFF_PROMISC) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_PROMISCUOUS;
}
- else if ((net->mc_count) || (net->flags & IFF_ALLMULTI)) {
+ else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
packet_filter_bitmap |= KAWETH_PACKET_FILTER_ALL_MULTICAST;
}
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 87374317f48..70978219e98 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -1,13 +1,27 @@
/*
- * MosChips MCS7830 based USB 2.0 Ethernet Devices
+ * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices
*
* based on usbnet.c, asix.c and the vendor provided mcs7830 driver
*
+ * Copyright (C) 2010 Andreas Mohr <andi@lisas.de>
* Copyright (C) 2006 Arnd Bergmann <arnd@arndb.de>
* Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com>
* Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net>
* Copyright (c) 2002-2003 TiVo Inc.
*
+ * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
+ *
+ * TODO:
+ * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
+ * - implement ethtool_ops get_pauseparam/set_pauseparam
+ * via HIF_REG_PAUSE_THRESHOLD (>= revision C only!)
+ * - implement get_eeprom/[set_eeprom]
+ * - switch PHY on/off on ifup/ifdown (perhaps in usbnet.c, via MII)
+ * - mcs7830_get_regs() handling is weird: for rev 2 we return 32 regs,
+ * can access only ~ 24, remaining user buffer is uninitialized garbage
+ * - anything else?
+ *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
@@ -55,7 +69,7 @@
ADVERTISE_100HALF | ADVERTISE_10FULL | \
ADVERTISE_10HALF | ADVERTISE_CSMA)
-/* HIF_REG_XX coressponding index value */
+/* HIF_REG_XX corresponding index value */
enum {
HIF_REG_MULTICAST_HASH = 0x00,
HIF_REG_PACKET_GAP1 = 0x08,
@@ -69,6 +83,7 @@ enum {
HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80,
HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40,
HIF_REG_CONFIG = 0x0e,
+ /* hmm, spec says: "R/W", "Except bit 3" (likely TXENABLE). */
HIF_REG_CONFIG_CFG = 0x80,
HIF_REG_CONFIG_SPEED100 = 0x40,
HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20,
@@ -76,13 +91,24 @@ enum {
HIF_REG_CONFIG_TXENABLE = 0x08,
HIF_REG_CONFIG_SLEEPMODE = 0x04,
HIF_REG_CONFIG_ALLMULTICAST = 0x02,
- HIF_REG_CONFIG_PROMISCIOUS = 0x01,
+ HIF_REG_CONFIG_PROMISCUOUS = 0x01,
HIF_REG_ETHERNET_ADDR = 0x0f,
- HIF_REG_22 = 0x15,
+ HIF_REG_FRAME_DROP_COUNTER = 0x15, /* 0..ff; reset: 0 */
HIF_REG_PAUSE_THRESHOLD = 0x16,
HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0,
};
+/* Trailing status byte in Ethernet Rx frame */
+enum {
+ MCS7830_RX_SHORT_FRAME = 0x01, /* < 64 bytes */
+ MCS7830_RX_LENGTH_ERROR = 0x02, /* framelen != Ethernet length field */
+ MCS7830_RX_ALIGNMENT_ERROR = 0x04, /* non-even number of nibbles */
+ MCS7830_RX_CRC_ERROR = 0x08,
+ MCS7830_RX_LARGE_FRAME = 0x10, /* > 1518 bytes */
+ MCS7830_RX_FRAME_CORRECT = 0x20, /* frame is correct */
+ /* [7:6] reserved */
+};
+
struct mcs7830_data {
u8 multi_filter[8];
u8 config;
@@ -109,7 +135,7 @@ static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data)
return ret;
}
-static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, void *data)
+static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data)
{
struct usb_device *xdev = dev->udev;
int ret;
@@ -183,13 +209,43 @@ out:
usb_free_urb(urb);
}
-static int mcs7830_get_address(struct usbnet *dev)
+static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr)
+{
+ int ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int mcs7830_hif_set_mac_address(struct usbnet *dev, unsigned char *addr)
+{
+ int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr);
+
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
{
int ret;
- ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
- dev->net->dev_addr);
+ struct usbnet *dev = netdev_priv(netdev);
+ struct sockaddr *addr = p;
+
+ if (netif_running(netdev))
+ return -EBUSY;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EINVAL;
+
+ ret = mcs7830_hif_set_mac_address(dev, addr->sa_data);
+
if (ret < 0)
return ret;
+
+ /* it worked --> adopt it on netdev side */
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
return 0;
}
@@ -307,7 +363,7 @@ static int mcs7830_get_rev(struct usbnet *dev)
{
u8 dummy[2];
int ret;
- ret = mcs7830_get_reg(dev, HIF_REG_22, 2, dummy);
+ ret = mcs7830_get_reg(dev, HIF_REG_FRAME_DROP_COUNTER, 2, dummy);
if (ret > 0)
return 2; /* Rev C or later */
return 1; /* earlier revision */
@@ -331,33 +387,6 @@ static void mcs7830_rev_C_fixup(struct usbnet *dev)
}
}
-static int mcs7830_init_dev(struct usbnet *dev)
-{
- int ret;
- int retry;
-
- /* Read MAC address from EEPROM */
- ret = -EINVAL;
- for (retry = 0; retry < 5 && ret; retry++)
- ret = mcs7830_get_address(dev);
- if (ret) {
- dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
- goto out;
- }
-
- /* Set up PHY */
- ret = mcs7830_set_autoneg(dev, 0);
- if (ret) {
- dev_info(&dev->udev->dev, "Cannot set autoneg\n");
- goto out;
- }
-
- mcs7830_rev_C_fixup(dev);
- ret = 0;
-out:
- return ret;
-}
-
static int mcs7830_mdio_read(struct net_device *netdev, int phy_id,
int location)
{
@@ -378,11 +407,33 @@ static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
}
-/* credits go to asix_set_multicast */
-static void mcs7830_set_multicast(struct net_device *net)
+static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev)
+{
+ return (struct mcs7830_data *)&dev->data;
+}
+
+static void mcs7830_hif_update_multicast_hash(struct usbnet *dev)
+{
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+ mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
+ sizeof data->multi_filter,
+ data->multi_filter);
+}
+
+static void mcs7830_hif_update_config(struct usbnet *dev)
+{
+ /* implementation specific to data->config
+ (argument needs to be heap-based anyway - USB DMA!) */
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+ mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
+}
+
+static void mcs7830_data_set_multicast(struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
- struct mcs7830_data *data = (struct mcs7830_data *)&dev->data;
+ struct mcs7830_data *data = mcs7830_get_data(dev);
+
+ memset(data->multi_filter, 0, sizeof data->multi_filter);
data->config = HIF_REG_CONFIG_TXENABLE;
@@ -390,36 +441,64 @@ static void mcs7830_set_multicast(struct net_device *net)
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
if (net->flags & IFF_PROMISC) {
- data->config |= HIF_REG_CONFIG_PROMISCIOUS;
+ data->config |= HIF_REG_CONFIG_PROMISCUOUS;
} else if (net->flags & IFF_ALLMULTI ||
- net->mc_count > MCS7830_MAX_MCAST) {
+ netdev_mc_count(net) > MCS7830_MAX_MCAST) {
data->config |= HIF_REG_CONFIG_ALLMULTICAST;
- } else if (net->mc_count == 0) {
+ } else if (netdev_mc_empty(net)) {
/* just broadcast and directed */
} else {
/* We use the 20 byte dev->data
* for our 8 byte filter buffer
* to avoid allocating memory that
* is tricky to free later */
- struct dev_mc_list *mc_list = net->mc_list;
+ struct dev_mc_list *mc_list;
u32 crc_bits;
- int i;
-
- memset(data->multi_filter, 0, sizeof data->multi_filter);
/* Build the multicast hash filter. */
- for (i = 0; i < net->mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, net) {
crc_bits = ether_crc(ETH_ALEN, mc_list->dmi_addr) >> 26;
data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
- mc_list = mc_list->next;
}
+ }
+}
- mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH,
- sizeof data->multi_filter,
- data->multi_filter);
+static int mcs7830_apply_base_config(struct usbnet *dev)
+{
+ int ret;
+
+ /* re-configure known MAC (suspend case etc.) */
+ ret = mcs7830_hif_set_mac_address(dev, dev->net->dev_addr);
+ if (ret) {
+ dev_info(&dev->udev->dev, "Cannot set MAC address\n");
+ goto out;
}
- mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config);
+ /* Set up PHY */
+ ret = mcs7830_set_autoneg(dev, 0);
+ if (ret) {
+ dev_info(&dev->udev->dev, "Cannot set autoneg\n");
+ goto out;
+ }
+
+ mcs7830_hif_update_multicast_hash(dev);
+ mcs7830_hif_update_config(dev);
+
+ mcs7830_rev_C_fixup(dev);
+ ret = 0;
+out:
+ return ret;
+}
+
+/* credits go to asix_set_multicast */
+static void mcs7830_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ mcs7830_data_set_multicast(net);
+
+ mcs7830_hif_update_multicast_hash(dev);
+ mcs7830_hif_update_config(dev);
}
static int mcs7830_get_regs_len(struct net_device *net)
@@ -463,29 +542,6 @@ static const struct ethtool_ops mcs7830_ethtool_ops = {
.nway_reset = usbnet_nway_reset,
};
-static int mcs7830_set_mac_address(struct net_device *netdev, void *p)
-{
- int ret;
- struct usbnet *dev = netdev_priv(netdev);
- struct sockaddr *addr = p;
-
- if (netif_running(netdev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(addr->sa_data))
- return -EINVAL;
-
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
-
- ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN,
- netdev->dev_addr);
-
- if (ret < 0)
- return ret;
-
- return 0;
-}
-
static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_open = usbnet_open,
.ndo_stop = usbnet_stop,
@@ -495,21 +551,32 @@ static const struct net_device_ops mcs7830_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_do_ioctl = mcs7830_ioctl,
.ndo_set_multicast_list = mcs7830_set_multicast,
- .ndo_set_mac_address = mcs7830_set_mac_address,
+ .ndo_set_mac_address = mcs7830_set_mac_address,
};
static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev)
{
struct net_device *net = dev->net;
int ret;
+ int retry;
- ret = mcs7830_init_dev(dev);
+ /* Initial startup: Gather MAC address setting from EEPROM */
+ ret = -EINVAL;
+ for (retry = 0; retry < 5 && ret; retry++)
+ ret = mcs7830_hif_get_mac_address(dev, net->dev_addr);
+ if (ret) {
+ dev_warn(&dev->udev->dev, "Cannot read MAC address\n");
+ goto out;
+ }
+
+ mcs7830_data_set_multicast(net);
+
+ ret = mcs7830_apply_base_config(dev);
if (ret)
goto out;
net->ethtool_ops = &mcs7830_ethtool_ops;
net->netdev_ops = &mcs7830_netdev_ops;
- mcs7830_set_multicast(net);
/* reserve space for the status byte on rx */
dev->rx_urb_size = ETH_FRAME_LEN + 1;
@@ -526,7 +593,7 @@ out:
return ret;
}
-/* The chip always appends a status bytes that we need to strip */
+/* The chip always appends a status byte that we need to strip */
static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
u8 status;
@@ -539,9 +606,23 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb_trim(skb, skb->len - 1);
status = skb->data[skb->len];
- if (status != 0x20)
+ if (status != MCS7830_RX_FRAME_CORRECT) {
dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status);
+ /* hmm, perhaps usbnet.c already sees a globally visible
+ frame error and increments rx_errors on its own? */
+ dev->net->stats.rx_errors++;
+
+ if (status & (MCS7830_RX_SHORT_FRAME
+ |MCS7830_RX_LENGTH_ERROR
+ |MCS7830_RX_LARGE_FRAME))
+ dev->net->stats.rx_length_errors++;
+ if (status & MCS7830_RX_ALIGNMENT_ERROR)
+ dev->net->stats.rx_frame_errors++;
+ if (status & MCS7830_RX_CRC_ERROR)
+ dev->net->stats.rx_crc_errors++;
+ }
+
return skb->len > 0;
}
@@ -580,6 +661,20 @@ static const struct usb_device_id products[] = {
};
MODULE_DEVICE_TABLE(usb, products);
+static int mcs7830_reset_resume (struct usb_interface *intf)
+{
+ /* YES, this function is successful enough that ethtool -d
+ does show the same output pre-/post-suspend */
+
+ struct usbnet *dev = usb_get_intfdata(intf);
+
+ mcs7830_apply_base_config(dev);
+
+ usbnet_resume(intf);
+
+ return 0;
+}
+
static struct usb_driver mcs7830_driver = {
.name = driver_name,
.id_table = products,
@@ -587,6 +682,7 @@ static struct usb_driver mcs7830_driver = {
.disconnect = usbnet_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
+ .reset_resume = mcs7830_reset_resume,
};
static int __init mcs7830_init(void)
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index aeb1ab03a9e..bdcad45954a 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -203,25 +203,23 @@ static void nc_dump_registers(struct usbnet *dev)
static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl)
{
- if (!netif_msg_link(dev))
- return;
- devdbg(dev, "net1080 %s-%s usbctl 0x%x:%s%s%s%s%s;"
- " this%s%s;"
- " other%s%s; r/o 0x%x",
- dev->udev->bus->bus_name, dev->udev->devpath,
- usbctl,
- (usbctl & USBCTL_ENABLE_LANG) ? " lang" : "",
- (usbctl & USBCTL_ENABLE_MFGR) ? " mfgr" : "",
- (usbctl & USBCTL_ENABLE_PROD) ? " prod" : "",
- (usbctl & USBCTL_ENABLE_SERIAL) ? " serial" : "",
- (usbctl & USBCTL_ENABLE_DEFAULTS) ? " defaults" : "",
-
- (usbctl & USBCTL_FLUSH_OTHER) ? " FLUSH" : "",
- (usbctl & USBCTL_DISCONN_OTHER) ? " DIS" : "",
- (usbctl & USBCTL_FLUSH_THIS) ? " FLUSH" : "",
- (usbctl & USBCTL_DISCONN_THIS) ? " DIS" : "",
- usbctl & ~USBCTL_WRITABLE_MASK
- );
+ netif_dbg(dev, link, dev->net,
+ "net1080 %s-%s usbctl 0x%x:%s%s%s%s%s; this%s%s; other%s%s; r/o 0x%x\n",
+ dev->udev->bus->bus_name, dev->udev->devpath,
+ usbctl,
+ (usbctl & USBCTL_ENABLE_LANG) ? " lang" : "",
+ (usbctl & USBCTL_ENABLE_MFGR) ? " mfgr" : "",
+ (usbctl & USBCTL_ENABLE_PROD) ? " prod" : "",
+ (usbctl & USBCTL_ENABLE_SERIAL) ? " serial" : "",
+ (usbctl & USBCTL_ENABLE_DEFAULTS) ? " defaults" : "",
+
+ (usbctl & USBCTL_FLUSH_THIS) ? " FLUSH" : "",
+ (usbctl & USBCTL_DISCONN_THIS) ? " DIS" : "",
+
+ (usbctl & USBCTL_FLUSH_OTHER) ? " FLUSH" : "",
+ (usbctl & USBCTL_DISCONN_OTHER) ? " DIS" : "",
+
+ usbctl & ~USBCTL_WRITABLE_MASK);
}
/*-------------------------------------------------------------------------*/
@@ -248,30 +246,26 @@ static inline void nc_dump_usbctl(struct usbnet *dev, u16 usbctl)
static inline void nc_dump_status(struct usbnet *dev, u16 status)
{
- if (!netif_msg_link(dev))
- return;
- devdbg(dev, "net1080 %s-%s status 0x%x:"
- " this (%c) PKT=%d%s%s%s;"
- " other PKT=%d%s%s%s; unspec 0x%x",
- dev->udev->bus->bus_name, dev->udev->devpath,
- status,
-
- // XXX the packet counts don't seem right
- // (1 at reset, not 0); maybe UNSPEC too
-
- (status & STATUS_PORT_A) ? 'A' : 'B',
- STATUS_PACKETS_THIS(status),
- (status & STATUS_CONN_THIS) ? " CON" : "",
- (status & STATUS_SUSPEND_THIS) ? " SUS" : "",
- (status & STATUS_MAILBOX_THIS) ? " MBOX" : "",
-
- STATUS_PACKETS_OTHER(status),
- (status & STATUS_CONN_OTHER) ? " CON" : "",
- (status & STATUS_SUSPEND_OTHER) ? " SUS" : "",
- (status & STATUS_MAILBOX_OTHER) ? " MBOX" : "",
-
- status & STATUS_UNSPEC_MASK
- );
+ netif_dbg(dev, link, dev->net,
+ "net1080 %s-%s status 0x%x: this (%c) PKT=%d%s%s%s; other PKT=%d%s%s%s; unspec 0x%x\n",
+ dev->udev->bus->bus_name, dev->udev->devpath,
+ status,
+
+ // XXX the packet counts don't seem right
+ // (1 at reset, not 0); maybe UNSPEC too
+
+ (status & STATUS_PORT_A) ? 'A' : 'B',
+ STATUS_PACKETS_THIS(status),
+ (status & STATUS_CONN_THIS) ? " CON" : "",
+ (status & STATUS_SUSPEND_THIS) ? " SUS" : "",
+ (status & STATUS_MAILBOX_THIS) ? " MBOX" : "",
+
+ STATUS_PACKETS_OTHER(status),
+ (status & STATUS_CONN_OTHER) ? " CON" : "",
+ (status & STATUS_SUSPEND_OTHER) ? " SUS" : "",
+ (status & STATUS_MAILBOX_OTHER) ? " MBOX" : "",
+
+ status & STATUS_UNSPEC_MASK);
}
/*-------------------------------------------------------------------------*/
@@ -286,10 +280,9 @@ static inline void nc_dump_status(struct usbnet *dev, u16 status)
static inline void nc_dump_ttl(struct usbnet *dev, u16 ttl)
{
- if (netif_msg_link(dev))
- devdbg(dev, "net1080 %s-%s ttl 0x%x this = %d, other = %d",
- dev->udev->bus->bus_name, dev->udev->devpath,
- ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
+ netif_dbg(dev, link, dev->net, "net1080 %s-%s ttl 0x%x this = %d, other = %d\n",
+ dev->udev->bus->bus_name, dev->udev->devpath,
+ ttl, TTL_THIS(ttl), TTL_OTHER(ttl));
}
/*-------------------------------------------------------------------------*/
@@ -334,11 +327,9 @@ static int net1080_reset(struct usbnet *dev)
MK_TTL(NC_READ_TTL_MS, TTL_OTHER(ttl)) );
dbg("%s: assigned TTL, %d ms", dev->net->name, NC_READ_TTL_MS);
- if (netif_msg_link(dev))
- devinfo(dev, "port %c, peer %sconnected",
- (status & STATUS_PORT_A) ? 'A' : 'B',
- (status & STATUS_CONN_OTHER) ? "" : "dis"
- );
+ netif_info(dev, link, dev->net, "port %c, peer %sconnected\n",
+ (status & STATUS_PORT_A) ? 'A' : 'B',
+ (status & STATUS_CONN_OTHER) ? "" : "dis");
retval = 0;
done:
@@ -415,8 +406,8 @@ static void nc_ensure_sync(struct usbnet *dev)
return;
}
- if (netif_msg_rx_err(dev))
- devdbg(dev, "flush net1080; too many framing errors");
+ netif_dbg(dev, rx_err, dev->net,
+ "flush net1080; too many framing errors\n");
dev->frame_errors = 0;
}
}
@@ -486,8 +477,8 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 0;
}
#if 0
- devdbg(dev, "frame <rx h %d p %d id %d", header->hdr_len,
- header->packet_len, header->packet_id);
+ netdev_dbg(dev->net, "frame <rx h %d p %d id %d\n", header->hdr_len,
+ header->packet_len, header->packet_id);
#endif
dev->frame_errors = 0;
return 1;
@@ -547,9 +538,9 @@ encapsulate:
trailer = (struct nc_trailer *) skb_put(skb, sizeof *trailer);
put_unaligned(header->packet_id, &trailer->packet_id);
#if 0
- devdbg(dev, "frame >tx h %d p %d id %d",
- header->hdr_len, header->packet_len,
- header->packet_id);
+ netdev_dbg(dev->net, "frame >tx h %d p %d id %d\n",
+ header->hdr_len, header->packet_len,
+ header->packet_id);
#endif
return skb;
}
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index ed4a508ef26..41838773b56 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -132,9 +132,10 @@ static void ctrl_callback(struct urb *urb)
case -ENOENT:
break;
default:
- if (netif_msg_drv(pegasus) && printk_ratelimit())
- dev_dbg(&pegasus->intf->dev, "%s, status %d\n",
- __func__, status);
+ if (net_ratelimit())
+ netif_dbg(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, status);
+ break;
}
pegasus->flags &= ~ETH_REGS_CHANGED;
wake_up(&pegasus->ctrl_wait);
@@ -149,9 +150,8 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer) {
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
- __func__);
+ netif_warn(pegasus, drv, pegasus->net,
+ "out of memory in %s\n", __func__);
return -ENOMEM;
}
add_wait_queue(&pegasus->ctrl_wait, &wait);
@@ -181,9 +181,9 @@ static int get_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
set_current_state(TASK_RUNNING);
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_drv(pegasus) && printk_ratelimit())
- dev_err(&pegasus->intf->dev, "%s, status %d\n",
- __func__, ret);
+ if (net_ratelimit())
+ netif_err(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, ret);
goto out;
}
@@ -205,9 +205,8 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer) {
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
- __func__);
+ netif_warn(pegasus, drv, pegasus->net,
+ "out of memory in %s\n", __func__);
return -ENOMEM;
}
memcpy(buffer, data, size);
@@ -237,9 +236,8 @@ static int set_registers(pegasus_t * pegasus, __u16 indx, __u16 size,
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_drv(pegasus))
- dev_err(&pegasus->intf->dev, "%s, status %d\n",
- __func__, ret);
+ netif_err(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, ret);
goto out;
}
@@ -259,9 +257,8 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
tmp = kmalloc(1, GFP_KERNEL);
if (!tmp) {
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "out of memory in %s\n",
- __func__);
+ netif_warn(pegasus, drv, pegasus->net,
+ "out of memory in %s\n", __func__);
return -ENOMEM;
}
memcpy(tmp, &data, 1);
@@ -290,9 +287,9 @@ static int set_register(pegasus_t * pegasus, __u16 indx, __u8 data)
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_drv(pegasus) && printk_ratelimit())
- dev_err(&pegasus->intf->dev, "%s, status %d\n",
- __func__, ret);
+ if (net_ratelimit())
+ netif_err(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, ret);
goto out;
}
@@ -323,9 +320,8 @@ static int update_eth_regs_async(pegasus_t * pegasus)
if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) {
if (ret == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_drv(pegasus))
- dev_err(&pegasus->intf->dev, "%s, status %d\n",
- __func__, ret);
+ netif_err(pegasus, drv, pegasus->net,
+ "%s, status %d\n", __func__, ret);
}
return ret;
@@ -349,14 +345,16 @@ static int read_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 * regd)
if (data[0] & PHY_DONE)
break;
}
- if (i < REG_TIMEOUT) {
- ret = get_registers(pegasus, PhyData, 2, &regdi);
- *regd = le16_to_cpu(regdi);
- return ret;
- }
+
+ if (i >= REG_TIMEOUT)
+ goto fail;
+
+ ret = get_registers(pegasus, PhyData, 2, &regdi);
+ *regd = le16_to_cpu(regdi);
+ return ret;
+
fail:
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
+ netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return ret;
}
@@ -388,12 +386,14 @@ static int write_mii_word(pegasus_t * pegasus, __u8 phy, __u8 indx, __u16 regd)
if (data[0] & PHY_DONE)
break;
}
- if (i < REG_TIMEOUT)
- return ret;
+
+ if (i >= REG_TIMEOUT)
+ goto fail;
+
+ return ret;
fail:
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
+ netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
@@ -422,15 +422,15 @@ static int read_eprom_word(pegasus_t * pegasus, __u8 index, __u16 * retdata)
if (ret == -ESHUTDOWN)
goto fail;
}
- if (i < REG_TIMEOUT) {
- ret = get_registers(pegasus, EpromData, 2, &retdatai);
- *retdata = le16_to_cpu(retdatai);
- return ret;
- }
+ if (i >= REG_TIMEOUT)
+ goto fail;
+
+ ret = get_registers(pegasus, EpromData, 2, &retdatai);
+ *retdata = le16_to_cpu(retdatai);
+ return ret;
fail:
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
+ netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
@@ -475,11 +475,13 @@ static int write_eprom_word(pegasus_t * pegasus, __u8 index, __u16 data)
break;
}
disable_eprom_write(pegasus);
- if (i < REG_TIMEOUT)
- return ret;
+ if (i >= REG_TIMEOUT)
+ goto fail;
+
+ return ret;
+
fail:
- if (netif_msg_drv(pegasus))
- dev_warn(&pegasus->intf->dev, "%s failed\n", __func__);
+ netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__);
return -ETIMEDOUT;
}
#endif /* PEGASUS_WRITE_EEPROM */
@@ -642,25 +644,20 @@ static void read_bulk_callback(struct urb *urb)
case 0:
break;
case -ETIME:
- if (netif_msg_rx_err(pegasus))
- pr_debug("%s: reset MAC\n", net->name);
+ netif_dbg(pegasus, rx_err, net, "reset MAC\n");
pegasus->flags &= ~PEGASUS_RX_BUSY;
break;
case -EPIPE: /* stall, or disconnect from TT */
/* FIXME schedule work to clear the halt */
- if (netif_msg_rx_err(pegasus))
- printk(KERN_WARNING "%s: no rx stall recovery\n",
- net->name);
+ netif_warn(pegasus, rx_err, net, "no rx stall recovery\n");
return;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
- if (netif_msg_ifdown(pegasus))
- pr_debug("%s: rx unlink, %d\n", net->name, status);
+ netif_dbg(pegasus, ifdown, net, "rx unlink, %d\n", status);
return;
default:
- if (netif_msg_rx_err(pegasus))
- pr_debug("%s: RX status %d\n", net->name, status);
+ netif_dbg(pegasus, rx_err, net, "RX status %d\n", status);
goto goon;
}
@@ -669,9 +666,8 @@ static void read_bulk_callback(struct urb *urb)
rx_status = buf[count - 2];
if (rx_status & 0x1e) {
- if (netif_msg_rx_err(pegasus))
- pr_debug("%s: RX packet error %x\n",
- net->name, rx_status);
+ netif_dbg(pegasus, rx_err, net,
+ "RX packet error %x\n", rx_status);
pegasus->stats.rx_errors++;
if (rx_status & 0x06) // long or runt
pegasus->stats.rx_length_errors++;
@@ -758,9 +754,7 @@ static void rx_fixup(unsigned long data)
pegasus->rx_skb = pull_skb(pegasus);
}
if (pegasus->rx_skb == NULL) {
- if (netif_msg_rx_err(pegasus))
- printk(KERN_WARNING "%s: low on memory\n",
- pegasus->net->name);
+ netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n");
tasklet_schedule(&pegasus->rx_tl);
goto done;
}
@@ -800,19 +794,15 @@ static void write_bulk_callback(struct urb *urb)
case -EPIPE:
/* FIXME schedule_work() to clear the tx halt */
netif_stop_queue(net);
- if (netif_msg_tx_err(pegasus))
- printk(KERN_WARNING "%s: no tx stall recovery\n",
- net->name);
+ netif_warn(pegasus, tx_err, net, "no tx stall recovery\n");
return;
case -ENOENT:
case -ECONNRESET:
case -ESHUTDOWN:
- if (netif_msg_ifdown(pegasus))
- pr_debug("%s: tx unlink, %d\n", net->name, status);
+ netif_dbg(pegasus, ifdown, net, "tx unlink, %d\n", status);
return;
default:
- if (netif_msg_tx_err(pegasus))
- pr_info("%s: TX status %d\n", net->name, status);
+ netif_info(pegasus, tx_err, net, "TX status %d\n", status);
/* FALL THROUGH */
case 0:
break;
@@ -843,9 +833,7 @@ static void intr_callback(struct urb *urb)
/* some Pegasus-I products report LOTS of data
* toggle errors... avoid log spamming
*/
- if (netif_msg_timer(pegasus))
- pr_debug("%s: intr status %d\n", net->name,
- status);
+ netif_dbg(pegasus, timer, net, "intr status %d\n", status);
}
if (urb->actual_length >= 6) {
@@ -875,16 +863,15 @@ static void intr_callback(struct urb *urb)
res = usb_submit_urb(urb, GFP_ATOMIC);
if (res == -ENODEV)
netif_device_detach(pegasus->net);
- if (res && netif_msg_timer(pegasus))
- printk(KERN_ERR "%s: can't resubmit interrupt urb, %d\n",
- net->name, res);
+ if (res)
+ netif_err(pegasus, timer, net,
+ "can't resubmit interrupt urb, %d\n", res);
}
static void pegasus_tx_timeout(struct net_device *net)
{
pegasus_t *pegasus = netdev_priv(net);
- if (netif_msg_timer(pegasus))
- printk(KERN_WARNING "%s: tx timeout\n", net->name);
+ netif_warn(pegasus, timer, net, "tx timeout\n");
usb_unlink_urb(pegasus->tx_urb);
pegasus->stats.tx_errors++;
}
@@ -906,9 +893,7 @@ static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb,
pegasus->tx_buff, count,
write_bulk_callback, pegasus);
if ((res = usb_submit_urb(pegasus->tx_urb, GFP_ATOMIC))) {
- if (netif_msg_tx_err(pegasus))
- printk(KERN_WARNING "%s: fail tx, %d\n",
- net->name, res);
+ netif_warn(pegasus, tx_err, net, "fail tx, %d\n", res);
switch (res) {
case -EPIPE: /* stall, or disconnect from TT */
/* cleanup should already have been scheduled */
@@ -952,10 +937,9 @@ static inline void get_interrupt_interval(pegasus_t * pegasus)
interval = data >> 8;
if (pegasus->usb->speed != USB_SPEED_HIGH) {
if (interval < 0x80) {
- if (netif_msg_timer(pegasus))
- dev_info(&pegasus->intf->dev, "intr interval "
- "changed from %ums to %ums\n",
- interval, 0x80);
+ netif_info(pegasus, timer, pegasus->net,
+ "intr interval changed from %ums to %ums\n",
+ interval, 0x80);
interval = 0x80;
data = (data & 0x00FF) | ((u16)interval << 8);
#ifdef PEGASUS_WRITE_EEPROM
@@ -1046,8 +1030,7 @@ static int pegasus_open(struct net_device *net)
if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) {
if (res == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_ifup(pegasus))
- pr_debug("%s: failed rx_urb, %d", net->name, res);
+ netif_dbg(pegasus, ifup, net, "failed rx_urb, %d\n", res);
goto exit;
}
@@ -1058,15 +1041,13 @@ static int pegasus_open(struct net_device *net)
if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) {
if (res == -ENODEV)
netif_device_detach(pegasus->net);
- if (netif_msg_ifup(pegasus))
- pr_debug("%s: failed intr_urb, %d\n", net->name, res);
+ netif_dbg(pegasus, ifup, net, "failed intr_urb, %d\n", res);
usb_kill_urb(pegasus->rx_urb);
goto exit;
}
if ((res = enable_net_traffic(net, pegasus->usb))) {
- if (netif_msg_ifup(pegasus))
- pr_debug("%s: can't enable_net_traffic() - %d\n",
- net->name, res);
+ netif_dbg(pegasus, ifup, net,
+ "can't enable_net_traffic() - %d\n", res);
res = -EIO;
usb_kill_urb(pegasus->rx_urb);
usb_kill_urb(pegasus->intr_urb);
@@ -1075,8 +1056,7 @@ static int pegasus_open(struct net_device *net)
}
set_carrier(net);
netif_start_queue(net);
- if (netif_msg_ifup(pegasus))
- pr_debug("%s: open\n", net->name);
+ netif_dbg(pegasus, ifup, net, "open\n");
res = 0;
exit:
return res;
@@ -1230,13 +1210,11 @@ static void pegasus_set_multicast(struct net_device *net)
if (net->flags & IFF_PROMISC) {
pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS;
- if (netif_msg_link(pegasus))
- pr_info("%s: Promiscuous mode enabled.\n", net->name);
- } else if (net->mc_count || (net->flags & IFF_ALLMULTI)) {
+ netif_info(pegasus, link, net, "Promiscuous mode enabled\n");
+ } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) {
pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
- if (netif_msg_link(pegasus))
- pr_debug("%s: set allmulti\n", net->name);
+ netif_dbg(pegasus, link, net, "set allmulti\n");
} else {
pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST;
pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS;
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 490fa8f5542..4ce331fb1e1 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -57,8 +57,8 @@
*/
void rndis_status(struct usbnet *dev, struct urb *urb)
{
- devdbg(dev, "rndis status urb, len %d stat %d",
- urb->actual_length, urb->status);
+ netdev_dbg(dev->net, "rndis status urb, len %d stat %d\n",
+ urb->actual_length, urb->status);
// FIXME for keepalives, respond immediately (asynchronously)
// if not an RNDIS status, do like cdc_status(dev,urb) does
}
@@ -335,8 +335,8 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);
if (dev->maxpacket == 0) {
- if (netif_msg_probe(dev))
- dev_dbg(&intf->dev, "dev->maxpacket can't be 0\n");
+ netif_dbg(dev, probe, dev->net,
+ "dev->maxpacket can't be 0\n");
retval = -EINVAL;
goto fail_and_release;
}
@@ -394,17 +394,15 @@ generic_rndis_bind(struct usbnet *dev, struct usb_interface *intf, int flags)
}
if ((flags & FLAG_RNDIS_PHYM_WIRELESS) &&
*phym != RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
- if (netif_msg_probe(dev))
- dev_dbg(&intf->dev, "driver requires wireless "
- "physical medium, but device is not.\n");
+ netif_dbg(dev, probe, dev->net,
+ "driver requires wireless physical medium, but device is not\n");
retval = -ENODEV;
goto halt_fail_and_release;
}
if ((flags & FLAG_RNDIS_PHYM_NOT_WIRELESS) &&
*phym == RNDIS_PHYSICAL_MEDIUM_WIRELESS_LAN) {
- if (netif_msg_probe(dev))
- dev_dbg(&intf->dev, "driver requires non-wireless "
- "physical medium, but device is wireless.\n");
+ netif_dbg(dev, probe, dev->net,
+ "driver requires non-wireless physical medium, but device is wireless.\n");
retval = -ENODEV;
goto halt_fail_and_release;
}
@@ -497,9 +495,9 @@ int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
skb->len < msg_len ||
(data_offset + data_len + 8) > msg_len)) {
dev->net->stats.rx_frame_errors++;
- devdbg(dev, "bad rndis message %d/%d/%d/%d, len %d",
- le32_to_cpu(hdr->msg_type),
- msg_len, data_offset, data_len, skb->len);
+ netdev_dbg(dev->net, "bad rndis message %d/%d/%d/%d, len %d\n",
+ le32_to_cpu(hdr->msg_type),
+ msg_len, data_offset, data_len, skb->len);
return 0;
}
skb_pull(skb, 8 + data_offset);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index fd19db0d250..e85c89c6706 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -313,20 +313,17 @@ static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
{
struct sockaddr *addr = p;
rtl8150_t *dev = netdev_priv(netdev);
- int i;
if (netif_running(netdev))
return -EBUSY;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
- dbg("%s: Setting MAC address to ", netdev->name);
- for (i = 0; i < 5; i++)
- dbg("%02X:", netdev->dev_addr[i]);
- dbg("%02X\n", netdev->dev_addr[i]);
+ dbg("%s: Setting MAC address to %pM\n", netdev->name, netdev->dev_addr);
/* Set the IDR registers. */
set_registers(dev, IDR, netdev->addr_len, netdev->dev_addr);
#ifdef EEPROM_WRITE
{
+ int i;
u8 cr;
/* Get the CR contents. */
get_registers(dev, CR, 1, &cr);
@@ -714,7 +711,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
if (netdev->flags & IFF_PROMISC) {
dev->rx_creg |= cpu_to_le16(0x0001);
dev_info(&netdev->dev, "%s: promiscuous mode\n", netdev->name);
- } else if (netdev->mc_count ||
+ } else if (!netdev_mc_empty(netdev) ||
(netdev->flags & IFF_ALLMULTI)) {
dev->rx_creg &= cpu_to_le16(0xfffe);
dev->rx_creg |= cpu_to_le16(0x0002);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 0c3c738d741..df9179a1c93 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -78,7 +78,7 @@ static int smsc95xx_read_reg(struct usbnet *dev, u32 index, u32 *data)
00, index, buf, 4, USB_CTRL_GET_TIMEOUT);
if (unlikely(ret < 0))
- devwarn(dev, "Failed to read register index 0x%08x", index);
+ netdev_warn(dev->net, "Failed to read register index 0x%08x\n", index);
le32_to_cpus(buf);
*data = *buf;
@@ -106,7 +106,7 @@ static int smsc95xx_write_reg(struct usbnet *dev, u32 index, u32 data)
00, index, buf, 4, USB_CTRL_SET_TIMEOUT);
if (unlikely(ret < 0))
- devwarn(dev, "Failed to write register index 0x%08x", index);
+ netdev_warn(dev->net, "Failed to write register index 0x%08x\n", index);
kfree(buf);
@@ -138,7 +138,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
/* confirm MII not busy */
if (smsc95xx_phy_wait_not_busy(dev)) {
- devwarn(dev, "MII is busy in smsc95xx_mdio_read");
+ netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_read\n");
mutex_unlock(&dev->phy_mutex);
return -EIO;
}
@@ -150,7 +150,7 @@ static int smsc95xx_mdio_read(struct net_device *netdev, int phy_id, int idx)
smsc95xx_write_reg(dev, MII_ADDR, addr);
if (smsc95xx_phy_wait_not_busy(dev)) {
- devwarn(dev, "Timed out reading MII reg %02X", idx);
+ netdev_warn(dev->net, "Timed out reading MII reg %02X\n", idx);
mutex_unlock(&dev->phy_mutex);
return -EIO;
}
@@ -172,7 +172,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
/* confirm MII not busy */
if (smsc95xx_phy_wait_not_busy(dev)) {
- devwarn(dev, "MII is busy in smsc95xx_mdio_write");
+ netdev_warn(dev->net, "MII is busy in smsc95xx_mdio_write\n");
mutex_unlock(&dev->phy_mutex);
return;
}
@@ -187,7 +187,7 @@ static void smsc95xx_mdio_write(struct net_device *netdev, int phy_id, int idx,
smsc95xx_write_reg(dev, MII_ADDR, addr);
if (smsc95xx_phy_wait_not_busy(dev))
- devwarn(dev, "Timed out writing MII reg %02X", idx);
+ netdev_warn(dev->net, "Timed out writing MII reg %02X\n", idx);
mutex_unlock(&dev->phy_mutex);
}
@@ -205,7 +205,7 @@ static int smsc95xx_wait_eeprom(struct usbnet *dev)
} while (!time_after(jiffies, start_time + HZ));
if (val & (E2P_CMD_TIMEOUT_ | E2P_CMD_BUSY_)) {
- devwarn(dev, "EEPROM read operation timeout");
+ netdev_warn(dev->net, "EEPROM read operation timeout\n");
return -EIO;
}
@@ -226,7 +226,7 @@ static int smsc95xx_eeprom_confirm_not_busy(struct usbnet *dev)
udelay(40);
} while (!time_after(jiffies, start_time + HZ));
- devwarn(dev, "EEPROM is busy");
+ netdev_warn(dev->net, "EEPROM is busy\n");
return -EIO;
}
@@ -308,7 +308,7 @@ static void smsc95xx_async_cmd_callback(struct urb *urb)
int status = urb->status;
if (status < 0)
- devwarn(dev, "async callback failed with %d", status);
+ netdev_warn(dev->net, "async callback failed with %d\n", status);
kfree(usb_context);
usb_free_urb(urb);
@@ -323,13 +323,13 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb) {
- devwarn(dev, "Error allocating URB");
+ netdev_warn(dev->net, "Error allocating URB\n");
return -ENOMEM;
}
usb_context = kmalloc(sizeof(struct usb_context), GFP_ATOMIC);
if (usb_context == NULL) {
- devwarn(dev, "Error allocating control msg");
+ netdev_warn(dev->net, "Error allocating control msg\n");
usb_free_urb(urb);
return -ENOMEM;
}
@@ -348,7 +348,8 @@ static int smsc95xx_write_reg_async(struct usbnet *dev, u16 index, u32 *data)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status < 0) {
- devwarn(dev, "Error submitting control msg, sts=%d", status);
+ netdev_warn(dev->net, "Error submitting control msg, sts=%d\n",
+ status);
kfree(usb_context);
usb_free_urb(urb);
}
@@ -375,46 +376,32 @@ static void smsc95xx_set_multicast(struct net_device *netdev)
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
if (dev->net->flags & IFF_PROMISC) {
- if (netif_msg_drv(dev))
- devdbg(dev, "promiscuous mode enabled");
+ netif_dbg(dev, drv, dev->net, "promiscuous mode enabled\n");
pdata->mac_cr |= MAC_CR_PRMS_;
pdata->mac_cr &= ~(MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
} else if (dev->net->flags & IFF_ALLMULTI) {
- if (netif_msg_drv(dev))
- devdbg(dev, "receive all multicast enabled");
+ netif_dbg(dev, drv, dev->net, "receive all multicast enabled\n");
pdata->mac_cr |= MAC_CR_MCPAS_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_HPFILT_);
- } else if (dev->net->mc_count > 0) {
- struct dev_mc_list *mc_list = dev->net->mc_list;
- int count = 0;
+ } else if (!netdev_mc_empty(dev->net)) {
+ struct dev_mc_list *mc_list;
pdata->mac_cr |= MAC_CR_HPFILT_;
pdata->mac_cr &= ~(MAC_CR_PRMS_ | MAC_CR_MCPAS_);
- while (mc_list) {
- count++;
- if (mc_list->dmi_addrlen == ETH_ALEN) {
- u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
- u32 mask = 0x01 << (bitnum & 0x1F);
- if (bitnum & 0x20)
- hash_hi |= mask;
- else
- hash_lo |= mask;
- } else {
- devwarn(dev, "dmi_addrlen != 6");
- }
- mc_list = mc_list->next;
+ netdev_for_each_mc_addr(mc_list, netdev) {
+ u32 bitnum = smsc95xx_hash(mc_list->dmi_addr);
+ u32 mask = 0x01 << (bitnum & 0x1F);
+ if (bitnum & 0x20)
+ hash_hi |= mask;
+ else
+ hash_lo |= mask;
}
- if (count != ((u32)dev->net->mc_count))
- devwarn(dev, "mc_count != dev->mc_count");
-
- if (netif_msg_drv(dev))
- devdbg(dev, "HASHH=0x%08X, HASHL=0x%08X", hash_hi,
- hash_lo);
+ netif_dbg(dev, drv, dev->net, "HASHH=0x%08X, HASHL=0x%08X\n",
+ hash_hi, hash_lo);
} else {
- if (netif_msg_drv(dev))
- devdbg(dev, "receive own packets only");
+ netif_dbg(dev, drv, dev->net, "receive own packets only\n");
pdata->mac_cr &=
~(MAC_CR_PRMS_ | MAC_CR_MCPAS_ | MAC_CR_HPFILT_);
}
@@ -434,7 +421,7 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
int ret = smsc95xx_read_reg(dev, AFC_CFG, &afc_cfg);
if (ret < 0) {
- devwarn(dev, "error reading AFC_CFG");
+ netdev_warn(dev->net, "error reading AFC_CFG\n");
return;
}
@@ -451,13 +438,11 @@ static void smsc95xx_phy_update_flowcontrol(struct usbnet *dev, u8 duplex,
else
afc_cfg &= ~0xF;
- if (netif_msg_link(dev))
- devdbg(dev, "rx pause %s, tx pause %s",
- (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
- (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
+ netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s\n",
+ cap & FLOW_CTRL_RX ? "enabled" : "disabled",
+ cap & FLOW_CTRL_TX ? "enabled" : "disabled");
} else {
- if (netif_msg_link(dev))
- devdbg(dev, "half duplex");
+ netif_dbg(dev, link, dev->net, "half duplex\n");
flow = 0;
afc_cfg |= 0xF;
}
@@ -485,9 +470,8 @@ static int smsc95xx_link_reset(struct usbnet *dev)
lcladv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_ADVERTISE);
rmtadv = smsc95xx_mdio_read(dev->net, mii->phy_id, MII_LPA);
- if (netif_msg_link(dev))
- devdbg(dev, "speed: %d duplex: %d lcladv: %04x rmtadv: %04x",
- ecmd.speed, ecmd.duplex, lcladv, rmtadv);
+ netif_dbg(dev, link, dev->net, "speed: %d duplex: %d lcladv: %04x rmtadv: %04x\n",
+ ecmd.speed, ecmd.duplex, lcladv, rmtadv);
spin_lock_irqsave(&pdata->mac_cr_lock, flags);
if (ecmd.duplex != DUPLEX_FULL) {
@@ -511,20 +495,21 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb)
u32 intdata;
if (urb->actual_length != 4) {
- devwarn(dev, "unexpected urb length %d", urb->actual_length);
+ netdev_warn(dev->net, "unexpected urb length %d\n",
+ urb->actual_length);
return;
}
memcpy(&intdata, urb->transfer_buffer, 4);
le32_to_cpus(&intdata);
- if (netif_msg_link(dev))
- devdbg(dev, "intdata: 0x%08X", intdata);
+ netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata);
if (intdata & INT_ENP_PHY_INT_)
usbnet_defer_kevent(dev, EVENT_LINK_RESET);
else
- devwarn(dev, "unexpected interrupt, intdata=0x%08X", intdata);
+ netdev_warn(dev->net, "unexpected interrupt, intdata=0x%08X\n",
+ intdata);
}
/* Enable or disable Tx & Rx checksum offload engines */
@@ -534,7 +519,7 @@ static int smsc95xx_set_csums(struct usbnet *dev)
u32 read_buf;
int ret = smsc95xx_read_reg(dev, COE_CR, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read COE_CR: %d", ret);
+ netdev_warn(dev->net, "Failed to read COE_CR: %d\n", ret);
return ret;
}
@@ -550,12 +535,11 @@ static int smsc95xx_set_csums(struct usbnet *dev)
ret = smsc95xx_write_reg(dev, COE_CR, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write COE_CR: %d", ret);
+ netdev_warn(dev->net, "Failed to write COE_CR: %d\n", ret);
return ret;
}
- if (netif_msg_hw(dev))
- devdbg(dev, "COE_CR = 0x%08x", read_buf);
+ netif_dbg(dev, hw, dev->net, "COE_CR = 0x%08x\n", read_buf);
return 0;
}
@@ -580,8 +564,8 @@ static int smsc95xx_ethtool_set_eeprom(struct net_device *netdev,
struct usbnet *dev = netdev_priv(netdev);
if (ee->magic != LAN95XX_EEPROM_MAGIC) {
- devwarn(dev, "EEPROM: magic value mismatch, magic = 0x%x",
- ee->magic);
+ netdev_warn(dev->net, "EEPROM: magic value mismatch, magic = 0x%x\n",
+ ee->magic);
return -EINVAL;
}
@@ -659,16 +643,14 @@ static void smsc95xx_init_mac_address(struct usbnet *dev)
dev->net->dev_addr) == 0) {
if (is_valid_ether_addr(dev->net->dev_addr)) {
/* eeprom values are valid so use them */
- if (netif_msg_ifup(dev))
- devdbg(dev, "MAC address read from EEPROM");
+ netif_dbg(dev, ifup, dev->net, "MAC address read from EEPROM\n");
return;
}
}
/* no eeprom, or eeprom values are invalid. generate random MAC */
random_ether_addr(dev->net->dev_addr);
- if (netif_msg_ifup(dev))
- devdbg(dev, "MAC address set to random_ether_addr");
+ netif_dbg(dev, ifup, dev->net, "MAC address set to random_ether_addr\n");
}
static int smsc95xx_set_mac_address(struct usbnet *dev)
@@ -680,13 +662,13 @@ static int smsc95xx_set_mac_address(struct usbnet *dev)
ret = smsc95xx_write_reg(dev, ADDRL, addr_lo);
if (ret < 0) {
- devwarn(dev, "Failed to write ADDRL: %d", ret);
+ netdev_warn(dev->net, "Failed to write ADDRL: %d\n", ret);
return ret;
}
ret = smsc95xx_write_reg(dev, ADDRH, addr_hi);
if (ret < 0) {
- devwarn(dev, "Failed to write ADDRH: %d", ret);
+ netdev_warn(dev->net, "Failed to write ADDRH: %d\n", ret);
return ret;
}
@@ -747,8 +729,7 @@ static int smsc95xx_phy_initialize(struct usbnet *dev)
PHY_INT_MASK_DEFAULT_);
mii_nway_restart(&dev->mii);
- if (netif_msg_ifup(dev))
- devdbg(dev, "phy initialised successfully");
+ netif_dbg(dev, ifup, dev->net, "phy initialised successfully\n");
return 0;
}
@@ -759,14 +740,13 @@ static int smsc95xx_reset(struct usbnet *dev)
u32 read_buf, write_buf, burst_cap;
int ret = 0, timeout;
- if (netif_msg_ifup(dev))
- devdbg(dev, "entering smsc95xx_reset");
+ netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
write_buf = HW_CFG_LRST_;
ret = smsc95xx_write_reg(dev, HW_CFG, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write HW_CFG_LRST_ bit in HW_CFG "
- "register, ret = %d", ret);
+ netdev_warn(dev->net, "Failed to write HW_CFG_LRST_ bit in HW_CFG register, ret = %d\n",
+ ret);
return ret;
}
@@ -774,7 +754,7 @@ static int smsc95xx_reset(struct usbnet *dev)
do {
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
msleep(10);
@@ -782,14 +762,14 @@ static int smsc95xx_reset(struct usbnet *dev)
} while ((read_buf & HW_CFG_LRST_) && (timeout < 100));
if (timeout >= 100) {
- devwarn(dev, "timeout waiting for completion of Lite Reset");
+ netdev_warn(dev->net, "timeout waiting for completion of Lite Reset\n");
return ret;
}
write_buf = PM_CTL_PHY_RST_;
ret = smsc95xx_write_reg(dev, PM_CTRL, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write PM_CTRL: %d", ret);
+ netdev_warn(dev->net, "Failed to write PM_CTRL: %d\n", ret);
return ret;
}
@@ -797,7 +777,7 @@ static int smsc95xx_reset(struct usbnet *dev)
do {
ret = smsc95xx_read_reg(dev, PM_CTRL, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read PM_CTRL: %d", ret);
+ netdev_warn(dev->net, "Failed to read PM_CTRL: %d\n", ret);
return ret;
}
msleep(10);
@@ -805,7 +785,7 @@ static int smsc95xx_reset(struct usbnet *dev)
} while ((read_buf & PM_CTL_PHY_RST_) && (timeout < 100));
if (timeout >= 100) {
- devwarn(dev, "timeout waiting for PHY Reset");
+ netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
return ret;
}
@@ -815,35 +795,35 @@ static int smsc95xx_reset(struct usbnet *dev)
if (ret < 0)
return ret;
- if (netif_msg_ifup(dev))
- devdbg(dev, "MAC Address: %pM", dev->net->dev_addr);
+ netif_dbg(dev, ifup, dev->net,
+ "MAC Address: %pM\n", dev->net->dev_addr);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from HW_CFG : 0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from HW_CFG : 0x%08x\n", read_buf);
read_buf |= HW_CFG_BIR_;
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write HW_CFG_BIR_ bit in HW_CFG "
- "register, ret = %d", ret);
+ netdev_warn(dev->net, "Failed to write HW_CFG_BIR_ bit in HW_CFG register, ret = %d\n",
+ ret);
return ret;
}
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from HW_CFG after writing "
- "HW_CFG_BIR_: 0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from HW_CFG after writing HW_CFG_BIR_: 0x%08x\n",
+ read_buf);
if (!turbo_mode) {
burst_cap = 0;
@@ -856,47 +836,47 @@ static int smsc95xx_reset(struct usbnet *dev)
dev->rx_urb_size = DEFAULT_FS_BURST_CAP_SIZE;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "rx_urb_size=%ld", (ulong)dev->rx_urb_size);
+ netif_dbg(dev, ifup, dev->net,
+ "rx_urb_size=%ld\n", (ulong)dev->rx_urb_size);
ret = smsc95xx_write_reg(dev, BURST_CAP, burst_cap);
if (ret < 0) {
- devwarn(dev, "Failed to write BURST_CAP: %d", ret);
+ netdev_warn(dev->net, "Failed to write BURST_CAP: %d\n", ret);
return ret;
}
ret = smsc95xx_read_reg(dev, BURST_CAP, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read BURST_CAP: %d", ret);
+ netdev_warn(dev->net, "Failed to read BURST_CAP: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from BURST_CAP after writing: 0x%08x",
- read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from BURST_CAP after writing: 0x%08x\n",
+ read_buf);
read_buf = DEFAULT_BULK_IN_DELAY;
ret = smsc95xx_write_reg(dev, BULK_IN_DLY, read_buf);
if (ret < 0) {
- devwarn(dev, "ret = %d", ret);
+ netdev_warn(dev->net, "ret = %d\n", ret);
return ret;
}
ret = smsc95xx_read_reg(dev, BULK_IN_DLY, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read BULK_IN_DLY: %d", ret);
+ netdev_warn(dev->net, "Failed to read BULK_IN_DLY: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from BULK_IN_DLY after writing: "
- "0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from BULK_IN_DLY after writing: 0x%08x\n",
+ read_buf);
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from HW_CFG: 0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from HW_CFG: 0x%08x\n", read_buf);
if (turbo_mode)
read_buf |= (HW_CFG_MEF_ | HW_CFG_BCE_);
@@ -908,41 +888,41 @@ static int smsc95xx_reset(struct usbnet *dev)
ret = smsc95xx_write_reg(dev, HW_CFG, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write HW_CFG register, ret=%d", ret);
+ netdev_warn(dev->net, "Failed to write HW_CFG register, ret=%d\n",
+ ret);
return ret;
}
ret = smsc95xx_read_reg(dev, HW_CFG, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read HW_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to read HW_CFG: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "Read Value from HW_CFG after writing: 0x%08x",
- read_buf);
+ netif_dbg(dev, ifup, dev->net,
+ "Read Value from HW_CFG after writing: 0x%08x\n", read_buf);
write_buf = 0xFFFFFFFF;
ret = smsc95xx_write_reg(dev, INT_STS, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write INT_STS register, ret=%d", ret);
+ netdev_warn(dev->net, "Failed to write INT_STS register, ret=%d\n",
+ ret);
return ret;
}
ret = smsc95xx_read_reg(dev, ID_REV, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read ID_REV: %d", ret);
+ netdev_warn(dev->net, "Failed to read ID_REV: %d\n", ret);
return ret;
}
- if (netif_msg_ifup(dev))
- devdbg(dev, "ID_REV = 0x%08x", read_buf);
+ netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
/* Configure GPIO pins as LED outputs */
write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
LED_GPIO_CFG_FDX_LED;
ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write LED_GPIO_CFG register, ret=%d",
- ret);
+ netdev_warn(dev->net, "Failed to write LED_GPIO_CFG register, ret=%d\n",
+ ret);
return ret;
}
@@ -950,21 +930,21 @@ static int smsc95xx_reset(struct usbnet *dev)
write_buf = 0;
ret = smsc95xx_write_reg(dev, FLOW, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write FLOW: %d", ret);
+ netdev_warn(dev->net, "Failed to write FLOW: %d\n", ret);
return ret;
}
read_buf = AFC_CFG_DEFAULT;
ret = smsc95xx_write_reg(dev, AFC_CFG, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write AFC_CFG: %d", ret);
+ netdev_warn(dev->net, "Failed to write AFC_CFG: %d\n", ret);
return ret;
}
/* Don't need mac_cr_lock during initialisation */
ret = smsc95xx_read_reg(dev, MAC_CR, &pdata->mac_cr);
if (ret < 0) {
- devwarn(dev, "Failed to read MAC_CR: %d", ret);
+ netdev_warn(dev->net, "Failed to read MAC_CR: %d\n", ret);
return ret;
}
@@ -973,7 +953,7 @@ static int smsc95xx_reset(struct usbnet *dev)
write_buf = (u32)ETH_P_8021Q;
ret = smsc95xx_write_reg(dev, VLAN1, write_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write VAN1: %d", ret);
+ netdev_warn(dev->net, "Failed to write VAN1: %d\n", ret);
return ret;
}
@@ -981,7 +961,7 @@ static int smsc95xx_reset(struct usbnet *dev)
ethtool_op_set_tx_hw_csum(netdev, pdata->use_tx_csum);
ret = smsc95xx_set_csums(dev);
if (ret < 0) {
- devwarn(dev, "Failed to set csum offload: %d", ret);
+ netdev_warn(dev->net, "Failed to set csum offload: %d\n", ret);
return ret;
}
@@ -992,7 +972,7 @@ static int smsc95xx_reset(struct usbnet *dev)
ret = smsc95xx_read_reg(dev, INT_EP_CTL, &read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to read INT_EP_CTL: %d", ret);
+ netdev_warn(dev->net, "Failed to read INT_EP_CTL: %d\n", ret);
return ret;
}
@@ -1001,15 +981,14 @@ static int smsc95xx_reset(struct usbnet *dev)
ret = smsc95xx_write_reg(dev, INT_EP_CTL, read_buf);
if (ret < 0) {
- devwarn(dev, "Failed to write INT_EP_CTL: %d", ret);
+ netdev_warn(dev->net, "Failed to write INT_EP_CTL: %d\n", ret);
return ret;
}
smsc95xx_start_tx_path(dev);
smsc95xx_start_rx_path(dev);
- if (netif_msg_ifup(dev))
- devdbg(dev, "smsc95xx_reset, return 0");
+ netif_dbg(dev, ifup, dev->net, "smsc95xx_reset, return 0\n");
return 0;
}
@@ -1034,7 +1013,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
ret = usbnet_get_endpoints(dev, intf);
if (ret < 0) {
- devwarn(dev, "usbnet_get_endpoints failed: %d", ret);
+ netdev_warn(dev->net, "usbnet_get_endpoints failed: %d\n", ret);
return ret;
}
@@ -1043,7 +1022,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf)
pdata = (struct smsc95xx_priv *)(dev->data[0]);
if (!pdata) {
- devwarn(dev, "Unable to allocate struct smsc95xx_priv");
+ netdev_warn(dev->net, "Unable to allocate struct smsc95xx_priv\n");
return -ENOMEM;
}
@@ -1066,8 +1045,7 @@ static void smsc95xx_unbind(struct usbnet *dev, struct usb_interface *intf)
{
struct smsc95xx_priv *pdata = (struct smsc95xx_priv *)(dev->data[0]);
if (pdata) {
- if (netif_msg_ifdown(dev))
- devdbg(dev, "free pdata");
+ netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
dev->data[0] = 0;
@@ -1101,8 +1079,8 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
align_count = (4 - ((size + NET_IP_ALIGN) % 4)) % 4;
if (unlikely(header & RX_STS_ES_)) {
- if (netif_msg_rx_err(dev))
- devdbg(dev, "Error header=0x%08x", header);
+ netif_dbg(dev, rx_err, dev->net,
+ "Error header=0x%08x\n", header);
dev->net->stats.rx_errors++;
dev->net->stats.rx_dropped++;
@@ -1119,9 +1097,8 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
} else {
/* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
if (unlikely(size > (ETH_FRAME_LEN + 12))) {
- if (netif_msg_rx_err(dev))
- devdbg(dev, "size err header=0x%08x",
- header);
+ netif_dbg(dev, rx_err, dev->net,
+ "size err header=0x%08x\n", header);
return 0;
}
@@ -1137,7 +1114,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
ax_skb = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!ax_skb)) {
- devwarn(dev, "Error allocating skb");
+ netdev_warn(dev->net, "Error allocating skb\n");
return 0;
}
@@ -1161,7 +1138,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
}
if (unlikely(skb->len < 0)) {
- devwarn(dev, "invalid rx length<0 %d", skb->len);
+ netdev_warn(dev->net, "invalid rx length<0 %d\n", skb->len);
return 0;
}
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 035fab04c0a..17b6a62d206 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -242,13 +242,13 @@ void usbnet_skb_return (struct usbnet *dev, struct sk_buff *skb)
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb->len;
- if (netif_msg_rx_status (dev))
- devdbg (dev, "< rx, len %zu, type 0x%x",
- skb->len + sizeof (struct ethhdr), skb->protocol);
+ netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
+ skb->len + sizeof (struct ethhdr), skb->protocol);
memset (skb->cb, 0, sizeof (struct skb_data));
status = netif_rx (skb);
- if (status != NET_RX_SUCCESS && netif_msg_rx_err (dev))
- devdbg (dev, "netif_rx status %d", status);
+ if (status != NET_RX_SUCCESS)
+ netif_dbg(dev, rx_err, dev->net,
+ "netif_rx status %d\n", status);
}
EXPORT_SYMBOL_GPL(usbnet_skb_return);
@@ -313,9 +313,9 @@ void usbnet_defer_kevent (struct usbnet *dev, int work)
{
set_bit (work, &dev->flags);
if (!schedule_work (&dev->kevent))
- deverr (dev, "kevent %d may have been dropped", work);
+ netdev_err(dev->net, "kevent %d may have been dropped\n", work);
else
- devdbg (dev, "kevent %d scheduled", work);
+ netdev_dbg(dev->net, "kevent %d scheduled\n", work);
}
EXPORT_SYMBOL_GPL(usbnet_defer_kevent);
@@ -332,8 +332,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
size_t size = dev->rx_urb_size;
if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
- if (netif_msg_rx_err (dev))
- devdbg (dev, "no rx skb");
+ netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
usb_free_urb (urb);
return;
@@ -363,21 +362,19 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
break;
case -ENODEV:
- if (netif_msg_ifdown (dev))
- devdbg (dev, "device gone");
+ netif_dbg(dev, ifdown, dev->net, "device gone\n");
netif_device_detach (dev->net);
break;
default:
- if (netif_msg_rx_err (dev))
- devdbg (dev, "rx submit, %d", retval);
+ netif_dbg(dev, rx_err, dev->net,
+ "rx submit, %d\n", retval);
tasklet_schedule (&dev->bh);
break;
case 0:
__skb_queue_tail (&dev->rxq, skb);
}
} else {
- if (netif_msg_ifdown (dev))
- devdbg (dev, "rx: stopped");
+ netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
retval = -ENOLINK;
}
spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
@@ -400,8 +397,7 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
if (skb->len)
usbnet_skb_return (dev, skb);
else {
- if (netif_msg_rx_err (dev))
- devdbg (dev, "drop");
+ netif_dbg(dev, rx_err, dev->net, "drop\n");
error:
dev->net->stats.rx_errors++;
skb_queue_tail (&dev->done, skb);
@@ -428,8 +424,8 @@ static void rx_complete (struct urb *urb)
entry->state = rx_cleanup;
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
- if (netif_msg_rx_err (dev))
- devdbg (dev, "rx length %d", skb->len);
+ netif_dbg(dev, rx_err, dev->net,
+ "rx length %d\n", skb->len);
}
break;
@@ -446,8 +442,8 @@ static void rx_complete (struct urb *urb)
/* software-driven interface shutdown */
case -ECONNRESET: /* async unlink */
case -ESHUTDOWN: /* hardware gone */
- if (netif_msg_ifdown (dev))
- devdbg (dev, "rx shutdown, code %d", urb_status);
+ netif_dbg(dev, ifdown, dev->net,
+ "rx shutdown, code %d\n", urb_status);
goto block;
/* we get controller i/o faults during khubd disconnect() delays.
@@ -460,8 +456,8 @@ static void rx_complete (struct urb *urb)
dev->net->stats.rx_errors++;
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
- if (netif_msg_link (dev))
- devdbg (dev, "rx throttle %d", urb_status);
+ netif_dbg(dev, link, dev->net,
+ "rx throttle %d\n", urb_status);
}
block:
entry->state = rx_cleanup;
@@ -477,8 +473,7 @@ block:
default:
entry->state = rx_cleanup;
dev->net->stats.rx_errors++;
- if (netif_msg_rx_err (dev))
- devdbg (dev, "rx status %d", urb_status);
+ netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
break;
}
@@ -492,8 +487,7 @@ block:
}
usb_free_urb (urb);
}
- if (netif_msg_rx_err (dev))
- devdbg (dev, "no read resubmitted");
+ netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
static void intr_complete (struct urb *urb)
@@ -510,15 +504,15 @@ static void intr_complete (struct urb *urb)
/* software-driven interface shutdown */
case -ENOENT: /* urb killed */
case -ESHUTDOWN: /* hardware gone */
- if (netif_msg_ifdown (dev))
- devdbg (dev, "intr shutdown, code %d", status);
+ netif_dbg(dev, ifdown, dev->net,
+ "intr shutdown, code %d\n", status);
return;
/* NOTE: not throttling like RX/TX, since this endpoint
* already polls infrequently
*/
default:
- devdbg (dev, "intr status %d", status);
+ netdev_dbg(dev->net, "intr status %d\n", status);
break;
}
@@ -527,8 +521,9 @@ static void intr_complete (struct urb *urb)
memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
status = usb_submit_urb (urb, GFP_ATOMIC);
- if (status != 0 && netif_msg_timer (dev))
- deverr(dev, "intr resubmit --> %d", status);
+ if (status != 0)
+ netif_err(dev, timer, dev->net,
+ "intr resubmit --> %d\n", status);
}
/*-------------------------------------------------------------------------*/
@@ -536,8 +531,7 @@ void usbnet_pause_rx(struct usbnet *dev)
{
set_bit(EVENT_RX_PAUSED, &dev->flags);
- if (netif_msg_rx_status(dev))
- devdbg(dev, "paused rx queue enabled");
+ netif_dbg(dev, rx_status, dev->net, "paused rx queue enabled\n");
}
EXPORT_SYMBOL_GPL(usbnet_pause_rx);
@@ -555,8 +549,8 @@ void usbnet_resume_rx(struct usbnet *dev)
tasklet_schedule(&dev->bh);
- if (netif_msg_rx_status(dev))
- devdbg(dev, "paused rx queue disabled, %d skbs requeued", num);
+ netif_dbg(dev, rx_status, dev->net,
+ "paused rx queue disabled, %d skbs requeued\n", num);
}
EXPORT_SYMBOL_GPL(usbnet_resume_rx);
@@ -589,7 +583,7 @@ static int unlink_urbs (struct usbnet *dev, struct sk_buff_head *q)
// these (async) unlinks complete immediately
retval = usb_unlink_urb (urb);
if (retval != -EINPROGRESS && retval != 0)
- devdbg (dev, "unlink urb err, %d", retval);
+ netdev_dbg(dev->net, "unlink urb err, %d\n", retval);
else
count++;
}
@@ -631,9 +625,8 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
&& !skb_queue_empty(&dev->done)) {
schedule_timeout(UNLINK_TIMEOUT_MS);
set_current_state(TASK_UNINTERRUPTIBLE);
- if (netif_msg_ifdown(dev))
- devdbg(dev, "waited for %d urb completions",
- temp);
+ netif_dbg(dev, ifdown, dev->net,
+ "waited for %d urb completions\n", temp);
}
set_current_state(TASK_RUNNING);
dev->wait = NULL;
@@ -648,22 +641,21 @@ int usbnet_stop (struct net_device *net)
netif_stop_queue (net);
- if (netif_msg_ifdown (dev))
- devinfo (dev, "stop stats: rx/tx %ld/%ld, errs %ld/%ld",
- net->stats.rx_packets, net->stats.tx_packets,
- net->stats.rx_errors, net->stats.tx_errors
- );
+ netif_info(dev, ifdown, dev->net,
+ "stop stats: rx/tx %ld/%ld, errs %ld/%ld\n",
+ net->stats.rx_packets, net->stats.tx_packets,
+ net->stats.rx_errors, net->stats.tx_errors);
/* allow minidriver to stop correctly (wireless devices to turn off
* radio etc) */
if (info->stop) {
retval = info->stop(dev);
- if (retval < 0 && netif_msg_ifdown(dev))
- devinfo(dev,
- "stop fail (%d) usbnet usb-%s-%s, %s",
- retval,
- dev->udev->bus->bus_name, dev->udev->devpath,
- info->description);
+ if (retval < 0)
+ netif_info(dev, ifdown, dev->net,
+ "stop fail (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name, dev->udev->devpath,
+ info->description);
}
if (!(info->flags & FLAG_AVOID_UNLINK_URBS))
@@ -702,30 +694,29 @@ int usbnet_open (struct net_device *net)
struct driver_info *info = dev->driver_info;
if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
- if (netif_msg_ifup (dev))
- devinfo (dev,
- "resumption fail (%d) usbnet usb-%s-%s, %s",
- retval,
- dev->udev->bus->bus_name, dev->udev->devpath,
- info->description);
+ netif_info(dev, ifup, dev->net,
+ "resumption fail (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name,
+ dev->udev->devpath,
+ info->description);
goto done_nopm;
}
// put into "known safe" state
if (info->reset && (retval = info->reset (dev)) < 0) {
- if (netif_msg_ifup (dev))
- devinfo (dev,
- "open reset fail (%d) usbnet usb-%s-%s, %s",
- retval,
- dev->udev->bus->bus_name, dev->udev->devpath,
- info->description);
+ netif_info(dev, ifup, dev->net,
+ "open reset fail (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name,
+ dev->udev->devpath,
+ info->description);
goto done;
}
// insist peer be connected
if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
- if (netif_msg_ifup (dev))
- devdbg (dev, "can't open; %d", retval);
+ netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
goto done;
}
@@ -733,34 +724,23 @@ int usbnet_open (struct net_device *net)
if (dev->interrupt) {
retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
if (retval < 0) {
- if (netif_msg_ifup (dev))
- deverr (dev, "intr submit %d", retval);
+ netif_err(dev, ifup, dev->net,
+ "intr submit %d\n", retval);
goto done;
}
}
netif_start_queue (net);
- if (netif_msg_ifup (dev)) {
- char *framing;
-
- if (dev->driver_info->flags & FLAG_FRAMING_NC)
- framing = "NetChip";
- else if (dev->driver_info->flags & FLAG_FRAMING_GL)
- framing = "GeneSys";
- else if (dev->driver_info->flags & FLAG_FRAMING_Z)
- framing = "Zaurus";
- else if (dev->driver_info->flags & FLAG_FRAMING_RN)
- framing = "RNDIS";
- else if (dev->driver_info->flags & FLAG_FRAMING_AX)
- framing = "ASIX";
- else
- framing = "simple";
-
- devinfo (dev, "open: enable queueing "
- "(rx %d, tx %d) mtu %d %s framing",
- (int)RX_QLEN (dev), (int)TX_QLEN (dev), dev->net->mtu,
- framing);
- }
+ netif_info(dev, ifup, dev->net,
+ "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
+ (int)RX_QLEN(dev), (int)TX_QLEN(dev),
+ dev->net->mtu,
+ (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
+ (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
+ (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
+ (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
+ (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
+ "simple");
// delay posting reads until we're fully open
tasklet_schedule (&dev->bh);
@@ -771,6 +751,7 @@ int usbnet_open (struct net_device *net)
usb_autopm_put_interface(dev->intf);
}
return retval;
+
done:
usb_autopm_put_interface(dev->intf);
done_nopm:
@@ -908,8 +889,8 @@ kevent (struct work_struct *work)
status != -ESHUTDOWN) {
if (netif_msg_tx_err (dev))
fail_pipe:
- deverr (dev, "can't clear tx halt, status %d",
- status);
+ netdev_err(dev->net, "can't clear tx halt, status %d\n",
+ status);
} else {
clear_bit (EVENT_TX_HALT, &dev->flags);
if (status != -ESHUTDOWN)
@@ -928,8 +909,8 @@ fail_pipe:
status != -ESHUTDOWN) {
if (netif_msg_rx_err (dev))
fail_halt:
- deverr (dev, "can't clear rx halt, status %d",
- status);
+ netdev_err(dev->net, "can't clear rx halt, status %d\n",
+ status);
} else {
clear_bit (EVENT_RX_HALT, &dev->flags);
tasklet_schedule (&dev->bh);
@@ -967,18 +948,18 @@ fail_lowmem:
if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
usb_autopm_put_interface(dev->intf);
skip_reset:
- devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
- retval,
- dev->udev->bus->bus_name, dev->udev->devpath,
- info->description);
+ netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
+ retval,
+ dev->udev->bus->bus_name,
+ dev->udev->devpath,
+ info->description);
} else {
usb_autopm_put_interface(dev->intf);
}
}
if (dev->flags)
- devdbg (dev, "kevent done, flags = 0x%lx",
- dev->flags);
+ netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}
/*-------------------------------------------------------------------------*/
@@ -1014,15 +995,14 @@ static void tx_complete (struct urb *urb)
if (!timer_pending (&dev->delay)) {
mod_timer (&dev->delay,
jiffies + THROTTLE_JIFFIES);
- if (netif_msg_link (dev))
- devdbg (dev, "tx throttle %d",
- urb->status);
+ netif_dbg(dev, link, dev->net,
+ "tx throttle %d\n", urb->status);
}
netif_stop_queue (dev->net);
break;
default:
- if (netif_msg_tx_err (dev))
- devdbg (dev, "tx err %d", entry->urb->status);
+ netif_dbg(dev, tx_err, dev->net,
+ "tx err %d\n", entry->urb->status);
break;
}
}
@@ -1064,16 +1044,14 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
if (info->tx_fixup) {
skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
if (!skb) {
- if (netif_msg_tx_err (dev))
- devdbg (dev, "can't tx_fixup skb");
+ netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
goto drop;
}
}
length = skb->len;
if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
- if (netif_msg_tx_err (dev))
- devdbg (dev, "no urb");
+ netif_dbg(dev, tx_err, dev->net, "no urb\n");
goto drop;
}
@@ -1113,7 +1091,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
/* no use to process more packets */
netif_stop_queue(net);
spin_unlock_irqrestore(&dev->txq.lock, flags);
- devdbg(dev, "Delaying transmission for resumption");
+ netdev_dbg(dev->net, "Delaying transmission for resumption\n");
goto deferred;
}
#endif
@@ -1126,8 +1104,8 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
break;
default:
usb_autopm_put_interface_async(dev->intf);
- if (netif_msg_tx_err (dev))
- devdbg (dev, "tx: submit urb err %d", retval);
+ netif_dbg(dev, tx_err, dev->net,
+ "tx: submit urb err %d\n", retval);
break;
case 0:
net->trans_start = jiffies;
@@ -1138,17 +1116,15 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
spin_unlock_irqrestore (&dev->txq.lock, flags);
if (retval) {
- if (netif_msg_tx_err (dev))
- devdbg (dev, "drop, code %d", retval);
+ netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
dev->net->stats.tx_dropped++;
if (skb)
dev_kfree_skb_any (skb);
usb_free_urb (urb);
- } else if (netif_msg_tx_queued (dev)) {
- devdbg (dev, "> tx, len %d, type 0x%x",
- length, skb->protocol);
- }
+ } else
+ netif_dbg(dev, tx_queued, dev->net,
+ "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
@@ -1179,7 +1155,7 @@ static void usbnet_bh (unsigned long param)
dev_kfree_skb (skb);
continue;
default:
- devdbg (dev, "bogus skb state %d", entry->state);
+ netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
}
}
@@ -1207,9 +1183,10 @@ static void usbnet_bh (unsigned long param)
if (urb != NULL)
rx_submit (dev, urb, GFP_ATOMIC);
}
- if (temp != dev->rxq.qlen && netif_msg_link (dev))
- devdbg (dev, "rxqlen %d --> %d",
- temp, dev->rxq.qlen);
+ if (temp != dev->rxq.qlen)
+ netif_dbg(dev, link, dev->net,
+ "rxqlen %d --> %d\n",
+ temp, dev->rxq.qlen);
if (dev->rxq.qlen < qlen)
tasklet_schedule (&dev->bh);
}
@@ -1240,11 +1217,10 @@ void usbnet_disconnect (struct usb_interface *intf)
xdev = interface_to_usbdev (intf);
- if (netif_msg_probe (dev))
- devinfo (dev, "unregister '%s' usb-%s-%s, %s",
- intf->dev.driver->name,
- xdev->bus->bus_name, xdev->devpath,
- dev->driver_info->description);
+ netif_info(dev, probe, dev->net, "unregister '%s' usb-%s-%s, %s\n",
+ intf->dev.driver->name,
+ xdev->bus->bus_name, xdev->devpath,
+ dev->driver_info->description);
net = dev->net;
unregister_netdev (net);
@@ -1407,12 +1383,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
status = register_netdev (net);
if (status)
goto out3;
- if (netif_msg_probe (dev))
- devinfo (dev, "register '%s' at usb-%s-%s, %s, %pM",
- udev->dev.driver->name,
- xdev->bus->bus_name, xdev->devpath,
- dev->driver_info->description,
- net->dev_addr);
+ netif_info(dev, probe, dev->net,
+ "register '%s' at usb-%s-%s, %s, %pM\n",
+ udev->dev.driver->name,
+ xdev->bus->bus_name, xdev->devpath,
+ dev->driver_info->description,
+ net->dev_addr);
// ok, it's ready to go.
usb_set_intfdata (udev, dev);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 3a15de56df9..b583d4968ad 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -34,7 +34,7 @@ struct veth_net_stats {
struct veth_priv {
struct net_device *peer;
- struct veth_net_stats *stats;
+ struct veth_net_stats __percpu *stats;
unsigned ip_summed;
};
@@ -263,7 +263,7 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev)
{
- struct veth_net_stats *stats;
+ struct veth_net_stats __percpu *stats;
struct veth_priv *priv;
stats = alloc_percpu(struct veth_net_stats);
@@ -333,19 +333,17 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
struct veth_priv *priv;
char ifname[IFNAMSIZ];
struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
+ struct ifinfomsg *ifmp;
struct net *net;
/*
* create and register peer first
- *
- * struct ifinfomsg is at the head of VETH_INFO_PEER, but we
- * skip it since no info from it is useful yet
*/
-
if (data != NULL && data[VETH_INFO_PEER] != NULL) {
struct nlattr *nla_peer;
nla_peer = data[VETH_INFO_PEER];
+ ifmp = nla_data(nla_peer);
err = nla_parse(peer_tb, IFLA_MAX,
nla_data(nla_peer) + sizeof(struct ifinfomsg),
nla_len(nla_peer) - sizeof(struct ifinfomsg),
@@ -358,8 +356,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
return err;
tbp = peer_tb;
- } else
+ } else {
+ ifmp = NULL;
tbp = tb;
+ }
if (tbp[IFLA_IFNAME])
nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
@@ -387,6 +387,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
netif_carrier_off(peer);
+ err = rtnl_configure_link(peer, ifmp);
+ if (err < 0)
+ goto err_configure_peer;
+
/*
* register dev last
*
@@ -428,6 +432,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
err_register_dev:
/* nothing to do */
err_alloc_name:
+err_configure_peer:
unregister_netdevice(peer);
return err;
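
Note (editorial, illustrative only): the __percpu annotation added above marks pointers obtained from alloc_percpu(), so static checkers can verify they are only dereferenced through the per-CPU accessors. A minimal sketch of that pattern, with hypothetical names, assuming the update path runs with preemption disabled (as in softirq context):

#include <linux/percpu.h>
#include <linux/cpumask.h>

struct example_stats {
        unsigned long rx_packets;
};

static struct example_stats __percpu *example_alloc_stats(void)
{
        return alloc_percpu(struct example_stats);
}

static void example_count_rx(struct example_stats __percpu *stats)
{
        /* lockless update of this CPU's counter */
        this_cpu_ptr(stats)->rx_packets++;
}

static unsigned long example_sum_rx(struct example_stats __percpu *stats)
{
        unsigned long total = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                total += per_cpu_ptr(stats, cpu)->rx_packets;
        return total;
}
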
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 611b8043595..50f881aa393 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -267,7 +267,7 @@ enum rhine_quirks {
/* Beware of PCI posted writes */
#define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
-static const struct pci_device_id rhine_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
{ 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
{ 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
{ 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
@@ -1697,7 +1697,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
rx_mode = 0x1C;
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- } else if ((dev->mc_count > multicast_filter_limit) ||
+ } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
(dev->flags & IFF_ALLMULTI)) {
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
@@ -1705,10 +1705,9 @@ static void rhine_set_rx_mode(struct net_device *dev)
rx_mode = 0x0C;
} else {
struct dev_mc_list *mclist;
- int i;
+
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
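
Note (editorial, illustrative only): the multicast hunks above replace the open-coded dev->mc_list/dev->mc_count walk with netdev_mc_count() and netdev_for_each_mc_addr(); in this kernel generation the iterator still yields struct dev_mc_list with the address in dmi_addr. A hedged sketch of the resulting loop, with a hypothetical filter limit and no real hardware programming:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>

static void example_set_mc_filter(struct net_device *dev)
{
        struct dev_mc_list *mclist;

        if (netdev_mc_count(dev) > 32 || (dev->flags & IFF_ALLMULTI)) {
                /* too many entries: a real driver would accept all multicast */
                return;
        }

        netdev_for_each_mc_addr(mclist, dev) {
                int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

                /* a real driver would set bit_nr in its hash filter here */
                (void)bit_nr;
        }
}
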
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 317aa34b21c..3a486f3bad3 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -361,7 +361,7 @@ static struct velocity_info_tbl chip_info_table[] = {
* Describe the PCI device identifiers that we support in this
* device driver. Used for hotplug autoloading.
*/
-static const struct pci_device_id velocity_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(velocity_id_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_612X) },
{ }
};
@@ -1132,7 +1132,7 @@ static void velocity_set_multi(struct net_device *dev)
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
rx_mode = (RCR_AM | RCR_AB | RCR_PROM);
- } else if ((dev->mc_count > vptr->multicast_limit) ||
+ } else if ((netdev_mc_count(dev) > vptr->multicast_limit) ||
(dev->flags & IFF_ALLMULTI)) {
writel(0xffffffff, &regs->MARCAM[0]);
writel(0xffffffff, &regs->MARCAM[4]);
@@ -1141,9 +1141,11 @@ static void velocity_set_multi(struct net_device *dev)
int offset = MCAM_SIZE - vptr->multicast_limit;
mac_get_cam_mask(regs, vptr->mCAMmask);
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; i++, mclist = mclist->next) {
+ i = 0;
+ netdev_for_each_mc_addr(mclist, dev) {
mac_set_cam(regs, i + offset, mclist->dmi_addr);
vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
+ i++;
}
mac_set_cam_mask(regs, vptr->mCAMmask);
@@ -2698,10 +2700,8 @@ static void __devinit velocity_print_info(struct velocity_info *vptr)
struct net_device *dev = vptr->dev;
printk(KERN_INFO "%s: %s\n", dev->name, get_chip_name(vptr->chip_id));
- printk(KERN_INFO "%s: Ethernet Address: %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
- dev->name,
- dev->dev_addr[0], dev->dev_addr[1], dev->dev_addr[2],
- dev->dev_addr[3], dev->dev_addr[4], dev->dev_addr[5]);
+ printk(KERN_INFO "%s: Ethernet Address: %pM\n",
+ dev->name, dev->dev_addr);
}
static u32 velocity_get_link(struct net_device *dev)
@@ -2825,7 +2825,7 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
netif_napi_add(dev, &vptr->napi, velocity_poll, VELOCITY_NAPI_WEIGHT);
dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
- NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM;
+ NETIF_F_HW_VLAN_RX | NETIF_F_IP_CSUM | NETIF_F_SG;
ret = register_netdev(dev);
if (ret < 0)
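
Note (editorial, illustrative only): several hunks in this series switch PCI ID tables to DEFINE_PCI_DEVICE_TABLE(), which in kernels of this vintage expands to a const struct pci_device_id array placed in the __devinitconst section, replacing the ad-hoc __devinitdata markings removed above. A minimal sketch with placeholder IDs:

#include <linux/pci.h>
#include <linux/module.h>

#define EXAMPLE_VENDOR_ID       0x1234  /* placeholder */
#define EXAMPLE_DEVICE_ID       0x5678  /* placeholder */

static DEFINE_PCI_DEVICE_TABLE(example_pci_tbl) = {
        { PCI_DEVICE(EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID) },
        { }     /* terminating all-zero entry */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);
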
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ead30bd00c..25dc77ccbf5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -56,10 +56,6 @@ struct virtnet_info
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
- /* Receive & send queues. */
- struct sk_buff_head recv;
- struct sk_buff_head send;
-
/* Work struct for refilling if we run low on memory. */
struct delayed_work refill;
@@ -75,34 +71,44 @@ struct skb_vnet_hdr {
unsigned int num_sg;
};
+struct padded_vnet_hdr {
+ struct virtio_net_hdr hdr;
+ /*
+ * virtio_net_hdr should be in a separate sg buffer because of a
+ * QEMU bug, and the data sg buffer shares the same page with this header sg.
+ * This padding makes next sg 16 byte aligned after virtio_net_hdr.
+ */
+ char padding[6];
+};
+
static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
{
return (struct skb_vnet_hdr *)skb->cb;
}
-static void give_a_page(struct virtnet_info *vi, struct page *page)
-{
- page->private = (unsigned long)vi->pages;
- vi->pages = page;
-}
-
-static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
+/*
+ * private is used to chain pages for big packets; put the whole
+ * most recently used list at the head so it can be reused first
+ */
+static void give_pages(struct virtnet_info *vi, struct page *page)
{
- unsigned int i;
+ struct page *end;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
- give_a_page(vi, skb_shinfo(skb)->frags[i].page);
- skb_shinfo(skb)->nr_frags = 0;
- skb->data_len = 0;
+ /* Find end of list, sew whole thing into vi->pages. */
+ for (end = page; end->private; end = (struct page *)end->private);
+ end->private = (unsigned long)vi->pages;
+ vi->pages = page;
}
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
struct page *p = vi->pages;
- if (p)
+ if (p) {
vi->pages = (struct page *)p->private;
- else
+ /* clear private here, it is used to chain pages */
+ p->private = 0;
+ } else
p = alloc_page(gfp_mask);
return p;
}
@@ -118,99 +124,142 @@ static void skb_xmit_done(struct virtqueue *svq)
netif_wake_queue(vi->dev);
}
-static void receive_skb(struct net_device *dev, struct sk_buff *skb,
- unsigned len)
+static void set_skb_frag(struct sk_buff *skb, struct page *page,
+ unsigned int offset, unsigned int *len)
{
- struct virtnet_info *vi = netdev_priv(dev);
- struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
- int err;
- int i;
+ int i = skb_shinfo(skb)->nr_frags;
+ skb_frag_t *f;
+
+ f = &skb_shinfo(skb)->frags[i];
+ f->size = min((unsigned)PAGE_SIZE - offset, *len);
+ f->page_offset = offset;
+ f->page = page;
+
+ skb->data_len += f->size;
+ skb->len += f->size;
+ skb_shinfo(skb)->nr_frags++;
+ *len -= f->size;
+}
- if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
- pr_debug("%s: short packet %i\n", dev->name, len);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+ struct page *page, unsigned int len)
+{
+ struct sk_buff *skb;
+ struct skb_vnet_hdr *hdr;
+ unsigned int copy, hdr_len, offset;
+ char *p;
- if (vi->mergeable_rx_bufs) {
- unsigned int copy;
- char *p = page_address(skb_shinfo(skb)->frags[0].page);
+ p = page_address(page);
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
- len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
-
- memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
- p += sizeof(hdr->mhdr);
+ /* copy small packet so we can reuse these pages for small data */
+ skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
+ if (unlikely(!skb))
+ return NULL;
- copy = len;
- if (copy > skb_tailroom(skb))
- copy = skb_tailroom(skb);
+ hdr = skb_vnet_hdr(skb);
- memcpy(skb_put(skb, copy), p, copy);
+ if (vi->mergeable_rx_bufs) {
+ hdr_len = sizeof hdr->mhdr;
+ offset = hdr_len;
+ } else {
+ hdr_len = sizeof hdr->hdr;
+ offset = sizeof(struct padded_vnet_hdr);
+ }
- len -= copy;
+ memcpy(hdr, p, hdr_len);
- if (!len) {
- give_a_page(vi, skb_shinfo(skb)->frags[0].page);
- skb_shinfo(skb)->nr_frags--;
- } else {
- skb_shinfo(skb)->frags[0].page_offset +=
- sizeof(hdr->mhdr) + copy;
- skb_shinfo(skb)->frags[0].size = len;
- skb->data_len += len;
- skb->len += len;
- }
+ len -= hdr_len;
+ p += offset;
- while (--hdr->mhdr.num_buffers) {
- struct sk_buff *nskb;
+ copy = len;
+ if (copy > skb_tailroom(skb))
+ copy = skb_tailroom(skb);
+ memcpy(skb_put(skb, copy), p, copy);
- i = skb_shinfo(skb)->nr_frags;
- if (i >= MAX_SKB_FRAGS) {
- pr_debug("%s: packet too long %d\n", dev->name,
- len);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+ len -= copy;
+ offset += copy;
- nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
- if (!nskb) {
- pr_debug("%s: rx error: %d buffers missing\n",
- dev->name, hdr->mhdr.num_buffers);
- dev->stats.rx_length_errors++;
- goto drop;
- }
+ while (len) {
+ set_skb_frag(skb, page, offset, &len);
+ page = (struct page *)page->private;
+ offset = 0;
+ }
- __skb_unlink(nskb, &vi->recv);
- vi->num--;
+ if (page)
+ give_pages(vi, page);
- skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
- skb_shinfo(nskb)->nr_frags = 0;
- kfree_skb(nskb);
+ return skb;
+}
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
+static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
+{
+ struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+ struct page *page;
+ int num_buf, i, len;
+
+ num_buf = hdr->mhdr.num_buffers;
+ while (--num_buf) {
+ i = skb_shinfo(skb)->nr_frags;
+ if (i >= MAX_SKB_FRAGS) {
+ pr_debug("%s: packet too long\n", skb->dev->name);
+ skb->dev->stats.rx_length_errors++;
+ return -EINVAL;
+ }
- skb_shinfo(skb)->frags[i].size = len;
- skb_shinfo(skb)->nr_frags++;
- skb->data_len += len;
- skb->len += len;
+ page = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
+ if (!page) {
+ pr_debug("%s: rx error: %d buffers missing\n",
+ skb->dev->name, hdr->mhdr.num_buffers);
+ skb->dev->stats.rx_length_errors++;
+ return -EINVAL;
}
- } else {
- len -= sizeof(hdr->hdr);
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+
+ set_skb_frag(skb, page, 0, &len);
+
+ --vi->num;
+ }
+ return 0;
+}
+
+static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct page *page;
+ struct skb_vnet_hdr *hdr;
- if (len <= MAX_PACKET_LEN)
- trim_pages(vi, skb);
+ if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+ pr_debug("%s: short packet %i\n", dev->name, len);
+ dev->stats.rx_length_errors++;
+ if (vi->mergeable_rx_bufs || vi->big_packets)
+ give_pages(vi, buf);
+ else
+ dev_kfree_skb(buf);
+ return;
+ }
- err = pskb_trim(skb, len);
- if (err) {
- pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
- len, err);
+ if (!vi->mergeable_rx_bufs && !vi->big_packets) {
+ skb = buf;
+ len -= sizeof(struct virtio_net_hdr);
+ skb_trim(skb, len);
+ } else {
+ page = buf;
+ skb = page_to_skb(vi, page, len);
+ if (unlikely(!skb)) {
dev->stats.rx_dropped++;
- goto drop;
+ give_pages(vi, page);
+ return;
}
+ if (vi->mergeable_rx_bufs)
+ if (receive_mergeable(vi, skb)) {
+ dev_kfree_skb(skb);
+ return;
+ }
}
+ hdr = skb_vnet_hdr(skb);
skb->truesize += skb->data_len;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
@@ -267,110 +316,119 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
frame_err:
dev->stats.rx_frame_errors++;
-drop:
dev_kfree_skb(skb);
}
-static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
{
struct sk_buff *skb;
- struct scatterlist sg[2+MAX_SKB_FRAGS];
- int num, err, i;
- bool oom = false;
-
- sg_init_table(sg, 2+MAX_SKB_FRAGS);
- do {
- struct skb_vnet_hdr *hdr;
+ struct skb_vnet_hdr *hdr;
+ struct scatterlist sg[2];
+ int err;
- skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
- if (unlikely(!skb)) {
- oom = true;
- break;
- }
+ skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+ if (unlikely(!skb))
+ return -ENOMEM;
- skb_put(skb, MAX_PACKET_LEN);
+ skb_put(skb, MAX_PACKET_LEN);
- hdr = skb_vnet_hdr(skb);
- sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+ hdr = skb_vnet_hdr(skb);
+ sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
- if (vi->big_packets) {
- for (i = 0; i < MAX_SKB_FRAGS; i++) {
- skb_frag_t *f = &skb_shinfo(skb)->frags[i];
- f->page = get_a_page(vi, gfp);
- if (!f->page)
- break;
+ skb_to_sgvec(skb, sg + 1, 0, skb->len);
- f->page_offset = 0;
- f->size = PAGE_SIZE;
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 2, skb);
+ if (err < 0)
+ dev_kfree_skb(skb);
- skb->data_len += PAGE_SIZE;
- skb->len += PAGE_SIZE;
+ return err;
+}
- skb_shinfo(skb)->nr_frags++;
- }
+static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
+{
+ struct scatterlist sg[MAX_SKB_FRAGS + 2];
+ struct page *first, *list = NULL;
+ char *p;
+ int i, err, offset;
+
+ /* page in sg[MAX_SKB_FRAGS + 1] is list tail */
+ for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+ first = get_a_page(vi, gfp);
+ if (!first) {
+ if (list)
+ give_pages(vi, list);
+ return -ENOMEM;
}
+ sg_set_buf(&sg[i], page_address(first), PAGE_SIZE);
- num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
- skb_queue_head(&vi->recv, skb);
+ /* chain new page in list head to match sg */
+ first->private = (unsigned long)list;
+ list = first;
+ }
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
- if (err < 0) {
- skb_unlink(skb, &vi->recv);
- trim_pages(vi, skb);
- kfree_skb(skb);
- break;
- }
- vi->num++;
- } while (err >= num);
- if (unlikely(vi->num > vi->max))
- vi->max = vi->num;
- vi->rvq->vq_ops->kick(vi->rvq);
- return !oom;
+ first = get_a_page(vi, gfp);
+ if (!first) {
+ give_pages(vi, list);
+ return -ENOMEM;
+ }
+ p = page_address(first);
+
+ /* sg[0], sg[1] share the same page */
+ /* a separate sg[0] for virtio_net_hdr only, due to a QEMU bug */
+ sg_set_buf(&sg[0], p, sizeof(struct virtio_net_hdr));
+
+ /* sg[1] for data packet, from offset */
+ offset = sizeof(struct padded_vnet_hdr);
+ sg_set_buf(&sg[1], p + offset, PAGE_SIZE - offset);
+
+ /* chain first in list head */
+ first->private = (unsigned long)list;
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, MAX_SKB_FRAGS + 2,
+ first);
+ if (err < 0)
+ give_pages(vi, first);
+
+ return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
-static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
{
- struct sk_buff *skb;
- struct scatterlist sg[1];
+ struct page *page;
+ struct scatterlist sg;
int err;
- bool oom = false;
- if (!vi->mergeable_rx_bufs)
- return try_fill_recv_maxbufs(vi, gfp);
+ page = get_a_page(vi, gfp);
+ if (!page)
+ return -ENOMEM;
- do {
- skb_frag_t *f;
+ sg_init_one(&sg, page_address(page), PAGE_SIZE);
- skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
- if (unlikely(!skb)) {
- oom = true;
- break;
- }
+ err = vi->rvq->vq_ops->add_buf(vi->rvq, &sg, 0, 1, page);
+ if (err < 0)
+ give_pages(vi, page);
- f = &skb_shinfo(skb)->frags[0];
- f->page = get_a_page(vi, gfp);
- if (!f->page) {
- oom = true;
- kfree_skb(skb);
- break;
- }
-
- f->page_offset = 0;
- f->size = PAGE_SIZE;
+ return err;
+}
- skb_shinfo(skb)->nr_frags++;
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+{
+ int err;
+ bool oom = false;
- sg_init_one(sg, page_address(f->page), PAGE_SIZE);
- skb_queue_head(&vi->recv, skb);
+ do {
+ if (vi->mergeable_rx_bufs)
+ err = add_recvbuf_mergeable(vi, gfp);
+ else if (vi->big_packets)
+ err = add_recvbuf_big(vi, gfp);
+ else
+ err = add_recvbuf_small(vi, gfp);
- err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
if (err < 0) {
- skb_unlink(skb, &vi->recv);
- kfree_skb(skb);
+ oom = true;
break;
}
- vi->num++;
+ ++vi->num;
} while (err > 0);
if (unlikely(vi->num > vi->max))
vi->max = vi->num;
@@ -407,15 +465,14 @@ static void refill_work(struct work_struct *work)
static int virtnet_poll(struct napi_struct *napi, int budget)
{
struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
- struct sk_buff *skb = NULL;
+ void *buf;
unsigned int len, received = 0;
again:
while (received < budget &&
- (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
- __skb_unlink(skb, &vi->recv);
- receive_skb(vi->dev, skb, len);
- vi->num--;
+ (buf = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
+ receive_buf(vi->dev, buf, len);
+ --vi->num;
received++;
}
@@ -445,7 +502,6 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- __skb_unlink(skb, &vi->send);
vi->dev->stats.tx_bytes += skb->len;
vi->dev->stats.tx_packets++;
tot_sgs += skb_vnet_hdr(skb)->num_sg;
@@ -495,9 +551,9 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
/* Encode metadata header at front. */
if (vi->mergeable_rx_bufs)
- sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
+ sg_set_buf(sg, &hdr->mhdr, sizeof hdr->mhdr);
else
- sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+ sg_set_buf(sg, &hdr->hdr, sizeof hdr->hdr);
hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
@@ -528,15 +584,6 @@ again:
}
vi->svq->vq_ops->kick(vi->svq);
- /*
- * Put new one in send queue. You'd expect we'd need this before
- * xmit_skb calls add_buf(), since the callback can be triggered
- * immediately after that. But since the callback just triggers
- * another call back here, normal network xmit locking prevents the
- * race.
- */
- __skb_queue_head(&vi->send, skb);
-
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);
nf_reset(skb);
@@ -674,6 +721,8 @@ static void virtnet_set_rx_mode(struct net_device *dev)
struct virtio_net_ctrl_mac *mac_data;
struct dev_addr_list *addr;
struct netdev_hw_addr *ha;
+ int uc_count;
+ int mc_count;
void *buf;
int i;
@@ -700,9 +749,12 @@ static void virtnet_set_rx_mode(struct net_device *dev)
dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
allmulti ? "en" : "dis");
+ uc_count = netdev_uc_count(dev);
+ mc_count = netdev_mc_count(dev);
/* MAC filter - use one buffer for both lists */
- mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
- (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
+ (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+ mac_data = buf;
if (!buf) {
dev_warn(&dev->dev, "No memory for MAC address buffer\n");
return;
@@ -711,24 +763,24 @@ static void virtnet_set_rx_mode(struct net_device *dev)
sg_init_table(sg, 2);
/* Store the unicast list and count in the front of the buffer */
- mac_data->entries = dev->uc.count;
+ mac_data->entries = uc_count;
i = 0;
- list_for_each_entry(ha, &dev->uc.list, list)
+ netdev_for_each_uc_addr(ha, dev)
memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
sg_set_buf(&sg[0], mac_data,
- sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));
+ sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
/* multicast list and count fill the end */
- mac_data = (void *)&mac_data->macs[dev->uc.count][0];
+ mac_data = (void *)&mac_data->macs[uc_count][0];
- mac_data->entries = dev->mc_count;
- addr = dev->mc_list;
- for (i = 0; i < dev->mc_count; i++, addr = addr->next)
- memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
+ mac_data->entries = mc_count;
+ i = 0;
+ netdev_for_each_mc_addr(addr, dev)
+ memcpy(&mac_data->macs[i++][0], addr->da_addr, ETH_ALEN);
sg_set_buf(&sg[1], mac_data,
- sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));
+ sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
VIRTIO_NET_CTRL_MAC_TABLE_SET,
@@ -915,10 +967,6 @@ static int virtnet_probe(struct virtio_device *vdev)
dev->features |= NETIF_F_HW_VLAN_FILTER;
}
- /* Initialize our empty receive and send queues. */
- skb_queue_head_init(&vi->recv);
- skb_queue_head_init(&vi->send);
-
err = register_netdev(dev);
if (err) {
pr_debug("virtio_net: registering device failed\n");
@@ -951,26 +999,42 @@ free:
return err;
}
+static void free_unused_bufs(struct virtnet_info *vi)
+{
+ void *buf;
+ while (1) {
+ buf = vi->svq->vq_ops->detach_unused_buf(vi->svq);
+ if (!buf)
+ break;
+ dev_kfree_skb(buf);
+ }
+ while (1) {
+ buf = vi->rvq->vq_ops->detach_unused_buf(vi->rvq);
+ if (!buf)
+ break;
+ if (vi->mergeable_rx_bufs || vi->big_packets)
+ give_pages(vi, buf);
+ else
+ dev_kfree_skb(buf);
+ --vi->num;
+ }
+ BUG_ON(vi->num != 0);
+}
+
static void __devexit virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- struct sk_buff *skb;
/* Stop all the virtqueues. */
vdev->config->reset(vdev);
- /* Free our skbs in send and recv queues, if any. */
- while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
- kfree_skb(skb);
- vi->num--;
- }
- __skb_queue_purge(&vi->send);
-
- BUG_ON(vi->num != 0);
unregister_netdev(vi->dev);
cancel_delayed_work_sync(&vi->refill);
+ /* Free unused buffers in both send and recv, if any. */
+ free_unused_bufs(vi);
+
vdev->config->del_vqs(vi->vdev);
while (vi->pages)
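
Note (editorial, illustrative only): the virtio_net changes above stop tracking receive buffers in sk_buff queues and instead recycle whole pages, chaining spares through page->private. A stripped-down sketch of that recycling pattern (hypothetical names, no locking shown):

#include <linux/mm.h>
#include <linux/gfp.h>

struct example_pool {
        struct page *pages;     /* singly linked through page->private */
};

static void example_give_page(struct example_pool *pool, struct page *page)
{
        page->private = (unsigned long)pool->pages;
        pool->pages = page;
}

static struct page *example_get_page(struct example_pool *pool, gfp_t gfp)
{
        struct page *p = pool->pages;

        if (p) {
                pool->pages = (struct page *)p->private;
                p->private = 0;         /* detach from the chain */
        } else {
                p = alloc_page(gfp);
        }
        return p;
}
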
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 9cc438282d7..cff3485d967 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -35,7 +35,7 @@ char vmxnet3_driver_name[] = "vmxnet3";
* PCI Device ID Table
* Last entry must be all 0s
*/
-static const struct pci_device_id vmxnet3_pciid_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
{PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
{0}
};
@@ -1668,22 +1668,19 @@ static u8 *
vmxnet3_copy_mc(struct net_device *netdev)
{
u8 *buf = NULL;
- u32 sz = netdev->mc_count * ETH_ALEN;
+ u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
/* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
if (sz <= 0xffff) {
/* We may be called with BH disabled */
buf = kmalloc(sz, GFP_ATOMIC);
if (buf) {
- int i;
- struct dev_mc_list *mc = netdev->mc_list;
+ struct dev_mc_list *mc;
+ int i = 0;
- for (i = 0; i < netdev->mc_count; i++) {
- BUG_ON(!mc);
- memcpy(buf + i * ETH_ALEN, mc->dmi_addr,
+ netdev_for_each_mc_addr(mc, netdev)
+ memcpy(buf + i++ * ETH_ALEN, mc->dmi_addr,
ETH_ALEN);
- mc = mc->next;
- }
}
}
return buf;
@@ -1708,12 +1705,12 @@ vmxnet3_set_mc(struct net_device *netdev)
if (netdev->flags & IFF_ALLMULTI)
new_mode |= VMXNET3_RXM_ALL_MULTI;
else
- if (netdev->mc_count > 0) {
+ if (!netdev_mc_empty(netdev)) {
new_table = vmxnet3_copy_mc(netdev);
if (new_table) {
new_mode |= VMXNET3_RXM_MCAST;
rxConf->mfTableLen = cpu_to_le16(
- netdev->mc_count * ETH_ALEN);
+ netdev_mc_count(netdev) * ETH_ALEN);
rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
new_table));
} else {
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index b9685e82f7b..46a7c9e689e 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -54,7 +54,7 @@ MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Neterion's X3100 Series 10GbE PCIe I/O"
"Virtualized Server Adapter");
-static struct pci_device_id vxge_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(vxge_id_table) = {
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_WIN, PCI_ANY_ID,
PCI_ANY_ID},
{PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_TITAN_UNI, PCI_ANY_ID,
@@ -1178,11 +1178,11 @@ static void vxge_set_multicast(struct net_device *dev)
memset(&mac_info, 0, sizeof(struct macInfo));
/* Update individual M_CAST address list */
- if ((!vdev->all_multi_flg) && dev->mc_count) {
+ if ((!vdev->all_multi_flg) && netdev_mc_count(dev)) {
mcast_cnt = vdev->vpaths[0].mcast_addr_cnt;
list_head = &vdev->vpaths[0].mac_addr_list;
- if ((dev->mc_count +
+ if ((netdev_mc_count(dev) +
(vdev->vpaths[0].mac_addr_cnt - mcast_cnt)) >
vdev->vpaths[0].max_mac_addr_cnt)
goto _set_all_mcast;
@@ -1217,9 +1217,7 @@ static void vxge_set_multicast(struct net_device *dev)
}
/* Add new ones */
- for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
- i++, mclist = mclist->next) {
-
+ netdev_for_each_mc_addr(mclist, dev) {
memcpy(mac_info.macaddr, mclist->dmi_addr, ETH_ALEN);
for (vpath_idx = 0; vpath_idx < vdev->no_of_vpath;
vpath_idx++) {
@@ -4297,10 +4295,8 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
vxge_debug_init(VXGE_TRACE, "%s: Neterion %s Server Adapter",
vdev->ndev->name, ll_config.device_hw_info.product_desc);
- vxge_debug_init(VXGE_TRACE,
- "%s: MAC ADDR: %02X:%02X:%02X:%02X:%02X:%02X",
- vdev->ndev->name, macaddr[0], macaddr[1], macaddr[2],
- macaddr[3], macaddr[4], macaddr[5]);
+ vxge_debug_init(VXGE_TRACE, "%s: MAC ADDR: %pM",
+ vdev->ndev->name, macaddr);
vxge_debug_init(VXGE_TRACE, "%s: Link Width x%d",
vdev->ndev->name, vxge_hw_device_link_width_get(hldev));
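
Note (editorial, illustrative only): the MAC-address logging hunks above rely on the kernel's %pM printk extension, which takes a pointer to a six-byte buffer and prints it as a colon-separated Ethernet address, replacing the removed six-argument %02X chains. A one-line example with a hypothetical helper:

#include <linux/kernel.h>
#include <linux/if_ether.h>

static void example_print_mac(const u8 addr[ETH_ALEN])
{
        pr_info("station address %pM\n", addr);
}
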
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 3f759daf3ca..f88c07c1319 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -2050,7 +2050,7 @@ static int __init dscc4_setup(char *str)
__setup("dscc4.setup=", dscc4_setup);
#endif
-static struct pci_device_id dscc4_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(dscc4_pci_tbl) = {
{ PCI_VENDOR_ID_SIEMENS, PCI_DEVICE_ID_SIEMENS_DSCC4,
PCI_ANY_ID, PCI_ANY_ID, },
{ 0,}
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 9bc2e364915..40d724a8e02 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -528,7 +528,7 @@ static int fst_debug_mask = { FST_DEBUG };
/*
* PCI ID lookup table
*/
-static struct pci_device_id fst_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(fst_pci_dev_id) = {
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
diff --git a/drivers/net/wan/lmc/lmc_main.c b/drivers/net/wan/lmc/lmc_main.c
index 4b6f27e7c82..b2785037712 100644
--- a/drivers/net/wan/lmc/lmc_main.c
+++ b/drivers/net/wan/lmc/lmc_main.c
@@ -77,7 +77,7 @@
static int LMC_PKT_BUF_SZ = 1542;
-static struct pci_device_id lmc_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(lmc_pci_tbl) = {
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
PCI_VENDOR_ID_LMC, PCI_ANY_ID },
{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_TULIP_FAST,
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c
index aec4d395542..f4f1c00d0d2 100644
--- a/drivers/net/wan/pc300_drv.c
+++ b/drivers/net/wan/pc300_drv.c
@@ -251,7 +251,7 @@ static char rcsid[] =
#undef PC300_DEBUG_RX
#undef PC300_DEBUG_OTHER
-static struct pci_device_id cpc_pci_dev_id[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(cpc_pci_dev_id) = {
/* PC300/RSV or PC300/X21, 2 chan */
{0x120e, 0x300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0x300},
/* PC300/RSV or PC300/X21, 1 chan */
diff --git a/drivers/net/wan/pc300too.c b/drivers/net/wan/pc300too.c
index 60ece54bdd9..c7ab3becd26 100644
--- a/drivers/net/wan/pc300too.c
+++ b/drivers/net/wan/pc300too.c
@@ -481,7 +481,7 @@ static int __devinit pc300_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pc300_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pc300_pci_tbl) = {
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
diff --git a/drivers/net/wan/pci200syn.c b/drivers/net/wan/pci200syn.c
index f1340faaf02..e2cff64a446 100644
--- a/drivers/net/wan/pci200syn.c
+++ b/drivers/net/wan/pci200syn.c
@@ -417,7 +417,7 @@ static int __devinit pci200_pci_init_one(struct pci_dev *pdev,
-static struct pci_device_id pci200_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(pci200_pci_tbl) = {
{ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_PCI200SYN, 0, 0, 0 },
{ 0, }
diff --git a/drivers/net/wan/wanxl.c b/drivers/net/wan/wanxl.c
index daee8a0624e..541c700dcee 100644
--- a/drivers/net/wan/wanxl.c
+++ b/drivers/net/wan/wanxl.c
@@ -814,7 +814,7 @@ static int __devinit wanxl_pci_init_one(struct pci_dev *pdev,
return 0;
}
-static struct pci_device_id wanxl_pci_tbl[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = {
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 96a615fe09d..6cead321bc1 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -301,24 +301,15 @@ int i2400m_check_mac_addr(struct i2400m *i2400m)
/* Extract MAC address */
ddi = (void *) skb->data;
BUILD_BUG_ON(ETH_ALEN != sizeof(ddi->mac_address));
- d_printf(2, dev, "GET DEVICE INFO: mac addr "
- "%02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
+ d_printf(2, dev, "GET DEVICE INFO: mac addr %pM\n",
+ ddi->mac_address);
if (!memcmp(net_dev->perm_addr, ddi->mac_address,
sizeof(ddi->mac_address)))
goto ok;
dev_warn(dev, "warning: device reports a different MAC address "
"to that of boot mode's\n");
- dev_warn(dev, "device reports %02x:%02x:%02x:%02x:%02x:%02x\n",
- ddi->mac_address[0], ddi->mac_address[1],
- ddi->mac_address[2], ddi->mac_address[3],
- ddi->mac_address[4], ddi->mac_address[5]);
- dev_warn(dev, "boot mode reported %02x:%02x:%02x:%02x:%02x:%02x\n",
- net_dev->perm_addr[0], net_dev->perm_addr[1],
- net_dev->perm_addr[2], net_dev->perm_addr[3],
- net_dev->perm_addr[4], net_dev->perm_addr[5]);
+ dev_warn(dev, "device reports %pM\n", ddi->mac_address);
+ dev_warn(dev, "boot mode reported %pM\n", net_dev->perm_addr);
if (!memcmp(zeromac, ddi->mac_address, sizeof(zeromac)))
dev_err(dev, "device reports an invalid MAC address, "
"not updating\n");
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index 64cdfeb299c..e803a7dc650 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -1041,21 +1041,14 @@ int i2400m_read_mac_addr(struct i2400m *i2400m)
dev_err(dev, "BM: read mac addr failed: %d\n", result);
goto error_read_mac;
}
- d_printf(2, dev,
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ d_printf(2, dev, "mac addr is %pM\n", ack_buf.ack_pl);
if (i2400m->bus_bm_mac_addr_impaired == 1) {
ack_buf.ack_pl[0] = 0x00;
ack_buf.ack_pl[1] = 0x16;
ack_buf.ack_pl[2] = 0xd3;
get_random_bytes(&ack_buf.ack_pl[3], 3);
dev_err(dev, "BM is MAC addr impaired, faking MAC addr to "
- "mac addr is %02x:%02x:%02x:%02x:%02x:%02x\n",
- ack_buf.ack_pl[0], ack_buf.ack_pl[1],
- ack_buf.ack_pl[2], ack_buf.ack_pl[3],
- ack_buf.ack_pl[4], ack_buf.ack_pl[5]);
+ "mac addr is %pM\n", ack_buf.ack_pl);
result = 0;
}
net_dev->addr_len = ETH_ALEN;
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 56dd6650c97..58894366075 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -112,6 +112,7 @@ config AIRO_CS
depends on PCMCIA && (BROKEN || !M32R)
select WIRELESS_EXT
select WEXT_SPY
+ select WEXT_PRIV
select CRYPTO
select CRYPTO_AES
---help---
diff --git a/drivers/net/wireless/adm8211.c b/drivers/net/wireless/adm8211.c
index 39410016b4f..547912e6843 100644
--- a/drivers/net/wireless/adm8211.c
+++ b/drivers/net/wireless/adm8211.c
@@ -39,7 +39,7 @@ static unsigned int rx_ring_size __read_mostly = 16;
module_param(tx_ring_size, uint, 0);
module_param(rx_ring_size, uint, 0);
-static struct pci_device_id adm8211_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(adm8211_pci_id_table) = {
/* ADMtek ADM8211 */
{ PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
{ PCI_DEVICE(0x1200, 0x8201) }, /* ? */
@@ -302,18 +302,6 @@ static int adm8211_get_stats(struct ieee80211_hw *dev,
return 0;
}
-static int adm8211_get_tx_stats(struct ieee80211_hw *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct adm8211_priv *priv = dev->priv;
-
- stats[0].len = priv->cur_tx - priv->dirty_tx;
- stats[0].limit = priv->tx_ring_size - 2;
- stats[0].count = priv->dirty_tx;
-
- return 0;
-}
-
static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
@@ -1400,15 +1388,15 @@ static void adm8211_configure_filter(struct ieee80211_hw *dev,
}
static int adm8211_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
if (priv->mode != NL80211_IFTYPE_MONITOR)
return -EOPNOTSUPP;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
+ priv->mode = vif->type;
break;
default:
return -EOPNOTSUPP;
@@ -1416,8 +1404,8 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
ADM8211_IDLE();
- ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)conf->mac_addr));
- ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
+ ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
+ ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
adm8211_update_mode(dev);
@@ -1427,7 +1415,7 @@ static int adm8211_add_interface(struct ieee80211_hw *dev,
}
static void adm8211_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
priv->mode = NL80211_IFTYPE_MONITOR;
@@ -1773,7 +1761,6 @@ static const struct ieee80211_ops adm8211_ops = {
.prepare_multicast = adm8211_prepare_multicast,
.configure_filter = adm8211_configure_filter,
.get_stats = adm8211_get_stats,
- .get_tx_stats = adm8211_get_tx_stats,
.get_tsf = adm8211_get_tsft
};
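
Note (editorial, illustrative only): the adm8211 hunks above and the at76c50x hunks below migrate mac80211's add_interface()/remove_interface() callbacks from struct ieee80211_if_init_conf to struct ieee80211_vif, reading the interface type from vif->type and the MAC address from vif->addr. A hedged sketch of the new callback shape, with a made-up driver private structure:

#include <net/mac80211.h>

struct example_wifi_priv {
        enum nl80211_iftype mode;
};

static int example_add_interface(struct ieee80211_hw *hw,
                                 struct ieee80211_vif *vif)
{
        struct example_wifi_priv *priv = hw->priv;

        if (vif->type != NL80211_IFTYPE_STATION)
                return -EOPNOTSUPP;

        priv->mode = vif->type;
        /* a real driver would program vif->addr into the hardware here */
        return 0;
}

static void example_remove_interface(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif)
{
        struct example_wifi_priv *priv = hw->priv;

        priv->mode = NL80211_IFTYPE_MONITOR;
}
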
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index 4331d675fcc..698d5672a07 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -51,13 +51,14 @@
#include <linux/freezer.h>
#include <linux/ieee80211.h>
+#include <net/iw_handler.h>
#include "airo.h"
#define DRV_NAME "airo"
#ifdef CONFIG_PCI
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x14b9, 1, PCI_ANY_ID, PCI_ANY_ID, },
{ 0x14b9, 0x4500, PCI_ANY_ID, PCI_ANY_ID },
{ 0x14b9, 0x4800, PCI_ANY_ID, PCI_ANY_ID, },
@@ -2310,7 +2311,7 @@ static void airo_set_multicast_list(struct net_device *dev) {
airo_set_promisc(ai);
}
- if ((dev->flags&IFF_ALLMULTI)||dev->mc_count>0) {
+ if ((dev->flags&IFF_ALLMULTI) || !netdev_mc_empty(dev)) {
/* Turn on multicast. (Should be already setup...) */
}
}
@@ -5254,11 +5255,7 @@ static int set_wep_key(struct airo_info *ai, u16 index, const char *key,
WepKeyRid wkr;
int rc;
- if (keylen == 0) {
- airo_print_err(ai->dev->name, "%s: key length to set was zero",
- __func__);
- return -1;
- }
+ WARN_ON(keylen == 0);
memset(&wkr, 0, sizeof(wkr));
wkr.len = cpu_to_le16(sizeof(wkr));
@@ -6405,11 +6402,7 @@ static int airo_set_encode(struct net_device *dev,
if (dwrq->length > MIN_KEY_SIZE)
key.len = MAX_KEY_SIZE;
else
- if (dwrq->length > 0)
- key.len = MIN_KEY_SIZE;
- else
- /* Disable the key */
- key.len = 0;
+ key.len = MIN_KEY_SIZE;
/* Check if the key is not marked as invalid */
if(!(dwrq->flags & IW_ENCODE_NOKEY)) {
/* Cleanup */
@@ -6590,12 +6583,22 @@ static int airo_set_encodeext(struct net_device *dev,
default:
return -EINVAL;
}
- /* Send the key to the card */
- rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
- if (rc < 0) {
- airo_print_err(local->dev->name, "failed to set WEP key"
- " at index %d: %d.", idx, rc);
- return rc;
+ if (key.len == 0) {
+ rc = set_wep_tx_idx(local, idx, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP transmit index to %d: %d.",
+ idx, rc);
+ return rc;
+ }
+ } else {
+ rc = set_wep_key(local, idx, key.key, key.len, perm, 1);
+ if (rc < 0) {
+ airo_print_err(local->dev->name,
+ "failed to set WEP key at index %d: %d.",
+ idx, rc);
+ return rc;
+ }
}
}
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 2517364d3eb..0fb419936df 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -1789,7 +1789,7 @@ static void at76_mac80211_stop(struct ieee80211_hw *hw)
}
static int at76_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct at76_priv *priv = hw->priv;
int ret = 0;
@@ -1798,7 +1798,7 @@ static int at76_add_interface(struct ieee80211_hw *hw,
mutex_lock(&priv->mtx);
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
priv->iw_mode = IW_MODE_INFRA;
break;
@@ -1814,7 +1814,7 @@ exit:
}
static void at76_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
at76_dbg(DBG_MAC80211, "%s()", __func__);
}
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 9f9459860d8..8c8ce67971e 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -109,7 +109,6 @@ struct ar9170_rxstream_mpdu_merge {
bool has_plcp;
};
-#define AR9170_NUM_MAX_BA_RETRY 5
#define AR9170_NUM_TID 16
#define WME_BA_BMP_SIZE 64
#define AR9170_NUM_MAX_AGG_LEN (2 * WME_BA_BMP_SIZE)
@@ -143,7 +142,12 @@ struct ar9170_sta_tid {
u16 tid;
enum ar9170_tid_state state;
bool active;
- u8 retry;
+};
+
+struct ar9170_tx_queue_stats {
+ unsigned int len;
+ unsigned int limit;
+ unsigned int count;
};
#define AR9170_QUEUE_TIMEOUT 64
@@ -154,6 +158,8 @@ struct ar9170_sta_tid {
#define AR9170_NUM_TX_STATUS 128
#define AR9170_NUM_TX_AGG_MAX 30
+#define AR9170_NUM_TX_LIMIT_HARD AR9170_TXQ_DEPTH
+#define AR9170_NUM_TX_LIMIT_SOFT (AR9170_TXQ_DEPTH - 10)
struct ar9170 {
struct ieee80211_hw *hw;
@@ -211,7 +217,7 @@ struct ar9170 {
/* qos queue settings */
spinlock_t tx_stats_lock;
- struct ieee80211_tx_queue_stats tx_stats[5];
+ struct ar9170_tx_queue_stats tx_stats[5];
struct ieee80211_tx_queue_params edcf[5];
spinlock_t cmdlock;
@@ -248,13 +254,8 @@ struct ar9170_sta_info {
unsigned int ampdu_max_len;
};
-#define AR9170_TX_FLAG_WAIT_FOR_ACK BIT(0)
-#define AR9170_TX_FLAG_NO_ACK BIT(1)
-#define AR9170_TX_FLAG_BLOCK_ACK BIT(2)
-
struct ar9170_tx_info {
unsigned long timeout;
- unsigned int flags;
};
#define IS_STARTED(a) (((struct ar9170 *)a)->state >= AR9170_STARTED)
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 701ddb7d840..0a1d4c28e68 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -276,6 +276,7 @@ struct ar9170_tx_control {
#define AR9170_TX_MAC_RATE_PROBE 0x8000
/* either-or */
+#define AR9170_TX_PHY_MOD_MASK 0x00000003
#define AR9170_TX_PHY_MOD_CCK 0x00000000
#define AR9170_TX_PHY_MOD_OFDM 0x00000001
#define AR9170_TX_PHY_MOD_HT 0x00000002
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index ddc8c09dc79..857e8610429 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -117,7 +117,7 @@ int ar9170_set_qos(struct ar9170 *ar)
ar9170_regwrite(AR9170_MAC_REG_AC1_AC0_TXOP,
ar->edcf[0].txop | ar->edcf[1].txop << 16);
ar9170_regwrite(AR9170_MAC_REG_AC3_AC2_TXOP,
- ar->edcf[1].txop | ar->edcf[3].txop << 16);
+ ar->edcf[2].txop | ar->edcf[3].txop << 16);
ar9170_regwrite_finish();
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index f9d6db8d013..a6452af9c6c 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -194,12 +194,15 @@ static inline u16 ar9170_get_seq(struct sk_buff *skb)
return ar9170_get_seq_h((void *) txc->frame_data);
}
+static inline u16 ar9170_get_tid_h(struct ieee80211_hdr *hdr)
+{
+ return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+}
+
static inline u16 ar9170_get_tid(struct sk_buff *skb)
{
struct ar9170_tx_control *txc = (void *) skb->data;
- struct ieee80211_hdr *hdr = (void *) txc->frame_data;
-
- return (ieee80211_get_qos_ctl(hdr))[0] & IEEE80211_QOS_CTL_TID_MASK;
+ return ar9170_get_tid_h((struct ieee80211_hdr *) txc->frame_data);
}
#define GET_NEXT_SEQ(seq) ((seq + 1) & 0x0fff)
@@ -213,10 +216,10 @@ static void ar9170_print_txheader(struct ar9170 *ar, struct sk_buff *skb)
struct ar9170_tx_info *arinfo = (void *) txinfo->rate_driver_data;
struct ieee80211_hdr *hdr = (void *) txc->frame_data;
- printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] flags:%x s:%d "
+ printk(KERN_DEBUG "%s: => FRAME [skb:%p, q:%d, DA:[%pM] s:%d "
"mac_ctrl:%04x, phy_ctrl:%08x, timeout:[%d ms]]\n",
wiphy_name(ar->hw->wiphy), skb, skb_get_queue_mapping(skb),
- ieee80211_get_DA(hdr), arinfo->flags, ar9170_get_seq_h(hdr),
+ ieee80211_get_DA(hdr), ar9170_get_seq_h(hdr),
le16_to_cpu(txc->mac_control), le32_to_cpu(txc->phy_control),
jiffies_to_msecs(arinfo->timeout - jiffies));
}
@@ -391,7 +394,7 @@ static void ar9170_tx_fake_ampdu_status(struct ar9170 *ar)
ieee80211_tx_status_irqsafe(ar->hw, skb);
}
- for_each_bit(i, &queue_bitmap, BITS_PER_BYTE) {
+ for_each_set_bit(i, &queue_bitmap, BITS_PER_BYTE) {
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: wake queue %d\n",
wiphy_name(ar->hw->wiphy), i);
@@ -430,7 +433,7 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
spin_lock_irqsave(&ar->tx_stats_lock, flags);
ar->tx_stats[queue].len--;
- if (skb_queue_empty(&ar->tx_pending[queue])) {
+ if (ar->tx_stats[queue].len < AR9170_NUM_TX_LIMIT_SOFT) {
#ifdef AR9170_QUEUE_STOP_DEBUG
printk(KERN_DEBUG "%s: wake queue %d\n",
wiphy_name(ar->hw->wiphy), queue);
@@ -440,22 +443,17 @@ void ar9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
}
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- if (arinfo->flags & AR9170_TX_FLAG_BLOCK_ACK) {
- ar9170_tx_ampdu_callback(ar, skb);
- } else if (arinfo->flags & AR9170_TX_FLAG_WAIT_FOR_ACK) {
- arinfo->timeout = jiffies +
- msecs_to_jiffies(AR9170_TX_TIMEOUT);
-
- skb_queue_tail(&ar->tx_status[queue], skb);
- } else if (arinfo->flags & AR9170_TX_FLAG_NO_ACK) {
+ if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
ar9170_tx_status(ar, skb, AR9170_TX_STATUS_FAILED);
} else {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: unsupported frame flags!\n",
- wiphy_name(ar->hw->wiphy));
- ar9170_print_txheader(ar, skb);
-#endif /* AR9170_QUEUE_DEBUG */
- dev_kfree_skb_any(skb);
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ ar9170_tx_ampdu_callback(ar, skb);
+ } else {
+ arinfo->timeout = jiffies +
+ msecs_to_jiffies(AR9170_TX_TIMEOUT);
+
+ skb_queue_tail(&ar->tx_status[queue], skb);
+ }
}
if (!ar->tx_stats[queue].len &&
@@ -1407,17 +1405,6 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
(is_valid_ether_addr(ieee80211_get_DA(hdr)))) {
- if (info->flags & IEEE80211_TX_CTL_AMPDU) {
- if (unlikely(!info->control.sta))
- goto err_out;
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
- arinfo->flags = AR9170_TX_FLAG_BLOCK_ACK;
-
- goto out;
- }
-
- txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
/*
* WARNING:
* Putting the QoS queue bits into an unexplored territory is
@@ -1431,12 +1418,17 @@ static int ar9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
txc->phy_control |=
cpu_to_le32(queue << AR9170_TX_PHY_QOS_SHIFT);
- arinfo->flags = AR9170_TX_FLAG_WAIT_FOR_ACK;
- } else {
- arinfo->flags = AR9170_TX_FLAG_NO_ACK;
+
+ if (info->flags & IEEE80211_TX_CTL_AMPDU) {
+ if (unlikely(!info->control.sta))
+ goto err_out;
+
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_AGGR);
+ } else {
+ txc->mac_control |= cpu_to_le16(AR9170_TX_MAC_RATE_PROBE);
+ }
}
-out:
return 0;
err_out:
@@ -1671,8 +1663,7 @@ static bool ar9170_tx_ampdu(struct ar9170 *ar)
* tell the FW/HW that this is the last frame,
* that way it will wait for the immediate block ack.
*/
- if (likely(skb_peek_tail(&agg)))
- ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
+ ar9170_tx_indicate_immba(ar, skb_peek_tail(&agg));
#ifdef AR9170_TXAGG_DEBUG
printk(KERN_DEBUG "%s: generated A-MPDU looks like this:\n",
@@ -1716,6 +1707,21 @@ static void ar9170_tx(struct ar9170 *ar)
for (i = 0; i < __AR9170_NUM_TXQ; i++) {
spin_lock_irqsave(&ar->tx_stats_lock, flags);
+ frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
+ skb_queue_len(&ar->tx_pending[i]));
+
+ if (remaining_space < frames) {
+#ifdef AR9170_QUEUE_DEBUG
+ printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
+ "remaining slots:%d, needed:%d\n",
+ wiphy_name(ar->hw->wiphy), i, remaining_space,
+ frames);
+#endif /* AR9170_QUEUE_DEBUG */
+ frames = remaining_space;
+ }
+
+ ar->tx_stats[i].len += frames;
+ ar->tx_stats[i].count += frames;
if (ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
#ifdef AR9170_QUEUE_DEBUG
printk(KERN_DEBUG "%s: queue %d full\n",
@@ -1733,25 +1739,8 @@ static void ar9170_tx(struct ar9170 *ar)
__ar9170_dump_txstats(ar);
#endif /* AR9170_QUEUE_STOP_DEBUG */
ieee80211_stop_queue(ar->hw, i);
- spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
- continue;
- }
-
- frames = min(ar->tx_stats[i].limit - ar->tx_stats[i].len,
- skb_queue_len(&ar->tx_pending[i]));
-
- if (remaining_space < frames) {
-#ifdef AR9170_QUEUE_DEBUG
- printk(KERN_DEBUG "%s: tx quota reached queue:%d, "
- "remaining slots:%d, needed:%d\n",
- wiphy_name(ar->hw->wiphy), i, remaining_space,
- frames);
-#endif /* AR9170_QUEUE_DEBUG */
- frames = remaining_space;
}
- ar->tx_stats[i].len += frames;
- ar->tx_stats[i].count += frames;
spin_unlock_irqrestore(&ar->tx_stats_lock, flags);
if (!frames)
@@ -1773,7 +1762,7 @@ static void ar9170_tx(struct ar9170 *ar)
arinfo->timeout = jiffies +
msecs_to_jiffies(AR9170_TX_TIMEOUT);
- if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_inc(&ar->tx_ampdu_pending);
#ifdef AR9170_QUEUE_DEBUG
@@ -1784,7 +1773,7 @@ static void ar9170_tx(struct ar9170 *ar)
err = ar->tx(ar, skb);
if (unlikely(err)) {
- if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU)
atomic_dec(&ar->tx_ampdu_pending);
frames_failed++;
@@ -1950,7 +1939,7 @@ err_free:
}
static int ar9170_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
struct ath_common *common = &ar->common;
@@ -1963,8 +1952,8 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
goto unlock;
}
- ar->vif = conf->vif;
- memcpy(common->macaddr, conf->mac_addr, ETH_ALEN);
+ ar->vif = vif;
+ memcpy(common->macaddr, vif->addr, ETH_ALEN);
if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
ar->rx_software_decryption = true;
@@ -1984,7 +1973,7 @@ unlock:
}
static void ar9170_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ar9170 *ar = hw->priv;
@@ -2340,55 +2329,55 @@ out:
return err;
}
-static void ar9170_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
+static int ar9170_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct ar9170 *ar = hw->priv;
struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
unsigned int i;
- switch (cmd) {
- case STA_NOTIFY_ADD:
- memset(sta_info, 0, sizeof(*sta_info));
+ memset(sta_info, 0, sizeof(*sta_info));
- if (!sta->ht_cap.ht_supported)
- break;
+ if (!sta->ht_cap.ht_supported)
+ return 0;
- if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
- ar->global_ampdu_density = sta->ht_cap.ampdu_density;
+ if (sta->ht_cap.ampdu_density > ar->global_ampdu_density)
+ ar->global_ampdu_density = sta->ht_cap.ampdu_density;
- if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
- ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
+ if (sta->ht_cap.ampdu_factor < ar->global_ampdu_factor)
+ ar->global_ampdu_factor = sta->ht_cap.ampdu_factor;
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
- sta_info->agg[i].active = false;
- sta_info->agg[i].ssn = 0;
- sta_info->agg[i].retry = 0;
- sta_info->agg[i].tid = i;
- INIT_LIST_HEAD(&sta_info->agg[i].list);
- skb_queue_head_init(&sta_info->agg[i].queue);
- }
+ for (i = 0; i < AR9170_NUM_TID; i++) {
+ sta_info->agg[i].state = AR9170_TID_STATE_SHUTDOWN;
+ sta_info->agg[i].active = false;
+ sta_info->agg[i].ssn = 0;
+ sta_info->agg[i].tid = i;
+ INIT_LIST_HEAD(&sta_info->agg[i].list);
+ skb_queue_head_init(&sta_info->agg[i].queue);
+ }
- sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
- break;
+ sta_info->ampdu_max_len = 1 << (3 + sta->ht_cap.ampdu_factor);
- case STA_NOTIFY_REMOVE:
- if (!sta->ht_cap.ht_supported)
- break;
+ return 0;
+}
- for (i = 0; i < AR9170_NUM_TID; i++) {
- sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
- skb_queue_purge(&sta_info->agg[i].queue);
- }
+static int ar9170_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ar9170_sta_info *sta_info = (void *) sta->drv_priv;
+ unsigned int i;
- break;
+ if (!sta->ht_cap.ht_supported)
+ return 0;
- default:
- break;
+ for (i = 0; i < AR9170_NUM_TID; i++) {
+ sta_info->agg[i].state = AR9170_TID_STATE_INVALID;
+ skb_queue_purge(&sta_info->agg[i].queue);
}
+
+ return 0;
}
static int ar9170_get_stats(struct ieee80211_hw *hw,
@@ -2408,18 +2397,6 @@ static int ar9170_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static int ar9170_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *tx_stats)
-{
- struct ar9170 *ar = hw->priv;
-
- spin_lock_bh(&ar->tx_stats_lock);
- memcpy(tx_stats, ar->tx_stats, sizeof(tx_stats[0]) * hw->queues);
- spin_unlock_bh(&ar->tx_stats_lock);
-
- return 0;
-}
-
static int ar9170_conf_tx(struct ieee80211_hw *hw, u16 queue,
const struct ieee80211_tx_queue_params *param)
{
@@ -2519,9 +2496,9 @@ static const struct ieee80211_ops ar9170_ops = {
.bss_info_changed = ar9170_op_bss_info_changed,
.get_tsf = ar9170_op_get_tsf,
.set_key = ar9170_set_key,
- .sta_notify = ar9170_sta_notify,
+ .sta_add = ar9170_sta_add,
+ .sta_remove = ar9170_sta_remove,
.get_stats = ar9170_get_stats,
- .get_tx_stats = ar9170_get_tx_stats,
.ampdu_action = ar9170_ampdu_action,
};
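With the AR9170_TX_FLAG_* bits removed, the main.c hunks above make the TX completion path branch directly on the mac80211 flags carried in the frame's ieee80211_tx_info: no-ACK frames are completed immediately, A-MPDU frames go through the aggregation callback, and everything else is parked on the tx_status queue to wait for its ACK. A condensed sketch of that decision order, assuming <net/mac80211.h>; the enum and helper are placeholders, not driver code.

#include <net/mac80211.h>

enum example_tx_done {
	EXAMPLE_TX_FAILED,	/* status reported right away, no ACK expected */
	EXAMPLE_TX_AMPDU,	/* handled by the A-MPDU callback path */
	EXAMPLE_TX_WAIT_ACK,	/* queued on tx_status[queue] until the ACK */
};

static enum example_tx_done
example_tx_done_action(const struct ieee80211_tx_info *info)
{
	if (info->flags & IEEE80211_TX_CTL_NO_ACK)
		return EXAMPLE_TX_FAILED;
	if (info->flags & IEEE80211_TX_CTL_AMPDU)
		return EXAMPLE_TX_AMPDU;
	return EXAMPLE_TX_WAIT_ACK;
}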
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e0799d92405..0f361186b78 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -84,6 +84,8 @@ static struct usb_device_id ar9170_usb_ids[] = {
{ USB_DEVICE(0x0cde, 0x0023) },
/* Z-Com UB82 ABG */
{ USB_DEVICE(0x0cde, 0x0026) },
+ /* Sphairon Homelink 1202 */
+ { USB_DEVICE(0x0cde, 0x0027) },
/* Arcadyan WN7512 */
{ USB_DEVICE(0x083a, 0xf522) },
/* Planex GWUS300 */
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 9e05648356f..71fc960814f 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -74,7 +74,6 @@ struct ath_common;
struct ath_bus_ops {
void (*read_cachesize)(struct ath_common *common, int *csz);
- void (*cleanup)(struct ath_common *common);
bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
void (*bt_coex_prep)(struct ath_common *common);
};
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6a2a9676111..ac67f02e26d 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -535,13 +535,12 @@ struct ath5k_txq_info {
u32 tqi_cbr_period; /* Constant bit rate period */
u32 tqi_cbr_overflow_limit;
u32 tqi_burst_time;
- u32 tqi_ready_time; /* Not used */
+ u32 tqi_ready_time; /* Time queue waits after an event */
};
/*
* Transmit packet types.
* used on tx control descriptor
- * TODO: Use them inside base.c corectly
*/
enum ath5k_pkt_type {
AR5K_PKT_TYPE_NORMAL = 0,
@@ -1063,6 +1062,7 @@ struct ath5k_hw {
u32 ah_cw_min;
u32 ah_cw_max;
u32 ah_limit_tx_retries;
+ u8 ah_coverage_class;
/* Antenna Control */
u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1200,6 +1200,7 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
/* Protocol Control Unit Functions */
extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
+extern void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
/* BSSID Functions */
extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
@@ -1231,6 +1232,10 @@ extern int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah);
extern int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout);
extern unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah);
+/* Clock rate related functions */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah);
/* Key table (WEP) functions */
extern int ath5k_hw_reset_key(struct ath5k_hw *ah, u16 entry);
extern int ath5k_hw_is_key_valid(struct ath5k_hw *ah, u16 entry);
@@ -1310,24 +1315,6 @@ extern int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
* Functions used internaly
*/
-/*
- * Translate usec to hw clock units
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_htoclock(unsigned int usec, bool turbo)
-{
- return turbo ? (usec * 80) : (usec * 40);
-}
-
-/*
- * Translate hw clock units to usec
- * TODO: Half/quarter rate
- */
-static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
-{
- return turbo ? (clock / 80) : (clock / 40);
-}
-
static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
{
return &ah->common;
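The inline helpers removed above converted between microseconds and hardware clock units with a fixed 40/80 MHz clock selected by a turbo flag. Their replacements (declared above, implemented in the pcu.c hunks later in this patch) take the ath5k_hw pointer and derive the clock rate from the current channel: 40 MHz for 802.11a, 22 MHz for 802.11b, 44 MHz for 802.11g, doubled in turbo modes. A minimal sketch of the conversion itself, with the clock rate passed in explicitly (placeholder names, not driver code):

/* Sketch only: usec <-> clock-unit conversion at a given clock rate. */
static inline unsigned int example_usec_to_clock(unsigned int usec, unsigned int mhz)
{
	return usec * mhz;	/* e.g. 9 us at 44 MHz -> 396 clock units */
}

static inline unsigned int example_clock_to_usec(unsigned int clock, unsigned int mhz)
{
	return clock / mhz;	/* e.g. 396 clock units at 44 MHz -> 9 us */
}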
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index e63b7c40d0e..8dce0077b02 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -83,7 +83,7 @@ MODULE_VERSION("0.6.0 (EXPERIMENTAL)");
/* Known PCI ids */
-static const struct pci_device_id ath5k_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
@@ -225,9 +225,9 @@ static int ath5k_reset_wake(struct ath5k_softc *sc);
static int ath5k_start(struct ieee80211_hw *hw);
static void ath5k_stop(struct ieee80211_hw *hw);
static int ath5k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
static void ath5k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
static int ath5k_config(struct ieee80211_hw *hw, u32 changed);
static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
int mc_count, struct dev_addr_list *mc_list);
@@ -241,8 +241,6 @@ static int ath5k_set_key(struct ieee80211_hw *hw,
struct ieee80211_key_conf *key);
static int ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
-static int ath5k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
static u64 ath5k_get_tsf(struct ieee80211_hw *hw);
static void ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf);
static void ath5k_reset_tsf(struct ieee80211_hw *hw);
@@ -254,6 +252,8 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
u32 changes);
static void ath5k_sw_scan_start(struct ieee80211_hw *hw);
static void ath5k_sw_scan_complete(struct ieee80211_hw *hw);
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw,
+ u8 coverage_class);
static const struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
@@ -267,13 +267,13 @@ static const struct ieee80211_ops ath5k_hw_ops = {
.set_key = ath5k_set_key,
.get_stats = ath5k_get_stats,
.conf_tx = NULL,
- .get_tx_stats = ath5k_get_tx_stats,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
.reset_tsf = ath5k_reset_tsf,
.bss_info_changed = ath5k_bss_info_changed,
.sw_scan_start = ath5k_sw_scan_start,
.sw_scan_complete = ath5k_sw_scan_complete,
+ .set_coverage_class = ath5k_set_coverage_class,
};
/*
@@ -1246,6 +1246,29 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
return 0;
}
+static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
+{
+ struct ieee80211_hdr *hdr;
+ enum ath5k_pkt_type htype;
+ __le16 fc;
+
+ hdr = (struct ieee80211_hdr *)skb->data;
+ fc = hdr->frame_control;
+
+ if (ieee80211_is_beacon(fc))
+ htype = AR5K_PKT_TYPE_BEACON;
+ else if (ieee80211_is_probe_resp(fc))
+ htype = AR5K_PKT_TYPE_PROBE_RESP;
+ else if (ieee80211_is_atim(fc))
+ htype = AR5K_PKT_TYPE_ATIM;
+ else if (ieee80211_is_pspoll(fc))
+ htype = AR5K_PKT_TYPE_PSPOLL;
+ else
+ htype = AR5K_PKT_TYPE_NORMAL;
+
+ return htype;
+}
+
static int
ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
struct ath5k_txq *txq)
@@ -1300,7 +1323,8 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
sc->vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
- ieee80211_get_hdrlen_from_skb(skb), AR5K_PKT_TYPE_NORMAL,
+ ieee80211_get_hdrlen_from_skb(skb),
+ get_hw_packet_type(skb),
(sc->power_level * 2),
hw_rate,
info->control.rates[0].count, keyidx, ah->ah_tx_ant, flags,
@@ -1329,7 +1353,6 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
spin_lock_bh(&txq->lock);
list_add_tail(&bf->list, &txq->q);
- sc->tx_stats[txq->qnum].len++;
if (txq->link == NULL) /* is this first packet? */
ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
else /* no, so only link it */
@@ -1513,7 +1536,8 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
ret = ath5k_hw_get_tx_queueprops(ah, sc->bhalq, &qi);
if (ret)
- return ret;
+ goto err;
+
if (sc->opmode == NL80211_IFTYPE_AP ||
sc->opmode == NL80211_IFTYPE_MESH_POINT) {
/*
@@ -1540,10 +1564,25 @@ ath5k_beaconq_config(struct ath5k_softc *sc)
if (ret) {
ATH5K_ERR(sc, "%s: unable to update parameters for beacon "
"hardware queue!\n", __func__);
- return ret;
+ goto err;
}
+ ret = ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */
+ if (ret)
+ goto err;
+
+ /* reconfigure cabq with ready time to 80% of beacon_interval */
+ ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
+
+ qi.tqi_ready_time = (sc->bintval * 80) / 100;
+ ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
+ if (ret)
+ goto err;
- return ath5k_hw_reset_tx_queue(ah, sc->bhalq); /* push to h/w */;
+ ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
+err:
+ return ret;
}
static void
@@ -1562,7 +1601,6 @@ ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq)
ath5k_txbuf_free(sc, bf);
spin_lock_bh(&sc->txbuflock);
- sc->tx_stats[txq->qnum].len--;
list_move_tail(&bf->list, &sc->txbuf);
sc->txbuf_len++;
spin_unlock_bh(&sc->txbuflock);
@@ -1992,10 +2030,8 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
}
ieee80211_tx_status(sc->hw, skb);
- sc->tx_stats[txq->qnum].count++;
spin_lock(&sc->txbuflock);
- sc->tx_stats[txq->qnum].len--;
list_move_tail(&bf->list, &sc->txbuf);
sc->txbuf_len++;
spin_unlock(&sc->txbuflock);
@@ -2773,7 +2809,7 @@ static void ath5k_stop(struct ieee80211_hw *hw)
}
static int ath5k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath5k_softc *sc = hw->priv;
int ret;
@@ -2784,22 +2820,22 @@ static int ath5k_add_interface(struct ieee80211_hw *hw,
goto end;
}
- sc->vif = conf->vif;
+ sc->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_MONITOR:
- sc->opmode = conf->type;
+ sc->opmode = vif->type;
break;
default:
ret = -EOPNOTSUPP;
goto end;
}
- ath5k_hw_set_lladdr(sc->ah, conf->mac_addr);
+ ath5k_hw_set_lladdr(sc->ah, vif->addr);
ath5k_mode_setup(sc);
ret = 0;
@@ -2810,13 +2846,13 @@ end:
static void
ath5k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath5k_softc *sc = hw->priv;
u8 mac[ETH_ALEN] = {};
mutex_lock(&sc->lock);
- if (sc->vif != conf->vif)
+ if (sc->vif != vif)
goto end;
ath5k_hw_set_lladdr(sc->ah, mac);
@@ -3097,17 +3133,6 @@ ath5k_get_stats(struct ieee80211_hw *hw,
return 0;
}
-static int
-ath5k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct ath5k_softc *sc = hw->priv;
-
- memcpy(stats, &sc->tx_stats, sizeof(sc->tx_stats));
-
- return 0;
-}
-
static u64
ath5k_get_tsf(struct ieee80211_hw *hw)
{
@@ -3262,3 +3287,22 @@ static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
}
+
+/**
+ * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @hw: struct ieee80211_hw pointer
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
+ * coverage class. The values are persistent, they are restored after device
+ * reset.
+ */
+static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath5k_softc *sc = hw->priv;
+
+ mutex_lock(&sc->lock);
+ ath5k_hw_set_coverage_class(sc->ah, coverage_class);
+ mutex_unlock(&sc->lock);
+}
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 952b3a21bbc..7e1a88a5abd 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -117,7 +117,6 @@ struct ath5k_softc {
struct pci_dev *pdev; /* for dma mapping */
void __iomem *iobase; /* address of the device */
struct mutex lock; /* dev-level lock */
- struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES];
struct ieee80211_low_level_stats ll_stats;
struct ieee80211_hw *hw; /* IEEE 802.11 common */
struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 60f547503d7..67aa52e9bf9 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -77,6 +77,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
/* HP Compaq C700 (nitrousnrg@gmail.com) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
+ /* LiteOn AR5BXB63 (magooz@salug.it) */
+ { ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
/* IBM-specific AR5212 (all others) */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
/* Dell Vostro A860 (shahar@shahar-or.co.il) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 64fc1eb9b6d..aefe84f9c04 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -187,8 +187,8 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_ACK), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_ACK));
}
/**
@@ -200,12 +200,12 @@ unsigned int ath5k_hw_get_ack_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
@@ -218,8 +218,8 @@ int ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
{
ATH5K_TRACE(ah->ah_sc);
- return ath5k_hw_clocktoh(AR5K_REG_MS(ath5k_hw_reg_read(ah,
- AR5K_TIME_OUT), AR5K_TIME_OUT_CTS), ah->ah_turbo);
+ return ath5k_hw_clocktoh(ah, AR5K_REG_MS(ath5k_hw_reg_read(ah,
+ AR5K_TIME_OUT), AR5K_TIME_OUT_CTS));
}
/**
@@ -231,17 +231,97 @@ unsigned int ath5k_hw_get_cts_timeout(struct ath5k_hw *ah)
int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
ATH5K_TRACE(ah->ah_sc);
- if (ath5k_hw_clocktoh(AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS),
- ah->ah_turbo) <= timeout)
+ if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
+ <= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
- ath5k_hw_htoclock(timeout, ah->ah_turbo));
+ ath5k_hw_htoclock(ah, timeout));
return 0;
}
/**
+ * ath5k_hw_htoclock - Translate usec to hw clock units
+ *
+ * @ah: The &struct ath5k_hw
+ * @usec: value in microseconds
+ */
+unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
+{
+ return usec * ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_clocktoh - Translate hw clock units to usec
+ *
+ * @ah: The &struct ath5k_hw
+ * @clock: value in hw clock units
+ */
+unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
+{
+ return clock / ath5k_hw_get_clockrate(ah);
+}
+
+/**
+ * ath5k_hw_get_clockrate - Get the clock rate for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_clockrate(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+ int clock;
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ clock = 40; /* 802.11a */
+ else if (channel->hw_value & CHANNEL_CCK)
+ clock = 22; /* 802.11b */
+ else
+ clock = 44; /* 802.11g */
+
+ /* Clock rate in turbo modes is twice the normal rate */
+ if (channel->hw_value & CHANNEL_TURBO)
+ clock *= 2;
+
+ return clock;
+}
+
+/**
+ * ath5k_hw_get_default_slottime - Get the default slot time for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 6; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_CCK)
+ return 20; /* 802.11b */
+
+ return 9; /* 802.11 a/g */
+}
+
+/**
+ * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
+ *
+ * @ah: The &struct ath5k_hw
+ */
+unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
+{
+ struct ieee80211_channel *channel = ah->ah_current_channel;
+
+ if (channel->hw_value & CHANNEL_TURBO)
+ return 8; /* both turbo modes */
+
+ if (channel->hw_value & CHANNEL_5GHZ)
+ return 16; /* 802.11a */
+
+ return 10; /* 802.11 b/g */
+}
+
+/**
* ath5k_hw_set_lladdr - Set station id
*
* @ah: The &struct ath5k_hw
@@ -1050,3 +1130,24 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
return 0;
}
+/**
+ * ath5k_hw_set_coverage_class - Set IEEE 802.11 coverage class
+ *
+ * @ah: The &struct ath5k_hw
+ * @coverage_class: IEEE 802.11 coverage class number
+ *
+ * Sets slot time, ACK timeout and CTS timeout for given coverage class.
+ */
+void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
+{
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
+ int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
+ int cts_timeout = ack_timeout;
+
+ ath5k_hw_set_slot_time(ah, slot_time);
+ ath5k_hw_set_ack_timeout(ah, ack_timeout);
+ ath5k_hw_set_cts_timeout(ah, cts_timeout);
+
+ ah->ah_coverage_class = coverage_class;
+}
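ath5k_hw_set_coverage_class() above applies the timing rule from IEEE 802.11-2007, 17.3.8.6: slot time grows by 3 us per coverage-class step, and the ACK and CTS timeouts follow as SIFS plus the new slot time. A small sketch of that arithmetic with a worked example; the function name is a placeholder and the defaults come from the helpers added above.

/* Sketch: slot time and ACK/CTS timeout per coverage class.
 * 802.11g defaults (slot 9 us, SIFS 10 us), coverage class 2:
 *   slot_time   = 9 + 3 * 2 = 15 us
 *   ack_timeout = cts_timeout = 10 + 15 = 25 us
 */
static void example_coverage_timings(unsigned int default_slot_us,
				     unsigned int default_sifs_us,
				     unsigned int coverage_class,
				     unsigned int *slot_us, unsigned int *ack_us)
{
	*slot_us = default_slot_us + 3 * coverage_class;
	*ack_us = default_sifs_us + *slot_us;	/* CTS timeout uses the same value */
}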
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index eeebb9aef20..9122a8556f4 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -408,12 +408,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
break;
case AR5K_TX_QUEUE_CAB:
+ /* XXX: use BCN_SENT_GT, if we can figure out how */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
- AR5K_QCU_MISC_FRSHED_BCN_SENT_GT |
+ AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_DIS |
AR5K_QCU_MISC_CBREXP_BCN_DIS);
- ath5k_hw_reg_write(ah, ((AR5K_TUNE_BEACON_INTERVAL -
+ ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
(AR5K_TUNE_SW_BEACON_RESP -
AR5K_TUNE_DMA_BEACON_RESP) -
AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
@@ -520,12 +521,16 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
*/
unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
{
+ unsigned int slot_time_clock;
+
ATH5K_TRACE(ah->ah_sc);
+
if (ah->ah_version == AR5K_AR5210)
- return ath5k_hw_clocktoh(ath5k_hw_reg_read(ah,
- AR5K_SLOT_TIME) & 0xffff, ah->ah_turbo);
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_SLOT_TIME);
else
- return ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT) & 0xffff;
+ slot_time_clock = ath5k_hw_reg_read(ah, AR5K_DCU_GBL_IFS_SLOT);
+
+ return ath5k_hw_clocktoh(ah, slot_time_clock & 0xffff);
}
/*
@@ -533,15 +538,17 @@ unsigned int ath5k_hw_get_slot_time(struct ath5k_hw *ah)
*/
int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time)
{
+ u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
+
ATH5K_TRACE(ah->ah_sc);
- if (slot_time < AR5K_SLOT_TIME_9 || slot_time > AR5K_SLOT_TIME_MAX)
+
+ if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
if (ah->ah_version == AR5K_AR5210)
- ath5k_hw_reg_write(ah, ath5k_hw_htoclock(slot_time,
- ah->ah_turbo), AR5K_SLOT_TIME);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
else
- ath5k_hw_reg_write(ah, slot_time, AR5K_DCU_GBL_IFS_SLOT);
+ ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
return 0;
}
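The CAB (content-after-beacon) queue changes above stop hard-coding AR5K_TUNE_BEACON_INTERVAL and instead consume the ready time that ath5k_beaconq_config() (base.c, earlier in this patch) now programs to 80% of the beacon interval, so buffered multicast traffic is sent for most of, but not the whole, beacon period. A one-line sketch of that calculation, under the assumption that the interval is expressed in TU as in the driver:

/* Sketch: ready time as programmed by ath5k_beaconq_config().
 * For a common 100 TU beacon interval: (100 * 80) / 100 = 80 TU. */
static inline unsigned int example_cab_ready_time(unsigned int beacon_interval_tu)
{
	return (beacon_interval_tu * 80) / 100;
}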
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 62954fc7786..a35a7db0fc4 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -60,12 +60,11 @@ static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
!(channel->hw_value & CHANNEL_OFDM));
/* Get coefficient
- * ALGO: coef = (5 * clock * carrier_freq) / 2)
+ * ALGO: coef = (5 * clock / carrier_freq) / 2
* we scale coef by shifting clock value by 24 for
* better precision since we use integers */
/* TODO: Half/quarter rate */
- clock = ath5k_hw_htoclock(1, channel->hw_value & CHANNEL_TURBO);
-
+ clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40;
coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
/* Get exponent
@@ -1317,6 +1316,10 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
/* Restore antenna mode */
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
+ /* Restore slot time and ACK timeouts */
+ if (ah->ah_coverage_class > 0)
+ ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
+
/*
* Configure QCUs/DCUs
*/
@@ -1371,8 +1374,9 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
* Set clocks to 32KHz operation and use an
* external 32KHz crystal when sleeping if one
* exists */
- if (ah->ah_version == AR5K_AR5212)
- ath5k_hw_set_sleep_clock(ah, true);
+ if (ah->ah_version == AR5K_AR5212 &&
+ ah->ah_op_mode != NL80211_IFTYPE_AP)
+ ath5k_hw_set_sleep_clock(ah, true);
/*
* Disable beacons and reset the register
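The reset.c hunk above also corrects the OFDM timing comment (the coefficient divides by the carrier frequency rather than multiplying by it) and feeds in the per-mode clock directly. The clock value is shifted left by 24 bits before the division so the fractional coefficient survives integer math. A host-side illustration of the same arithmetic, not driver code, using a 64-bit intermediate to sidestep overflow:

/* Sketch: for clock = 40 MHz and a 5180 MHz channel,
 * (5 * (40 << 24) / 2) / 5180 = 323884, i.e. about 0.0193 * 2^24. */
static inline unsigned long long example_ofdm_coef_scaled(unsigned int clock_mhz,
							   unsigned int center_freq_mhz)
{
	unsigned long long scaled = (unsigned long long)clock_mhz << 24;

	return (5 * scaled / 2) / center_freq_mhz;
}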
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index 4985b2b1b0a..6b50d5eb9ec 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,4 +1,6 @@
ath9k-y += beacon.o \
+ gpio.o \
+ init.o \
main.o \
recv.o \
xmit.o \
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 329e6bc137a..ca4994f1315 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -27,12 +27,6 @@ static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
*csz = L1_CACHE_BYTES >> 2;
}
-static void ath_ahb_cleanup(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *)common->priv;
- iounmap(sc->mem);
-}
-
static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath_softc *sc = (struct ath_softc *)common->priv;
@@ -54,8 +48,6 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
static struct ath_bus_ops ath_ahb_bus_ops = {
.read_cachesize = ath_ahb_read_cachesize,
- .cleanup = ath_ahb_cleanup,
-
.eeprom_read = ath_ahb_eeprom_read,
};
@@ -121,16 +113,19 @@ static int ath_ahb_probe(struct platform_device *pdev)
sc->mem = mem;
sc->irq = irq;
- ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
+ /* Will be cleared in ath9k_start() */
+ sc->sc_flags |= SC_OP_INVALID;
+
+ ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
+ dev_err(&pdev->dev, "request_irq failed\n");
goto err_free_hw;
}
- ret = request_irq(irq, ath_isr, IRQF_SHARED, "ath9k", sc);
+ ret = ath9k_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
if (ret) {
- dev_err(&pdev->dev, "request_irq failed\n");
- goto err_detach;
+ dev_err(&pdev->dev, "failed to initialize device\n");
+ goto err_irq;
}
ah = sc->sc_ah;
@@ -143,8 +138,8 @@ static int ath_ahb_probe(struct platform_device *pdev)
return 0;
- err_detach:
- ath_detach(sc);
+ err_irq:
+ free_irq(irq, sc);
err_free_hw:
ieee80211_free_hw(hw);
platform_set_drvdata(pdev, NULL);
@@ -161,8 +156,12 @@ static int ath_ahb_remove(struct platform_device *pdev)
if (hw) {
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ void __iomem *mem = sc->mem;
- ath_cleanup(sc);
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
+ iounmap(mem);
platform_set_drvdata(pdev, NULL);
}
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1597a42731e..83c7ea4c007 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -267,6 +267,7 @@ void ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
u16 tid, u16 *ssn);
void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
void ath_tx_aggr_resume(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
+void ath9k_enable_ps(struct ath_softc *sc);
/********/
/* VIFs */
@@ -341,6 +342,12 @@ int ath_beaconq_config(struct ath_softc *sc);
#define ATH_LONG_CALINTERVAL 30000 /* 30 seconds */
#define ATH_RESTART_CALINTERVAL 1200000 /* 20 minutes */
+void ath_ani_calibrate(unsigned long data);
+
+/**********/
+/* BTCOEX */
+/**********/
+
/* Defines the BT AR_BT_COEX_WGHT used */
enum ath_stomp_type {
ATH_BTCOEX_NO_STOMP,
@@ -358,9 +365,14 @@ struct ath_btcoex {
int bt_stomp_type; /* Types of BT stomping */
u32 btcoex_no_stomp; /* in usec */
u32 btcoex_period; /* in usec */
+ u32 btscan_no_stomp; /* in usec */
struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
};
+int ath_init_btcoex_timer(struct ath_softc *sc);
+void ath9k_btcoex_timer_resume(struct ath_softc *sc);
+void ath9k_btcoex_timer_pause(struct ath_softc *sc);
+
/********************/
/* LED Control */
/********************/
@@ -385,6 +397,9 @@ struct ath_led {
bool registered;
};
+void ath_init_leds(struct ath_softc *sc);
+void ath_deinit_leds(struct ath_softc *sc);
+
/********************/
/* Main driver core */
/********************/
@@ -403,26 +418,29 @@ struct ath_led {
#define ATH_TXPOWER_MAX 100 /* .5 dBm units */
#define ATH_RATE_DUMMY_MARKER 0
-#define SC_OP_INVALID BIT(0)
-#define SC_OP_BEACONS BIT(1)
-#define SC_OP_RXAGGR BIT(2)
-#define SC_OP_TXAGGR BIT(3)
-#define SC_OP_FULL_RESET BIT(4)
-#define SC_OP_PREAMBLE_SHORT BIT(5)
-#define SC_OP_PROTECT_ENABLE BIT(6)
-#define SC_OP_RXFLUSH BIT(7)
-#define SC_OP_LED_ASSOCIATED BIT(8)
-#define SC_OP_WAIT_FOR_BEACON BIT(12)
-#define SC_OP_LED_ON BIT(13)
-#define SC_OP_SCANNING BIT(14)
-#define SC_OP_TSF_RESET BIT(15)
-#define SC_OP_WAIT_FOR_CAB BIT(16)
-#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17)
-#define SC_OP_WAIT_FOR_TX_ACK BIT(18)
-#define SC_OP_BEACON_SYNC BIT(19)
-#define SC_OP_BT_PRIORITY_DETECTED BIT(21)
-#define SC_OP_NULLFUNC_COMPLETED BIT(22)
-#define SC_OP_PS_ENABLED BIT(23)
+#define SC_OP_INVALID BIT(0)
+#define SC_OP_BEACONS BIT(1)
+#define SC_OP_RXAGGR BIT(2)
+#define SC_OP_TXAGGR BIT(3)
+#define SC_OP_FULL_RESET BIT(4)
+#define SC_OP_PREAMBLE_SHORT BIT(5)
+#define SC_OP_PROTECT_ENABLE BIT(6)
+#define SC_OP_RXFLUSH BIT(7)
+#define SC_OP_LED_ASSOCIATED BIT(8)
+#define SC_OP_LED_ON BIT(9)
+#define SC_OP_SCANNING BIT(10)
+#define SC_OP_TSF_RESET BIT(11)
+#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
+#define SC_OP_BT_SCAN BIT(13)
+
+/* Powersave flags */
+#define PS_WAIT_FOR_BEACON BIT(0)
+#define PS_WAIT_FOR_CAB BIT(1)
+#define PS_WAIT_FOR_PSPOLL_DATA BIT(2)
+#define PS_WAIT_FOR_TX_ACK BIT(3)
+#define PS_BEACON_SYNC BIT(4)
+#define PS_NULLFUNC_COMPLETED BIT(5)
+#define PS_ENABLED BIT(6)
struct ath_wiphy;
struct ath_rate_table;
@@ -453,16 +471,17 @@ struct ath_softc {
int irq;
spinlock_t sc_resetlock;
spinlock_t sc_serial_rw;
- spinlock_t ani_lock;
spinlock_t sc_pm_lock;
struct mutex mutex;
u32 intrstatus;
u32 sc_flags; /* SC_OP_* */
+ u16 ps_flags; /* PS_* */
u16 curtxpow;
u8 nbcnvifs;
u16 nvifs;
bool ps_enabled;
+ bool ps_idle;
unsigned long ps_usecount;
enum ath9k_int imask;
@@ -509,6 +528,7 @@ struct ath_wiphy {
int chan_is_ht;
};
+void ath9k_tasklet(unsigned long data);
int ath_reset(struct ath_softc *sc, bool retry_tx);
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
@@ -519,21 +539,16 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
common->bus_ops->read_cachesize(common, csz);
}
-static inline void ath_bus_cleanup(struct ath_common *common)
-{
- common->bus_ops->cleanup(common);
-}
-
extern struct ieee80211_ops ath9k_ops;
+extern int modparam_nohwcrypt;
irqreturn_t ath_isr(int irq, void *dev);
-void ath_cleanup(struct ath_softc *sc);
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
const struct ath_bus_ops *bus_ops);
-void ath_detach(struct ath_softc *sc);
+void ath9k_deinit_device(struct ath_softc *sc);
const char *ath_mac_bb_name(u32 mac_bb_version);
const char *ath_rf_name(u16 rf_version);
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
struct ath9k_channel *ichan);
void ath_update_chainmask(struct ath_softc *sc, int is_ht);
@@ -542,6 +557,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw);
void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw);
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode);
#ifdef CONFIG_PCI
int ath_pci_init(void);
@@ -583,4 +599,8 @@ void ath_mac80211_stop_queue(struct ath_softc *sc, u16 skb_queue);
void ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue);
int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
+
+void ath_start_rfkill_poll(struct ath_softc *sc);
+extern void ath9k_rfkill_poll_state(struct ieee80211_hw *hw);
+
#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 1660ef17aaf..b4a31a43a62 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -62,7 +62,7 @@ int ath_beaconq_config(struct ath_softc *sc)
* Beacons are always sent out at the lowest rate, and are not retried.
*/
static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
- struct ath_buf *bf)
+ struct ath_buf *bf, int rateidx)
{
struct sk_buff *skb = bf->bf_mpdu;
struct ath_hw *ah = sc->sc_ah;
@@ -96,9 +96,9 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
ds->ds_data = bf->bf_buf_addr;
sband = &sc->sbands[common->hw->conf.channel->band];
- rate = sband->bitrates[0].hw_value;
+ rate = sband->bitrates[rateidx].hw_value;
if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
- rate |= sband->bitrates[0].hw_value_short;
+ rate |= sband->bitrates[rateidx].hw_value_short;
ath9k_hw_set11n_txdesc(ah, ds, skb->len + FCS_LEN,
ATH9K_PKT_TYPE_BEACON,
@@ -206,7 +206,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
}
}
- ath_beacon_setup(sc, avp, bf);
+ ath_beacon_setup(sc, avp, bf, info->control.rates[0].idx);
while (skb) {
ath_tx_cabq(hw, skb);
@@ -237,7 +237,7 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
bf = avp->av_bcbuf;
skb = bf->bf_mpdu;
- ath_beacon_setup(sc, avp, bf);
+ ath_beacon_setup(sc, avp, bf, 0);
/* NB: caller is known to have already stopped tx dma */
ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
@@ -480,7 +480,8 @@ void ath_beacon_tasklet(unsigned long data)
sc->beacon.updateslot = COMMIT; /* commit next beacon */
sc->beacon.slotupdate = slot;
} else if (sc->beacon.updateslot == COMMIT && sc->beacon.slotupdate == slot) {
- ath9k_hw_setslottime(sc->sc_ah, sc->beacon.slottime);
+ ah->slottime = sc->beacon.slottime;
+ ath9k_hw_init_global_settings(ah);
sc->beacon.updateslot = OK;
}
if (bfaddr != 0) {
@@ -525,16 +526,13 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
{
u32 nexttbtt, intval;
- /* Configure the timers only when the TSF has to be reset */
-
- if (!(sc->sc_flags & SC_OP_TSF_RESET))
- return;
-
/* NB: the beacon interval is kept internally in TU's */
intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
intval /= ATH_BCBUF; /* for staggered beacons */
nexttbtt = intval;
- intval |= ATH9K_BEACON_RESET_TSF;
+
+ if (sc->sc_flags & SC_OP_TSF_RESET)
+ intval |= ATH9K_BEACON_RESET_TSF;
/*
* In AP mode we enable the beacon timers and SWBA interrupts to
@@ -576,6 +574,13 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
u64 tsf;
int num_beacons, offset, dtim_dec_count, cfp_dec_count;
+ /* No need to configure beacon if we are not associated */
+ if (!common->curaid) {
+ ath_print(common, ATH_DBG_BEACON,
+ "STA is not yet associated..skipping beacon config\n");
+ return;
+ }
+
memset(&bs, 0, sizeof(bs));
intval = conf->beacon_interval & ATH9K_BEACON_PERIOD;
@@ -738,7 +743,6 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
enum nl80211_iftype iftype;
/* Setup the beacon configuration parameters */
-
if (vif) {
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 1ba31a73317..1ee5a15ccbb 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -25,10 +25,12 @@
#define ATH_BTCOEX_DEF_BT_PERIOD 45
#define ATH_BTCOEX_DEF_DUTY_CYCLE 55
+#define ATH_BTCOEX_BTSCAN_DUTY_CYCLE 90
#define ATH_BTCOEX_BMISS_THRESH 50
#define ATH_BT_PRIORITY_TIME_THRESHOLD 1000 /* ms */
#define ATH_BT_CNT_THRESHOLD 3
+#define ATH_BT_CNT_SCAN_THRESHOLD 15
enum ath_btcoex_scheme {
ATH_BTCOEX_CFG_NONE,
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index b66f72dbf7b..42d2a506845 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -75,17 +75,24 @@ static const struct file_operations fops_debug = {
#endif
+#define DMA_BUF_LEN 1024
+
static ssize_t read_file_dma(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath_softc *sc = file->private_data;
struct ath_hw *ah = sc->sc_ah;
- char buf[1024];
+ char *buf;
+ int retval;
unsigned int len = 0;
u32 val[ATH9K_NUM_DMA_DEBUG_REGS];
int i, qcuOffset = 0, dcuOffset = 0;
u32 *qcuBase = &val[0], *dcuBase = &val[4];
+ buf = kmalloc(DMA_BUF_LEN, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
ath9k_ps_wakeup(sc);
REG_WRITE_D(ah, AR_MACMISC,
@@ -93,20 +100,20 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(AR_MACMISC_MISC_OBS_BUS_1 <<
AR_MACMISC_MISC_OBS_BUS_MSB_S)));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"Raw DMA Debug values:\n");
for (i = 0; i < ATH9K_NUM_DMA_DEBUG_REGS; i++) {
if (i % 4 == 0)
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
- len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "%d: %08x ",
i, val[i]);
}
- len += snprintf(buf + len, sizeof(buf) - len, "\n\n");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"Num QCU: chain_st fsp_ok fsp_st DCU: chain_st\n");
for (i = 0; i < ATH9K_NUM_QUEUES; i++, qcuOffset += 4, dcuOffset += 5) {
@@ -120,7 +127,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
dcuBase++;
}
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"%2d %2x %1x %2x %2x\n",
i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
(*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
@@ -128,35 +135,37 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
(*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
}
- len += snprintf(buf + len, sizeof(buf) - len, "\n");
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "\n");
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"qcu_stitch state: %2x qcu_fetch state: %2x\n",
(val[3] & 0x003c0000) >> 18, (val[3] & 0x03c00000) >> 22);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"qcu_complete state: %2x dcu_complete state: %2x\n",
(val[3] & 0x1c000000) >> 26, (val[6] & 0x3));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"dcu_arb state: %2x dcu_fp state: %2x\n",
(val[5] & 0x06000000) >> 25, (val[5] & 0x38000000) >> 27);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"chan_idle_dur: %3d chan_idle_dur_valid: %1d\n",
(val[6] & 0x000003fc) >> 2, (val[6] & 0x00000400) >> 10);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_valid_0: %1d txfifo_valid_1: %1d\n",
(val[6] & 0x00000800) >> 11, (val[6] & 0x00001000) >> 12);
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"txfifo_dcu_num_0: %2d txfifo_dcu_num_1: %2d\n",
(val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
- len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n",
+ len += snprintf(buf + len, DMA_BUF_LEN - len, "pcu observe: 0x%x \n",
REG_READ_D(ah, AR_OBS_BUS_1));
- len += snprintf(buf + len, sizeof(buf) - len,
+ len += snprintf(buf + len, DMA_BUF_LEN - len,
"AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
ath9k_ps_restore(sc);
- return simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return retval;
}
static const struct file_operations fops_dma = {
@@ -289,23 +298,49 @@ static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
if (sc->cur_rate_table == NULL)
return 0;
- max = 80 + sc->cur_rate_table->rate_cnt * 64;
+ max = 80 + sc->cur_rate_table->rate_cnt * 1024;
buf = kmalloc(max + 1, GFP_KERNEL);
if (buf == NULL)
return 0;
buf[max] = 0;
- len += sprintf(buf, "%5s %15s %8s %9s %3s\n\n", "Rate", "Success",
- "Retries", "XRetries", "PER");
+ len += sprintf(buf, "%6s %6s %6s "
+ "%10s %10s %10s %10s\n",
+ "HT", "MCS", "Rate",
+ "Success", "Retries", "XRetries", "PER");
for (i = 0; i < sc->cur_rate_table->rate_cnt; i++) {
u32 ratekbps = sc->cur_rate_table->info[i].ratekbps;
struct ath_rc_stats *stats = &sc->debug.stats.rcstats[i];
+ char mcs[5];
+ char htmode[5];
+ int used_mcs = 0, used_htmode = 0;
+
+ if (WLAN_RC_PHY_HT(sc->cur_rate_table->info[i].phy)) {
+ used_mcs = snprintf(mcs, 5, "%d",
+ sc->cur_rate_table->info[i].ratecode);
+
+ if (WLAN_RC_PHY_40(sc->cur_rate_table->info[i].phy))
+ used_htmode = snprintf(htmode, 5, "HT40");
+ else if (WLAN_RC_PHY_20(sc->cur_rate_table->info[i].phy))
+ used_htmode = snprintf(htmode, 5, "HT20");
+ else
+ used_htmode = snprintf(htmode, 5, "????");
+ }
+
+ mcs[used_mcs] = '\0';
+ htmode[used_htmode] = '\0';
len += snprintf(buf + len, max - len,
- "%3u.%d: %8u %8u %8u %8u\n", ratekbps / 1000,
- (ratekbps % 1000) / 100, stats->success,
- stats->retries, stats->xretries,
+ "%6s %6s %3u.%d: "
+ "%10u %10u %10u %10u\n",
+ htmode,
+ mcs,
+ ratekbps / 1000,
+ (ratekbps % 1000) / 100,
+ stats->success,
+ stats->retries,
+ stats->xretries,
stats->per);
}
@@ -554,6 +589,116 @@ static const struct file_operations fops_xmit = {
.owner = THIS_MODULE
};
+static ssize_t read_file_recv(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+#define PHY_ERR(s, p) \
+ len += snprintf(buf + len, size - len, "%18s : %10u\n", s, \
+ sc->debug.stats.rxstats.phy_err_stats[p]);
+
+ struct ath_softc *sc = file->private_data;
+ char *buf;
+ unsigned int len = 0, size = 1152;
+ ssize_t retval = 0;
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (buf == NULL)
+ return 0;
+
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "CRC ERR",
+ sc->debug.stats.rxstats.crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT CRC ERR",
+ sc->debug.stats.rxstats.decrypt_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PHY ERR",
+ sc->debug.stats.rxstats.phy_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "MIC ERR",
+ sc->debug.stats.rxstats.mic_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "PRE-DELIM CRC ERR",
+ sc->debug.stats.rxstats.pre_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "POST-DELIM CRC ERR",
+ sc->debug.stats.rxstats.post_delim_crc_err);
+ len += snprintf(buf + len, size - len,
+ "%18s : %10u\n", "DECRYPT BUSY ERR",
+ sc->debug.stats.rxstats.decrypt_busy_err);
+
+ PHY_ERR("UNDERRUN", ATH9K_PHYERR_UNDERRUN);
+ PHY_ERR("TIMING", ATH9K_PHYERR_TIMING);
+ PHY_ERR("PARITY", ATH9K_PHYERR_PARITY);
+ PHY_ERR("RATE", ATH9K_PHYERR_RATE);
+ PHY_ERR("LENGTH", ATH9K_PHYERR_LENGTH);
+ PHY_ERR("RADAR", ATH9K_PHYERR_RADAR);
+ PHY_ERR("SERVICE", ATH9K_PHYERR_SERVICE);
+ PHY_ERR("TOR", ATH9K_PHYERR_TOR);
+ PHY_ERR("OFDM-TIMING", ATH9K_PHYERR_OFDM_TIMING);
+ PHY_ERR("OFDM-SIGNAL-PARITY", ATH9K_PHYERR_OFDM_SIGNAL_PARITY);
+ PHY_ERR("OFDM-RATE", ATH9K_PHYERR_OFDM_RATE_ILLEGAL);
+ PHY_ERR("OFDM-LENGTH", ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL);
+ PHY_ERR("OFDM-POWER-DROP", ATH9K_PHYERR_OFDM_POWER_DROP);
+ PHY_ERR("OFDM-SERVICE", ATH9K_PHYERR_OFDM_SERVICE);
+ PHY_ERR("OFDM-RESTART", ATH9K_PHYERR_OFDM_RESTART);
+ PHY_ERR("FALSE-RADAR-EXT", ATH9K_PHYERR_FALSE_RADAR_EXT);
+ PHY_ERR("CCK-TIMING", ATH9K_PHYERR_CCK_TIMING);
+ PHY_ERR("CCK-HEADER-CRC", ATH9K_PHYERR_CCK_HEADER_CRC);
+ PHY_ERR("CCK-RATE", ATH9K_PHYERR_CCK_RATE_ILLEGAL);
+ PHY_ERR("CCK-SERVICE", ATH9K_PHYERR_CCK_SERVICE);
+ PHY_ERR("CCK-RESTART", ATH9K_PHYERR_CCK_RESTART);
+ PHY_ERR("CCK-LENGTH", ATH9K_PHYERR_CCK_LENGTH_ILLEGAL);
+ PHY_ERR("CCK-POWER-DROP", ATH9K_PHYERR_CCK_POWER_DROP);
+ PHY_ERR("HT-CRC", ATH9K_PHYERR_HT_CRC_ERROR);
+ PHY_ERR("HT-LENGTH", ATH9K_PHYERR_HT_LENGTH_ILLEGAL);
+ PHY_ERR("HT-RATE", ATH9K_PHYERR_HT_RATE_ILLEGAL);
+
+ retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+
+ return retval;
+
+#undef PHY_ERR
+}
+
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf)
+{
+#define RX_STAT_INC(c) sc->debug.stats.rxstats.c++
+#define RX_PHY_ERR_INC(c) sc->debug.stats.rxstats.phy_err_stats[c]++
+
+ struct ath_desc *ds = bf->bf_desc;
+ u32 phyerr;
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_CRC)
+ RX_STAT_INC(crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_DECRYPT)
+ RX_STAT_INC(decrypt_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_MIC)
+ RX_STAT_INC(mic_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_PRE)
+ RX_STAT_INC(pre_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DELIM_CRC_POST)
+ RX_STAT_INC(post_delim_crc_err);
+ if (ds->ds_rxstat.rs_status & ATH9K_RX_DECRYPT_BUSY)
+ RX_STAT_INC(decrypt_busy_err);
+
+ if (ds->ds_rxstat.rs_status & ATH9K_RXERR_PHY) {
+ RX_STAT_INC(phy_err);
+ phyerr = ds->ds_rxstat.rs_phyerr & 0x24;
+ RX_PHY_ERR_INC(phyerr);
+ }
+
+#undef RX_STAT_INC
+#undef RX_PHY_ERR_INC
+}
+
+static const struct file_operations fops_recv = {
+ .read = read_file_recv,
+ .open = ath9k_debugfs_open,
+ .owner = THIS_MODULE
+};
+
int ath9k_init_debug(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -606,6 +751,13 @@ int ath9k_init_debug(struct ath_hw *ah)
if (!sc->debug.debugfs_xmit)
goto err;
+ sc->debug.debugfs_recv = debugfs_create_file("recv",
+ S_IRUSR,
+ sc->debug.debugfs_phy,
+ sc, &fops_recv);
+ if (!sc->debug.debugfs_recv)
+ goto err;
+
return 0;
err:
ath9k_exit_debug(ah);
@@ -617,6 +769,7 @@ void ath9k_exit_debug(struct ath_hw *ah)
struct ath_common *common = ath9k_hw_common(ah);
struct ath_softc *sc = (struct ath_softc *) common->priv;
+ debugfs_remove(sc->debug.debugfs_recv);
debugfs_remove(sc->debug.debugfs_xmit);
debugfs_remove(sc->debug.debugfs_wiphy);
debugfs_remove(sc->debug.debugfs_rcstat);
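read_file_dma() and the new read_file_recv() above format their output into a kmalloc'd buffer and hand it back through simple_read_from_buffer(), instead of building a 1 KB string on the stack. A minimal sketch of that debugfs read pattern follows; names are placeholders, <linux/fs.h>, <linux/slab.h> and <linux/uaccess.h> are assumed, and it returns -ENOMEM on allocation failure where the patch returns 0.

static ssize_t example_debugfs_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
{
	char *buf;
	unsigned int len = 0;
	ssize_t ret;

	buf = kmalloc(1024, GFP_KERNEL);	/* heap, not the stack frame */
	if (!buf)
		return -ENOMEM;

	len += scnprintf(buf + len, 1024 - len, "some counter : %10u\n", 42);

	/* copies at most count bytes starting at *ppos and advances it */
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
	kfree(buf);
	return ret;
}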
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 536663e3ee1..86780e68b31 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -116,10 +116,35 @@ struct ath_tx_stats {
u32 delim_underrun;
};
+/**
+ * struct ath_rx_stats - RX Statistics
+ * @crc_err: No. of frames with incorrect CRC value
+ * @decrypt_crc_err: No. of frames whose CRC check failed after
+ decryption process completed
+ * @phy_err: No. of frames whose reception failed because the PHY
+ encountered an error
+ * @mic_err: No. of frames that failed TKIP MIC verification
+ * @pre_delim_crc_err: Pre-Frame delimiter CRC error detections
+ * @post_delim_crc_err: Post-Frame delimiter CRC error detections
+ * @decrypt_busy_err: Decryption interruptions counter
+ * @phy_err_stats: Individual PHY error statistics
+ */
+struct ath_rx_stats {
+ u32 crc_err;
+ u32 decrypt_crc_err;
+ u32 phy_err;
+ u32 mic_err;
+ u32 pre_delim_crc_err;
+ u32 post_delim_crc_err;
+ u32 decrypt_busy_err;
+ u32 phy_err_stats[ATH9K_PHYERR_MAX];
+};
+
struct ath_stats {
struct ath_interrupt_stats istats;
struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
struct ath_tx_stats txstats[ATH9K_NUM_TX_QUEUES];
+ struct ath_rx_stats rxstats;
};
struct ath9k_debug {
@@ -130,6 +155,7 @@ struct ath9k_debug {
struct dentry *debugfs_rcstat;
struct dentry *debugfs_wiphy;
struct dentry *debugfs_xmit;
+ struct dentry *debugfs_recv;
struct ath_stats stats;
};
@@ -142,6 +168,7 @@ void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
void ath_debug_stat_rc(struct ath_softc *sc, int final_rate);
void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq,
struct ath_buf *bf);
+void ath_debug_stat_rx(struct ath_softc *sc, struct ath_buf *bf);
void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per);
@@ -181,6 +208,11 @@ static inline void ath_debug_stat_tx(struct ath_softc *sc,
{
}
+static inline void ath_debug_stat_rx(struct ath_softc *sc,
+ struct ath_buf *bf)
+{
+}
+
static inline void ath_debug_stat_retries(struct ath_softc *sc, int rix,
int xretries, int retries, u8 per)
{
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
new file mode 100644
index 00000000000..deab8beb068
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -0,0 +1,442 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+/********************************/
+/* LED functions */
+/********************************/
+
+static void ath_led_blink_work(struct work_struct *work)
+{
+ struct ath_softc *sc = container_of(work, struct ath_softc,
+ ath_led_blink_work.work);
+
+ if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
+ return;
+
+ if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
+ (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ else
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
+
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work,
+ (sc->sc_flags & SC_OP_LED_ON) ?
+ msecs_to_jiffies(sc->led_off_duration) :
+ msecs_to_jiffies(sc->led_on_duration));
+
+ sc->led_on_duration = sc->led_on_cnt ?
+ max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
+ ATH_LED_ON_DURATION_IDLE;
+ sc->led_off_duration = sc->led_off_cnt ?
+ max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
+ ATH_LED_OFF_DURATION_IDLE;
+ sc->led_on_cnt = sc->led_off_cnt = 0;
+ if (sc->sc_flags & SC_OP_LED_ON)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ else
+ sc->sc_flags |= SC_OP_LED_ON;
+}
+
+static void ath_led_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
+ struct ath_softc *sc = led->sc;
+
+ switch (brightness) {
+ case LED_OFF:
+ if (led->led_type == ATH_LED_ASSOC ||
+ led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
+ (led->led_type == ATH_LED_RADIO));
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ if (led->led_type == ATH_LED_RADIO)
+ sc->sc_flags &= ~SC_OP_LED_ON;
+ } else {
+ sc->led_off_cnt++;
+ }
+ break;
+ case LED_FULL:
+ if (led->led_type == ATH_LED_ASSOC) {
+ sc->sc_flags |= SC_OP_LED_ASSOCIATED;
+ ieee80211_queue_delayed_work(sc->hw,
+ &sc->ath_led_blink_work, 0);
+ } else if (led->led_type == ATH_LED_RADIO) {
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
+ sc->sc_flags |= SC_OP_LED_ON;
+ } else {
+ sc->led_on_cnt++;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
+ char *trigger)
+{
+ int ret;
+
+ led->sc = sc;
+ led->led_cdev.name = led->name;
+ led->led_cdev.default_trigger = trigger;
+ led->led_cdev.brightness_set = ath_led_brightness;
+
+ ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
+ if (ret)
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
+ "Failed to register led:%s", led->name);
+ else
+ led->registered = 1;
+ return ret;
+}
+
+static void ath_unregister_led(struct ath_led *led)
+{
+ if (led->registered) {
+ led_classdev_unregister(&led->led_cdev);
+ led->registered = 0;
+ }
+}
+
+void ath_deinit_leds(struct ath_softc *sc)
+{
+ ath_unregister_led(&sc->assoc_led);
+ sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
+ ath_unregister_led(&sc->tx_led);
+ ath_unregister_led(&sc->rx_led);
+ ath_unregister_led(&sc->radio_led);
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+}
+
+void ath_init_leds(struct ath_softc *sc)
+{
+ char *trigger;
+ int ret;
+
+ if (AR_SREV_9287(sc->sc_ah))
+ sc->sc_ah->led_pin = ATH_LED_PIN_9287;
+ else
+ sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
+
+ /* Configure the LED GPIO for output */
+ ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
+ AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
+ /* LED off, active low */
+ ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
+
+ INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
+
+ trigger = ieee80211_get_radio_led_name(sc->hw);
+ snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
+ "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->radio_led, trigger);
+ sc->radio_led.led_type = ATH_LED_RADIO;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_assoc_led_name(sc->hw);
+ snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
+ "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->assoc_led, trigger);
+ sc->assoc_led.led_type = ATH_LED_ASSOC;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_tx_led_name(sc->hw);
+ snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
+ "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->tx_led, trigger);
+ sc->tx_led.led_type = ATH_LED_TX;
+ if (ret)
+ goto fail;
+
+ trigger = ieee80211_get_rx_led_name(sc->hw);
+ snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
+ "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
+ ret = ath_register_led(sc, &sc->rx_led, trigger);
+ sc->rx_led.led_type = ATH_LED_RX;
+ if (ret)
+ goto fail;
+
+ return;
+
+fail:
+ cancel_delayed_work_sync(&sc->ath_led_blink_work);
+ ath_deinit_leds(sc);
+}
+
+/*******************/
+/* Rfkill */
+/*******************/
+
+static bool ath_is_rfkill_set(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
+ ah->rfkill_polarity;
+}
+
+void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ bool blocked = !!ath_is_rfkill_set(sc);
+
+ wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
+}
+
+void ath_start_rfkill_poll(struct ath_softc *sc)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
+ wiphy_rfkill_start_polling(sc->hw->wiphy);
+}
+
+/******************/
+/* BTCOEX */
+/******************/
+
+/*
+ * Detects if there is any priority bt traffic
+ */
+static void ath_detect_bt_priority(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
+ btcoex->bt_priority_cnt++;
+
+ if (time_after(jiffies, btcoex->bt_priority_time +
+ msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
+ sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+ /* Detect if colocated bt started scanning */
+ if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ "BT scan detected");
+ sc->sc_flags |= (SC_OP_BT_SCAN |
+ SC_OP_BT_PRIORITY_DETECTED);
+ } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
+ ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
+ "BT priority traffic detected");
+ sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
+ }
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ }
+}
+
+/*
+ * Configures appropriate weight based on stomp type.
+ */
+static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
+ enum ath_stomp_type stomp_type)
+{
+ struct ath_hw *ah = sc->sc_ah;
+
+ switch (stomp_type) {
+ case ATH_BTCOEX_STOMP_ALL:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_ALL_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_LOW:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_LOW_WLAN_WGHT);
+ break;
+ case ATH_BTCOEX_STOMP_NONE:
+ ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
+ AR_STOMP_NONE_WLAN_WGHT);
+ break;
+ default:
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Invalid Stomptype\n");
+ break;
+ }
+
+ ath9k_hw_btcoex_enable(ah);
+}
+
+static void ath9k_gen_timer_start(struct ath_hw *ah,
+ struct ath_gen_timer *timer,
+ u32 timer_next,
+ u32 timer_period)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
+
+ if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask |= ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
+
+ ath9k_hw_gen_timer_stop(ah, timer);
+
+ /* if no timer is enabled, turn off interrupt mask */
+ if (timer_table->timer_mask.val == 0) {
+ ath9k_hw_set_interrupts(ah, 0);
+ sc->imask &= ~ATH9K_INT_GENTIMER;
+ ath9k_hw_set_interrupts(ah, sc->imask);
+ }
+}
+
+/*
+ * This is the master bt coex timer, which runs every
+ * 45 ms; bt traffic is given priority during 55% of this
+ * period while wlan gets the remaining 45%.
+ */
+static void ath_btcoex_period_timer(unsigned long data)
+{
+ struct ath_softc *sc = (struct ath_softc *) data;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ u32 timer_period;
+ bool is_btscan;
+
+ ath_detect_bt_priority(sc);
+
+ is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ ath9k_btcoex_bt_stomp(sc, is_btscan ? ATH_BTCOEX_STOMP_ALL :
+ btcoex->bt_stomp_type);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ timer_period = is_btscan ? btcoex->btscan_no_stomp :
+ btcoex->btcoex_no_stomp;
+ ath9k_gen_timer_start(ah,
+ btcoex->no_stomp_timer,
+ (ath9k_hw_gettsf32(ah) +
+ timer_period), timer_period * 10);
+ btcoex->hw_timer_enabled = true;
+ }
+
+ mod_timer(&btcoex->period_timer, jiffies +
+ msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
+}
+
+/*
+ * Generic tsf based hw timer which configures weight
+ * registers to time slice between wlan and bt traffic
+ */
+static void ath_btcoex_no_stomp_timer(void *arg)
+{
+ struct ath_softc *sc = (struct ath_softc *)arg;
+ struct ath_hw *ah = sc->sc_ah;
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "no stomp timer running \n");
+
+ spin_lock_bh(&btcoex->btcoex_lock);
+
+ if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
+ else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
+ ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
+
+ spin_unlock_bh(&btcoex->btcoex_lock);
+}
+
+int ath_init_btcoex_timer(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+
+ btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
+ btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+ btcoex->btscan_no_stomp = (100 - ATH_BTCOEX_BTSCAN_DUTY_CYCLE) *
+ btcoex->btcoex_period / 100;
+
+ setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
+ (unsigned long) sc);
+
+ spin_lock_init(&btcoex->btcoex_lock);
+
+ btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
+ ath_btcoex_no_stomp_timer,
+ ath_btcoex_no_stomp_timer,
+ (void *) sc, AR_FIRST_NDP_TIMER);
+
+ if (!btcoex->no_stomp_timer)
+ return -ENOMEM;
+
+ return 0;
+}
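To make the duty-cycle arithmetic above concrete: the coex period is kept in microseconds, and the "no stomp" window is the fraction of the period left to WLAN. A rough worked example, assuming ATH_BTCOEX_DEF_BT_PERIOD is 45 (ms) and ATH_BTCOEX_DEF_DUTY_CYCLE is 55 (both macros are defined outside this hunk, so the values are assumptions):

/* Illustrative helper only -- the 45/55 figures are assumed, not shown here. */
static inline u32 btcoex_no_stomp_us(u32 bt_period_ms, u32 duty_cycle)
{
        u32 period_us = bt_period_ms * 1000;            /* 45 ms -> 45000 us      */

        return (100 - duty_cycle) * period_us / 100;    /* 55% duty -> 20250 us   */
}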
+
+/*
+ * (Re)start btcoex timers
+ */
+void ath9k_btcoex_timer_resume(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+ "Starting btcoex timers");
+
+ /* make sure duty cycle timer is also stopped when resuming */
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
+
+ btcoex->bt_priority_cnt = 0;
+ btcoex->bt_priority_time = jiffies;
+ sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
+
+ mod_timer(&btcoex->period_timer, jiffies);
+}
+
+
+/*
+ * Pause btcoex timer and bt duty cycle timer
+ */
+void ath9k_btcoex_timer_pause(struct ath_softc *sc)
+{
+ struct ath_btcoex *btcoex = &sc->btcoex;
+ struct ath_hw *ah = sc->sc_ah;
+
+ del_timer_sync(&btcoex->period_timer);
+
+ if (btcoex->hw_timer_enabled)
+ ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
+
+ btcoex->hw_timer_enabled = false;
+}
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ae371448b5a..2e767cf22f1 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -52,28 +52,6 @@ module_exit(ath9k_exit);
/* Helper Functions */
/********************/
-static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- if (!ah->curchan) /* should really check for CCK instead */
- return clks / ATH9K_CLOCK_RATE_CCK;
- if (conf->channel->band == IEEE80211_BAND_2GHZ)
- return clks / ATH9K_CLOCK_RATE_2GHZ_OFDM;
-
- return clks / ATH9K_CLOCK_RATE_5GHZ_OFDM;
-}
-
-static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
-{
- struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
-
- if (conf_is_ht40(conf))
- return ath9k_hw_mac_usec(ah, clks) / 2;
- else
- return ath9k_hw_mac_usec(ah, clks);
-}
-
static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
@@ -343,30 +321,6 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
return true;
}
-static const char *ath9k_hw_devname(u16 devid)
-{
- switch (devid) {
- case AR5416_DEVID_PCI:
- return "Atheros 5416";
- case AR5416_DEVID_PCIE:
- return "Atheros 5418";
- case AR9160_DEVID_PCI:
- return "Atheros 9160";
- case AR5416_AR9100_DEVID:
- return "Atheros 9100";
- case AR9280_DEVID_PCI:
- case AR9280_DEVID_PCIE:
- return "Atheros 9280";
- case AR9285_DEVID_PCIE:
- return "Atheros 9285";
- case AR5416_DEVID_AR9287_PCI:
- case AR5416_DEVID_AR9287_PCIE:
- return "Atheros 9287";
- }
-
- return NULL;
-}
-
static void ath9k_hw_init_config(struct ath_hw *ah)
{
int i;
@@ -380,7 +334,6 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
- ah->config.ht_enable = 1;
ah->config.ofdm_trig_low = 200;
ah->config.ofdm_trig_high = 500;
ah->config.cck_trig_high = 200;
@@ -392,7 +345,12 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
ah->config.spurchans[i][1] = AR_NO_SPUR;
}
- ah->config.intr_mitigation = true;
+ if (ah->hw_version.devid != AR2427_DEVID_PCIE)
+ ah->config.ht_enable = 1;
+ else
+ ah->config.ht_enable = 0;
+
+ ah->config.rx_intr_mitigation = true;
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
@@ -437,8 +395,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
ah->beacon_interval = 100;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
ah->slottime = (u32) -1;
- ah->acktimeout = (u32) -1;
- ah->ctstimeout = (u32) -1;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
}
@@ -590,6 +546,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
case AR5416_DEVID_AR9287_PCI:
case AR5416_DEVID_AR9287_PCIE:
case AR9271_USB:
+ case AR2427_DEVID_PCIE:
return true;
default:
break;
@@ -1183,7 +1140,7 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
AR_IMR_RXORN |
AR_IMR_BCNMISC;
- if (ah->config.intr_mitigation)
+ if (ah->config.rx_intr_mitigation)
ah->mask_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
else
ah->mask_reg |= AR_IMR_RXOK;
@@ -1203,34 +1160,25 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
}
}
-static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
+static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
{
- if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad ack timeout %u\n", us);
- ah->acktimeout = (u32) -1;
- return false;
- } else {
- REG_RMW_FIELD(ah, AR_TIME_OUT,
- AR_TIME_OUT_ACK, ath9k_hw_mac_to_clks(ah, us));
- ah->acktimeout = us;
- return true;
- }
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) 0xFFFF);
+ REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
}
-static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
+static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
{
- if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad cts timeout %u\n", us);
- ah->ctstimeout = (u32) -1;
- return false;
- } else {
- REG_RMW_FIELD(ah, AR_TIME_OUT,
- AR_TIME_OUT_CTS, ath9k_hw_mac_to_clks(ah, us));
- ah->ctstimeout = us;
- return true;
- }
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
+}
+
+static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
+{
+ u32 val = ath9k_hw_mac_to_clks(ah, us);
+ val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
+ REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
}
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
@@ -1247,31 +1195,48 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
}
}
-static void ath9k_hw_init_user_settings(struct ath_hw *ah)
+void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
+ struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
+ int acktimeout;
+ int slottime;
+ int sifstime;
+
ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
ah->misc_mode);
if (ah->misc_mode != 0)
REG_WRITE(ah, AR_PCU_MISC,
REG_READ(ah, AR_PCU_MISC) | ah->misc_mode);
- if (ah->slottime != (u32) -1)
- ath9k_hw_setslottime(ah, ah->slottime);
- if (ah->acktimeout != (u32) -1)
- ath9k_hw_set_ack_timeout(ah, ah->acktimeout);
- if (ah->ctstimeout != (u32) -1)
- ath9k_hw_set_cts_timeout(ah, ah->ctstimeout);
+
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_5GHZ)
+ sifstime = 16;
+ else
+ sifstime = 10;
+
+ /* As defined by IEEE 802.11-2007 17.3.8.6 */
+ slottime = ah->slottime + 3 * ah->coverage_class;
+ acktimeout = slottime + sifstime;
+
+ /*
+ * Workaround for early ACK timeouts, add an offset to match the
+ * initval's 64us ack timeout value.
+ * This was initially only meant to work around an issue with delayed
+ * BA frames in some implementations, but it has been found to fix ACK
+ * timeout issues in other cases as well.
+ */
+ if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
+ acktimeout += 64 - sifstime - ah->slottime;
+
+ ath9k_hw_setslottime(ah, slottime);
+ ath9k_hw_set_ack_timeout(ah, acktimeout);
+ ath9k_hw_set_cts_timeout(ah, acktimeout);
if (ah->globaltxtimeout != (u32) -1)
ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
}
+EXPORT_SYMBOL(ath9k_hw_init_global_settings);
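A quick worked example of the timeout math above, for the common 2.4 GHz case with the default 9 us slot time and coverage class 0 (inputs chosen purely for illustration): slottime = 9 + 3*0 = 9, sifstime = 10, so acktimeout = 9 + 10 + (64 - 10 - 9) = 64 us, i.e. the workaround lands exactly on the 64 us initval value the comment refers to. A small sketch mirroring that computation:

/* Sketch of the same arithmetic; not a replacement for the function above. */
static int ath9k_ack_timeout_us(int base_slot_us, int coverage_class, bool is_2ghz)
{
        int sifs = is_2ghz ? 10 : 16;
        int slottime = base_slot_us + 3 * coverage_class;
        int ack = slottime + sifs;

        if (is_2ghz)
                ack += 64 - sifs - base_slot_us;   /* 9 us slot, class 0 -> 64 us */
        return ack;
}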
-const char *ath9k_hw_probe(u16 vendorid, u16 devid)
-{
- return vendorid == ATHEROS_VENDOR_ID ?
- ath9k_hw_devname(devid) : NULL;
-}
-
-void ath9k_hw_detach(struct ath_hw *ah)
+void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
@@ -1289,7 +1254,7 @@ free_hw:
kfree(ah);
ah = NULL;
}
-EXPORT_SYMBOL(ath9k_hw_detach);
+EXPORT_SYMBOL(ath9k_hw_deinit);
/*******/
/* INI */
@@ -1345,6 +1310,16 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
* Necessary to avoid issues on AR5416 2.0
*/
REG_WRITE(ah, 0x9800 + (651 << 2), 0x11);
+
+ /*
+ * Disable RIFS search on some chips to avoid baseband
+ * hang issues.
+ */
+ if (AR_SREV_9100(ah) || AR_SREV_9160(ah)) {
+ val = REG_READ(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS);
+ val &= ~AR_PHY_RIFS_INIT_DELAY;
+ REG_WRITE(ah, AR_PHY_HEAVY_CLIP_FACTOR_RIFS, val);
+ }
}
static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
@@ -2090,7 +2065,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
ath9k_enable_rfkill(ah);
- ath9k_hw_init_user_settings(ah);
+ ath9k_hw_init_global_settings(ah);
if (AR_SREV_9287_12_OR_LATER(ah)) {
REG_WRITE(ah, AR_D_GBL_IFS_SIFS,
@@ -2120,7 +2095,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
REG_WRITE(ah, AR_OBS, 8);
- if (ah->config.intr_mitigation) {
+ if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
@@ -2780,7 +2755,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
*masked = isr & ATH9K_INT_COMMON;
- if (ah->config.intr_mitigation) {
+ if (ah->config.rx_intr_mitigation) {
if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM))
*masked |= ATH9K_INT_RX;
}
@@ -2913,7 +2888,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
}
if (ints & ATH9K_INT_RX) {
mask |= AR_IMR_RXERR;
- if (ah->config.intr_mitigation)
+ if (ah->config.rx_intr_mitigation)
mask |= AR_IMR_RXMINTR | AR_IMR_RXINTM;
else
mask |= AR_IMR_RXOK | AR_IMR_RXDESC;
@@ -3687,21 +3662,6 @@ u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp)
}
EXPORT_SYMBOL(ath9k_hw_extend_tsf);
-bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
-{
- if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
- ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
- "bad slot time %u\n", us);
- ah->slottime = (u32) -1;
- return false;
- } else {
- REG_WRITE(ah, AR_D_GBL_IFS_SLOT, ath9k_hw_mac_to_clks(ah, us));
- ah->slottime = us;
- return true;
- }
-}
-EXPORT_SYMBOL(ath9k_hw_setslottime);
-
void ath9k_hw_set11nmac2040(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index e2b0c73a616..dbbf7ca5f97 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -40,6 +40,7 @@
#define AR9280_DEVID_PCI 0x0029
#define AR9280_DEVID_PCIE 0x002a
#define AR9285_DEVID_PCIE 0x002b
+#define AR2427_DEVID_PCIE 0x002c
#define AR5416_AR9100_DEVID 0x000b
@@ -212,7 +213,7 @@ struct ath9k_ops_config {
u32 cck_trig_low;
u32 enable_ani;
int serialize_regmode;
- bool intr_mitigation;
+ bool rx_intr_mitigation;
#define SPUR_DISABLE 0
#define SPUR_ENABLE_IOCTL 1
#define SPUR_ENABLE_EEPROM 2
@@ -551,10 +552,9 @@ struct ath_hw {
u32 *bank6Temp;
int16_t txpower_indexoffset;
+ int coverage_class;
u32 beacon_interval;
u32 slottime;
- u32 acktimeout;
- u32 ctstimeout;
u32 globaltxtimeout;
/* ANI */
@@ -616,7 +616,7 @@ static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
/* Initialization, Detach, Reset */
const char *ath9k_hw_probe(u16 vendorid, u16 devid);
-void ath9k_hw_detach(struct ath_hw *ah);
+void ath9k_hw_deinit(struct ath_hw *ah);
int ath9k_hw_init(struct ath_hw *ah);
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
bool bChannelChange);
@@ -668,7 +668,7 @@ void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
u64 ath9k_hw_extend_tsf(struct ath_hw *ah, u32 rstamp);
-bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
+void ath9k_hw_init_global_settings(struct ath_hw *ah);
void ath9k_hw_set11nmac2040(struct ath_hw *ah);
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
new file mode 100644
index 00000000000..623c2f88498
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -0,0 +1,863 @@
+/*
+ * Copyright (c) 2008-2009 Atheros Communications Inc.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "ath9k.h"
+
+static char *dev_info = "ath9k";
+
+MODULE_AUTHOR("Atheros Communications");
+MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
+MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
+MODULE_LICENSE("Dual BSD/GPL");
+
+static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
+module_param_named(debug, ath9k_debug, uint, 0);
+MODULE_PARM_DESC(debug, "Debugging mask");
+
+int modparam_nohwcrypt;
+module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
+MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
+
+/* We use the hw_value as an index into our private channel structure */
+
+#define CHAN2G(_freq, _idx) { \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+#define CHAN5G(_freq, _idx) { \
+ .band = IEEE80211_BAND_5GHZ, \
+ .center_freq = (_freq), \
+ .hw_value = (_idx), \
+ .max_power = 20, \
+}
+
+/* Some 2 GHz radios are actually tunable on 2312-2732 MHz
+ * in 5 MHz steps; we only support the channels we know we
+ * have calibration data for on all cards, which keeps this
+ * table static */
+static struct ieee80211_channel ath9k_2ghz_chantable[] = {
+ CHAN2G(2412, 0), /* Channel 1 */
+ CHAN2G(2417, 1), /* Channel 2 */
+ CHAN2G(2422, 2), /* Channel 3 */
+ CHAN2G(2427, 3), /* Channel 4 */
+ CHAN2G(2432, 4), /* Channel 5 */
+ CHAN2G(2437, 5), /* Channel 6 */
+ CHAN2G(2442, 6), /* Channel 7 */
+ CHAN2G(2447, 7), /* Channel 8 */
+ CHAN2G(2452, 8), /* Channel 9 */
+ CHAN2G(2457, 9), /* Channel 10 */
+ CHAN2G(2462, 10), /* Channel 11 */
+ CHAN2G(2467, 11), /* Channel 12 */
+ CHAN2G(2472, 12), /* Channel 13 */
+ CHAN2G(2484, 13), /* Channel 14 */
+};
+
+/* Some 5 GHz radios are actually tunable on XXXX-YYYY
+ * in 5 MHz steps; we only support the channels we know we
+ * have calibration data for on all cards, which keeps this
+ * table static */
+static struct ieee80211_channel ath9k_5ghz_chantable[] = {
+ /* _We_ call this UNII 1 */
+ CHAN5G(5180, 14), /* Channel 36 */
+ CHAN5G(5200, 15), /* Channel 40 */
+ CHAN5G(5220, 16), /* Channel 44 */
+ CHAN5G(5240, 17), /* Channel 48 */
+ /* _We_ call this UNII 2 */
+ CHAN5G(5260, 18), /* Channel 52 */
+ CHAN5G(5280, 19), /* Channel 56 */
+ CHAN5G(5300, 20), /* Channel 60 */
+ CHAN5G(5320, 21), /* Channel 64 */
+ /* _We_ call this "Middle band" */
+ CHAN5G(5500, 22), /* Channel 100 */
+ CHAN5G(5520, 23), /* Channel 104 */
+ CHAN5G(5540, 24), /* Channel 108 */
+ CHAN5G(5560, 25), /* Channel 112 */
+ CHAN5G(5580, 26), /* Channel 116 */
+ CHAN5G(5600, 27), /* Channel 120 */
+ CHAN5G(5620, 28), /* Channel 124 */
+ CHAN5G(5640, 29), /* Channel 128 */
+ CHAN5G(5660, 30), /* Channel 132 */
+ CHAN5G(5680, 31), /* Channel 136 */
+ CHAN5G(5700, 32), /* Channel 140 */
+ /* _We_ call this UNII 3 */
+ CHAN5G(5745, 33), /* Channel 149 */
+ CHAN5G(5765, 34), /* Channel 153 */
+ CHAN5G(5785, 35), /* Channel 157 */
+ CHAN5G(5805, 36), /* Channel 161 */
+ CHAN5G(5825, 37), /* Channel 165 */
+};
+
+/* Atheros hardware rate code addition for short preamble */
+#define SHPCHECK(__hw_rate, __flags) \
+ ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
+
+#define RATE(_bitrate, _hw_rate, _flags) { \
+ .bitrate = (_bitrate), \
+ .flags = (_flags), \
+ .hw_value = (_hw_rate), \
+ .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
+}
+
+static struct ieee80211_rate ath9k_legacy_rates[] = {
+ RATE(10, 0x1b, 0),
+ RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
+ RATE(60, 0x0b, 0),
+ RATE(90, 0x0f, 0),
+ RATE(120, 0x0a, 0),
+ RATE(180, 0x0e, 0),
+ RATE(240, 0x09, 0),
+ RATE(360, 0x0d, 0),
+ RATE(480, 0x08, 0),
+ RATE(540, 0x0c, 0),
+};
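The SHPCHECK() macro above simply ORs 0x04 into the hardware rate code when the rate carries the short-preamble flag, so for example:

/* Worked expansion of two entries from the table above. */
/* RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE) yields:        */
/*   .hw_value = 0x1a, .hw_value_short = 0x1a | 0x04 = 0x1e     */
/* RATE(60, 0x0b, 0) yields:                                    */
/*   .hw_value = 0x0b, .hw_value_short = 0                      */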
+
+static void ath9k_deinit_softc(struct ath_softc *sc);
+
+/*
+ * Read and write share the same lock. We do this to serialize
+ * reads and writes on Atheros 802.11n PCI devices only. This is required
+ * as the FIFO on these devices can only sanely accept 2 requests.
+ */
+
+static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ iowrite32(val, sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ iowrite32(val, sc->mem + reg_offset);
+}
+
+static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
+{
+ struct ath_hw *ah = (struct ath_hw *) hw_priv;
+ struct ath_common *common = ath9k_hw_common(ah);
+ struct ath_softc *sc = (struct ath_softc *) common->priv;
+ u32 val;
+
+ if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&sc->sc_serial_rw, flags);
+ val = ioread32(sc->mem + reg_offset);
+ spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
+ } else
+ val = ioread32(sc->mem + reg_offset);
+ return val;
+}
+
+static const struct ath_ops ath9k_common_ops = {
+ .read = ath9k_ioread32,
+ .write = ath9k_iowrite32,
+};
+
+/**************************/
+/* Initialization */
+/**************************/
+
+static void setup_ht_cap(struct ath_softc *sc,
+ struct ieee80211_sta_ht_cap *ht_info)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ u8 tx_streams, rx_streams;
+
+ ht_info->ht_supported = true;
+ ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+ IEEE80211_HT_CAP_SM_PS |
+ IEEE80211_HT_CAP_SGI_40 |
+ IEEE80211_HT_CAP_DSSSCCK40;
+
+ ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
+
+ /* set up supported mcs set */
+ memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
+ tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
+ 1 : 2;
+ rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
+ 1 : 2;
+
+ if (tx_streams != rx_streams) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "TX streams %d, RX streams: %d\n",
+ tx_streams, rx_streams);
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ ht_info->mcs.tx_params |= ((tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
+ }
+
+ ht_info->mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ ht_info->mcs.rx_mask[1] = 0xff;
+
+ ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
+}
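The chainmask test above is the classic power-of-two check: !(mask & (mask - 1)) is true only when a single bit is set, i.e. a single chain, so every other mask is treated as two streams. A tiny illustration of the same test, offered only as a sketch:

/* Illustrates the stream-count test used in setup_ht_cap(). */
static inline u8 chainmask_to_streams(u8 chainmask)
{
        /* 0x1, 0x2, 0x4 -> one chain set -> 1 stream; 0x3, 0x5, 0x7 -> 2 */
        return !(chainmask & (chainmask - 1)) ? 1 : 2;
}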
+
+static int ath9k_reg_notifier(struct wiphy *wiphy,
+ struct regulatory_request *request)
+{
+ struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
+
+ return ath_reg_notifier_apply(wiphy, request, reg);
+}
+
+/*
+ * This function will allocate both the DMA descriptor structure, and the
+ * buffers it contains. These are used to contain the descriptors used
+ * by the system.
+*/
+int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
+ struct list_head *head, const char *name,
+ int nbuf, int ndesc)
+{
+#define DS2PHYS(_dd, _ds) \
+ ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
+#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
+#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ struct ath_desc *ds;
+ struct ath_buf *bf;
+ int i, bsize, error;
+
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
+ name, nbuf, ndesc);
+
+ INIT_LIST_HEAD(head);
+ /* ath_desc must be a multiple of DWORDs */
+ if ((sizeof(struct ath_desc) % 4) != 0) {
+ ath_print(common, ATH_DBG_FATAL,
+ "ath_desc not DWORD aligned\n");
+ BUG_ON((sizeof(struct ath_desc) % 4) != 0);
+ error = -ENOMEM;
+ goto fail;
+ }
+
+ dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
+
+ /*
+ * Need additional DMA memory because we can't use
+ * descriptors that cross the 4K page boundary. Assume
+ * one skipped descriptor per 4K page.
+ */
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ u32 ndesc_skipped =
+ ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
+ u32 dma_len;
+
+ while (ndesc_skipped) {
+ dma_len = ndesc_skipped * sizeof(struct ath_desc);
+ dd->dd_desc_len += dma_len;
+
+ ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
+ };
+ }
+
+ /* allocate descriptors */
+ dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
+ &dd->dd_desc_paddr, GFP_KERNEL);
+ if (dd->dd_desc == NULL) {
+ error = -ENOMEM;
+ goto fail;
+ }
+ ds = dd->dd_desc;
+ ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
+ name, ds, (u32) dd->dd_desc_len,
+ ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
+
+ /* allocate buffers */
+ bsize = sizeof(struct ath_buf) * nbuf;
+ bf = kzalloc(bsize, GFP_KERNEL);
+ if (bf == NULL) {
+ error = -ENOMEM;
+ goto fail2;
+ }
+ dd->dd_bufptr = bf;
+
+ for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+
+ if (!(sc->sc_ah->caps.hw_caps &
+ ATH9K_HW_CAP_4KB_SPLITTRANS)) {
+ /*
+ * Skip descriptor addresses which can cause 4KB
+ * boundary crossing (addr + length) with a 32 dword
+ * descriptor fetch.
+ */
+ while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
+ BUG_ON((caddr_t) bf->bf_desc >=
+ ((caddr_t) dd->dd_desc +
+ dd->dd_desc_len));
+
+ ds += ndesc;
+ bf->bf_desc = ds;
+ bf->bf_daddr = DS2PHYS(dd, ds);
+ }
+ }
+ list_add_tail(&bf->list, head);
+ }
+ return 0;
+fail2:
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+fail:
+ memset(dd, 0, sizeof(*dd));
+ return error;
+#undef ATH_DESC_4KB_BOUND_CHECK
+#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
+#undef DS2PHYS
+}
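The 4 KB boundary handling above is worth spelling out: ATH_DESC_4KB_BOUND_CHECK() flags any descriptor whose physical address falls in the last 128 bytes of a 4 KB page ((addr & 0xFFF) > 0xF7F), because a 32-dword descriptor fetch starting there would cross the page boundary. Such slots are simply skipped, which is why the allocation earlier reserves roughly one extra descriptor per 4 KB page. A small check mirroring the macro:

/* Mirrors ATH_DESC_4KB_BOUND_CHECK(): true if a 32-dword (128-byte)
 * descriptor fetch starting at 'daddr' would cross a 4 KB page. */
static inline bool desc_crosses_4kb(dma_addr_t daddr)
{
        return (daddr & 0xFFF) > 0xF7F;   /* i.e. offsets 0xF80 .. 0xFFF */
}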
+
+static void ath9k_init_crypto(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ /* Get the hardware key cache size. */
+ common->keymax = sc->sc_ah->caps.keycache_size;
+ if (common->keymax > ATH_KEYMAX) {
+ ath_print(common, ATH_DBG_ANY,
+ "Warning, using only %u entries in %u key cache\n",
+ ATH_KEYMAX, common->keymax);
+ common->keymax = ATH_KEYMAX;
+ }
+
+ /*
+ * Reset the key cache since some parts do not
+ * reset the contents on initial power up.
+ */
+ for (i = 0; i < common->keymax; i++)
+ ath9k_hw_keyreset(sc->sc_ah, (u16) i);
+
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)) {
+ /*
+ * Whether we should enable h/w TKIP MIC.
+ * XXX: if we don't support WME TKIP MIC, then we wouldn't
+ * report WMM capable, so it's always safe to turn on
+ * TKIP MIC in this case.
+ */
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC, 0, 1, NULL);
+ }
+
+ /*
+ * Check whether the separate key cache entries
+ * are required to handle both tx+rx MIC keys.
+ * With split mic keys the number of stations is limited
+ * to 27 otherwise 59.
+ */
+ if (ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_TKIP, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_CIPHER,
+ ATH9K_CIPHER_MIC, NULL)
+ && ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_TKIP_SPLIT,
+ 0, NULL))
+ common->splitmic = 1;
+
+ /* turn on mcast key search if possible */
+ if (!ath9k_hw_getcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
+ (void)ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_MCAST_KEYSRCH,
+ 1, 1, NULL);
+
+}
+
+static int ath9k_init_btcoex(struct ath_softc *sc)
+{
+ int r, qnum;
+
+ switch (sc->sc_ah->btcoex_hw.scheme) {
+ case ATH_BTCOEX_CFG_NONE:
+ break;
+ case ATH_BTCOEX_CFG_2WIRE:
+ ath9k_hw_btcoex_init_2wire(sc->sc_ah);
+ break;
+ case ATH_BTCOEX_CFG_3WIRE:
+ ath9k_hw_btcoex_init_3wire(sc->sc_ah);
+ r = ath_init_btcoex_timer(sc);
+ if (r)
+ return -1;
+ qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
+ ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum);
+ sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+
+ return 0;
+}
+
+static int ath9k_init_queues(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
+ sc->tx.hwq_map[i] = -1;
+
+ sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
+ if (sc->beacon.beaconq == -1) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup a beacon xmit queue\n");
+ goto err;
+ }
+
+ sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
+ if (sc->beacon.cabq == NULL) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup CAB xmit queue\n");
+ goto err;
+ }
+
+ sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
+ ath_cabq_update(sc);
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BK traffic\n");
+ goto err;
+ }
+
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for BE traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VI traffic\n");
+ goto err;
+ }
+ if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to setup xmit queue for VO traffic\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ return -EIO;
+}
+
+static void ath9k_init_channels_rates(struct ath_softc *sc)
+{
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
+ sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
+ ARRAY_SIZE(ath9k_2ghz_chantable);
+ sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
+ sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates);
+ }
+
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
+ sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
+ sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
+ ARRAY_SIZE(ath9k_5ghz_chantable);
+ sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
+ ath9k_legacy_rates + 4;
+ sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
+ ARRAY_SIZE(ath9k_legacy_rates) - 4;
+ }
+}
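Note the rate-table slicing in the 5 GHz branch: ath9k_legacy_rates + 4 skips the four CCK entries so the 5 GHz band advertises only the OFDM rates, which is why n_bitrates is ARRAY_SIZE(ath9k_legacy_rates) - 4.

/* Layout of ath9k_legacy_rates[], read off the table defined earlier:
 *   [0..3]  CCK : 1, 2, 5.5, 11 Mbps              -- 2 GHz only
 *   [4..11] OFDM: 6, 9, 12, 18, 24, 36, 48, 54 Mbps -- both bands
 * so "ath9k_legacy_rates + 4" hands the 5 GHz band the OFDM subset only.
 */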
+
+static void ath9k_init_misc(struct ath_softc *sc)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+ int i = 0;
+
+ common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
+ setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
+
+ sc->config.txpowlimit = ATH_TXPOWER_MAX;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ sc->sc_flags |= SC_OP_TXAGGR;
+ sc->sc_flags |= SC_OP_RXAGGR;
+ }
+
+ common->tx_chainmask = sc->sc_ah->caps.tx_chainmask;
+ common->rx_chainmask = sc->sc_ah->caps.rx_chainmask;
+
+ ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
+ sc->rx.defant = ath9k_hw_getdefantenna(sc->sc_ah);
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
+ memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+
+ sc->beacon.slottime = ATH9K_SLOT_TIME_9;
+
+ for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
+ sc->beacon.bslot[i] = NULL;
+ sc->beacon.bslot_aphy[i] = NULL;
+ }
+}
+
+static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ath_hw *ah = NULL;
+ struct ath_common *common;
+ int ret = 0, i;
+ int csz = 0;
+
+ ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
+ if (!ah)
+ return -ENOMEM;
+
+ ah->hw_version.devid = devid;
+ ah->hw_version.subsysid = subsysid;
+ sc->sc_ah = ah;
+
+ common = ath9k_hw_common(ah);
+ common->ops = &ath9k_common_ops;
+ common->bus_ops = bus_ops;
+ common->ah = ah;
+ common->hw = sc->hw;
+ common->priv = sc;
+ common->debug_mask = ath9k_debug;
+
+ spin_lock_init(&sc->wiphy_lock);
+ spin_lock_init(&sc->sc_resetlock);
+ spin_lock_init(&sc->sc_serial_rw);
+ spin_lock_init(&sc->sc_pm_lock);
+ mutex_init(&sc->mutex);
+ tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
+ tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
+ (unsigned long)sc);
+
+ /*
+ * Cache line size is used to size and align various
+ * structures used to communicate with the hardware.
+ */
+ ath_read_cachesize(common, &csz);
+ common->cachelsz = csz << 2; /* convert to bytes */
+
+ ret = ath9k_hw_init(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to initialize hardware; "
+ "initialization status: %d\n", ret);
+ goto err_hw;
+ }
+
+ ret = ath9k_init_debug(ah);
+ if (ret) {
+ ath_print(common, ATH_DBG_FATAL,
+ "Unable to create debugfs files\n");
+ goto err_debug;
+ }
+
+ ret = ath9k_init_queues(sc);
+ if (ret)
+ goto err_queues;
+
+ ret = ath9k_init_btcoex(sc);
+ if (ret)
+ goto err_btcoex;
+
+ ath9k_init_crypto(sc);
+ ath9k_init_channels_rates(sc);
+ ath9k_init_misc(sc);
+
+ return 0;
+
+err_btcoex:
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+err_queues:
+ ath9k_exit_debug(ah);
+err_debug:
+ ath9k_hw_deinit(ah);
+err_hw:
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+
+ kfree(ah);
+ sc->sc_ah = NULL;
+
+ return ret;
+}
+
+void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
+{
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
+
+ hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
+ IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_PS |
+ IEEE80211_HW_PS_NULLFUNC_STACK |
+ IEEE80211_HW_SPECTRUM_MGMT |
+ IEEE80211_HW_REPORTS_TX_ACK_STATUS;
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+
+ if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
+ hw->flags |= IEEE80211_HW_MFP_CAPABLE;
+
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC) |
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ hw->queues = 4;
+ hw->max_rates = 4;
+ hw->channel_change_time = 5000;
+ hw->max_listen_interval = 10;
+ hw->max_rate_tries = 10;
+ hw->sta_data_size = sizeof(struct ath_node);
+ hw->vif_data_size = sizeof(struct ath_vif);
+
+ hw->rate_control_algorithm = "ath9k_rate_control";
+
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
+ &sc->sbands[IEEE80211_BAND_2GHZ];
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
+ &sc->sbands[IEEE80211_BAND_5GHZ];
+
+ if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
+ if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
+ if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
+ setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
+ }
+
+ SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
+}
+
+int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
+ const struct ath_bus_ops *bus_ops)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ struct ath_common *common;
+ struct ath_hw *ah;
+ int error = 0;
+ struct ath_regulatory *reg;
+
+ /* Bring up device */
+ error = ath9k_init_softc(devid, sc, subsysid, bus_ops);
+ if (error != 0)
+ goto error_init;
+
+ ah = sc->sc_ah;
+ common = ath9k_hw_common(ah);
+ ath9k_set_hw_capab(sc, hw);
+
+ /* Initialize regulatory */
+ error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
+ ath9k_reg_notifier);
+ if (error)
+ goto error_regd;
+
+ reg = &common->regulatory;
+
+ /* Setup TX DMA */
+ error = ath_tx_init(sc, ATH_TXBUF);
+ if (error != 0)
+ goto error_tx;
+
+ /* Setup RX DMA */
+ error = ath_rx_init(sc, ATH_RXBUF);
+ if (error != 0)
+ goto error_rx;
+
+ /* Register with mac80211 */
+ error = ieee80211_register_hw(hw);
+ if (error)
+ goto error_register;
+
+ /* Handle world regulatory */
+ if (!ath_is_world_regd(reg)) {
+ error = regulatory_hint(hw->wiphy, reg->alpha2);
+ if (error)
+ goto error_world;
+ }
+
+ INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
+ INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
+ sc->wiphy_scheduler_int = msecs_to_jiffies(500);
+
+ ath_init_leds(sc);
+ ath_start_rfkill_poll(sc);
+
+ return 0;
+
+error_world:
+ ieee80211_unregister_hw(hw);
+error_register:
+ ath_rx_cleanup(sc);
+error_rx:
+ ath_tx_cleanup(sc);
+error_tx:
+ /* Nothing */
+error_regd:
+ ath9k_deinit_softc(sc);
+error_init:
+ return error;
+}
+
+/*****************************/
+/* De-Initialization */
+/*****************************/
+
+static void ath9k_deinit_softc(struct ath_softc *sc)
+{
+ int i = 0;
+
+ if ((sc->btcoex.no_stomp_timer) &&
+ sc->sc_ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
+ ath_gen_timer_free(sc->sc_ah, sc->btcoex.no_stomp_timer);
+
+ for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
+ if (ATH_TXQ_SETUP(sc, i))
+ ath_tx_cleanupq(sc, &sc->tx.txq[i]);
+
+ ath9k_exit_debug(sc->sc_ah);
+ ath9k_hw_deinit(sc->sc_ah);
+
+ tasklet_kill(&sc->intr_tq);
+ tasklet_kill(&sc->bcon_tasklet);
+}
+
+void ath9k_deinit_device(struct ath_softc *sc)
+{
+ struct ieee80211_hw *hw = sc->hw;
+ int i = 0;
+
+ ath9k_ps_wakeup(sc);
+
+ wiphy_rfkill_stop_polling(sc->hw->wiphy);
+ ath_deinit_leds(sc);
+
+ for (i = 0; i < sc->num_sec_wiphy; i++) {
+ struct ath_wiphy *aphy = sc->sec_wiphy[i];
+ if (aphy == NULL)
+ continue;
+ sc->sec_wiphy[i] = NULL;
+ ieee80211_unregister_hw(aphy->hw);
+ ieee80211_free_hw(aphy->hw);
+ }
+ kfree(sc->sec_wiphy);
+
+ ieee80211_unregister_hw(hw);
+ ath_rx_cleanup(sc);
+ ath_tx_cleanup(sc);
+ ath9k_deinit_softc(sc);
+}
+
+void ath_descdma_cleanup(struct ath_softc *sc,
+ struct ath_descdma *dd,
+ struct list_head *head)
+{
+ dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
+ dd->dd_desc_paddr);
+
+ INIT_LIST_HEAD(head);
+ kfree(dd->dd_bufptr);
+ memset(dd, 0, sizeof(*dd));
+}
+
+/************************/
+/* Module Hooks */
+/************************/
+
+static int __init ath9k_init(void)
+{
+ int error;
+
+ /* Register rate control algorithm */
+ error = ath_rate_control_register();
+ if (error != 0) {
+ printk(KERN_ERR
+ "ath9k: Unable to register rate control "
+ "algorithm: %d\n",
+ error);
+ goto err_out;
+ }
+
+ error = ath9k_debug_create_root();
+ if (error) {
+ printk(KERN_ERR
+ "ath9k: Unable to create debugfs root: %d\n",
+ error);
+ goto err_rate_unregister;
+ }
+
+ error = ath_pci_init();
+ if (error < 0) {
+ printk(KERN_ERR
+ "ath9k: No PCI devices found, driver not installed.\n");
+ error = -ENODEV;
+ goto err_remove_root;
+ }
+
+ error = ath_ahb_init();
+ if (error < 0) {
+ error = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ return 0;
+
+ err_pci_exit:
+ ath_pci_exit();
+
+ err_remove_root:
+ ath9k_debug_remove_root();
+ err_rate_unregister:
+ ath_rate_control_unregister();
+ err_out:
+ return error;
+}
+module_init(ath9k_init);
+
+static void __exit ath9k_exit(void)
+{
+ ath_ahb_exit();
+ ath_pci_exit();
+ ath9k_debug_remove_root();
+ ath_rate_control_unregister();
+ printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
+}
+module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index e185479e295..29851e6376a 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -167,6 +167,40 @@ struct ath_rx_status {
#define ATH9K_RXKEYIX_INVALID ((u8)-1)
#define ATH9K_TXKEYIX_INVALID ((u32)-1)
+enum ath9k_phyerr {
+ ATH9K_PHYERR_UNDERRUN = 0, /* Transmit underrun */
+ ATH9K_PHYERR_TIMING = 1, /* Timing error */
+ ATH9K_PHYERR_PARITY = 2, /* Illegal parity */
+ ATH9K_PHYERR_RATE = 3, /* Illegal rate */
+ ATH9K_PHYERR_LENGTH = 4, /* Illegal length */
+ ATH9K_PHYERR_RADAR = 5, /* Radar detect */
+ ATH9K_PHYERR_SERVICE = 6, /* Illegal service */
+ ATH9K_PHYERR_TOR = 7, /* Transmit override receive */
+
+ ATH9K_PHYERR_OFDM_TIMING = 17,
+ ATH9K_PHYERR_OFDM_SIGNAL_PARITY = 18,
+ ATH9K_PHYERR_OFDM_RATE_ILLEGAL = 19,
+ ATH9K_PHYERR_OFDM_LENGTH_ILLEGAL = 20,
+ ATH9K_PHYERR_OFDM_POWER_DROP = 21,
+ ATH9K_PHYERR_OFDM_SERVICE = 22,
+ ATH9K_PHYERR_OFDM_RESTART = 23,
+ ATH9K_PHYERR_FALSE_RADAR_EXT = 24,
+
+ ATH9K_PHYERR_CCK_TIMING = 25,
+ ATH9K_PHYERR_CCK_HEADER_CRC = 26,
+ ATH9K_PHYERR_CCK_RATE_ILLEGAL = 27,
+ ATH9K_PHYERR_CCK_SERVICE = 30,
+ ATH9K_PHYERR_CCK_RESTART = 31,
+ ATH9K_PHYERR_CCK_LENGTH_ILLEGAL = 32,
+ ATH9K_PHYERR_CCK_POWER_DROP = 33,
+
+ ATH9K_PHYERR_HT_CRC_ERROR = 34,
+ ATH9K_PHYERR_HT_LENGTH_ILLEGAL = 35,
+ ATH9K_PHYERR_HT_RATE_ILLEGAL = 36,
+
+ ATH9K_PHYERR_MAX = 37,
+};
+
struct ath_desc {
u32 ds_link;
u32 ds_data;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 643bea35686..67ca4e5a601 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -18,118 +18,6 @@
#include "ath9k.h"
#include "btcoex.h"
-static char *dev_info = "ath9k";
-
-MODULE_AUTHOR("Atheros Communications");
-MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
-MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
-MODULE_LICENSE("Dual BSD/GPL");
-
-static int modparam_nohwcrypt;
-module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
-MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
-
-static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
-module_param_named(debug, ath9k_debug, uint, 0);
-MODULE_PARM_DESC(debug, "Debugging mask");
-
-/* We use the hw_value as an index into our private channel structure */
-
-#define CHAN2G(_freq, _idx) { \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-#define CHAN5G(_freq, _idx) { \
- .band = IEEE80211_BAND_5GHZ, \
- .center_freq = (_freq), \
- .hw_value = (_idx), \
- .max_power = 20, \
-}
-
-/* Some 2 GHz radios are actually tunable on 2312-2732
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_2ghz_chantable[] = {
- CHAN2G(2412, 0), /* Channel 1 */
- CHAN2G(2417, 1), /* Channel 2 */
- CHAN2G(2422, 2), /* Channel 3 */
- CHAN2G(2427, 3), /* Channel 4 */
- CHAN2G(2432, 4), /* Channel 5 */
- CHAN2G(2437, 5), /* Channel 6 */
- CHAN2G(2442, 6), /* Channel 7 */
- CHAN2G(2447, 7), /* Channel 8 */
- CHAN2G(2452, 8), /* Channel 9 */
- CHAN2G(2457, 9), /* Channel 10 */
- CHAN2G(2462, 10), /* Channel 11 */
- CHAN2G(2467, 11), /* Channel 12 */
- CHAN2G(2472, 12), /* Channel 13 */
- CHAN2G(2484, 13), /* Channel 14 */
-};
-
-/* Some 5 GHz radios are actually tunable on XXXX-YYYY
- * on 5 MHz steps, we support the channels which we know
- * we have calibration data for all cards though to make
- * this static */
-static struct ieee80211_channel ath9k_5ghz_chantable[] = {
- /* _We_ call this UNII 1 */
- CHAN5G(5180, 14), /* Channel 36 */
- CHAN5G(5200, 15), /* Channel 40 */
- CHAN5G(5220, 16), /* Channel 44 */
- CHAN5G(5240, 17), /* Channel 48 */
- /* _We_ call this UNII 2 */
- CHAN5G(5260, 18), /* Channel 52 */
- CHAN5G(5280, 19), /* Channel 56 */
- CHAN5G(5300, 20), /* Channel 60 */
- CHAN5G(5320, 21), /* Channel 64 */
- /* _We_ call this "Middle band" */
- CHAN5G(5500, 22), /* Channel 100 */
- CHAN5G(5520, 23), /* Channel 104 */
- CHAN5G(5540, 24), /* Channel 108 */
- CHAN5G(5560, 25), /* Channel 112 */
- CHAN5G(5580, 26), /* Channel 116 */
- CHAN5G(5600, 27), /* Channel 120 */
- CHAN5G(5620, 28), /* Channel 124 */
- CHAN5G(5640, 29), /* Channel 128 */
- CHAN5G(5660, 30), /* Channel 132 */
- CHAN5G(5680, 31), /* Channel 136 */
- CHAN5G(5700, 32), /* Channel 140 */
- /* _We_ call this UNII 3 */
- CHAN5G(5745, 33), /* Channel 149 */
- CHAN5G(5765, 34), /* Channel 153 */
- CHAN5G(5785, 35), /* Channel 157 */
- CHAN5G(5805, 36), /* Channel 161 */
- CHAN5G(5825, 37), /* Channel 165 */
-};
-
-/* Atheros hardware rate code addition for short premble */
-#define SHPCHECK(__hw_rate, __flags) \
- ((__flags & IEEE80211_RATE_SHORT_PREAMBLE) ? (__hw_rate | 0x04 ) : 0)
-
-#define RATE(_bitrate, _hw_rate, _flags) { \
- .bitrate = (_bitrate), \
- .flags = (_flags), \
- .hw_value = (_hw_rate), \
- .hw_value_short = (SHPCHECK(_hw_rate, _flags)) \
-}
-
-static struct ieee80211_rate ath9k_legacy_rates[] = {
- RATE(10, 0x1b, 0),
- RATE(20, 0x1a, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(55, 0x19, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(110, 0x18, IEEE80211_RATE_SHORT_PREAMBLE),
- RATE(60, 0x0b, 0),
- RATE(90, 0x0f, 0),
- RATE(120, 0x0a, 0),
- RATE(180, 0x0e, 0),
- RATE(240, 0x09, 0),
- RATE(360, 0x0d, 0),
- RATE(480, 0x08, 0),
- RATE(540, 0x0c, 0),
-};
-
static void ath_cache_conf_rate(struct ath_softc *sc,
struct ieee80211_conf *conf)
{
@@ -221,7 +109,7 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
return channel;
}
-static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
+bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
{
unsigned long flags;
bool ret;
@@ -255,11 +143,13 @@ void ath9k_ps_restore(struct ath_softc *sc)
if (--sc->ps_usecount != 0)
goto unlock;
- if (sc->ps_enabled &&
- !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK)))
+ if (sc->ps_idle)
+ ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP);
+ else if (sc->ps_enabled &&
+ !(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK)))
ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
unlock:
@@ -316,7 +206,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
r = ath9k_hw_reset(ah, hchan, fastcc);
if (r) {
ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel (%u Mhz) "
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
spin_unlock_bh(&sc->sc_resetlock);
@@ -349,7 +239,7 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
* When the task is complete, it reschedules itself depending on the
* appropriate interval that was calculated.
*/
-static void ath_ani_calibrate(unsigned long data)
+void ath_ani_calibrate(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -363,14 +253,6 @@ static void ath_ani_calibrate(unsigned long data)
short_cal_interval = (ah->opmode == NL80211_IFTYPE_AP) ?
ATH_AP_SHORT_CALINTERVAL : ATH_STA_SHORT_CALINTERVAL;
- /*
- * don't calibrate when we're scanning.
- * we are most likely not on our home channel.
- */
- spin_lock(&sc->ani_lock);
- if (sc->sc_flags & SC_OP_SCANNING)
- goto set_timer;
-
/* Only calibrate if awake */
if (sc->sc_ah->power_mode != ATH9K_PM_AWAKE)
goto set_timer;
@@ -437,7 +319,6 @@ static void ath_ani_calibrate(unsigned long data)
ath9k_ps_restore(sc);
set_timer:
- spin_unlock(&sc->ani_lock);
/*
* Set timer interval based on previous results.
* The interval must be the shortest necessary to satisfy ANI,
@@ -513,7 +394,7 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
ath_tx_node_cleanup(sc, an);
}
-static void ath9k_tasklet(unsigned long data)
+void ath9k_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
struct ath_hw *ah = sc->sc_ah;
@@ -545,7 +426,7 @@ static void ath9k_tasklet(unsigned long data)
*/
ath_print(common, ATH_DBG_PS,
"TSFOOR - Sync with next Beacon\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
}
if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
@@ -646,7 +527,7 @@ irqreturn_t ath_isr(int irq, void *dev)
* receive frames */
ath9k_setpower(sc, ATH9K_PM_AWAKE);
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_BEACON;
}
chip_reset:
@@ -928,49 +809,12 @@ static void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf
clear_bit(key->hw_key_idx + 64, common->keymap);
if (common->splitmic) {
+ ath9k_hw_keyreset(ah, key->hw_key_idx + 32);
clear_bit(key->hw_key_idx + 32, common->keymap);
clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
}
}
-static void setup_ht_cap(struct ath_softc *sc,
- struct ieee80211_sta_ht_cap *ht_info)
-{
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 tx_streams, rx_streams;
-
- ht_info->ht_supported = true;
- ht_info->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
- IEEE80211_HT_CAP_SM_PS |
- IEEE80211_HT_CAP_SGI_40 |
- IEEE80211_HT_CAP_DSSSCCK40;
-
- ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
- ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
-
- /* set up supported mcs set */
- memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
- tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
- 1 : 2;
- rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
- 1 : 2;
-
- if (tx_streams != rx_streams) {
- ath_print(common, ATH_DBG_CONFIG,
- "TX streams %d, RX streams: %d\n",
- tx_streams, rx_streams);
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
- ht_info->mcs.tx_params |= ((tx_streams - 1) <<
- IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
- }
-
- ht_info->mcs.rx_mask[0] = 0xff;
- if (rx_streams >= 2)
- ht_info->mcs.rx_mask[1] = 0xff;
-
- ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
-}
-
static void ath9k_bss_assoc_info(struct ath_softc *sc,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf)
@@ -992,7 +836,7 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
* on the receipt of the first Beacon frame (i.e.,
* after time sync with the AP).
*/
- sc->sc_flags |= SC_OP_BEACON_SYNC;
+ sc->ps_flags |= PS_BEACON_SYNC;
/* Configure the beacon */
ath_beacon_config(sc, vif);
@@ -1009,174 +853,6 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
}
}
-/********************************/
-/* LED functions */
-/********************************/
-
-static void ath_led_blink_work(struct work_struct *work)
-{
- struct ath_softc *sc = container_of(work, struct ath_softc,
- ath_led_blink_work.work);
-
- if (!(sc->sc_flags & SC_OP_LED_ASSOCIATED))
- return;
-
- if ((sc->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
- (sc->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- else
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (sc->sc_flags & SC_OP_LED_ON) ? 1 : 0);
-
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work,
- (sc->sc_flags & SC_OP_LED_ON) ?
- msecs_to_jiffies(sc->led_off_duration) :
- msecs_to_jiffies(sc->led_on_duration));
-
- sc->led_on_duration = sc->led_on_cnt ?
- max((ATH_LED_ON_DURATION_IDLE - sc->led_on_cnt), 25) :
- ATH_LED_ON_DURATION_IDLE;
- sc->led_off_duration = sc->led_off_cnt ?
- max((ATH_LED_OFF_DURATION_IDLE - sc->led_off_cnt), 10) :
- ATH_LED_OFF_DURATION_IDLE;
- sc->led_on_cnt = sc->led_off_cnt = 0;
- if (sc->sc_flags & SC_OP_LED_ON)
- sc->sc_flags &= ~SC_OP_LED_ON;
- else
- sc->sc_flags |= SC_OP_LED_ON;
-}
-
-static void ath_led_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness)
-{
- struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
- struct ath_softc *sc = led->sc;
-
- switch (brightness) {
- case LED_OFF:
- if (led->led_type == ATH_LED_ASSOC ||
- led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin,
- (led->led_type == ATH_LED_RADIO));
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- if (led->led_type == ATH_LED_RADIO)
- sc->sc_flags &= ~SC_OP_LED_ON;
- } else {
- sc->led_off_cnt++;
- }
- break;
- case LED_FULL:
- if (led->led_type == ATH_LED_ASSOC) {
- sc->sc_flags |= SC_OP_LED_ASSOCIATED;
- ieee80211_queue_delayed_work(sc->hw,
- &sc->ath_led_blink_work, 0);
- } else if (led->led_type == ATH_LED_RADIO) {
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 0);
- sc->sc_flags |= SC_OP_LED_ON;
- } else {
- sc->led_on_cnt++;
- }
- break;
- default:
- break;
- }
-}
-
-static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
- char *trigger)
-{
- int ret;
-
- led->sc = sc;
- led->led_cdev.name = led->name;
- led->led_cdev.default_trigger = trigger;
- led->led_cdev.brightness_set = ath_led_brightness;
-
- ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
- if (ret)
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Failed to register led:%s", led->name);
- else
- led->registered = 1;
- return ret;
-}
-
-static void ath_unregister_led(struct ath_led *led)
-{
- if (led->registered) {
- led_classdev_unregister(&led->led_cdev);
- led->registered = 0;
- }
-}
-
-static void ath_deinit_leds(struct ath_softc *sc)
-{
- ath_unregister_led(&sc->assoc_led);
- sc->sc_flags &= ~SC_OP_LED_ASSOCIATED;
- ath_unregister_led(&sc->tx_led);
- ath_unregister_led(&sc->rx_led);
- ath_unregister_led(&sc->radio_led);
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-}
-
-static void ath_init_leds(struct ath_softc *sc)
-{
- char *trigger;
- int ret;
-
- if (AR_SREV_9287(sc->sc_ah))
- sc->sc_ah->led_pin = ATH_LED_PIN_9287;
- else
- sc->sc_ah->led_pin = ATH_LED_PIN_DEF;
-
- /* Configure gpio 1 for output */
- ath9k_hw_cfg_output(sc->sc_ah, sc->sc_ah->led_pin,
- AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
- /* LED off, active low */
- ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
-
- INIT_DELAYED_WORK(&sc->ath_led_blink_work, ath_led_blink_work);
-
- trigger = ieee80211_get_radio_led_name(sc->hw);
- snprintf(sc->radio_led.name, sizeof(sc->radio_led.name),
- "ath9k-%s::radio", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->radio_led, trigger);
- sc->radio_led.led_type = ATH_LED_RADIO;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_assoc_led_name(sc->hw);
- snprintf(sc->assoc_led.name, sizeof(sc->assoc_led.name),
- "ath9k-%s::assoc", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->assoc_led, trigger);
- sc->assoc_led.led_type = ATH_LED_ASSOC;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_tx_led_name(sc->hw);
- snprintf(sc->tx_led.name, sizeof(sc->tx_led.name),
- "ath9k-%s::tx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->tx_led, trigger);
- sc->tx_led.led_type = ATH_LED_TX;
- if (ret)
- goto fail;
-
- trigger = ieee80211_get_rx_led_name(sc->hw);
- snprintf(sc->rx_led.name, sizeof(sc->rx_led.name),
- "ath9k-%s::rx", wiphy_name(sc->hw->wiphy));
- ret = ath_register_led(sc, &sc->rx_led, trigger);
- sc->rx_led.led_type = ATH_LED_RX;
- if (ret)
- goto fail;
-
- return;
-
-fail:
- cancel_delayed_work_sync(&sc->ath_led_blink_work);
- ath_deinit_leds(sc);
-}
-
void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1194,7 +870,7 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
ath_print(common, ATH_DBG_FATAL,
- "Unable to reset channel %u (%uMhz) ",
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
}
@@ -1249,7 +925,7 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
r = ath9k_hw_reset(ah, ah->curchan, false);
if (r) {
ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
- "Unable to reset channel %u (%uMhz) "
+ "Unable to reset channel (%u MHz), "
"reset status %d\n",
channel->center_freq, r);
}
@@ -1261,711 +937,6 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
}
-/*******************/
-/* Rfkill */
-/*******************/
-
-static bool ath_is_rfkill_set(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- return ath9k_hw_gpio_get(ah, ah->rfkill_gpio) ==
- ah->rfkill_polarity;
-}
-
-static void ath9k_rfkill_poll_state(struct ieee80211_hw *hw)
-{
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- bool blocked = !!ath_is_rfkill_set(sc);
-
- wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
-}
-
-static void ath_start_rfkill_poll(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
- wiphy_rfkill_start_polling(sc->hw->wiphy);
-}
-
-static void ath9k_uninit_hw(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- BUG_ON(!ah);
-
- ath9k_exit_debug(ah);
- ath9k_hw_detach(ah);
- sc->sc_ah = NULL;
-}
-
-static void ath_clean_core(struct ath_softc *sc)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_hw *ah = sc->sc_ah;
- int i = 0;
-
- ath9k_ps_wakeup(sc);
-
- dev_dbg(sc->dev, "Detach ATH hw\n");
-
- ath_deinit_leds(sc);
- wiphy_rfkill_stop_polling(sc->hw->wiphy);
-
- for (i = 0; i < sc->num_sec_wiphy; i++) {
- struct ath_wiphy *aphy = sc->sec_wiphy[i];
- if (aphy == NULL)
- continue;
- sc->sec_wiphy[i] = NULL;
- ieee80211_unregister_hw(aphy->hw);
- ieee80211_free_hw(aphy->hw);
- }
- ieee80211_unregister_hw(hw);
- ath_rx_cleanup(sc);
- ath_tx_cleanup(sc);
-
- tasklet_kill(&sc->intr_tq);
- tasklet_kill(&sc->bcon_tasklet);
-
- if (!(sc->sc_flags & SC_OP_INVALID))
- ath9k_setpower(sc, ATH9K_PM_AWAKE);
-
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- if ((sc->btcoex.no_stomp_timer) &&
- ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
- ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
-}
-
-void ath_detach(struct ath_softc *sc)
-{
- ath_clean_core(sc);
- ath9k_uninit_hw(sc);
-}
-
-void ath_cleanup(struct ath_softc *sc)
-{
- struct ath_hw *ah = sc->sc_ah;
- struct ath_common *common = ath9k_hw_common(ah);
-
- ath_clean_core(sc);
- free_irq(sc->irq, sc);
- ath_bus_cleanup(common);
- kfree(sc->sec_wiphy);
- ieee80211_free_hw(sc->hw);
-
- ath9k_uninit_hw(sc);
-}
-
-static int ath9k_reg_notifier(struct wiphy *wiphy,
- struct regulatory_request *request)
-{
- struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
- struct ath_wiphy *aphy = hw->priv;
- struct ath_softc *sc = aphy->sc;
- struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
-
- return ath_reg_notifier_apply(wiphy, request, reg);
-}
-
-/*
- * Detects if there is any priority bt traffic
- */
-static void ath_detect_bt_priority(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
- btcoex->bt_priority_cnt++;
-
- if (time_after(jiffies, btcoex->bt_priority_time +
- msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
- if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
- ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
- "BT priority traffic detected");
- sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
- } else {
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
- }
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- }
-}
-
-/*
- * Configures appropriate weight based on stomp type.
- */
-static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
- enum ath_stomp_type stomp_type)
-{
- struct ath_hw *ah = sc->sc_ah;
-
- switch (stomp_type) {
- case ATH_BTCOEX_STOMP_ALL:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_ALL_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_LOW:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_LOW_WLAN_WGHT);
- break;
- case ATH_BTCOEX_STOMP_NONE:
- ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
- AR_STOMP_NONE_WLAN_WGHT);
- break;
- default:
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Invalid Stomptype\n");
- break;
- }
-
- ath9k_hw_btcoex_enable(ah);
-}
-
-static void ath9k_gen_timer_start(struct ath_hw *ah,
- struct ath_gen_timer *timer,
- u32 timer_next,
- u32 timer_period)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
-
- if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask |= ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
-{
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
-
- ath9k_hw_gen_timer_stop(ah, timer);
-
- /* if no timer is enabled, turn off interrupt mask */
- if (timer_table->timer_mask.val == 0) {
- ath9k_hw_set_interrupts(ah, 0);
- sc->imask &= ~ATH9K_INT_GENTIMER;
- ath9k_hw_set_interrupts(ah, sc->imask);
- }
-}
-
-/*
- * This is the master bt coex timer which runs every
- * 45 ms; bt traffic is given priority during 55% of this
- * period while wlan gets the remaining 45%
- */
-static void ath_btcoex_period_timer(unsigned long data)
-{
- struct ath_softc *sc = (struct ath_softc *) data;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_detect_bt_priority(sc);
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- ath9k_gen_timer_start(ah,
- btcoex->no_stomp_timer,
- (ath9k_hw_gettsf32(ah) +
- btcoex->btcoex_no_stomp),
- btcoex->btcoex_no_stomp * 10);
- btcoex->hw_timer_enabled = true;
- }
-
- mod_timer(&btcoex->period_timer, jiffies +
- msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
-}
-
-/*
- * Generic tsf based hw timer which configures weight
- * registers to time slice between wlan and bt traffic
- */
-static void ath_btcoex_no_stomp_timer(void *arg)
-{
- struct ath_softc *sc = (struct ath_softc *)arg;
- struct ath_hw *ah = sc->sc_ah;
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "no stomp timer running \n");
-
- spin_lock_bh(&btcoex->btcoex_lock);
-
- if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
- else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
- ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
-
- spin_unlock_bh(&btcoex->btcoex_lock);
-}
-
-static int ath_init_btcoex_timer(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
-
- btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
- btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
- btcoex->btcoex_period / 100;
-
- setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
- (unsigned long) sc);
-
- spin_lock_init(&btcoex->btcoex_lock);
-
- btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
- ath_btcoex_no_stomp_timer,
- ath_btcoex_no_stomp_timer,
- (void *) sc, AR_FIRST_NDP_TIMER);
-
- if (!btcoex->no_stomp_timer)
- return -ENOMEM;
-
- return 0;
-}
-
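For reference, with the defaults implied by the duty-cycle comment further above (a 45 ms period of which BT gets 55%; ATH_BTCOEX_DEF_BT_PERIOD = 45 and ATH_BTCOEX_DEF_DUTY_CYCLE = 55 are assumptions read off that comment, not values shown in this hunk), ath_init_btcoex_timer() works out to btcoex_period = 45000 us and btcoex_no_stomp = (100 - 55) * 45000 / 100 = 20250 us. A small standalone sketch of the same arithmetic:

    #include <stdio.h>

    /* Assumed defaults, per the "45 ms period, 55% BT priority" comment. */
    #define BT_PERIOD_MS   45
    #define BT_DUTY_CYCLE  55   /* percent of each period given to BT */

    int main(void)
    {
            unsigned int period_us   = BT_PERIOD_MS * 1000;
            unsigned int no_stomp_us = (100 - BT_DUTY_CYCLE) * period_us / 100;

            /* no_stomp_us is the slice in which WLAN traffic is not stomped. */
            printf("period = %u us, no-stomp window = %u us\n",
                   period_us, no_stomp_us);   /* 45000 us, 20250 us */
            return 0;
    }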
-/*
- * Reads and writes both share the same lock. We do this to serialize
- * reads and writes on Atheros 802.11n PCI devices only. This is required
- * as the FIFO on these devices can only sanely accept 2 requests. After
- * that the device goes bananas. Serializing the reads/writes prevents this
- * from happening.
- */
-
-static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- iowrite32(val, sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- iowrite32(val, sc->mem + reg_offset);
-}
-
-static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
-{
- struct ath_hw *ah = (struct ath_hw *) hw_priv;
- struct ath_common *common = ath9k_hw_common(ah);
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- u32 val;
-
- if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
- unsigned long flags;
- spin_lock_irqsave(&sc->sc_serial_rw, flags);
- val = ioread32(sc->mem + reg_offset);
- spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
- } else
- val = ioread32(sc->mem + reg_offset);
- return val;
-}
-
-static const struct ath_ops ath9k_common_ops = {
- .read = ath9k_ioread32,
- .write = ath9k_iowrite32,
-};
-
-/*
- * Initialize and fill ath_softc, the "Software Carrier"
- * struct. Historically it has existed
- * to allow the separation between hardware specific
- * variables (now in ath_hw) and driver specific variables.
- */
-static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ath_hw *ah = NULL;
- struct ath_common *common;
- int r = 0, i;
- int csz = 0;
- int qnum;
-
- /* XXX: hardware will not be ready until ath_open() being called */
- sc->sc_flags |= SC_OP_INVALID;
-
- spin_lock_init(&sc->wiphy_lock);
- spin_lock_init(&sc->sc_resetlock);
- spin_lock_init(&sc->sc_serial_rw);
- spin_lock_init(&sc->ani_lock);
- spin_lock_init(&sc->sc_pm_lock);
- mutex_init(&sc->mutex);
- tasklet_init(&sc->intr_tq, ath9k_tasklet, (unsigned long)sc);
- tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
- (unsigned long)sc);
-
- ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
- if (!ah)
- return -ENOMEM;
-
- ah->hw_version.devid = devid;
- ah->hw_version.subsysid = subsysid;
- sc->sc_ah = ah;
-
- common = ath9k_hw_common(ah);
- common->ops = &ath9k_common_ops;
- common->bus_ops = bus_ops;
- common->ah = ah;
- common->hw = sc->hw;
- common->priv = sc;
- common->debug_mask = ath9k_debug;
-
- /*
- * Cache line size is used to size and align various
- * structures used to communicate with the hardware.
- */
- ath_read_cachesize(common, &csz);
- /* XXX assert csz is non-zero */
- common->cachelsz = csz << 2; /* convert to bytes */
-
- r = ath9k_hw_init(ah);
- if (r) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to initialize hardware; "
- "initialization status: %d\n", r);
- goto bad_free_hw;
- }
-
- if (ath9k_init_debug(ah) < 0) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to create debugfs files\n");
- goto bad_free_hw;
- }
-
- /* Get the hardware key cache size. */
- common->keymax = ah->caps.keycache_size;
- if (common->keymax > ATH_KEYMAX) {
- ath_print(common, ATH_DBG_ANY,
- "Warning, using only %u entries in %u key cache\n",
- ATH_KEYMAX, common->keymax);
- common->keymax = ATH_KEYMAX;
- }
-
- /*
- * Reset the key cache since some parts do not
- * reset the contents on initial power up.
- */
- for (i = 0; i < common->keymax; i++)
- ath9k_hw_keyreset(ah, (u16) i);
-
- /* default to MONITOR mode */
- sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
-
- /*
- * Allocate hardware transmit queues: one queue for
- * beacon frames and one data queue for each QoS
- * priority. Note that the hal handles resetting
- * these queues at the needed time.
- */
- sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
- if (sc->beacon.beaconq == -1) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup a beacon xmit queue\n");
- r = -EIO;
- goto bad2;
- }
- sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
- if (sc->beacon.cabq == NULL) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup CAB xmit queue\n");
- r = -EIO;
- goto bad2;
- }
-
- sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
- ath_cabq_update(sc);
-
- for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
- sc->tx.hwq_map[i] = -1;
-
- /* Setup data queues */
- /* NB: ensure BK queue is the lowest priority h/w queue */
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BK traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for BE traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VI traffic\n");
- r = -EIO;
- goto bad2;
- }
- if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
- ath_print(common, ATH_DBG_FATAL,
- "Unable to setup xmit queue for VO traffic\n");
- r = -EIO;
- goto bad2;
- }
-
- /* Initializes the noise floor to a reasonable default value.
- * Later on this will be updated during ANI processing. */
-
- common->ani.noise_floor = ATH_DEFAULT_NOISE_FLOOR;
- setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
-
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)) {
- /*
- * Whether we should enable h/w TKIP MIC.
- * XXX: if we don't support WME TKIP MIC, then we wouldn't
- * report WMM capable, so it's always safe to turn on
- * TKIP MIC in this case.
- */
- ath9k_hw_setcapability(sc->sc_ah, ATH9K_CAP_TKIP_MIC,
- 0, 1, NULL);
- }
-
- /*
- * Check whether the separate key cache entries
- * are required to handle both tx+rx MIC keys.
- * With split mic keys the number of stations is limited
- * to 27 otherwise 59.
- */
- if (ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_TKIP, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_CIPHER,
- ATH9K_CIPHER_MIC, NULL)
- && ath9k_hw_getcapability(ah, ATH9K_CAP_TKIP_SPLIT,
- 0, NULL))
- common->splitmic = 1;
-
- /* turn on mcast key search if possible */
- if (!ath9k_hw_getcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 0, NULL))
- (void)ath9k_hw_setcapability(ah, ATH9K_CAP_MCAST_KEYSRCH, 1,
- 1, NULL);
-
- sc->config.txpowlimit = ATH_TXPOWER_MAX;
-
- /* 11n Capabilities */
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- sc->sc_flags |= SC_OP_TXAGGR;
- sc->sc_flags |= SC_OP_RXAGGR;
- }
-
- common->tx_chainmask = ah->caps.tx_chainmask;
- common->rx_chainmask = ah->caps.rx_chainmask;
-
- ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
- sc->rx.defant = ath9k_hw_getdefantenna(ah);
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
- memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
-
- sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
-
- /* initialize beacon slots */
- for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- sc->beacon.bslot[i] = NULL;
- sc->beacon.bslot_aphy[i] = NULL;
- }
-
- /* setup channels and rates */
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_2GHZ].channels = ath9k_2ghz_chantable;
- sc->sbands[IEEE80211_BAND_2GHZ].band = IEEE80211_BAND_2GHZ;
- sc->sbands[IEEE80211_BAND_2GHZ].n_channels =
- ARRAY_SIZE(ath9k_2ghz_chantable);
- sc->sbands[IEEE80211_BAND_2GHZ].bitrates = ath9k_legacy_rates;
- sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates);
- }
-
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) {
- sc->sbands[IEEE80211_BAND_5GHZ].channels = ath9k_5ghz_chantable;
- sc->sbands[IEEE80211_BAND_5GHZ].band = IEEE80211_BAND_5GHZ;
- sc->sbands[IEEE80211_BAND_5GHZ].n_channels =
- ARRAY_SIZE(ath9k_5ghz_chantable);
- sc->sbands[IEEE80211_BAND_5GHZ].bitrates =
- ath9k_legacy_rates + 4;
- sc->sbands[IEEE80211_BAND_5GHZ].n_bitrates =
- ARRAY_SIZE(ath9k_legacy_rates) - 4;
- }
-
- switch (ah->btcoex_hw.scheme) {
- case ATH_BTCOEX_CFG_NONE:
- break;
- case ATH_BTCOEX_CFG_2WIRE:
- ath9k_hw_btcoex_init_2wire(ah);
- break;
- case ATH_BTCOEX_CFG_3WIRE:
- ath9k_hw_btcoex_init_3wire(ah);
- r = ath_init_btcoex_timer(sc);
- if (r)
- goto bad2;
- qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
- ath9k_hw_init_btcoex_hw(ah, qnum);
- sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
- break;
- default:
- WARN_ON(1);
- break;
- }
-
- return 0;
-bad2:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
-bad_free_hw:
- ath9k_uninit_hw(sc);
- return r;
-}
-
-void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
-{
- hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
- IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_AMPDU_AGGREGATION |
- IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_PS_NULLFUNC_STACK |
- IEEE80211_HW_SPECTRUM_MGMT;
-
- if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt)
- hw->flags |= IEEE80211_HW_MFP_CAPABLE;
-
- hw->wiphy->interface_modes =
- BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_STATION) |
- BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT);
-
- hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
-
- hw->queues = 4;
- hw->max_rates = 4;
- hw->channel_change_time = 5000;
- hw->max_listen_interval = 10;
- /* Hardware supports 10 but we use 4 */
- hw->max_rate_tries = 4;
- hw->sta_data_size = sizeof(struct ath_node);
- hw->vif_data_size = sizeof(struct ath_vif);
-
- hw->rate_control_algorithm = "ath9k_rate_control";
-
- if (test_bit(ATH9K_MODE_11G, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
- &sc->sbands[IEEE80211_BAND_2GHZ];
- if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes))
- hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
- &sc->sbands[IEEE80211_BAND_5GHZ];
-}
-
-/* Device driver core initialization */
-int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
- const struct ath_bus_ops *bus_ops)
-{
- struct ieee80211_hw *hw = sc->hw;
- struct ath_common *common;
- struct ath_hw *ah;
- int error = 0, i;
- struct ath_regulatory *reg;
-
- dev_dbg(sc->dev, "Attach ATH hw\n");
-
- error = ath_init_softc(devid, sc, subsysid, bus_ops);
- if (error != 0)
- return error;
-
- ah = sc->sc_ah;
- common = ath9k_hw_common(ah);
-
- /* get mac address from hardware and set in mac80211 */
-
- SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
-
- ath_set_hw_capab(sc, hw);
-
- error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
- ath9k_reg_notifier);
- if (error)
- return error;
-
- reg = &common->regulatory;
-
- if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
- if (test_bit(ATH9K_MODE_11G, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
- if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
- setup_ht_cap(sc,
- &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
- }
-
- /* initialize tx/rx engine */
- error = ath_tx_init(sc, ATH_TXBUF);
- if (error != 0)
- goto error_attach;
-
- error = ath_rx_init(sc, ATH_RXBUF);
- if (error != 0)
- goto error_attach;
-
- INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
- INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
- sc->wiphy_scheduler_int = msecs_to_jiffies(500);
-
- error = ieee80211_register_hw(hw);
-
- if (!ath_is_world_regd(reg)) {
- error = regulatory_hint(hw->wiphy, reg->alpha2);
- if (error)
- goto error_attach;
- }
-
- /* Initialize LED control */
- ath_init_leds(sc);
-
- ath_start_rfkill_poll(sc);
-
- return 0;
-
-error_attach:
- /* cleanup tx queues */
- for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
- if (ATH_TXQ_SETUP(sc, i))
- ath_tx_cleanupq(sc, &sc->tx.txq[i]);
-
- ath9k_uninit_hw(sc);
-
- return error;
-}
-
int ath_reset(struct ath_softc *sc, bool retry_tx)
{
struct ath_hw *ah = sc->sc_ah;
@@ -1976,6 +947,8 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
/* Stop ANI */
del_timer_sync(&common->ani.timer);
+ ieee80211_stop_queues(hw);
+
ath9k_hw_set_interrupts(ah, 0);
ath_drain_all_txq(sc, retry_tx);
ath_stoprecv(sc);
@@ -2017,131 +990,14 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
}
}
+ ieee80211_wake_queues(hw);
+
/* Start ANI */
ath_start_ani(common);
return r;
}
-/*
- * This function allocates both the DMA descriptor structure and the
- * buffers it contains; the buffers hold the descriptors used
- * by the system.
-*/
-int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
- struct list_head *head, const char *name,
- int nbuf, int ndesc)
-{
-#define DS2PHYS(_dd, _ds) \
- ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
-#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
-#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_desc *ds;
- struct ath_buf *bf;
- int i, bsize, error;
-
- ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
- name, nbuf, ndesc);
-
- INIT_LIST_HEAD(head);
- /* ath_desc must be a multiple of DWORDs */
- if ((sizeof(struct ath_desc) % 4) != 0) {
- ath_print(common, ATH_DBG_FATAL,
- "ath_desc not DWORD aligned\n");
- BUG_ON((sizeof(struct ath_desc) % 4) != 0);
- error = -ENOMEM;
- goto fail;
- }
-
- dd->dd_desc_len = sizeof(struct ath_desc) * nbuf * ndesc;
-
- /*
- * Need additional DMA memory because we can't use
- * descriptors that cross the 4K page boundary. Assume
- * one skipped descriptor per 4K page.
- */
- if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- u32 ndesc_skipped =
- ATH_DESC_4KB_BOUND_NUM_SKIPPED(dd->dd_desc_len);
- u32 dma_len;
-
- while (ndesc_skipped) {
- dma_len = ndesc_skipped * sizeof(struct ath_desc);
- dd->dd_desc_len += dma_len;
-
- ndesc_skipped = ATH_DESC_4KB_BOUND_NUM_SKIPPED(dma_len);
- };
- }
-
- /* allocate descriptors */
- dd->dd_desc = dma_alloc_coherent(sc->dev, dd->dd_desc_len,
- &dd->dd_desc_paddr, GFP_KERNEL);
- if (dd->dd_desc == NULL) {
- error = -ENOMEM;
- goto fail;
- }
- ds = dd->dd_desc;
- ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
- name, ds, (u32) dd->dd_desc_len,
- ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
-
- /* allocate buffers */
- bsize = sizeof(struct ath_buf) * nbuf;
- bf = kzalloc(bsize, GFP_KERNEL);
- if (bf == NULL) {
- error = -ENOMEM;
- goto fail2;
- }
- dd->dd_bufptr = bf;
-
- for (i = 0; i < nbuf; i++, bf++, ds += ndesc) {
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
-
- if (!(sc->sc_ah->caps.hw_caps &
- ATH9K_HW_CAP_4KB_SPLITTRANS)) {
- /*
- * Skip descriptor addresses which can cause 4KB
- * boundary crossing (addr + length) with a 32 dword
- * descriptor fetch.
- */
- while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
- BUG_ON((caddr_t) bf->bf_desc >=
- ((caddr_t) dd->dd_desc +
- dd->dd_desc_len));
-
- ds += ndesc;
- bf->bf_desc = ds;
- bf->bf_daddr = DS2PHYS(dd, ds);
- }
- }
- list_add_tail(&bf->list, head);
- }
- return 0;
-fail2:
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-fail:
- memset(dd, 0, sizeof(*dd));
- return error;
-#undef ATH_DESC_4KB_BOUND_CHECK
-#undef ATH_DESC_4KB_BOUND_NUM_SKIPPED
-#undef DS2PHYS
-}
-
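The ATH_DESC_4KB_BOUND_CHECK macro used above skips any descriptor whose DMA address lands in the last 128 bytes of a 4 KB page (page offset above 0xF7F), since a 32-dword descriptor fetch from there would cross the page boundary on parts without ATH9K_HW_CAP_4KB_SPLITTRANS. A standalone sketch of the boundary test (illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    /* Mirror of ATH_DESC_4KB_BOUND_CHECK: skip descriptors whose page offset
     * is above 0xF7F, because a 32-dword (128-byte) fetch from there would
     * run past the 4 KB boundary. */
    static int crosses_4kb(uint32_t daddr)
    {
            return ((daddr & 0xFFF) > 0xF7F) ? 1 : 0;
    }

    int main(void)
    {
            printf("offset 0xF00: %d\n", crosses_4kb(0x10000F00)); /* 0: fits          */
            printf("offset 0xF7F: %d\n", crosses_4kb(0x10000F7F)); /* 0: last safe slot */
            printf("offset 0xF80: %d\n", crosses_4kb(0x10000F80)); /* 1: skipped        */
            return 0;
    }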
-void ath_descdma_cleanup(struct ath_softc *sc,
- struct ath_descdma *dd,
- struct list_head *head)
-{
- dma_free_coherent(sc->dev, dd->dd_desc_len, dd->dd_desc,
- dd->dd_desc_paddr);
-
- INIT_LIST_HEAD(head);
- kfree(dd->dd_bufptr);
- memset(dd, 0, sizeof(*dd));
-}
-
int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
{
int qnum;
@@ -2220,28 +1076,6 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
/* mac80211 callbacks */
/**********************/
-/*
- * (Re)start btcoex timers
- */
-static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
- "Starting btcoex timers");
-
- /* make sure duty cycle timer is also stopped when resuming */
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
-
- btcoex->bt_priority_cnt = 0;
- btcoex->bt_priority_time = jiffies;
- sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
-
- mod_timer(&btcoex->period_timer, jiffies);
-}
-
static int ath9k_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2411,11 +1245,11 @@ static int ath9k_tx(struct ieee80211_hw *hw,
if (ieee80211_is_pspoll(hdr->frame_control)) {
ath_print(common, ATH_DBG_PS,
"Sending PS-Poll to pick a buffered frame\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
} else {
ath_print(common, ATH_DBG_PS,
"Wake up to complete TX\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
+ sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
}
/*
* The actual restore operation will happen only after
@@ -2468,22 +1302,6 @@ exit:
return 0;
}
-/*
- * Pause btcoex timer and bt duty cycle timer
- */
-static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
-{
- struct ath_btcoex *btcoex = &sc->btcoex;
- struct ath_hw *ah = sc->sc_ah;
-
- del_timer_sync(&btcoex->period_timer);
-
- if (btcoex->hw_timer_enabled)
- ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
-
- btcoex->hw_timer_enabled = false;
-}
-
static void ath9k_stop(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2550,12 +1368,12 @@ static void ath9k_stop(struct ieee80211_hw *hw)
}
static int ath9k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)conf->vif->drv_priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
int ret = 0;
@@ -2567,7 +1385,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
goto out;
}
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
ic_opmode = NL80211_IFTYPE_STATION;
break;
@@ -2578,11 +1396,11 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
ret = -ENOBUFS;
goto out;
}
- ic_opmode = conf->type;
+ ic_opmode = vif->type;
break;
default:
ath_print(common, ATH_DBG_FATAL,
- "Interface type %d not yet supported\n", conf->type);
+ "Interface type %d not yet supported\n", vif->type);
ret = -EOPNOTSUPP;
goto out;
}
@@ -2614,18 +1432,18 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
* Enable MIB interrupts when there are hardware phy counters.
* Note we only do this (at the moment) for station mode.
*/
- if ((conf->type == NL80211_IFTYPE_STATION) ||
- (conf->type == NL80211_IFTYPE_ADHOC) ||
- (conf->type == NL80211_IFTYPE_MESH_POINT)) {
+ if ((vif->type == NL80211_IFTYPE_STATION) ||
+ (vif->type == NL80211_IFTYPE_ADHOC) ||
+ (vif->type == NL80211_IFTYPE_MESH_POINT)) {
sc->imask |= ATH9K_INT_MIB;
sc->imask |= ATH9K_INT_TSFOOR;
}
ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
- if (conf->type == NL80211_IFTYPE_AP ||
- conf->type == NL80211_IFTYPE_ADHOC ||
- conf->type == NL80211_IFTYPE_MONITOR)
+ if (vif->type == NL80211_IFTYPE_AP ||
+ vif->type == NL80211_IFTYPE_ADHOC ||
+ vif->type == NL80211_IFTYPE_MONITOR)
ath_start_ani(common);
out:
@@ -2634,12 +1452,12 @@ out:
}
static void ath9k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_vif *avp = (void *)conf->vif->drv_priv;
+ struct ath_vif *avp = (void *)vif->drv_priv;
int i;
ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
@@ -2662,7 +1480,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
sc->sc_flags &= ~SC_OP_BEACONS;
for (i = 0; i < ARRAY_SIZE(sc->beacon.bslot); i++) {
- if (sc->beacon.bslot[i] == conf->vif) {
+ if (sc->beacon.bslot[i] == vif) {
printk(KERN_DEBUG "%s: vif had allocated beacon "
"slot\n", __func__);
sc->beacon.bslot[i] = NULL;
@@ -2675,6 +1493,19 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&sc->mutex);
}
+void ath9k_enable_ps(struct ath_softc *sc)
+{
+ sc->ps_enabled = true;
+ if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
+ if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
+ sc->imask |= ATH9K_INT_TIM_TIMER;
+ ath9k_hw_set_interrupts(sc->sc_ah,
+ sc->imask);
+ }
+ }
+ ath9k_hw_setrxabort(sc->sc_ah, 1);
+}
+
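The SC_OP_* to PS_* conversions that recur throughout this patch move the power-save bookkeeping out of sc_flags into a dedicated ps_flags word, which is also why the debug prints switch from 0x%x to 0x%lx later in recv.c and xmit.c. A minimal sketch of the grouped test-and-clear pattern this enables (the bit values below are made up for illustration; the real PS_* flags are defined elsewhere in the driver):

    #include <stdio.h>

    /* Illustrative bit values only; not the driver's definitions. */
    #define PS_WAIT_FOR_BEACON       0x01UL
    #define PS_WAIT_FOR_CAB          0x02UL
    #define PS_WAIT_FOR_PSPOLL_DATA  0x04UL
    #define PS_WAIT_FOR_TX_ACK       0x08UL

    int main(void)
    {
            unsigned long ps_flags = PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB;

            /* Grouped test: anything still pending before going to sleep? */
            if (ps_flags & (PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB |
                            PS_WAIT_FOR_PSPOLL_DATA | PS_WAIT_FOR_TX_ACK))
                    printf("still awake, ps_flags=0x%lx\n", ps_flags);

            /* Grouped clear, as done when power save is switched off. */
            ps_flags &= ~(PS_WAIT_FOR_BEACON | PS_WAIT_FOR_CAB |
                          PS_WAIT_FOR_PSPOLL_DATA | PS_WAIT_FOR_TX_ACK);
            printf("after clear: ps_flags=0x%lx\n", ps_flags);
            return 0;
    }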
static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath_wiphy *aphy = hw->priv;
@@ -2713,6 +1544,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
spin_unlock_bh(&sc->wiphy_lock);
if (enable_radio) {
+ sc->ps_idle = false;
ath_radio_enable(sc, hw);
ath_print(common, ATH_DBG_CONFIG,
"not-idle: enabling radio\n");
@@ -2727,36 +1559,27 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
*/
if (changed & IEEE80211_CONF_CHANGE_PS) {
if (conf->flags & IEEE80211_CONF_PS) {
- sc->sc_flags |= SC_OP_PS_ENABLED;
- if (!(ah->caps.hw_caps &
- ATH9K_HW_CAP_AUTOSLEEP)) {
- if ((sc->imask & ATH9K_INT_TIM_TIMER) == 0) {
- sc->imask |= ATH9K_INT_TIM_TIMER;
- ath9k_hw_set_interrupts(sc->sc_ah,
- sc->imask);
- }
- }
+ sc->ps_flags |= PS_ENABLED;
/*
* At this point we know hardware has received an ACK
* of a previously sent null data frame.
*/
- if ((sc->sc_flags & SC_OP_NULLFUNC_COMPLETED)) {
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
+ if ((sc->ps_flags & PS_NULLFUNC_COMPLETED)) {
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
+ ath9k_enable_ps(sc);
}
} else {
sc->ps_enabled = false;
- sc->sc_flags &= ~(SC_OP_PS_ENABLED |
- SC_OP_NULLFUNC_COMPLETED);
+ sc->ps_flags &= ~(PS_ENABLED |
+ PS_NULLFUNC_COMPLETED);
ath9k_setpower(sc, ATH9K_PM_AWAKE);
if (!(ah->caps.hw_caps &
ATH9K_HW_CAP_AUTOSLEEP)) {
ath9k_hw_setrxabort(sc->sc_ah, 0);
- sc->sc_flags &= ~(SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK);
+ sc->ps_flags &= ~(PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK);
if (sc->imask & ATH9K_INT_TIM_TIMER) {
sc->imask &= ~ATH9K_INT_TIM_TIMER;
ath9k_hw_set_interrupts(sc->sc_ah,
@@ -2766,6 +1589,14 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
}
+ if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
+ if (conf->flags & IEEE80211_CONF_MONITOR) {
+ ath_print(common, ATH_DBG_CONFIG,
+ "HW opmode set to Monitor mode\n");
+ sc->sc_ah->opmode = NL80211_IFTYPE_MONITOR;
+ }
+ }
+
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
struct ieee80211_channel *curchan = hw->conf.channel;
int pos = curchan->hw_value;
@@ -2801,8 +1632,10 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
}
skip_chan_change:
- if (changed & IEEE80211_CONF_CHANGE_POWER)
+ if (changed & IEEE80211_CONF_CHANGE_POWER) {
sc->config.txpowlimit = 2 * conf->power_level;
+ ath_update_txpow(sc);
+ }
spin_lock_bh(&sc->wiphy_lock);
disable_radio = ath9k_all_wiphys_idle(sc);
@@ -2810,6 +1643,7 @@ skip_chan_change:
if (disable_radio) {
ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
+ sc->ps_idle = true;
ath_radio_disable(sc, hw);
}
@@ -2850,24 +1684,28 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
"Set HW RX filter: 0x%x\n", rfilt);
}
-static void ath9k_sta_notify(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- enum sta_notify_cmd cmd,
- struct ieee80211_sta *sta)
+static int ath9k_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
- switch (cmd) {
- case STA_NOTIFY_ADD:
- ath_node_attach(sc, sta);
- break;
- case STA_NOTIFY_REMOVE:
- ath_node_detach(sc, sta);
- break;
- default:
- break;
- }
+ ath_node_attach(sc, sta);
+
+ return 0;
+}
+
+static int ath9k_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+
+ ath_node_detach(sc, sta);
+
+ return 0;
}
static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -2966,6 +1804,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
struct ath_hw *ah = sc->sc_ah;
struct ath_common *common = ath9k_hw_common(ah);
struct ath_vif *avp = (void *)vif->drv_priv;
+ int slottime;
int error;
mutex_lock(&sc->mutex);
@@ -3001,6 +1840,25 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
ath_beacon_config(sc, vif);
}
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ if (bss_conf->use_short_slot)
+ slottime = 9;
+ else
+ slottime = 20;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * Defer update, so that connected stations can adjust
+ * their settings at the same time.
+ * See beacon.c for more details
+ */
+ sc->beacon.slottime = slottime;
+ sc->beacon.updateslot = UPDATE;
+ } else {
+ ah->slottime = slottime;
+ ath9k_hw_init_global_settings(ah);
+ }
+ }
+
/* Disable transmission of beacons */
if ((changed & BSS_CHANGED_BEACON_ENABLED) && !bss_conf->enable_beacon)
ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
@@ -3133,6 +1991,7 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
mutex_lock(&sc->mutex);
if (ath9k_wiphy_scanning(sc)) {
@@ -3148,10 +2007,9 @@ static void ath9k_sw_scan_start(struct ieee80211_hw *hw)
aphy->state = ATH_WIPHY_SCAN;
ath9k_wiphy_pause_all_forced(sc, aphy);
-
- spin_lock_bh(&sc->ani_lock);
sc->sc_flags |= SC_OP_SCANNING;
- spin_unlock_bh(&sc->ani_lock);
+ del_timer_sync(&common->ani.timer);
+ cancel_delayed_work_sync(&sc->tx_complete_work);
mutex_unlock(&sc->mutex);
}
@@ -3159,17 +2017,30 @@ static void ath9k_sw_scan_complete(struct ieee80211_hw *hw)
{
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ struct ath_common *common = ath9k_hw_common(sc->sc_ah);
mutex_lock(&sc->mutex);
- spin_lock_bh(&sc->ani_lock);
aphy->state = ATH_WIPHY_ACTIVE;
sc->sc_flags &= ~SC_OP_SCANNING;
sc->sc_flags |= SC_OP_FULL_RESET;
- spin_unlock_bh(&sc->ani_lock);
+ ath_start_ani(common);
+ ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
ath_beacon_config(sc, NULL);
mutex_unlock(&sc->mutex);
}
+static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
+{
+ struct ath_wiphy *aphy = hw->priv;
+ struct ath_softc *sc = aphy->sc;
+ struct ath_hw *ah = sc->sc_ah;
+
+ mutex_lock(&sc->mutex);
+ ah->coverage_class = coverage_class;
+ ath9k_hw_init_global_settings(ah);
+ mutex_unlock(&sc->mutex);
+}
+
struct ieee80211_ops ath9k_ops = {
.tx = ath9k_tx,
.start = ath9k_start,
@@ -3178,7 +2049,8 @@ struct ieee80211_ops ath9k_ops = {
.remove_interface = ath9k_remove_interface,
.config = ath9k_config,
.configure_filter = ath9k_configure_filter,
- .sta_notify = ath9k_sta_notify,
+ .sta_add = ath9k_sta_add,
+ .sta_remove = ath9k_sta_remove,
.conf_tx = ath9k_conf_tx,
.bss_info_changed = ath9k_bss_info_changed,
.set_key = ath9k_set_key,
@@ -3189,64 +2061,5 @@ struct ieee80211_ops ath9k_ops = {
.sw_scan_start = ath9k_sw_scan_start,
.sw_scan_complete = ath9k_sw_scan_complete,
.rfkill_poll = ath9k_rfkill_poll_state,
+ .set_coverage_class = ath9k_set_coverage_class,
};
-
-static int __init ath9k_init(void)
-{
- int error;
-
- /* Register rate control algorithm */
- error = ath_rate_control_register();
- if (error != 0) {
- printk(KERN_ERR
- "ath9k: Unable to register rate control "
- "algorithm: %d\n",
- error);
- goto err_out;
- }
-
- error = ath9k_debug_create_root();
- if (error) {
- printk(KERN_ERR
- "ath9k: Unable to create debugfs root: %d\n",
- error);
- goto err_rate_unregister;
- }
-
- error = ath_pci_init();
- if (error < 0) {
- printk(KERN_ERR
- "ath9k: No PCI devices found, driver not installed.\n");
- error = -ENODEV;
- goto err_remove_root;
- }
-
- error = ath_ahb_init();
- if (error < 0) {
- error = -ENODEV;
- goto err_pci_exit;
- }
-
- return 0;
-
- err_pci_exit:
- ath_pci_exit();
-
- err_remove_root:
- ath9k_debug_remove_root();
- err_rate_unregister:
- ath_rate_control_unregister();
- err_out:
- return error;
-}
-module_init(ath9k_init);
-
-static void __exit ath9k_exit(void)
-{
- ath_ahb_exit();
- ath_pci_exit();
- ath9k_debug_remove_root();
- ath_rate_control_unregister();
- printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
-}
-module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index f7af5ea5475..9441c6718a3 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -18,13 +18,14 @@
#include <linux/pci.h>
#include "ath9k.h"
-static struct pci_device_id ath_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
{ PCI_VDEVICE(ATHEROS, 0x0023) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0024) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x0027) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x0029) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002A) }, /* PCI-E */
{ PCI_VDEVICE(ATHEROS, 0x002B) }, /* PCI-E */
+ { PCI_VDEVICE(ATHEROS, 0x002C) }, /* PCI-E 802.11n bonded out */
{ PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
{ PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
{ 0 }
@@ -49,16 +50,6 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
*csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
}
-static void ath_pci_cleanup(struct ath_common *common)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- struct pci_dev *pdev = to_pci_dev(sc->dev);
-
- pci_iounmap(pdev, sc->mem);
- pci_disable_device(pdev);
- pci_release_region(pdev, 0);
-}
-
static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath_hw *ah = (struct ath_hw *) common->ah;
@@ -98,7 +89,6 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
static const struct ath_bus_ops ath_pci_bus_ops = {
.read_cachesize = ath_pci_read_cachesize,
- .cleanup = ath_pci_cleanup,
.eeprom_read = ath_pci_eeprom_read,
.bt_coex_prep = ath_pci_bt_coex_prep,
};
@@ -113,25 +103,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
u16 subsysid;
u32 val;
int ret = 0;
- struct ath_hw *ah;
char hw_name[64];
if (pci_enable_device(pdev))
return -EIO;
ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
- goto bad;
+ goto err_dma;
}
ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-
if (ret) {
printk(KERN_ERR "ath9k: 32-bit DMA consistent "
"DMA enable failed\n");
- goto bad;
+ goto err_dma;
}
/*
@@ -171,22 +158,22 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (ret) {
dev_err(&pdev->dev, "PCI memory region reserve error\n");
ret = -ENODEV;
- goto bad;
+ goto err_region;
}
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
printk(KERN_ERR "PCI memory map error\n") ;
ret = -EIO;
- goto bad1;
+ goto err_iomap;
}
hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy) +
sizeof(struct ath_softc), &ath9k_ops);
if (!hw) {
- dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
+ dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
ret = -ENOMEM;
- goto bad2;
+ goto err_alloc_hw;
}
SET_IEEE80211_DEV(hw, &pdev->dev);
@@ -201,25 +188,25 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
sc->dev = &pdev->dev;
sc->mem = mem;
- pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
- ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
- if (ret) {
- dev_err(&pdev->dev, "failed to initialize device\n");
- goto bad3;
- }
-
- /* setup interrupt service routine */
+ /* Will be cleared in ath9k_start() */
+ sc->sc_flags |= SC_OP_INVALID;
ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
if (ret) {
dev_err(&pdev->dev, "request_irq failed\n");
- goto bad4;
+ goto err_irq;
}
sc->irq = pdev->irq;
- ah = sc->sc_ah;
- ath9k_hw_name(ah, hw_name, sizeof(hw_name));
+ pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
+ ret = ath9k_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to initialize device\n");
+ goto err_init;
+ }
+
+ ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
printk(KERN_INFO
"%s: %s mem=0x%lx, irq=%d\n",
wiphy_name(hw->wiphy),
@@ -227,15 +214,18 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
(unsigned long)mem, pdev->irq);
return 0;
-bad4:
- ath_detach(sc);
-bad3:
+
+err_init:
+ free_irq(sc->irq, sc);
+err_irq:
ieee80211_free_hw(hw);
-bad2:
+err_alloc_hw:
pci_iounmap(pdev, mem);
-bad1:
+err_iomap:
pci_release_region(pdev, 0);
-bad:
+err_region:
+ /* Nothing */
+err_dma:
pci_disable_device(pdev);
return ret;
}
@@ -245,8 +235,15 @@ static void ath_pci_remove(struct pci_dev *pdev)
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath_wiphy *aphy = hw->priv;
struct ath_softc *sc = aphy->sc;
+ void __iomem *mem = sc->mem;
+
+ ath9k_deinit_device(sc);
+ free_irq(sc->irq, sc);
+ ieee80211_free_hw(sc->hw);
- ath_cleanup(sc);
+ pci_iounmap(pdev, mem);
+ pci_disable_device(pdev);
+ pci_release_region(pdev, 0);
}
#ifdef CONFIG_PM
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 31de27dc0c4..0999a495fd4 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -384,6 +384,9 @@ bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
#define AR_PHY_HEAVY_CLIP_ENABLE 0x99E0
+#define AR_PHY_HEAVY_CLIP_FACTOR_RIFS 0x99EC
+#define AR_PHY_RIFS_INIT_DELAY 0x03ff0000
+
#define AR_PHY_M_SLEEP 0x99f0
#define AR_PHY_REFCLKDLY 0x99f4
#define AR_PHY_REFCLKPD 0x99f8
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 70fdb9d8db8..ac34a055c71 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -668,7 +668,7 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate *rates = tx_info->control.rates;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix, nrix;
+ u8 try_per_rate, i = 0, rix;
int is_probe = 0;
if (rate_control_send_low(sta, priv_sta, txrc))
@@ -678,48 +678,47 @@ static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
* For Multi Rate Retry we use a different number of
* retry attempt counts. This ends up looking like this:
*
- * MRR[0] = 2
- * MRR[1] = 2
- * MRR[2] = 2
- * MRR[3] = 4
+ * MRR[0] = 4
+ * MRR[1] = 4
+ * MRR[2] = 4
+ * MRR[3] = 8
*
*/
- try_per_rate = sc->hw->max_rate_tries;
+ try_per_rate = 4;
rate_table = sc->cur_rate_table;
rix = ath_rc_get_highest_rix(sc, ath_rc_priv, rate_table, &is_probe);
- nrix = rix;
if (is_probe) {
/* set one try for probe rates. For the
* probes don't enable rts */
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- 1, nrix, 0);
+ 1, rix, 0);
/* Get the next tried/allowed rate. No RTS for the next series
* after the probe rate
*/
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix);
+ ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, nrix, 0);
+ try_per_rate, rix, 0);
tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
} else {
/* Set the chosen rate. No RTS for first series entry. */
ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, nrix, 0);
+ try_per_rate, rix, 0);
}
/* Fill in the other rates for multirate retry */
for ( ; i < 4; i++) {
/* Use twice the number of tries for the last MRR segment. */
if (i + 1 == 4)
- try_per_rate = 4;
+ try_per_rate = 8;
- ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &nrix);
+ ath_rc_get_lower_rix(rate_table, ath_rc_priv, rix, &rix);
/* All other rates in the series have RTS enabled */
ath_rc_rate_set_series(rate_table, &rates[i], txrc,
- try_per_rate, nrix, 1);
+ try_per_rate, rix, 1);
}
/*
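The rc.c change above raises the per-rate try counts from 2/2/2/4 to 4/4/4/8 (the last, most robust rate keeps twice the tries of the others), so the worst-case retry budget across the four-entry MRR series grows from 10 to 20 attempts. A small sketch of those totals, using only the values from the updated comment:

    #include <stdio.h>

    int main(void)
    {
            int old_mrr[4] = { 2, 2, 2, 4 };   /* per-rate tries before this change */
            int new_mrr[4] = { 4, 4, 4, 8 };   /* per-rate tries after this change  */
            int old_total = 0, new_total = 0;

            for (int i = 0; i < 4; i++) {
                    old_total += old_mrr[i];
                    new_total += new_mrr[i];
            }
            /* Worst-case transmission attempts across the whole MRR series. */
            printf("old budget: %d tries, new budget: %d tries\n",
                   old_total, new_total);      /* 10 vs 20 */
            return 0;
    }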
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 9eb96f50699..4f6d6fd442f 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -57,6 +57,10 @@ enum {
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
|| (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
+#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS) \
+ || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
+ || (_phy == WLAN_RC_PHY_HT_20_DS_HGI))
#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
|| (_phy == WLAN_RC_PHY_HT_40_DS) \
|| (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 477365e5ae6..1ca42e5148c 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -364,10 +364,10 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
return; /* not from our current AP */
- sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags &= ~PS_WAIT_FOR_BEACON;
- if (sc->sc_flags & SC_OP_BEACON_SYNC) {
- sc->sc_flags &= ~SC_OP_BEACON_SYNC;
+ if (sc->ps_flags & PS_BEACON_SYNC) {
+ sc->ps_flags &= ~PS_BEACON_SYNC;
ath_print(common, ATH_DBG_PS,
"Reconfigure Beacon timers based on "
"timestamp from the AP\n");
@@ -384,17 +384,17 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
*/
ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
"buffered broadcast/multicast frame(s)\n");
- sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
+ sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
return;
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_CAB) {
+ if (sc->ps_flags & PS_WAIT_FOR_CAB) {
/*
* This can happen if a broadcast frame is dropped or the AP
* fails to send a frame indicating that all CAB frames have
* been delivered.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"PS wait for CAB frames timed out\n");
}
@@ -408,10 +408,10 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
hdr = (struct ieee80211_hdr *)skb->data;
/* Process Beacon and CAB receive in PS state */
- if ((sc->sc_flags & SC_OP_WAIT_FOR_BEACON) &&
+ if ((sc->ps_flags & PS_WAIT_FOR_BEACON) &&
ieee80211_is_beacon(hdr->frame_control))
ath_rx_ps_beacon(sc, skb);
- else if ((sc->sc_flags & SC_OP_WAIT_FOR_CAB) &&
+ else if ((sc->ps_flags & PS_WAIT_FOR_CAB) &&
(ieee80211_is_data(hdr->frame_control) ||
ieee80211_is_action(hdr->frame_control)) &&
is_multicast_ether_addr(hdr->addr1) &&
@@ -420,20 +420,20 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
* No more broadcast/multicast frames to be received at this
* point.
*/
- sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
+ sc->ps_flags &= ~PS_WAIT_FOR_CAB;
ath_print(common, ATH_DBG_PS,
"All PS CAB frames received, back to sleep\n");
- } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
+ } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
!is_multicast_ether_addr(hdr->addr1) &&
!ieee80211_has_morefrags(hdr->frame_control)) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
+ sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having received "
- "PS-Poll data (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ "PS-Poll data (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
}
@@ -571,6 +571,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
hw = ath_get_virt_hw(sc, hdr);
rx_stats = &ds->ds_rxstat;
+ ath_debug_stat_rx(sc, bf);
+
/*
* If we're asked to flush receive queue, directly
* chain it back at the queue without processing it.
@@ -631,9 +633,9 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
sc->rx.rxotherant = 0;
}
- if (unlikely(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA)))
+ if (unlikely(sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA)))
ath_rx_ps(sc, skb);
ath_rx_send_to_mac80211(hw, sc, skb, rxs);
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 8e653fb937a..72cfa8ebd9a 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -1547,9 +1547,9 @@ enum {
#define AR_BT_COEX_WEIGHT 0x8174
#define AR_BT_COEX_WGHT 0xff55
-#define AR_STOMP_ALL_WLAN_WGHT 0xffcc
-#define AR_STOMP_LOW_WLAN_WGHT 0xaaa8
-#define AR_STOMP_NONE_WLAN_WGHT 0xaa00
+#define AR_STOMP_ALL_WLAN_WGHT 0xfcfc
+#define AR_STOMP_LOW_WLAN_WGHT 0xa8a8
+#define AR_STOMP_NONE_WLAN_WGHT 0x0000
#define AR_BTCOEX_BT_WGHT 0x0000ffff
#define AR_BTCOEX_BT_WGHT_S 0
#define AR_BTCOEX_WL_WGHT 0xffff0000
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index cd26caaf44e..a43fbf84dab 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -152,7 +152,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
SET_IEEE80211_PERM_ADDR(hw, addr);
- ath_set_hw_capab(sc, hw);
+ ath9k_set_hw_capab(sc, hw);
error = ieee80211_register_hw(hw);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 29bf33692f7..47294f90bbe 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1498,26 +1498,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
if (sc->sc_flags & SC_OP_PREAMBLE_SHORT)
ctsrate |= rate->hw_value_short;
- /*
- * ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive.
- * Check the first rate in the series to decide whether RTS/CTS
- * or CTS-to-self has to be used.
- */
- if (rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
- flags = ATH9K_TXDESC_CTSENA;
- else if (rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- flags = ATH9K_TXDESC_RTSENA;
-
- /* FIXME: Handle aggregation protection */
- if (sc->config.ath_aggr_prot &&
- (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
- flags = ATH9K_TXDESC_RTSENA;
- }
-
- /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
- if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
- flags &= ~(ATH9K_TXDESC_RTSENA);
-
for (i = 0; i < 4; i++) {
bool is_40, is_sgi, is_sp;
int phy;
@@ -1529,8 +1509,15 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
series[i].Tries = rates[i].count;
series[i].ChSel = common->tx_chainmask;
- if (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)
+ if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
+ (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ flags |= ATH9K_TXDESC_RTSENA;
+ } else if (rates[i].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
+ series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
+ flags |= ATH9K_TXDESC_CTSENA;
+ }
+
if (rates[i].flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
series[i].RateFlags |= ATH9K_RATESERIES_2040;
if (rates[i].flags & IEEE80211_TX_RC_SHORT_GI)
@@ -1568,6 +1555,14 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp);
}
+ /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
+ if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit))
+ flags &= ~ATH9K_TXDESC_RTSENA;
+
+ /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
+ if (flags & ATH9K_TXDESC_RTSENA)
+ flags &= ~ATH9K_TXDESC_CTSENA;
+
/* set dur_update_en for l-sig computation except for PS-Poll frames */
ath9k_hw_set11n_ratescenario(sc->sc_ah, bf->bf_desc,
bf->bf_lastbf->bf_desc,
@@ -1648,7 +1643,7 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
/* tag if this is a nullfunc frame to enable PS when AP acks it */
if (ieee80211_is_nullfunc(fc) && ieee80211_has_pm(fc)) {
bf->bf_isnullfunc = true;
- sc->sc_flags &= ~SC_OP_NULLFUNC_COMPLETED;
+ sc->ps_flags &= ~PS_NULLFUNC_COMPLETED;
} else
bf->bf_isnullfunc = false;
@@ -1858,15 +1853,15 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
skb_pull(skb, padsize);
}
- if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
- sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
+ if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
+ sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
ath_print(common, ATH_DBG_PS,
"Going back to sleep after having "
- "received TX status (0x%x)\n",
- sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
- SC_OP_WAIT_FOR_CAB |
- SC_OP_WAIT_FOR_PSPOLL_DATA |
- SC_OP_WAIT_FOR_TX_ACK));
+ "received TX status (0x%lx)\n",
+ sc->ps_flags & (PS_WAIT_FOR_BEACON |
+ PS_WAIT_FOR_CAB |
+ PS_WAIT_FOR_PSPOLL_DATA |
+ PS_WAIT_FOR_TX_ACK));
}
if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL))
@@ -2053,11 +2048,10 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
*/
if (bf->bf_isnullfunc &&
(ds->ds_txstat.ts_status & ATH9K_TX_ACKED)) {
- if ((sc->sc_flags & SC_OP_PS_ENABLED)) {
- sc->ps_enabled = true;
- ath9k_hw_setrxabort(sc->sc_ah, 1);
- } else
- sc->sc_flags |= SC_OP_NULLFUNC_COMPLETED;
+ if ((sc->ps_flags & PS_ENABLED))
+ ath9k_enable_ps(sc);
+ else
+ sc->ps_flags |= PS_NULLFUNC_COMPLETED;
}
/*
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
index d6b685a06c5..8263633c003 100644
--- a/drivers/net/wireless/ath/debug.h
+++ b/drivers/net/wireless/ath/debug.h
@@ -65,11 +65,11 @@ enum ATH_DEBUG {
#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
#ifdef CONFIG_ATH_DEBUG
-void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...);
+void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
#else
-static inline void ath_print(struct ath_common *common,
- int dbg_mask,
- const char *fmt, ...)
+static inline void __attribute__ ((format (printf, 3, 4)))
+ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
{
}
#endif /* CONFIG_ATH_DEBUG */
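Adding the attribute to both the real ath_print() and its CONFIG_ATH_DEBUG=n stub lets the compiler type-check every call's variadic arguments against the format string; this is exactly the class of mismatch fixed above in xmit.c, where a %x was widened to %lx to match the ps_flags mask. A standalone sketch of the pattern (my_log is hypothetical, not an ath function):

	#include <stdarg.h>
	#include <stdio.h>

	/* Check argument 3 onwards against the printf-style format in argument 2. */
	static void my_log(int level, const char *fmt, ...)
		__attribute__ ((format (printf, 2, 3)));

	static void my_log(int level, const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		fprintf(stderr, "<%d> ", level);
		vfprintf(stderr, fmt, ap);
		va_end(ap);
	}

	int main(void)
	{
		unsigned long flags = 0x13;

		my_log(1, "flags 0x%lx\n", flags);
		/* my_log(1, "flags 0x%x\n", flags); would now trigger -Wformat */
		return 0;
	}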
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 039ac490465..04abd1f556b 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -110,8 +110,9 @@ static const struct ieee80211_regdomain ath_world_regdom_67_68_6A = {
static inline bool is_wwr_sku(u16 regd)
{
- return ((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
- (regd == WORLD);
+ return ((regd & COUNTRY_ERD_FLAG) != COUNTRY_ERD_FLAG) &&
+ (((regd & WORLD_SKU_MASK) == WORLD_SKU_PREFIX) ||
+ (regd == WORLD));
}
static u16 ath_regd_get_eepromRD(struct ath_regulatory *reg)
diff --git a/drivers/net/wireless/atmel_pci.c b/drivers/net/wireless/atmel_pci.c
index 92f87fbe750..9ab1192004c 100644
--- a/drivers/net/wireless/atmel_pci.c
+++ b/drivers/net/wireless/atmel_pci.c
@@ -31,7 +31,7 @@ MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.")
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("Atmel at76c506 PCI wireless cards");
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{ 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
{ 0, }
};
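DEFINE_PCI_DEVICE_TABLE() standardizes the qualifiers on PCI ID tables; in kernels of this era it expands to roughly the following (a sketch, not the verbatim pci.h definition), making card_ids const and placing it in the __devinitconst section that can be discarded when hotplug support is not configured:

	#define DEFINE_PCI_DEVICE_TABLE(_table) \
		const struct pci_device_id _table[] __devinitconst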
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 64c12e1bced..0a00d42642c 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -78,11 +78,11 @@ config B43_SDIO
If unsure, say N.
-# Data transfers to the device via PIO
-# This is only needed on PCMCIA and SDIO devices. All others can do DMA properly.
+# Data transfers to the device via PIO. We want it as a fallback even
+# if we can do DMA.
config B43_PIO
bool
- depends on B43 && (B43_SDIO || B43_PCMCIA || B43_FORCE_PIO)
+ depends on B43
select SSB_BLOCKIO
default y
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 84772a2542d..5e83b6f0a3a 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -12,7 +12,7 @@ b43-y += xmit.o
b43-y += lo.o
b43-y += wa.o
b43-y += dma.o
-b43-$(CONFIG_B43_PIO) += pio.o
+b43-y += pio.o
b43-y += rfkill.o
b43-$(CONFIG_B43_LEDS) += leds.o
b43-$(CONFIG_B43_PCMCIA) += pcmcia.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index c484cc25389..b8807fb12c9 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -254,6 +254,14 @@ enum {
#define B43_SHM_SH_MAXBFRAMES 0x0080 /* Maximum number of frames in a burst */
#define B43_SHM_SH_SPUWKUP 0x0094 /* pre-wakeup for synth PU in us */
#define B43_SHM_SH_PRETBTT 0x0096 /* pre-TBTT in us */
+/* SHM_SHARED tx iq workarounds */
+#define B43_SHM_SH_NPHY_TXIQW0 0x0700
+#define B43_SHM_SH_NPHY_TXIQW1 0x0702
+#define B43_SHM_SH_NPHY_TXIQW2 0x0704
+#define B43_SHM_SH_NPHY_TXIQW3 0x0706
+/* SHM_SHARED tx pwr ctrl */
+#define B43_SHM_SH_NPHY_TXPWR_INDX0 0x0708
+#define B43_SHM_SH_NPHY_TXPWR_INDX1 0x070E
/* SHM_SCRATCH offsets */
#define B43_SHM_SC_MINCONT 0x0003 /* Minimum contention window */
@@ -694,6 +702,7 @@ struct b43_wldev {
bool radio_hw_enable; /* saved state of radio hardware enabled state */
bool qos_enabled; /* TRUE, if QoS is used. */
bool hwcrypto_enabled; /* TRUE, if HW crypto acceleration is enabled. */
+ bool use_pio; /* TRUE if next init should use PIO */
/* PHY/Radio device. */
struct b43_phy phy;
@@ -822,11 +831,9 @@ struct b43_wl {
/* The device LEDs. */
struct b43_leds leds;
-#ifdef CONFIG_B43_PIO
/* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
u8 pio_tailspace[4] __attribute__((__aligned__(8)));
-#endif /* CONFIG_B43_PIO */
};
static inline struct b43_wl *hw_to_b43_wl(struct ieee80211_hw *hw)
@@ -877,20 +884,15 @@ static inline void b43_write32(struct b43_wldev *dev, u16 offset, u32 value)
static inline bool b43_using_pio_transfers(struct b43_wldev *dev)
{
-#ifdef CONFIG_B43_PIO
return dev->__using_pio_transfers;
-#else
- return 0;
-#endif
}
#ifdef CONFIG_B43_FORCE_PIO
-# define B43_FORCE_PIO 1
+# define B43_PIO_DEFAULT 1
#else
-# define B43_FORCE_PIO 0
+# define B43_PIO_DEFAULT 0
#endif
-
/* Message printing */
void b43info(struct b43_wl *wl, const char *fmt, ...)
__attribute__ ((format(printf, 2, 3)));
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 88d1fd02d40..be7abf8916a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -1369,7 +1369,6 @@ int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
b43err(dev->wl, "DMA tx mapping failure\n");
goto out;
}
- ring->nr_tx_packets++;
if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
@@ -1500,22 +1499,6 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
}
}
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43_dmaring *ring;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- ring = select_ring_by_priority(dev, i);
-
- stats[i].len = ring->used_slots / TX_SLOTS_PER_FRAME;
- stats[i].limit = ring->nr_slots / TX_SLOTS_PER_FRAME;
- stats[i].count = ring->nr_tx_packets;
- }
-}
-
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
const struct b43_dma_ops *ops = ring->ops;
@@ -1653,7 +1636,6 @@ void b43_dma_tx_resume(struct b43_wldev *dev)
b43_power_saving_ctl_bits(dev, 0);
}
-#ifdef CONFIG_B43_PIO
static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
u16 mmio_base, bool enable)
{
@@ -1687,4 +1669,3 @@ void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
mmio_base = b43_dmacontroller_base(type, engine_index);
direct_fifo_rx(dev, type, mmio_base, enable);
}
-#endif /* CONFIG_B43_PIO */
diff --git a/drivers/net/wireless/b43/dma.h b/drivers/net/wireless/b43/dma.h
index f7ab37c4cdb..dc91944d602 100644
--- a/drivers/net/wireless/b43/dma.h
+++ b/drivers/net/wireless/b43/dma.h
@@ -228,8 +228,6 @@ struct b43_dmaring {
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
- /* Total number of packets sent. Statistics only. */
- unsigned int nr_tx_packets;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
@@ -278,9 +276,6 @@ void b43_dma_free(struct b43_wldev *dev);
void b43_dma_tx_suspend(struct b43_wldev *dev);
void b43_dma_tx_resume(struct b43_wldev *dev);
-void b43_dma_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
-
int b43_dma_tx(struct b43_wldev *dev,
struct sk_buff *skb);
void b43_dma_handle_txstatus(struct b43_wldev *dev,
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 490fb45d1d0..1521b1e78d2 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -67,7 +67,12 @@ MODULE_AUTHOR("Gábor Stefanik");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43_SUPPORTED_FIRMWARE_ID);
-
+MODULE_FIRMWARE("b43/ucode11.fw");
+MODULE_FIRMWARE("b43/ucode13.fw");
+MODULE_FIRMWARE("b43/ucode14.fw");
+MODULE_FIRMWARE("b43/ucode15.fw");
+MODULE_FIRMWARE("b43/ucode5.fw");
+MODULE_FIRMWARE("b43/ucode9.fw");
static int modparam_bad_frames_preempt;
module_param_named(bad_frames_preempt, modparam_bad_frames_preempt, int, 0444);
@@ -102,6 +107,9 @@ int b43_modparam_verbose = B43_VERBOSITY_DEFAULT;
module_param_named(verbose, b43_modparam_verbose, int, 0644);
MODULE_PARM_DESC(verbose, "Log message verbosity: 0=error, 1=warn, 2=info(default), 3=debug");
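+/* B43_PIO_DEFAULT (from CONFIG_B43_FORCE_PIO) only seeds this runtime
+ * default now: b43_one_core_attach() copies it into wldev->use_pio, and
+ * the fatal-DMA-error path may flip use_pio before restarting the core,
+ * so PIO can be selected per device without rebuilding the kernel. */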
+int b43_modparam_pio = B43_PIO_DEFAULT;
+module_param_named(pio, b43_modparam_pio, int, 0644);
+MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 5),
@@ -110,6 +118,7 @@ static const struct ssb_device_id b43_ssb_tbl[] = {
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 9),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 10),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 11),
+ SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 12),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 13),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 15),
SSB_DEVICE(SSB_VENDOR_BROADCOM, SSB_DEV_80211, 16),
@@ -842,8 +851,10 @@ static void rx_tkip_phase1_write(struct b43_wldev *dev, u8 index, u32 iv32,
}
static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_key_conf *keyconf, const u8 *addr,
- u32 iv32, u16 *phase1key)
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -852,19 +863,19 @@ static void b43_op_update_tkip_key(struct ieee80211_hw *hw,
if (B43_WARN_ON(!modparam_hwtkip))
return;
- mutex_lock(&wl->mutex);
-
+ /* This is only called from the RX path through mac80211, where
+ * our mutex is already locked. */
+ B43_WARN_ON(!mutex_is_locked(&wl->mutex));
dev = wl->current_dev;
- if (!dev || b43_status(dev) < B43_STAT_INITIALIZED)
- goto out_unlock;
+ B43_WARN_ON(!dev || b43_status(dev) < B43_STAT_INITIALIZED);
keymac_write(dev, index, NULL); /* First zero out mac to avoid race */
rx_tkip_phase1_write(dev, index, iv32, phase1key);
- keymac_write(dev, index, addr);
-
-out_unlock:
- mutex_unlock(&wl->mutex);
+ /* only pairwise TKIP keys are supported right now */
+ if (WARN_ON(!sta))
+ return;
+ keymac_write(dev, index, sta->addr);
}
static void do_key_write(struct b43_wldev *dev,
@@ -1793,8 +1804,9 @@ static void b43_do_interrupt_thread(struct b43_wldev *dev)
dma_reason[4], dma_reason[5]);
b43err(dev->wl, "This device does not support DMA "
"on your system. Please use PIO instead.\n");
- b43err(dev->wl, "CONFIG_B43_FORCE_PIO must be set in "
- "your kernel configuration.\n");
+ /* Fall back to PIO transfers if we get fatal DMA errors! */
+ dev->use_pio = 1;
+ b43_controller_restart(dev, "DMA error");
return;
}
if (merged_dma_reason & B43_DMAIRQ_NONFATALMASK) {
@@ -3345,27 +3357,6 @@ out_unlock:
return err;
}
-static int b43_op_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43_wl *wl = hw_to_b43_wl(hw);
- struct b43_wldev *dev;
- int err = -ENODEV;
-
- mutex_lock(&wl->mutex);
- dev = wl->current_dev;
- if (dev && b43_status(dev) >= B43_STAT_STARTED) {
- if (b43_using_pio_transfers(dev))
- b43_pio_get_tx_stats(dev, stats);
- else
- b43_dma_get_tx_stats(dev, stats);
- err = 0;
- }
- mutex_unlock(&wl->mutex);
-
- return err;
-}
-
static int b43_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -3569,6 +3560,12 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
dev = wl->current_dev;
phy = &dev->phy;
+ if (conf_is_ht(conf))
+ phy->is_40mhz =
+ (conf_is_ht40_minus(conf) || conf_is_ht40_plus(conf));
+ else
+ phy->is_40mhz = false;
+
b43_mac_suspend(dev);
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
@@ -3970,6 +3967,7 @@ static int b43_wireless_core_start(struct b43_wldev *dev)
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43_set_status(dev, B43_STAT_STARTED);
/* Start data flow (TX/RX). */
@@ -4360,7 +4358,7 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
if ((dev->dev->bus->bustype == SSB_BUSTYPE_PCMCIA) ||
(dev->dev->bus->bustype == SSB_BUSTYPE_SDIO) ||
- B43_FORCE_PIO) {
+ dev->use_pio) {
dev->__using_pio_transfers = 1;
err = b43_pio_init(dev);
} else {
@@ -4379,8 +4377,6 @@ static int b43_wireless_core_init(struct b43_wldev *dev)
ieee80211_wake_queues(dev->wl->hw);
- ieee80211_wake_queues(dev->wl->hw);
-
b43_set_status(dev, B43_STAT_INITIALIZED);
out:
@@ -4395,7 +4391,7 @@ err_busdown:
}
static int b43_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev;
@@ -4403,24 +4399,24 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
/* TODO: allow WDS/AP devices to coexist */
- if (conf->type != NL80211_IFTYPE_AP &&
- conf->type != NL80211_IFTYPE_MESH_POINT &&
- conf->type != NL80211_IFTYPE_STATION &&
- conf->type != NL80211_IFTYPE_WDS &&
- conf->type != NL80211_IFTYPE_ADHOC)
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_MESH_POINT &&
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_WDS &&
+ vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
- b43dbg(wl, "Adding Interface type %d\n", conf->type);
+ b43dbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = 1;
- wl->vif = conf->vif;
- wl->if_type = conf->type;
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ wl->vif = vif;
+ wl->if_type = vif->type;
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
b43_adjust_opmode(dev);
b43_set_pretbtt(dev);
@@ -4435,17 +4431,17 @@ static int b43_op_add_interface(struct ieee80211_hw *hw,
}
static void b43_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43_wl *wl = hw_to_b43_wl(hw);
struct b43_wldev *dev = wl->current_dev;
- b43dbg(wl, "Removing Interface type %d\n", conf->type);
+ b43dbg(wl, "Removing Interface type %d\n", vif->type);
mutex_lock(&wl->mutex);
B43_WARN_ON(!wl->operating);
- B43_WARN_ON(wl->vif != conf->vif);
+ B43_WARN_ON(wl->vif != vif);
wl->vif = NULL;
wl->operating = 0;
@@ -4586,7 +4582,6 @@ static const struct ieee80211_ops b43_hw_ops = {
.set_key = b43_op_set_key,
.update_tkip_key = b43_op_update_tkip_key,
.get_stats = b43_op_get_stats,
- .get_tx_stats = b43_op_get_tx_stats,
.get_tsf = b43_op_get_tsf,
.set_tsf = b43_op_set_tsf,
.start = b43_op_start,
@@ -4830,6 +4825,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
if (!wldev)
goto out;
+ wldev->use_pio = b43_modparam_pio;
wldev->dev = dev;
wldev->wl = wl;
b43_set_status(wldev, B43_STAT_UNINIT);
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 75b26e175e8..8f7d7eff2d8 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -421,3 +421,48 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
{
b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
+struct b43_c32 b43_cordic(int theta)
+{
+ u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
+ 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
+ 229, 115, 57, 29, };
+ u8 i;
+ s32 tmp;
+ s8 signx = 1;
+ u32 angle = 0;
+ struct b43_c32 ret = { .i = 39797, .q = 0, };
+
+ while (theta > (180 << 16))
+ theta -= (360 << 16);
+ while (theta < -(180 << 16))
+ theta += (360 << 16);
+
+ if (theta > (90 << 16)) {
+ theta -= (180 << 16);
+ signx = -1;
+ } else if (theta < -(90 << 16)) {
+ theta += (180 << 16);
+ signx = -1;
+ }
+
+ for (i = 0; i <= 17; i++) {
+ if (theta > angle) {
+ tmp = ret.i - (ret.q >> i);
+ ret.q += ret.i >> i;
+ ret.i = tmp;
+ angle += arctg[i];
+ } else {
+ tmp = ret.i + (ret.q >> i);
+ ret.q -= ret.i >> i;
+ ret.i = tmp;
+ angle -= arctg[i];
+ }
+ }
+
+ ret.i *= signx;
+ ret.q *= signx;
+
+ return ret;
+}
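b43_cordic() takes its angle in 1/65536ths of a degree (16.16 fixed point; note arctg[0] == 45 << 16) and starts from (39797, 0), where 39797/65536 ≈ 0.6073 pre-compensates the CORDIC gain of the 18 iterations, so the returned vector has roughly unit magnitude in Q16 (≈ 65536). A hypothetical usage sketch, mirroring how the callers below scale the result back down with the CORDIC_CONVERT() macro added to phy_common.h ('max' here is a caller-chosen amplitude, not a driver symbol):

	struct b43_c32 s = b43_cordic(45 << 16);	/* 45 degrees in 16.16 */
	/* s.i and s.q are each about 65536 * cos(45 deg), i.e. ~46341 */
	s.i = CORDIC_CONVERT(s.i * max);		/* scale and round back down */
	s.q = CORDIC_CONVERT(s.q * max);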
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 9edd4e8e0c8..bd480b481bf 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -5,6 +5,12 @@
struct b43_wldev;
+/* Complex number using 2 32-bit signed integers */
+struct b43_c32 { s32 i, q; };
+
+#define CORDIC_CONVERT(value) (((value) >= 0) ? \
+ ((((value) >> 15) + 1) >> 1) : \
+ -((((-(value)) >> 15) + 1) >> 1))
/* PHY register routing bits */
#define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */
@@ -212,6 +218,9 @@ struct b43_phy {
bool supports_2ghz;
bool supports_5ghz;
+ /* HT info */
+ bool is_40mhz;
+
/* GMODE bit enabled? */
bool gmode;
@@ -418,5 +427,6 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
*/
void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
+struct b43_c32 b43_cordic(int theta);
#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 3e046ec1ff8..185219e0a55 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -80,6 +80,7 @@ static void b43_lpphy_op_free(struct b43_wldev *dev)
dev->phy.lp = NULL;
}
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
static void lpphy_read_band_sprom(struct b43_wldev *dev)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
@@ -101,6 +102,12 @@ static void lpphy_read_band_sprom(struct b43_wldev *dev)
maxpwr = bus->sprom.maxpwr_bg;
lpphy->max_tx_pwr_med_band = maxpwr;
cckpo = bus->sprom.cck2gpo;
+ /*
+ * We don't read SPROM's opo as specs say. On rev8 SPROMs
+ * opo == ofdm2gpo and we don't know any SSB with LP-PHY
+ * and SPROM rev below 8.
+ */
+ B43_WARN_ON(bus->sprom.revision < 8);
ofdmpo = bus->sprom.ofdm2gpo;
if (cckpo) {
for (i = 0; i < 4; i++) {
@@ -1703,19 +1710,6 @@ static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
.c0 = 0,
};
-static u8 lpphy_nbits(s32 val)
-{
- u32 tmp = abs(val);
- u8 nbits = 0;
-
- while (tmp != 0) {
- nbits++;
- tmp >>= 1;
- }
-
- return nbits;
-}
-
static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
{
struct lpphy_iq_est iq_est;
@@ -1742,8 +1736,8 @@ static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
goto out;
}
- prod_msb = lpphy_nbits(prod);
- q_msb = lpphy_nbits(qpwr);
+ prod_msb = fls(abs(prod));
+ q_msb = fls(abs(qpwr));
tmp1 = prod_msb - 20;
if (tmp1 >= 0) {
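fls() returns the 1-based position of the most significant set bit (fls(0) == 0, fls(1) == 1, fls(0x40000) == 19), which is exactly what the removed lpphy_nbits() loop computed. A standalone sketch of the equivalence, using the compiler builtin in place of the kernel helper:

	#include <stdio.h>

	/* The loop removed from phy_lp.c, kept here for comparison. */
	static int nbits_loop(unsigned int v)
	{
		int n = 0;

		while (v) {
			n++;
			v >>= 1;
		}
		return n;
	}

	/* Portable stand-in for the kernel's fls(). */
	static int fls_like(unsigned int v)
	{
		return v ? 32 - __builtin_clz(v) : 0;
	}

	int main(void)
	{
		unsigned int v;

		for (v = 0; v < 100000; v++)
			if (nbits_loop(v) != fls_like(v))
				printf("mismatch at %u\n", v);
		return 0;
	}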
@@ -1773,47 +1767,6 @@ out:
return ret;
}
-/* Complex number using 2 32-bit signed integers */
-typedef struct {s32 i, q;} lpphy_c32;
-
-static lpphy_c32 lpphy_cordic(int theta)
-{
- u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
- 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
- 229, 115, 57, 29, };
- int i, tmp, signx = 1, angle = 0;
- lpphy_c32 ret = { .i = 39797, .q = 0, };
-
- theta = clamp_t(int, theta, -180, 180);
-
- if (theta > 90) {
- theta -= 180;
- signx = -1;
- } else if (theta < -90) {
- theta += 180;
- signx = -1;
- }
-
- for (i = 0; i <= 17; i++) {
- if (theta > angle) {
- tmp = ret.i - (ret.q >> i);
- ret.q += ret.i >> i;
- ret.i = tmp;
- angle += arctg[i];
- } else {
- tmp = ret.i + (ret.q >> i);
- ret.q -= ret.i >> i;
- ret.i = tmp;
- angle -= arctg[i];
- }
- }
-
- ret.i *= signx;
- ret.q *= signx;
-
- return ret;
-}
-
static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
u16 wait)
{
@@ -1831,8 +1784,9 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
{
struct b43_phy_lp *lpphy = dev->phy.lp;
u16 buf[64];
- int i, samples = 0, angle = 0, rotation = (9 * freq) / 500;
- lpphy_c32 sample;
+ int i, samples = 0, angle = 0;
+ int rotation = (((36 * freq) / 20) << 16) / 100;
+ struct b43_c32 sample;
lpphy->tx_tone_freq = freq;
@@ -1848,10 +1802,10 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
}
for (i = 0; i < samples; i++) {
- sample = lpphy_cordic(angle);
+ sample = b43_cordic(angle);
angle += rotation;
- buf[i] = ((sample.i * max) & 0xFF) << 8;
- buf[i] |= (sample.q * max) & 0xFF;
+ buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8;
+ buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF);
}
b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
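The per-sample phase step only changes units here, to match the 16.16-degree argument that b43_cordic() expects; up to integer rounding it is the same value the old code used:

	rotation = (((36 * freq) / 20) << 16) / 100
	         = ((1.8 * freq) << 16) / 100
	         = ((9 * freq) / 500) << 16

i.e. the old (9 * freq) / 500 whole degrees per sample, now carried with 16 fractional bits (and slightly more precise, since the division by 100 happens after the shift).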
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index 992318a7807..795bb1e3345 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -28,7 +28,50 @@
#include "b43.h"
#include "phy_n.h"
#include "tables_nphy.h"
+#include "main.h"
+struct nphy_txgains {
+ u16 txgm[2];
+ u16 pga[2];
+ u16 pad[2];
+ u16 ipa[2];
+};
+
+struct nphy_iqcal_params {
+ u16 txgm;
+ u16 pga;
+ u16 pad;
+ u16 ipa;
+ u16 cal_gain;
+ u16 ncorr[5];
+};
+
+struct nphy_iq_est {
+ s32 iq0_prod;
+ u32 i0_pwr;
+ u32 q0_pwr;
+ s32 iq1_prod;
+ u32 i1_pwr;
+ u32 q1_pwr;
+};
+
+enum b43_nphy_rf_sequence {
+ B43_RFSEQ_RX2TX,
+ B43_RFSEQ_TX2RX,
+ B43_RFSEQ_RESET2RX,
+ B43_RFSEQ_UPDATE_GAINH,
+ B43_RFSEQ_UPDATE_GAINL,
+ B43_RFSEQ_UPDATE_GAINU,
+};
+
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length);
+static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
+ enum b43_nphy_rf_sequence seq);
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off);
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core);
void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
{//TODO
@@ -197,173 +240,1020 @@ void b43_nphy_radio_turn_off(struct b43_wldev *dev)
~B43_NPHY_RFCTL_CMD_EN);
}
-#define ntab_upload(dev, offset, data) do { \
- unsigned int i; \
- for (i = 0; i < (offset##_SIZE); i++) \
- b43_ntab_write(dev, (offset) + i, (data)[i]); \
- } while (0)
-
-/* Upload the N-PHY tables. */
+/*
+ * Upload the N-PHY tables.
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
+ */
static void b43_nphy_tables_init(struct b43_wldev *dev)
{
- /* Static tables */
- ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
- ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
- ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
- ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
- ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
- ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
- ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
- ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
- ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
- ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
- ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
- ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
- ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
- ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
-
- /* Volatile tables */
- ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
- ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
- ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
- ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
- ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
- ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
- ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
- ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
- ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
- ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
- ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
- ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
+ if (dev->phy.rev < 3)
+ b43_nphy_rev0_1_2_tables_init(dev);
+ else
+ b43_nphy_rev3plus_tables_init(dev);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PA%20override */
+static void b43_nphy_pa_override(struct b43_wldev *dev, bool enable)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ enum ieee80211_band band;
+ u16 tmp;
+
+ if (!enable) {
+ nphy->rfctrl_intc1_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC1);
+ nphy->rfctrl_intc2_save = b43_phy_read(dev,
+ B43_NPHY_RFCTL_INTC2);
+ band = b43_current_band(dev->wl);
+ if (dev->phy.rev >= 3) {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x600;
+ else
+ tmp = 0x480;
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ tmp = 0x180;
+ else
+ tmp = 0x120;
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1,
+ nphy->rfctrl_intc1_save);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2,
+ nphy->rfctrl_intc2_save);
+ }
}
-static void b43_nphy_workarounds(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxLpFbw */
+static void b43_nphy_tx_lp_fbw(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+ enum ieee80211_band band = b43_current_band(dev->wl);
+ bool ipa = (nphy->ipa2g_on && band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on && band == IEEE80211_BAND_5GHZ);
+
+ if (dev->phy.rev >= 3) {
+ if (ipa) {
+ tmp = 4;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+
+ tmp = 1;
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S2,
+ (((((tmp << 3) | tmp) << 3) | tmp) << 3) | tmp);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BmacPhyClkFgc */
+static void b43_nphy_bmac_clock_fgc(struct b43_wldev *dev, bool force)
+{
+ u32 tmslow;
+
+ if (dev->phy.type != B43_PHYTYPE_N)
+ return;
+
+ tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
+ if (force)
+ tmslow |= SSB_TMSLOW_FGC;
+ else
+ tmslow &= ~SSB_TMSLOW_FGC;
+ ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CCA */
+static void b43_nphy_reset_cca(struct b43_wldev *dev)
+{
+ u16 bbcfg;
+
+ b43_nphy_bmac_clock_fgc(dev, 1);
+ bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg | B43_NPHY_BBCFG_RSTCCA);
+ udelay(1);
+ b43_phy_write(dev, B43_NPHY_BBCFG, bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_nphy_bmac_clock_fgc(dev, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/MIMOConfig */
+static void b43_nphy_update_mimo_config(struct b43_wldev *dev, s32 preamble)
+{
+ u16 mimocfg = b43_phy_read(dev, B43_NPHY_MIMOCFG);
+
+ mimocfg |= B43_NPHY_MIMOCFG_AUTO;
+ if (preamble == 1)
+ mimocfg |= B43_NPHY_MIMOCFG_GFMIX;
+ else
+ mimocfg &= ~B43_NPHY_MIMOCFG_GFMIX;
+
+ b43_phy_write(dev, B43_NPHY_MIMOCFG, mimocfg);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Chains */
+static void b43_nphy_update_txrx_chain(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ bool override = false;
+ u16 chain = 0x33;
+
+ if (nphy->txrx_chain == 0) {
+ chain = 0x11;
+ override = true;
+ } else if (nphy->txrx_chain == 1) {
+ chain = 0x22;
+ override = true;
+ }
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ ~(B43_NPHY_RFSEQCA_TXEN | B43_NPHY_RFSEQCA_RXEN),
+ chain);
+
+ if (override)
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE,
+ B43_NPHY_RFSEQMODE_CAOVER);
+ else
+ b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
+ ~B43_NPHY_RFSEQMODE_CAOVER);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqEst */
+static void b43_nphy_rx_iq_est(struct b43_wldev *dev, struct nphy_iq_est *est,
+ u16 samps, u8 time, bool wait)
+{
+ int i;
+ u16 tmp;
+
+ b43_phy_write(dev, B43_NPHY_IQEST_SAMCNT, samps);
+ b43_phy_maskset(dev, B43_NPHY_IQEST_WT, ~B43_NPHY_IQEST_WT_VAL, time);
+ if (wait)
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_MODE);
+ else
+ b43_phy_mask(dev, B43_NPHY_IQEST_CMD, ~B43_NPHY_IQEST_CMD_MODE);
+
+ b43_phy_set(dev, B43_NPHY_IQEST_CMD, B43_NPHY_IQEST_CMD_START);
+
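+ /* Busy-wait up to ~10 ms (1000 * 10 us) for the estimate engine to
+  * clear the START bit, then read each 32-bit accumulator back as a
+  * HI/LO pair of 16-bit PHY registers; on timeout, est is zeroed. */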
+ for (i = 1000; i; i--) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQEST_CMD);
+ if (!(tmp & B43_NPHY_IQEST_CMD_START)) {
+ est->i0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO0);
+ est->q0_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO0);
+ est->iq0_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI0) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO0);
+
+ est->i1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_IPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IPACC_LO1);
+ est->q1_pwr = (b43_phy_read(dev, B43_NPHY_IQEST_QPACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_QPACC_LO1);
+ est->iq1_prod = (b43_phy_read(dev, B43_NPHY_IQEST_IQACC_HI1) << 16) |
+ b43_phy_read(dev, B43_NPHY_IQEST_IQACC_LO1);
+ return;
+ }
+ udelay(10);
+ }
+ memset(est, 0, sizeof(*est));
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxIqCoeffs */
+static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
+ struct b43_phy_n_iq_comp *pcomp)
+{
+ if (write) {
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPA0, pcomp->a0);
+ b43_phy_write(dev, B43_NPHY_C1_RXIQ_COMPB0, pcomp->b0);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPA1, pcomp->a1);
+ b43_phy_write(dev, B43_NPHY_C2_RXIQ_COMPB1, pcomp->b1);
+ } else {
+ pcomp->a0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPA0);
+ pcomp->b0 = b43_phy_read(dev, B43_NPHY_C1_RXIQ_COMPB0);
+ pcomp->a1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPA1);
+ pcomp->b1 = b43_phy_read(dev, B43_NPHY_C2_RXIQ_COMPB1);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
+static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, regs[0]);
+ if (core == 0) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ } else {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ }
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[3]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, regs[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhySetup */
+static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
+{
+ u8 rxval, txval;
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ if (core == 0) {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ } else {
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ regs[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ }
+ regs[3] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[4] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
+ regs[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, (u16)~B43_NPHY_RFSEQCA_RXDIS,
+ ((1 - core) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ ((1 - core) << B43_NPHY_RFSEQCA_TXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_RXEN,
+ (core << B43_NPHY_RFSEQCA_RXEN_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXDIS,
+ (core << B43_NPHY_RFSEQCA_TXDIS_SHIFT));
+
+ if (core == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0007);
+ } else {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, ~0x0007);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0007);
+ }
+
+ b43_nphy_rf_control_intc_override(dev, 2, 0, 3);
+ b43_nphy_rf_control_override(dev, 8, 0, 3, false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
+
+ if (core == 0) {
+ rxval = 1;
+ txval = 8;
+ } else {
+ rxval = 4;
+ txval = 2;
+ }
+ b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
+ b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
+static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
+{
+ int i;
+ s32 iq;
+ u32 ii;
+ u32 qq;
+ int iq_nbits, qq_nbits;
+ int arsh, brsh;
+ u16 tmp, a, b;
+
+ struct nphy_iq_est est;
+ struct b43_phy_n_iq_comp old;
+ struct b43_phy_n_iq_comp new = { };
+ bool error = false;
+
+ if (mask == 0)
+ return;
+
+ b43_nphy_rx_iq_coeffs(dev, false, &old);
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+ b43_nphy_rx_iq_est(dev, &est, 0x4000, 32, false);
+ new = old;
+
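+ /* Per core: turn the measured statistics into compensation
+  * coefficients, roughly a = -(iq / ii) and b = sqrt(qq / ii - (iq / ii)^2) - 1,
+  * both scaled into ~Q10 fixed point by the shift juggling below. */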
+ for (i = 0; i < 2; i++) {
+ if (i == 0 && (mask & 1)) {
+ iq = est.iq0_prod;
+ ii = est.i0_pwr;
+ qq = est.q0_pwr;
+ } else if (i == 1 && (mask & 2)) {
+ iq = est.iq1_prod;
+ ii = est.i1_pwr;
+ qq = est.q1_pwr;
+ } else {
+ B43_WARN_ON(1);
+ continue;
+ }
+
+ if (ii + qq < 2) {
+ error = true;
+ break;
+ }
+
+ iq_nbits = fls(abs(iq));
+ qq_nbits = fls(qq);
+
+ arsh = iq_nbits - 20;
+ if (arsh >= 0) {
+ a = -((iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
+ tmp = ii >> arsh;
+ } else {
+ a = -((iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
+ tmp = ii << -arsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ a /= tmp;
+
+ brsh = qq_nbits - 11;
+ if (brsh >= 0) {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii >> brsh;
+ } else {
+ b = (qq << (31 - qq_nbits));
+ tmp = ii << -brsh;
+ }
+ if (tmp == 0) {
+ error = true;
+ break;
+ }
+ b = int_sqrt(b / tmp - a * a) - (1 << 10);
+
+ if (i == 0 && (mask & 0x1)) {
+ if (dev->phy.rev >= 3) {
+ new.a0 = a & 0x3FF;
+ new.b0 = b & 0x3FF;
+ } else {
+ new.a0 = b & 0x3FF;
+ new.b0 = a & 0x3FF;
+ }
+ } else if (i == 1 && (mask & 0x2)) {
+ if (dev->phy.rev >= 3) {
+ new.a1 = a & 0x3FF;
+ new.b1 = b & 0x3FF;
+ } else {
+ new.a1 = b & 0x3FF;
+ new.b1 = a & 0x3FF;
+ }
+ }
+ }
+
+ if (error)
+ new = old;
+
+ b43_nphy_rx_iq_coeffs(dev, true, &new);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxIqWar */
+static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
+{
+ u16 array[4];
+ int i;
+
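+ /* Read the four TX IQ workaround words back from PHY table 15
+  * (0x3C50 = table 15, offset 0x50; consecutive DATALO reads walk the
+  * table) and mirror them into SHM, presumably for the ucode to use. */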
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C50);
+ for (i = 0; i < 4; i++)
+ array[i] = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW0, array[0]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW1, array[1]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW2, array[2]);
+ b43_shm_write16(dev, B43_SHM_SHARED, B43_SHM_SH_NPHY_TXIQW3, array[3]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
+ b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
+static void b43_nphy_read_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+{
+ clip_st[0] = b43_phy_read(dev, B43_NPHY_C1_CLIP1THRES);
+ clip_st[1] = b43_phy_read(dev, B43_NPHY_C2_CLIP1THRES);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/classifier */
+static u16 b43_nphy_classifier(struct b43_wldev *dev, u16 mask, u16 val)
+{
+ u16 tmp;
+
+ if (dev->dev->id.revision == 16)
+ b43_mac_suspend(dev);
+
+ tmp = b43_phy_read(dev, B43_NPHY_CLASSCTL);
+ tmp &= (B43_NPHY_CLASSCTL_CCKEN | B43_NPHY_CLASSCTL_OFDMEN |
+ B43_NPHY_CLASSCTL_WAITEDEN);
+ tmp &= ~mask;
+ tmp |= (val & mask);
+ b43_phy_maskset(dev, B43_NPHY_CLASSCTL, 0xFFF8, tmp);
+
+ if (dev->dev->id.revision == 16)
+ b43_mac_enable(dev);
+
+ return tmp;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/carriersearch */
+static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
{
struct b43_phy *phy = &dev->phy;
- unsigned int i;
+ struct b43_phy_n *nphy = phy->n;
- b43_phy_set(dev, B43_NPHY_IQFLIP,
- B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
- if (1 /* FIXME band is 2.4GHz */) {
- b43_phy_set(dev, B43_NPHY_CLASSCTL,
- B43_NPHY_CLASSCTL_CCKEN);
- } else {
- b43_phy_mask(dev, B43_NPHY_CLASSCTL,
- ~B43_NPHY_CLASSCTL_CCKEN);
- }
- b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
- b43_phy_write(dev, B43_NPHY_TXFRAMEDELAY, 8);
-
- /* Fixup some tables */
- b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0xA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0xA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
- b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0);
- b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0);
- b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
- b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x800);
- b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x800);
-
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
- b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
-
- //TODO set RF sequence
-
- /* Set narrowband clip threshold */
- b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 66);
- b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 66);
-
- /* Set wideband clip 2 threshold */
- b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
- ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
- 21 << B43_NPHY_C1_CLIPWBTHRES_CLIP2_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
- ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
- 21 << B43_NPHY_C2_CLIPWBTHRES_CLIP2_SHIFT);
-
- /* Set Clip 2 detect */
- b43_phy_set(dev, B43_NPHY_C1_CGAINI,
- B43_NPHY_C1_CGAINI_CL2DETECT);
- b43_phy_set(dev, B43_NPHY_C2_CGAINI,
- B43_NPHY_C2_CGAINI_CL2DETECT);
-
- if (0 /*FIXME*/) {
- /* Set dwell lengths */
- b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 43);
- b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 43);
- b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 9);
- b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 9);
-
- /* Set gain backoff */
- b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
- ~B43_NPHY_C1_CGAINI_GAINBKOFF,
- 1 << B43_NPHY_C1_CGAINI_GAINBKOFF_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
- ~B43_NPHY_C2_CGAINI_GAINBKOFF,
- 1 << B43_NPHY_C2_CGAINI_GAINBKOFF_SHIFT);
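+ /* deaf_count nests: the first enable saves and mutes the classifier
+  * and clip detection, the matching last disable restores them, so
+  * calibration helpers can call this freely from within each other. */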
+ if (enable) {
+ u16 clip[] = { 0xFFFF, 0xFFFF };
+ if (nphy->deaf_count++ == 0) {
+ nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 0x7, 0);
+ b43_nphy_read_clip_detection(dev, nphy->clip_state);
+ b43_nphy_write_clip_detection(dev, clip);
+ }
+ b43_nphy_reset_cca(dev);
+ } else {
+ if (--nphy->deaf_count == 0) {
+ b43_nphy_classifier(dev, 0x7, nphy->classifier_state);
+ b43_nphy_write_clip_detection(dev, nphy->clip_state);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/stop-playback */
+static void b43_nphy_stop_playback(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ tmp = b43_phy_read(dev, B43_NPHY_SAMP_STAT);
+ if (tmp & 0x1)
+ b43_phy_set(dev, B43_NPHY_SAMP_CMD, B43_NPHY_SAMP_CMD_STOP);
+ else if (tmp & 0x2)
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, (u16)~0x8000);
+
+ b43_phy_mask(dev, B43_NPHY_SAMP_CMD, ~0x0004);
+
+ if (nphy->bb_mult_save & 0x80000000) {
+ tmp = nphy->bb_mult_save & 0xFFFF;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+ nphy->bb_mult_save = 0;
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SpurWar */
+static void b43_nphy_spur_workaround(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ unsigned int channel;
+ int tone[2] = { 57, 58 };
+ u32 noise[2] = { 0x3FF, 0x3FF };
+
+ B43_WARN_ON(dev->phy.rev < 3);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ /* FIXME: channel = radio_chanspec */
+
+ if (nphy->gband_spurwar_en) {
+ /* TODO: N PHY Adjust Analog Pfbw (7) */
+ if (channel == 11 && dev->phy.is_40mhz)
+ ; /* TODO: N PHY Adjust Min Noise Var(2, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ /* TODO: N PHY Adjust CRS Min Power (0x1E) */
+ }
+
+ if (nphy->aband_spurwar_en) {
+ if (channel == 54) {
+ tone[0] = 0x20;
+ noise[0] = 0x25F;
+ } else if (channel == 38 || channel == 102 || channel == 118) {
+ if (0 /* FIXME */) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+ } else if (channel == 134) {
+ tone[0] = 0x20;
+ noise[0] = 0x21F;
+ } else if (channel == 151) {
+ tone[0] = 0x10;
+ noise[0] = 0x23F;
+ } else if (channel == 153 || channel == 161) {
+ tone[0] = 0x30;
+ noise[0] = 0x23F;
+ } else {
+ tone[0] = 0;
+ noise[0] = 0;
+ }
+
+ if (!tone[0] && !noise[0])
+ ; /* TODO: N PHY Adjust Min Noise Var(1, tone, noise)*/
+ else
+ ; /* TODO: N PHY Adjust Min Noise Var(0, NULL, NULL)*/
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/WorkaroundsGainCtrl */
+static void b43_nphy_gain_crtl_workarounds(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i, j;
+ u8 code;
+
+ /* TODO: for PHY >= 3
+ s8 *lna1_gain, *lna2_gain;
+ u8 *gain_db, *gain_bits;
+ u16 *rfseq_init;
+ u8 lpf_gain[6] = { 0x00, 0x06, 0x0C, 0x12, 0x12, 0x12 };
+ u8 lpf_bits[6] = { 0, 1, 2, 3, 3, 3 };
+ */
+
+ u8 rfseq_events[3] = { 6, 8, 7 };
+ u8 rfseq_delays[3] = { 10, 30, 1 };
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ } else {
+ /* Set Clip 2 detect */
+ b43_phy_set(dev, B43_NPHY_C1_CGAINI,
+ B43_NPHY_C1_CGAINI_CL2DETECT);
+ b43_phy_set(dev, B43_NPHY_C2_CGAINI,
+ B43_NPHY_C2_CGAINI_CL2DETECT);
+
+ /* Set narrowband clip threshold */
+ b43_phy_set(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
+ b43_phy_set(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
+
+ if (!dev->phy.is_40mhz) {
+ /* Set dwell lengths */
+ b43_phy_set(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
+ b43_phy_set(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
+ b43_phy_set(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
+ b43_phy_set(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
+ }
+
+ /* Set wideband clip 2 threshold */
+ b43_phy_maskset(dev, B43_NPHY_C1_CLIPWBTHRES,
+ ~B43_NPHY_C1_CLIPWBTHRES_CLIP2,
+ 21);
+ b43_phy_maskset(dev, B43_NPHY_C2_CLIPWBTHRES,
+ ~B43_NPHY_C2_CLIPWBTHRES_CLIP2,
+ 21);
+
+ if (!dev->phy.is_40mhz) {
+ b43_phy_maskset(dev, B43_NPHY_C1_CGAINI,
+ ~B43_NPHY_C1_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CGAINI,
+ ~B43_NPHY_C2_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C1_CCK_CGAINI,
+ ~B43_NPHY_C1_CCK_CGAINI_GAINBKOFF, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_C2_CCK_CGAINI,
+ ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
+ }
+
+ b43_phy_set(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
+
+ if (nphy->gain_boost) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
+ dev->phy.is_40mhz)
+ code = 4;
+ else
+ code = 5;
+ } else {
+ code = dev->phy.is_40mhz ? 6 : 7;
+ }
/* Set HPVGA2 index */
b43_phy_maskset(dev, B43_NPHY_C1_INITGAIN,
~B43_NPHY_C1_INITGAIN_HPVGA2,
- 6 << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
+ code << B43_NPHY_C1_INITGAIN_HPVGA2_SHIFT);
b43_phy_maskset(dev, B43_NPHY_C2_INITGAIN,
~B43_NPHY_C2_INITGAIN_HPVGA2,
- 6 << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+ code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x7C));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x7C));
+
+ /* TODO: b43_nphy_adjust_lna_gain_table(dev); */
+
+ if (nphy->elna_gain_config) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0808);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x0C08);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (code << 8 | 0x74));
+ }
- //FIXME verify that the specs really mean to use autoinc here.
- for (i = 0; i < 3; i++)
- b43_ntab_write(dev, B43_NTAB16(7, 0x106) + i, 0x673);
+ if (dev->phy.rev == 2) {
+ for (i = 0; i < 4; i++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (0x0400 * i) + 0x0020);
+ for (j = 0; j < 21; j++)
+ b43_phy_write(dev,
+ B43_NPHY_TABLE_DATALO, 3 * j);
+ }
+
+ b43_nphy_set_rf_sequence(dev, 5,
+ rfseq_events, rfseq_delays, 3);
+ b43_phy_maskset(dev, B43_NPHY_OVER_DGAIN1,
+ (u16)~B43_NPHY_OVER_DGAIN_CCKDGECV,
+ 0x5A << B43_NPHY_OVER_DGAIN_CCKDGECV_SHIFT);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_phy_maskset(dev, B43_PHY_N(0xC5D),
+ 0xFF80, 4);
+ }
}
+}
- /* Set minimum gain value */
- b43_phy_maskset(dev, B43_NPHY_C1_MINMAX_GAIN,
- ~B43_NPHY_C1_MINGAIN,
- 23 << B43_NPHY_C1_MINGAIN_SHIFT);
- b43_phy_maskset(dev, B43_NPHY_C2_MINMAX_GAIN,
- ~B43_NPHY_C2_MINGAIN,
- 23 << B43_NPHY_C2_MINGAIN_SHIFT);
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/Workarounds */
+static void b43_nphy_workarounds(struct b43_wldev *dev)
+{
+ struct ssb_bus *bus = dev->dev->bus;
+ struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
- if (phy->rev < 2) {
- b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
- ~B43_NPHY_SCRAM_SIGCTL_SCM);
+ u8 events1[7] = { 0x0, 0x1, 0x2, 0x8, 0x4, 0x5, 0x3 };
+ u8 delays1[7] = { 0x8, 0x6, 0x6, 0x2, 0x4, 0x3C, 0x1 };
+
+ u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
+ u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ b43_nphy_classifier(dev, 1, 0);
+ else
+ b43_nphy_classifier(dev, 1, 1);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ b43_phy_set(dev, B43_NPHY_IQFLIP,
+ B43_NPHY_IQFLIP_ADC1 | B43_NPHY_IQFLIP_ADC2);
+
+ if (dev->phy.rev >= 3) {
+ /* TODO */
+ } else {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ &&
+ nphy->band5g_pwrgain) {
+ b43_radio_mask(dev, B2055_C1_TX_RF_SPARE, ~0x8);
+ b43_radio_mask(dev, B2055_C2_TX_RF_SPARE, ~0x8);
+ } else {
+ b43_radio_set(dev, B2055_C1_TX_RF_SPARE, 0x8);
+ b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
+ }
+
+ /* TODO: convert to b43_ntab_write? */
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2000);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2010);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2002);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2012);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+
+ if (dev->phy.rev < 2) {
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2008);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2018);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2007);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2017);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2006);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2016);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+ }
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0x301);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO2, 0x2D8);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0x301);
+
+ if (bus->sprom.boardflags2_lo & 0x100 &&
+ bus->boardinfo.type == 0x8B) {
+ delays1[0] = 0x1;
+ delays1[5] = 0x14;
+ }
+ b43_nphy_set_rf_sequence(dev, 0, events1, delays1, 7);
+ b43_nphy_set_rf_sequence(dev, 1, events2, delays2, 7);
+
+ b43_nphy_gain_crtl_workarounds(dev);
+
+ if (dev->phy.rev < 2) {
+ if (b43_phy_read(dev, B43_NPHY_RXCTL) & 0x2)
+ ; /*TODO: b43_mhf(dev, 2, 0x0010, 0x0010, 3);*/
+ } else if (dev->phy.rev == 2) {
+ b43_phy_write(dev, B43_NPHY_CRSCHECK2, 0);
+ b43_phy_write(dev, B43_NPHY_CRSCHECK3, 0);
+ }
+
+ if (dev->phy.rev < 2)
+ b43_phy_mask(dev, B43_NPHY_SCRAM_SIGCTL,
+ ~B43_NPHY_SCRAM_SIGCTL_SCM);
+
+ /* Set phase track alpha and beta */
+ b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
+ b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
+ b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+
+ b43_phy_mask(dev, B43_NPHY_PIL_DW1,
+ (u16)~B43_NPHY_PIL_DW_64QAM);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B1, 0xB5);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B2, 0xA4);
+ b43_phy_write(dev, B43_NPHY_TXF_20CO_S2B3, 0x00);
+
+ if (dev->phy.rev == 2)
+ b43_phy_set(dev, B43_NPHY_FINERX2_CGC,
+ B43_NPHY_FINERX2_CGC_DECGC);
}
- /* Set phase track alpha and beta */
- b43_phy_write(dev, B43_NPHY_PHASETR_A0, 0x125);
- b43_phy_write(dev, B43_NPHY_PHASETR_A1, 0x1B3);
- b43_phy_write(dev, B43_NPHY_PHASETR_A2, 0x105);
- b43_phy_write(dev, B43_NPHY_PHASETR_B0, 0x16E);
- b43_phy_write(dev, B43_NPHY_PHASETR_B1, 0xCD);
- b43_phy_write(dev, B43_NPHY_PHASETR_B2, 0x20);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
}
-static void b43_nphy_reset_cca(struct b43_wldev *dev)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */
+static int b43_nphy_load_samples(struct b43_wldev *dev,
+ struct b43_c32 *samples, u16 len) {
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 i;
+ u32 *data;
+
+ data = kzalloc(len * sizeof(u32), GFP_KERNEL);
+ if (!data) {
+ b43err(dev->wl, "allocation for samples loading failed\n");
+ return -ENOMEM;
+ }
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ for (i = 0; i < len; i++) {
+ data[i] = (samples[i].i & 0x3FF) << 10;
+ data[i] |= samples[i].q & 0x3FF;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB32(17, 0), len, data);
+
+ kfree(data);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GenLoadSamples */
+static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max,
+ bool test)
{
- u16 bbcfg;
+ int i;
+ u16 bw, len, rot, angle;
+ struct b43_c32 *samples;
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) | SSB_TMSLOW_FGC);
- bbcfg = b43_phy_read(dev, B43_NPHY_BBCFG);
- b43_phy_set(dev, B43_NPHY_BBCFG, B43_NPHY_BBCFG_RSTCCA);
- b43_phy_write(dev, B43_NPHY_BBCFG,
- bbcfg & ~B43_NPHY_BBCFG_RSTCCA);
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) & ~SSB_TMSLOW_FGC);
+
+ bw = (dev->phy.is_40mhz) ? 40 : 20;
+ len = bw << 3;
+
+ if (test) {
+ if (b43_phy_read(dev, B43_NPHY_BBCFG) & B43_NPHY_BBCFG_RSTRX)
+ bw = 82;
+ else
+ bw = 80;
+
+ if (dev->phy.is_40mhz)
+ bw <<= 1;
+
+ len = bw << 1;
+ }
+
+ samples = kzalloc(len * sizeof(struct b43_c32), GFP_KERNEL);
+ if (!samples) {
+ b43err(dev->wl, "allocation for samples generation failed\n");
+ return 0;
+ }
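+ /* Phase step per sample in b43_cordic()'s 16.16 degree units; assuming
+  * freq is in kHz and playback runs at bw MHz, this is
+  * 360 * freq / (bw * 1000) = 0.36 * freq / bw degrees per sample. */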
+ rot = (((freq * 36) / bw) << 16) / 100;
+ angle = 0;
+
+ for (i = 0; i < len; i++) {
+ samples[i] = b43_cordic(angle);
+ angle += rot;
+ samples[i].q = CORDIC_CONVERT(samples[i].q * max);
+ samples[i].i = CORDIC_CONVERT(samples[i].i * max);
+ }
+
+ i = b43_nphy_load_samples(dev, samples, len);
+ kfree(samples);
+ return (i < 0) ? 0 : len;
}
-enum b43_nphy_rf_sequence {
- B43_RFSEQ_RX2TX,
- B43_RFSEQ_TX2RX,
- B43_RFSEQ_RESET2RX,
- B43_RFSEQ_UPDATE_GAINH,
- B43_RFSEQ_UPDATE_GAINL,
- B43_RFSEQ_UPDATE_GAINU,
-};
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RunSamples */
+static void b43_nphy_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
+ u16 wait, bool iqmode, bool dac_test)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ u16 seq_mode;
+ u32 tmp;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
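+ /* bb_mult_save caches the BB multiplier from table 15, offset 87: the
+  * top bit flags that a value is stashed, the low 16 bits hold it, and
+  * b43_nphy_stop_playback() writes it back and clears the flag. */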
+ if ((nphy->bb_mult_save & 0x80000000) == 0) {
+ tmp = b43_ntab_read(dev, B43_NTAB16(15, 87));
+ nphy->bb_mult_save = (tmp & 0xFFFF) | 0x80000000;
+ }
+
+ if (!dev->phy.is_40mhz)
+ tmp = 0x6464;
+ else
+ tmp = 0x4747;
+ b43_ntab_write(dev, B43_NTAB16(15, 87), tmp);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ b43_phy_write(dev, B43_NPHY_SAMP_DEPCNT, (samps - 1));
+
+ if (loops != 0xFFFF)
+ b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, (loops - 1));
+ else
+ b43_phy_write(dev, B43_NPHY_SAMP_LOOPCNT, loops);
+
+ b43_phy_write(dev, B43_NPHY_SAMP_WAITCNT, wait);
+
+ seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
+
+ b43_phy_set(dev, B43_NPHY_RFSEQMODE, B43_NPHY_RFSEQMODE_CAOVER);
+ if (iqmode) {
+ b43_phy_mask(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x7FFF);
+ b43_phy_set(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8000);
+ } else {
+ if (dac_test)
+ b43_phy_write(dev, B43_NPHY_SAMP_CMD, 5);
+ else
+ b43_phy_write(dev, B43_NPHY_SAMP_CMD, 1);
+ }
+ for (i = 0; i < 100; i++) {
+ if (b43_phy_read(dev, B43_NPHY_RFSEQST) & 1) {
+ i = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (i)
+ b43err(dev->wl, "run samples timeout\n");
+
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+}
+
+/*
+ * Transmits a known value for LO calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/TXTone
+ */
+static int b43_nphy_tx_tone(struct b43_wldev *dev, u32 freq, u16 max_val,
+ bool iqmode, bool dac_test)
+{
+ u16 samp = b43_nphy_gen_load_samples(dev, freq, max_val, dac_test);
+ if (samp == 0)
+ return -1;
+ b43_nphy_run_samples(dev, samp, 0xFFFF, 0, iqmode, dac_test);
+ return 0;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlCoefSetup */
+static void b43_nphy_tx_pwr_ctrl_coef_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j;
+ u32 tmp;
+ u32 cur_real, cur_imag, real_part, imag_part;
+
+ u16 buffer[7];
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+
+ for (i = 0; i < 2; i++) {
+ tmp = ((buffer[i * 2] & 0x3FF) << 10) |
+ (buffer[i * 2 + 1] & 0x3FF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 320));
+ for (j = 0; j < 128; j++) {
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ for (i = 0; i < 2; i++) {
+ tmp = buffer[5 + i];
+ real_part = (tmp >> 8) & 0xFF;
+ imag_part = (tmp & 0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
+ (((i + 26) << 10) | 448));
+
+ if (dev->phy.rev >= 3) {
+ cur_real = real_part;
+ cur_imag = imag_part;
+ tmp = ((cur_real & 0xFF) << 8) | (cur_imag & 0xFF);
+ }
+
+ for (j = 0; j < 128; j++) {
+ if (dev->phy.rev < 3) {
+ cur_real = (real_part * loscale[j] + 128) >> 8;
+ cur_imag = (imag_part * loscale[j] + 128) >> 8;
+ tmp = ((cur_real & 0xFF) << 8) |
+ (cur_imag & 0xFF);
+ }
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI,
+ ((tmp >> 16) & 0xFFFF));
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ (tmp & 0xFFFF));
+ }
+ }
+
+ if (dev->phy.rev >= 3) {
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX0, 0xFFFF);
+ b43_shm_write16(dev, B43_SHM_SHARED,
+ B43_SHM_SH_NPHY_TXPWR_INDX1, 0xFFFF);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRfSeq */
+static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
+ u8 *events, u8 *delays, u8 length)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u8 end = (dev->phy.rev >= 3) ? 0x1F : 0x0F;
+ u16 offset1 = cmd << 4;
+ u16 offset2 = offset1 + 0x80;
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset1), length, events);
+ b43_ntab_write_bulk(dev, B43_NTAB8(7, offset2), length, delays);
+
+ for (i = length; i < 16; i++) {
+ b43_ntab_write(dev, B43_NTAB8(7, offset1 + i), end);
+ b43_ntab_write(dev, B43_NTAB8(7, offset2 + i), 1);
+ }
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ForceRFSeq */
static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
enum b43_nphy_rf_sequence seq)
{
@@ -376,6 +1266,7 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
[B43_RFSEQ_UPDATE_GAINU] = B43_NPHY_RFSEQTR_UPGU,
};
int i;
+ u16 seq_mode = b43_phy_read(dev, B43_NPHY_RFSEQMODE);
B43_WARN_ON(seq >= ARRAY_SIZE(trigger));
@@ -389,8 +1280,181 @@ static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
}
b43err(dev->wl, "RF sequence status timeout\n");
ok:
- b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
- ~(B43_NPHY_RFSEQMODE_CAOVER | B43_NPHY_RFSEQMODE_TROVER));
+ b43_phy_write(dev, B43_NPHY_RFSEQMODE, seq_mode);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlOverride */
+static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
+ u16 value, u8 core, bool off)
+{
+ int i;
+ u8 index = fls(field);
+ u8 addr, en_addr, val_addr;
+ /* we expect only one bit set */
+ B43_WARN_ON(field & (~(1 << (index - 1))));
+
+ if (dev->phy.rev >= 3) {
+ const struct nphy_rf_control_override_rev3 *rf_ctrl;
+ for (i = 0; i < 2; i++) {
+ if (index == 0 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev3[index - 1];
+ en_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->en_addr0 : rf_ctrl->en_addr1);
+ val_addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->val_addr0 : rf_ctrl->val_addr1);
+
+ if (off) {
+ b43_phy_mask(dev, en_addr, ~(field));
+ b43_phy_mask(dev, val_addr,
+ ~(rf_ctrl->val_mask));
+ } else {
+ if (core == 0 || ((1 << core) & i) != 0) {
+ b43_phy_set(dev, en_addr, field);
+ b43_phy_maskset(dev, val_addr,
+ ~(rf_ctrl->val_mask),
+ (value << rf_ctrl->val_shift));
+ }
+ }
+ }
+ } else {
+ const struct nphy_rf_control_override_rev2 *rf_ctrl;
+ if (off) {
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~(field));
+ value = 0;
+ } else {
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, field);
+ }
+
+ for (i = 0; i < 2; i++) {
+ if (index <= 1 || index == 16) {
+ b43err(dev->wl,
+ "Unsupported RF Ctrl Override call\n");
+ return;
+ }
+
+ if (index == 2 || index == 10 ||
+ (index >= 13 && index <= 15)) {
+ core = 1;
+ }
+
+ rf_ctrl = &tbl_rf_control_override_rev2[index - 2];
+ addr = B43_PHY_N((i == 0) ?
+ rf_ctrl->addr0 : rf_ctrl->addr1);
+
+ if ((core & (1 << i)) != 0)
+ b43_phy_maskset(dev, addr, ~(rf_ctrl->bmask),
+ (value << rf_ctrl->shift));
+
+ b43_phy_set(dev, B43_NPHY_RFCTL_OVER, 0x1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ udelay(1);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, 0xFFFE);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RFCtrlIntcOverride */
+static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
+ u16 value, u8 core)
+{
+ u8 i, j;
+ u16 reg, tmp, val;
+
+ B43_WARN_ON(dev->phy.rev < 3);
+ B43_WARN_ON(field > 4);
+
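+ /* Apply the override per core: core 1 = first core only, core 2 = second core only, anything else = both */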
+ for (i = 0; i < 2; i++) {
+ if ((core == 1 && i == 1) || (core == 2 && !i))
+ continue;
+
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_INTC1 : B43_NPHY_RFCTL_INTC2;
+ b43_phy_mask(dev, reg, 0xFBFF);
+
+ switch (field) {
+ case 0:
+ b43_phy_write(dev, reg, 0);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ break;
+ case 1:
+ if (!i) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC1,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_START);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_START) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S1,
+ 0xFFFE);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_INTC2,
+ 0xFC3F, (value << 6));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE, 1);
+ b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
+ B43_NPHY_RFCTL_CMD_RXTX);
+ for (j = 0; j < 100; j++) {
+ if (b43_phy_read(dev, B43_NPHY_RFCTL_CMD) & B43_NPHY_RFCTL_CMD_RXTX) {
+ j = 0;
+ break;
+ }
+ udelay(10);
+ }
+ if (j)
+ b43err(dev->wl,
+ "intc override timeout\n");
+ b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
+ 0xFFFE);
+ }
+ break;
+ case 2:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0020;
+ val = value << 5;
+ } else {
+ tmp = 0x0010;
+ val = value << 4;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 3:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0001;
+ val = value;
+ } else {
+ tmp = 0x0004;
+ val = value << 2;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ case 4:
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ tmp = 0x0002;
+ val = value << 1;
+ } else {
+ tmp = 0x0008;
+ val = value << 3;
+ }
+ b43_phy_maskset(dev, reg, ~tmp, val);
+ break;
+ }
+ }
}
static void b43_nphy_bphy_init(struct b43_wldev *dev)
@@ -411,81 +1475,1680 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
}
-/* RSSI Calibration */
-static void b43_nphy_rssi_cal(struct b43_wldev *dev, u8 type)
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
+static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
+ s8 offset, u8 core, u8 rail, u8 type)
{
- //TODO
+ u16 tmp;
+ bool core1or5 = (core == 1) || (core == 5);
+ bool core2or5 = (core == 2) || (core == 5);
+
+ offset = clamp_val(offset, -32, 31);
+ tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
+
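+ /* Route the packed scale/offset word to the register selected by core, rail and type */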
+ if (core1or5 && (rail == 0) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
+ if (core1or5 && (rail == 1) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
+ if (core2or5 && (rail == 0) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
+ if (core2or5 && (rail == 1) && (type == 2))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
+ if (core1or5 && (rail == 0) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
+ if (core1or5 && (rail == 1) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
+ if (core2or5 && (rail == 0) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
+ if (core2or5 && (rail == 1) && (type == 0))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
+ if (core1or5 && (rail == 0) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
+ if (core1or5 && (rail == 1) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
+ if (core2or5 && (rail == 0) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
+ if (core2or5 && (rail == 1) && (type == 1))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
+ if (core1or5 && (rail == 0) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
+ if (core1or5 && (rail == 1) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
+ if (core2or5 && (rail == 0) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
+ if (core2or5 && (rail == 1) && (type == 6))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
+ if (core1or5 && (rail == 0) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
+ if (core1or5 && (rail == 1) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
+ if (core2or5 && (rail == 0) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
+ if (core2or5 && (rail == 1) && (type == 3))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
+ if (core1or5 && (type == 4))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
+ if (core2or5 && (type == 4))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
+ if (core1or5 && (type == 5))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
+ if (core2or5 && (type == 5))
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
+}
+
+static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ u16 val;
+
+ if (type < 3)
+ val = 0;
+ else if (type == 6)
+ val = 1;
+ else if (type == 3)
+ val = 2;
+ else
+ val = 3;
+
+ val = (val << 12) | (val << 14);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, val);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, val);
+
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO1, 0xFFCF,
+ (type + 1) << 4);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_RSSIO2, 0xFFCF,
+ (type + 1) << 4);
+ }
+
+ /* TODO use some definitions */
+ if (code == 0) {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFEC7, 0);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xEFDC, 0);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0);
+ udelay(20);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ }
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF,
+ 0x3000);
+ if (type < 3) {
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
+ 0xFEC7, 0x0180);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER,
+ 0xEFDC, (code << 1 | 0x1021));
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0x1);
+ udelay(20);
+ b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0);
+ }
+ }
+}
+
+static void b43_nphy_rev3_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 reg, val;
+
+ if (code == 0) {
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER1, 0xFDFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, 0xFDFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C1, 0xFCFF);
+ b43_phy_mask(dev, B43_NPHY_AFECTL_C2, 0xFCFF);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B1S0, 0xFFDF);
+ b43_phy_mask(dev, B43_NPHY_TXF_40CO_B32S1, 0xFFDF);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1, 0xFFC3);
+ b43_phy_mask(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2, 0xFFC3);
+ } else {
+ for (i = 0; i < 2; i++) {
+ if ((code == 1 && i == 1) || (code == 2 && !i))
+ continue;
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_OVER1 : B43_NPHY_AFECTL_OVER;
+ b43_phy_maskset(dev, reg, 0xFDFF, 0x0200);
+
+ if (type < 3) {
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_C1 :
+ B43_NPHY_AFECTL_C2;
+ b43_phy_maskset(dev, reg, 0xFCFF, 0);
+
+ reg = (i == 0) ?
+ B43_NPHY_RFCTL_LUT_TRSW_UP1 :
+ B43_NPHY_RFCTL_LUT_TRSW_UP2;
+ b43_phy_maskset(dev, reg, 0xFFC3, 0);
+
+ if (type == 0)
+ val = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ? 4 : 8;
+ else if (type == 1)
+ val = 16;
+ else
+ val = 32;
+ b43_phy_set(dev, reg, val);
+
+ reg = (i == 0) ?
+ B43_NPHY_TXF_40CO_B1S0 :
+ B43_NPHY_TXF_40CO_B32S1;
+ b43_phy_set(dev, reg, 0x0020);
+ } else {
+ if (type == 6)
+ val = 0x0100;
+ else if (type == 3)
+ val = 0x0200;
+ else
+ val = 0x0300;
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_C1 :
+ B43_NPHY_AFECTL_C2;
+
+ b43_phy_maskset(dev, reg, 0xFCFF, val);
+ b43_phy_maskset(dev, reg, 0xF3FF, val << 2);
+
+ if (type != 3 && type != 6) {
+ enum ieee80211_band band =
+ b43_current_band(dev->wl);
+
+ if ((nphy->ipa2g_on &&
+ band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on &&
+ band == IEEE80211_BAND_5GHZ))
+ val = (band == IEEE80211_BAND_5GHZ) ? 0xC : 0xE;
+ else
+ val = 0x11;
+ reg = (i == 0) ? 0x2000 : 0x3000;
+ reg |= B2055_PADDRV;
+ b43_radio_write16(dev, reg, val);
+
+ reg = (i == 0) ?
+ B43_NPHY_AFECTL_OVER1 :
+ B43_NPHY_AFECTL_OVER;
+ b43_phy_set(dev, reg, 0x0200);
+ }
+ }
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSISel */
+static void b43_nphy_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
+{
+ if (dev->phy.rev >= 3)
+ b43_nphy_rev3_rssi_select(dev, code, type);
+ else
+ b43_nphy_rev2_rssi_select(dev, code, type);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SetRssi2055Vcm */
+static void b43_nphy_set_rssi_2055_vcm(struct b43_wldev *dev, u8 type, u8 *buf)
+{
+ int i;
+ for (i = 0; i < 2; i++) {
+ if (type == 2) {
+ if (i == 0) {
+ b43_radio_maskset(dev, B2055_C1_B0NB_RSSIVCM,
+ 0xFC, buf[0]);
+ b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
+ 0xFC, buf[1]);
+ } else {
+ b43_radio_maskset(dev, B2055_C2_B0NB_RSSIVCM,
+ 0xFC, buf[2 * i]);
+ b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
+ 0xFC, buf[2 * i + 1]);
+ }
+ } else {
+ if (i == 0)
+ b43_radio_maskset(dev, B2055_C1_RX_BB_RSSICTL5,
+ 0xF3, buf[0] << 2);
+ else
+ b43_radio_maskset(dev, B2055_C2_RX_BB_RSSICTL5,
+ 0xF3, buf[2 * i + 1] << 2);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/PollRssi */
+static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
+ u8 nsamp)
+{
+ int i;
+ int out;
+ u16 save_regs_phy[9];
+ u16 s[2];
+
+ if (dev->phy.rev >= 3) {
+ save_regs_phy[0] = b43_phy_read(dev,
+ B43_NPHY_RFCTL_LUT_TRSW_UP1);
+ save_regs_phy[1] = b43_phy_read(dev,
+ B43_NPHY_RFCTL_LUT_TRSW_UP2);
+ save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
+ save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
+ }
+
+ b43_nphy_rssi_select(dev, 5, type);
+
+ if (dev->phy.rev < 2) {
+ save_regs_phy[8] = b43_phy_read(dev, B43_NPHY_GPIO_SEL);
+ b43_phy_write(dev, B43_NPHY_GPIO_SEL, 5);
+ }
+
+ for (i = 0; i < 4; i++)
+ buf[i] = 0;
+
+ for (i = 0; i < nsamp; i++) {
+ if (dev->phy.rev < 2) {
+ s[0] = b43_phy_read(dev, B43_NPHY_GPIO_LOOUT);
+ s[1] = b43_phy_read(dev, B43_NPHY_GPIO_HIOUT);
+ } else {
+ s[0] = b43_phy_read(dev, B43_NPHY_RSSI1);
+ s[1] = b43_phy_read(dev, B43_NPHY_RSSI2);
+ }
+
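+ /* Samples are 6-bit signed values; sign-extend them before accumulating */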
+ buf[0] += ((s8)((s[0] & 0x3F) << 2)) >> 2;
+ buf[1] += ((s8)(((s[0] >> 8) & 0x3F) << 2)) >> 2;
+ buf[2] += ((s8)((s[1] & 0x3F) << 2)) >> 2;
+ buf[3] += ((s8)(((s[1] >> 8) & 0x3F) << 2)) >> 2;
+ }
+ out = (buf[0] & 0xFF) << 24 | (buf[1] & 0xFF) << 16 |
+ (buf[2] & 0xFF) << 8 | (buf[3] & 0xFF);
+
+ if (dev->phy.rev < 2)
+ b43_phy_write(dev, B43_NPHY_GPIO_SEL, save_regs_phy[8]);
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP1,
+ save_regs_phy[0]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_UP2,
+ save_regs_phy[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[2]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[3]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, save_regs_phy[4]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
+ }
+
+ return out;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal */
+static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
+{
+ int i, j;
+ u8 state[4];
+ u8 code, val;
+ u16 class, override;
+ u8 regs_save_radio[2];
+ u16 regs_save_phy[2];
+ s8 offset[4];
+
+ u16 clip_state[2];
+ u16 clip_off[2] = { 0xFFFF, 0xFFFF };
+ s32 results_min[4] = { };
+ u8 vcm_final[4] = { };
+ s32 results[4][4] = { };
+ s32 miniq[4][2] = { };
+
+ if (type == 2) {
+ code = 0;
+ val = 6;
+ } else if (type < 2) {
+ code = 25;
+ val = 4;
+ } else {
+ B43_WARN_ON(1);
+ return;
+ }
+
+ class = b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_classifier(dev, 7, 4);
+ b43_nphy_read_clip_detection(dev, clip_state);
+ b43_nphy_write_clip_detection(dev, clip_off);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ override = 0x140;
+ else
+ override = 0x110;
+
+ regs_save_phy[0] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs_save_radio[0] = b43_radio_read16(dev, B2055_C1_PD_RXTX);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, override);
+ b43_radio_write16(dev, B2055_C1_PD_RXTX, val);
+
+ regs_save_phy[1] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ regs_save_radio[1] = b43_radio_read16(dev, B2055_C2_PD_RXTX);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, override);
+ b43_radio_write16(dev, B2055_C2_PD_RXTX, val);
+
+ state[0] = b43_radio_read16(dev, B2055_C1_PD_RSSIMISC) & 0x07;
+ state[1] = b43_radio_read16(dev, B2055_C2_PD_RSSIMISC) & 0x07;
+ b43_radio_mask(dev, B2055_C1_PD_RSSIMISC, 0xF8);
+ b43_radio_mask(dev, B2055_C2_PD_RSSIMISC, 0xF8);
+ state[2] = b43_radio_read16(dev, B2055_C1_SP_RSSI) & 0x07;
+ state[3] = b43_radio_read16(dev, B2055_C2_SP_RSSI) & 0x07;
+
+ b43_nphy_rssi_select(dev, 5, type);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 0, type);
+ b43_nphy_scale_offset_rssi(dev, 0, 0, 5, 1, type);
+
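+ /* Sweep the four possible VCM settings and record the polled RSSI for each */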
+ for (i = 0; i < 4; i++) {
+ u8 tmp[4];
+ for (j = 0; j < 4; j++)
+ tmp[j] = i;
+ if (type != 1)
+ b43_nphy_set_rssi_2055_vcm(dev, type, tmp);
+ b43_nphy_poll_rssi(dev, type, results[i], 8);
+ if (type < 2)
+ for (j = 0; j < 2; j++)
+ miniq[i][j] = min(results[i][2 * j],
+ results[i][2 * j + 1]);
+ }
+
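+ /* For each rail/core pick the VCM setting whose reading is closest to the target code */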
+ for (i = 0; i < 4; i++) {
+ s32 mind = 40;
+ u8 minvcm = 0;
+ s32 minpoll = 249;
+ s32 curr;
+ for (j = 0; j < 4; j++) {
+ if (type == 2)
+ curr = abs(results[j][i]);
+ else
+ curr = abs(miniq[j][i / 2] - code * 8);
+
+ if (curr < mind) {
+ mind = curr;
+ minvcm = j;
+ }
+
+ if (results[j][i] < minpoll)
+ minpoll = results[j][i];
+ }
+ results_min[i] = minpoll;
+ vcm_final[i] = minvcm;
+ }
+
+ if (type != 1)
+ b43_nphy_set_rssi_2055_vcm(dev, type, vcm_final);
+
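+ /* Convert the residual error into a signed per-rail offset (rounded to the nearest 8) and program it */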
+ for (i = 0; i < 4; i++) {
+ offset[i] = (code * 8) - results[vcm_final[i]][i];
+
+ if (offset[i] < 0)
+ offset[i] = -((abs(offset[i]) + 4) / 8);
+ else
+ offset[i] = (offset[i] + 4) / 8;
+
+ if (results_min[i] == 248)
+ offset[i] = code - 32;
+
+ if (i % 2 == 0)
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0,
+ type);
+ else
+ b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1,
+ type);
+ }
+
+ b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
+ b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]);
+
+ switch (state[2]) {
+ case 1:
+ b43_nphy_rssi_select(dev, 1, 2);
+ break;
+ case 4:
+ b43_nphy_rssi_select(dev, 1, 0);
+ break;
+ case 2:
+ b43_nphy_rssi_select(dev, 1, 1);
+ break;
+ default:
+ b43_nphy_rssi_select(dev, 1, 1);
+ break;
+ }
+
+ switch (state[3]) {
+ case 1:
+ b43_nphy_rssi_select(dev, 2, 2);
+ break;
+ case 4:
+ b43_nphy_rssi_select(dev, 2, 0);
+ break;
+ default:
+ b43_nphy_rssi_select(dev, 2, 1);
+ break;
+ }
+
+ b43_nphy_rssi_select(dev, 0, type);
+
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs_save_phy[0]);
+ b43_radio_write16(dev, B2055_C1_PD_RXTX, regs_save_radio[0]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs_save_phy[1]);
+ b43_radio_write16(dev, B2055_C2_PD_RXTX, regs_save_radio[1]);
+
+ b43_nphy_classifier(dev, 7, class);
+ b43_nphy_write_clip_detection(dev, clip_state);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
+static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
+{
+ /* TODO */
+}
+
+/*
+ * RSSI Calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICal
+ */
+static void b43_nphy_rssi_cal(struct b43_wldev *dev)
+{
+ if (dev->phy.rev >= 3) {
+ b43_nphy_rev3_rssi_cal(dev);
+ } else {
+ b43_nphy_rev2_rssi_cal(dev, 2);
+ b43_nphy_rev2_rssi_cal(dev, 0);
+ b43_nphy_rev2_rssi_cal(dev, 1);
+ }
}
+/*
+ * Restore RSSI Calibration
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreRssiCal
+ */
+static void b43_nphy_restore_rssi_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 *rssical_radio_regs = NULL;
+ u16 *rssical_phy_regs = NULL;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (!nphy->rssical_chanspec_2G)
+ return;
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_2G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_2G;
+ } else {
+ if (!nphy->rssical_chanspec_5G)
+ return;
+ rssical_radio_regs = nphy->rssical_cache.rssical_radio_regs_5G;
+ rssical_phy_regs = nphy->rssical_cache.rssical_phy_regs_5G;
+ }
+
+ /* TODO use some definitions */
+ b43_radio_maskset(dev, 0x602B, 0xE3, rssical_radio_regs[0]);
+ b43_radio_maskset(dev, 0x702B, 0xE3, rssical_radio_regs[1]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, rssical_phy_regs[0]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, rssical_phy_regs[1]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, rssical_phy_regs[2]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, rssical_phy_regs[3]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, rssical_phy_regs[4]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, rssical_phy_regs[5]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, rssical_phy_regs[6]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, rssical_phy_regs[7]);
+
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, rssical_phy_regs[8]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, rssical_phy_regs[9]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, rssical_phy_regs[10]);
+ b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, rssical_phy_regs[11]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetIpaGainTbl */
+static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
+{
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (dev->phy.rev >= 6) {
+ /* TODO: return txpwrctrl_tx_gain_ipa_rev5 if the chip is a BCM47162 */
+ return txpwrctrl_tx_gain_ipa_rev6;
+ } else if (dev->phy.rev >= 5) {
+ return txpwrctrl_tx_gain_ipa_rev5;
+ } else {
+ return txpwrctrl_tx_gain_ipa;
+ }
+ } else {
+ return txpwrctrl_tx_gain_ipa_5g;
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalRadioSetup */
+static void b43_nphy_tx_cal_radio_setup(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u16 *save = nphy->tx_rx_cal_radio_saveregs;
+ u16 tmp;
+ u8 offset, i;
+
+ if (dev->phy.rev >= 3) {
+ for (i = 0; i < 2; i++) {
+ tmp = (i == 0) ? 0x2000 : 0x3000;
+ offset = i * 11;
+
+ save[offset + 0] = b43_radio_read16(dev, B2055_CAL_RVARCTL);
+ save[offset + 1] = b43_radio_read16(dev, B2055_CAL_LPOCTL);
+ save[offset + 2] = b43_radio_read16(dev, B2055_CAL_TS);
+ save[offset + 3] = b43_radio_read16(dev, B2055_CAL_RCCALRTS);
+ save[offset + 4] = b43_radio_read16(dev, B2055_CAL_RCALRTS);
+ save[offset + 5] = b43_radio_read16(dev, B2055_PADDRV);
+ save[offset + 6] = b43_radio_read16(dev, B2055_XOCTL1);
+ save[offset + 7] = b43_radio_read16(dev, B2055_XOCTL2);
+ save[offset + 8] = b43_radio_read16(dev, B2055_XOREGUL);
+ save[offset + 9] = b43_radio_read16(dev, B2055_XOMISC);
+ save[offset + 10] = b43_radio_read16(dev, B2055_PLL_LFC1);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x0A);
+ b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
+ b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
+ if (nphy->ipa5g_on) {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 4);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 1);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 0x2F);
+ }
+ b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_CAL_RVARCTL, 0x06);
+ b43_radio_write16(dev, tmp | B2055_CAL_LPOCTL, 0x40);
+ b43_radio_write16(dev, tmp | B2055_CAL_TS, 0x55);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_CAL_RCALRTS, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL1, 0);
+ if (nphy->ipa2g_on) {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 6);
+ b43_radio_write16(dev, tmp | B2055_XOCTL2,
+ (dev->phy.rev < 5) ? 0x11 : 0x01);
+ } else {
+ b43_radio_write16(dev, tmp | B2055_PADDRV, 0);
+ b43_radio_write16(dev, tmp | B2055_XOCTL2, 0);
+ }
+ }
+ b43_radio_write16(dev, tmp | B2055_XOREGUL, 0);
+ b43_radio_write16(dev, tmp | B2055_XOMISC, 0);
+ b43_radio_write16(dev, tmp | B2055_PLL_LFC1, 0);
+ }
+ } else {
+ save[0] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL1);
+ b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL1, 0x29);
+
+ save[1] = b43_radio_read16(dev, B2055_C1_TX_RF_IQCAL2);
+ b43_radio_write16(dev, B2055_C1_TX_RF_IQCAL2, 0x54);
+
+ save[2] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL1);
+ b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL1, 0x29);
+
+ save[3] = b43_radio_read16(dev, B2055_C2_TX_RF_IQCAL2);
+ b43_radio_write16(dev, B2055_C2_TX_RF_IQCAL2, 0x54);
+
+ save[3] = b43_radio_read16(dev, B2055_C1_PWRDET_RXTX);
+ save[4] = b43_radio_read16(dev, B2055_C2_PWRDET_RXTX);
+
+ if (!(b43_phy_read(dev, B43_NPHY_BANDCTL) &
+ B43_NPHY_BANDCTL_5GHZ)) {
+ b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x04);
+ b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x04);
+ } else {
+ b43_radio_write16(dev, B2055_C1_PWRDET_RXTX, 0x20);
+ b43_radio_write16(dev, B2055_C2_PWRDET_RXTX, 0x20);
+ }
+
+ if (dev->phy.rev < 2) {
+ b43_radio_set(dev, B2055_C1_TX_BB_MXGM, 0x20);
+ b43_radio_set(dev, B2055_C2_TX_BB_MXGM, 0x20);
+ } else {
+ b43_radio_mask(dev, B2055_C1_TX_BB_MXGM, ~0x20);
+ b43_radio_mask(dev, B2055_C2_TX_BB_MXGM, ~0x20);
+ }
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IqCalGainParams */
+static void b43_nphy_iq_cal_gain_params(struct b43_wldev *dev, u16 core,
+ struct nphy_txgains target,
+ struct nphy_iqcal_params *params)
+{
+ int i, j, indx;
+ u16 gain;
+
+ if (dev->phy.rev >= 3) {
+ params->txgm = target.txgm[core];
+ params->pga = target.pga[core];
+ params->pad = target.pad[core];
+ params->ipa = target.ipa[core];
+ params->cal_gain = (params->txgm << 12) | (params->pga << 8) |
+ (params->pad << 4) | (params->ipa);
+ for (j = 0; j < 5; j++)
+ params->ncorr[j] = 0x79;
+ } else {
+ gain = (target.pad[core]) | (target.pga[core] << 4) |
+ (target.txgm[core] << 8);
+
+ indx = (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ?
+ 1 : 0;
+ for (i = 0; i < 9; i++)
+ if (tbl_iqcal_gainparams[indx][i][0] == gain)
+ break;
+ i = min(i, 8);
+
+ params->txgm = tbl_iqcal_gainparams[indx][i][1];
+ params->pga = tbl_iqcal_gainparams[indx][i][2];
+ params->pad = tbl_iqcal_gainparams[indx][i][3];
+ params->cal_gain = (params->txgm << 7) | (params->pga << 4) |
+ (params->pad << 2);
+ for (j = 0; j < 4; j++)
+ params->ncorr[j] = tbl_iqcal_gainparams[indx][i][4 + j];
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/UpdateTxCalLadder */
+static void b43_nphy_update_tx_cal_ladder(struct b43_wldev *dev, u16 core)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ u16 scale, entry;
+
+ u16 tmp = nphy->txcal_bbmult;
+ if (core == 0)
+ tmp >>= 8;
+ tmp &= 0xff;
+
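+ /* Rescale every LO and IQ ladder entry by this core's current baseband multiplier */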
+ for (i = 0; i < 18; i++) {
+ scale = (ladder_lo[i].percent * tmp) / 100;
+ entry = ((scale & 0xFF) << 8) | ladder_lo[i].g_env;
+ b43_ntab_write(dev, B43_NTAB16(15, i), entry);
+
+ scale = (ladder_iq[i].percent * tmp) / 100;
+ entry = ((scale & 0xFF) << 8) | ladder_iq[i].g_env;
+ b43_ntab_write(dev, B43_NTAB16(15, i + 32), entry);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ExtPaSetTxDigiFilts */
+static void b43_nphy_ext_pa_set_tx_dig_filters(struct b43_wldev *dev)
+{
+ int i;
+ for (i = 0; i < 15; i++)
+ b43_phy_write(dev, B43_PHY_N(0x2C5 + i),
+ tbl_tx_filter_coef_rev4[2][i]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/IpaSetTxDigiFilts */
+static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
+{
+ int i, j;
+ /* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
+ u16 offset[] = { 0x186, 0x195, 0x2C5 };
+
+ for (i = 0; i < 3; i++)
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[i] + j),
+ tbl_tx_filter_coef_rev4[i][j]);
+
+ if (dev->phy.is_40mhz) {
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[3][j]);
+ } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) {
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[5][j]);
+ }
+
+ if (dev->phy.channel == 14)
+ for (j = 0; j < 15; j++)
+ b43_phy_write(dev, B43_PHY_N(offset[0] + j),
+ tbl_tx_filter_coef_rev4[6][j]);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/GetTxGain */
+static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 curr_gain[2];
+ struct nphy_txgains target;
+ const u32 *table = NULL;
+
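+ /* With TX power control disabled, read the gains back from the table; otherwise derive them from the current power index */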
+ if (nphy->txpwrctrl == 0) {
+ int i;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, true);
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, curr_gain);
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ for (i = 0; i < 2; ++i) {
+ if (dev->phy.rev >= 3) {
+ target.ipa[i] = curr_gain[i] & 0x000F;
+ target.pad[i] = (curr_gain[i] & 0x00F0) >> 4;
+ target.pga[i] = (curr_gain[i] & 0x0F00) >> 8;
+ target.txgm[i] = (curr_gain[i] & 0x7000) >> 12;
+ } else {
+ target.ipa[i] = curr_gain[i] & 0x0003;
+ target.pad[i] = (curr_gain[i] & 0x000C) >> 2;
+ target.pga[i] = (curr_gain[i] & 0x0070) >> 4;
+ target.txgm[i] = (curr_gain[i] & 0x0380) >> 7;
+ }
+ }
+ } else {
+ int i;
+ u16 index[2];
+ index[0] = (b43_phy_read(dev, B43_NPHY_C1_TXPCTL_STAT) &
+ B43_NPHY_TXPCTL_STAT_BIDX) >>
+ B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
+ index[1] = (b43_phy_read(dev, B43_NPHY_C2_TXPCTL_STAT) &
+ B43_NPHY_TXPCTL_STAT_BIDX) >>
+ B43_NPHY_TXPCTL_STAT_BIDX_SHIFT;
+
+ for (i = 0; i < 2; ++i) {
+ if (dev->phy.rev >= 3) {
+ enum ieee80211_band band =
+ b43_current_band(dev->wl);
+
+ if ((nphy->ipa2g_on &&
+ band == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on &&
+ band == IEEE80211_BAND_5GHZ)) {
+ table = b43_nphy_get_ipa_gain_table(dev);
+ } else {
+ if (band == IEEE80211_BAND_5GHZ) {
+ if (dev->phy.rev == 3)
+ table = b43_ntab_tx_gain_rev3_5ghz;
+ else if (dev->phy.rev == 4)
+ table = b43_ntab_tx_gain_rev4_5ghz;
+ else
+ table = b43_ntab_tx_gain_rev5plus_5ghz;
+ } else {
+ table = b43_ntab_tx_gain_rev3plus_2ghz;
+ }
+ }
+
+ target.ipa[i] = (table[index[i]] >> 16) & 0xF;
+ target.pad[i] = (table[index[i]] >> 20) & 0xF;
+ target.pga[i] = (table[index[i]] >> 24) & 0xF;
+ target.txgm[i] = (table[index[i]] >> 28) & 0xF;
+ } else {
+ table = b43_ntab_tx_gain_rev0_1_2;
+
+ target.ipa[i] = (table[index[i]] >> 16) & 0x3;
+ target.pad[i] = (table[index[i]] >> 18) & 0x3;
+ target.pga[i] = (table[index[i]] >> 20) & 0x7;
+ target.txgm[i] = (table[index[i]] >> 23) & 0x7;
+ }
+ }
+ }
+
+ return target;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhyCleanup */
+static void b43_nphy_tx_cal_phy_cleanup(struct b43_wldev *dev)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_AFECTL_C1, regs[0]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_C2, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, regs[2]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[3]);
+ b43_phy_write(dev, B43_NPHY_BBCFG, regs[4]);
+ b43_ntab_write(dev, B43_NTAB16(8, 3), regs[5]);
+ b43_ntab_write(dev, B43_NTAB16(8, 19), regs[6]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[7]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[8]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN0, regs[9]);
+ b43_phy_write(dev, B43_NPHY_PAPD_EN1, regs[10]);
+ b43_nphy_reset_cca(dev);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, regs[0]);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, regs[1]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, regs[2]);
+ b43_ntab_write(dev, B43_NTAB16(8, 2), regs[3]);
+ b43_ntab_write(dev, B43_NTAB16(8, 18), regs[4]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, regs[5]);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, regs[6]);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxCalPhySetup */
+static void b43_nphy_tx_cal_phy_setup(struct b43_wldev *dev)
+{
+ u16 *regs = dev->phy.n->tx_rx_cal_phy_saveregs;
+ u16 tmp;
+
+ regs[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
+ regs[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
+ if (dev->phy.rev >= 3) {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0xF0FF, 0x0A00);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0xF0FF, 0x0A00);
+
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER1);
+ regs[2] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, tmp | 0x0600);
+
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ regs[3] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x0600);
+
+ regs[4] = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_mask(dev, B43_NPHY_BBCFG, (u16)~B43_NPHY_BBCFG_RSTRX);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 3));
+ regs[5] = tmp;
+ b43_ntab_write(dev, B43_NTAB16(8, 3), 0);
+
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 19));
+ regs[6] = tmp;
+ b43_ntab_write(dev, B43_NTAB16(8, 19), 0);
+ regs[7] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[8] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+
+ b43_nphy_rf_control_intc_override(dev, 2, 1, 3);
+ b43_nphy_rf_control_intc_override(dev, 1, 2, 1);
+ b43_nphy_rf_control_intc_override(dev, 1, 8, 2);
+
+ regs[9] = b43_phy_read(dev, B43_NPHY_PAPD_EN0);
+ regs[10] = b43_phy_read(dev, B43_NPHY_PAPD_EN1);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN0, ~0x0001);
+ b43_phy_mask(dev, B43_NPHY_PAPD_EN1, ~0x0001);
+ } else {
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C1, 0x0FFF, 0xA000);
+ b43_phy_maskset(dev, B43_NPHY_AFECTL_C2, 0x0FFF, 0xA000);
+ tmp = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ regs[2] = tmp;
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp | 0x3000);
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 2));
+ regs[3] = tmp;
+ tmp |= 0x2000;
+ b43_ntab_write(dev, B43_NTAB16(8, 2), tmp);
+ tmp = b43_ntab_read(dev, B43_NTAB16(8, 18));
+ regs[4] = tmp;
+ tmp |= 0x2000;
+ b43_ntab_write(dev, B43_NTAB16(8, 18), tmp);
+ regs[5] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC1);
+ regs[6] = b43_phy_read(dev, B43_NPHY_RFCTL_INTC2);
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
+ tmp = 0x0180;
+ else
+ tmp = 0x0120;
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, tmp);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, tmp);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/SaveCal */
+static void b43_nphy_save_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
+ u16 *txcal_radio_regs = NULL;
+ u8 *iqcal_chanspec;
+ u16 *table = NULL;
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
+ iqcal_chanspec = &nphy->iqcal_chanspec_2G;
+ table = nphy->cal_cache.txcal_coeffs_2G;
+ } else {
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
+ iqcal_chanspec = &nphy->iqcal_chanspec_5G;
+ table = nphy->cal_cache.txcal_coeffs_5G;
+ }
+
+ b43_nphy_rx_iq_coeffs(dev, false, rxcal_coeffs);
+ /* TODO use some definitions */
+ if (dev->phy.rev >= 3) {
+ txcal_radio_regs[0] = b43_radio_read(dev, 0x2021);
+ txcal_radio_regs[1] = b43_radio_read(dev, 0x2022);
+ txcal_radio_regs[2] = b43_radio_read(dev, 0x3021);
+ txcal_radio_regs[3] = b43_radio_read(dev, 0x3022);
+ txcal_radio_regs[4] = b43_radio_read(dev, 0x2023);
+ txcal_radio_regs[5] = b43_radio_read(dev, 0x2024);
+ txcal_radio_regs[6] = b43_radio_read(dev, 0x3023);
+ txcal_radio_regs[7] = b43_radio_read(dev, 0x3024);
+ } else {
+ txcal_radio_regs[0] = b43_radio_read(dev, 0x8B);
+ txcal_radio_regs[1] = b43_radio_read(dev, 0xBA);
+ txcal_radio_regs[2] = b43_radio_read(dev, 0x8D);
+ txcal_radio_regs[3] = b43_radio_read(dev, 0xBC);
+ }
+ *iqcal_chanspec = nphy->radio_chanspec;
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 8, table);
+
+ if (nphy->hang_avoid)
+ b43_nphy_stay_in_carrier_search(dev, 0);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RestoreCal */
+static void b43_nphy_restore_cal(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+
+ u16 coef[4];
+ u16 *loft = NULL;
+ u16 *table = NULL;
+
+ int i;
+ u16 *txcal_radio_regs = NULL;
+ struct b43_phy_n_iq_comp *rxcal_coeffs = NULL;
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ if (nphy->iqcal_chanspec_2G == 0)
+ return;
+ table = nphy->cal_cache.txcal_coeffs_2G;
+ loft = &nphy->cal_cache.txcal_coeffs_2G[5];
+ } else {
+ if (nphy->iqcal_chanspec_5G == 0)
+ return;
+ table = nphy->cal_cache.txcal_coeffs_5G;
+ loft = &nphy->cal_cache.txcal_coeffs_5G[5];
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4, table);
+
+ for (i = 0; i < 4; i++) {
+ if (dev->phy.rev >= 3)
+ table[i] = coef[i];
+ else
+ coef[i] = 0;
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4, coef);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2, loft);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2, loft);
+
+ if (dev->phy.rev < 2)
+ b43_nphy_tx_iq_workaround(dev);
+
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_2G;
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_2G;
+ } else {
+ txcal_radio_regs = nphy->cal_cache.txcal_radio_regs_5G;
+ rxcal_coeffs = &nphy->cal_cache.rxcal_coeffs_5G;
+ }
+
+ /* TODO use some definitions */
+ if (dev->phy.rev >= 3) {
+ b43_radio_write(dev, 0x2021, txcal_radio_regs[0]);
+ b43_radio_write(dev, 0x2022, txcal_radio_regs[1]);
+ b43_radio_write(dev, 0x3021, txcal_radio_regs[2]);
+ b43_radio_write(dev, 0x3022, txcal_radio_regs[3]);
+ b43_radio_write(dev, 0x2023, txcal_radio_regs[4]);
+ b43_radio_write(dev, 0x2024, txcal_radio_regs[5]);
+ b43_radio_write(dev, 0x3023, txcal_radio_regs[6]);
+ b43_radio_write(dev, 0x3024, txcal_radio_regs[7]);
+ } else {
+ b43_radio_write(dev, 0x8B, txcal_radio_regs[0]);
+ b43_radio_write(dev, 0xBA, txcal_radio_regs[1]);
+ b43_radio_write(dev, 0x8D, txcal_radio_regs[2]);
+ b43_radio_write(dev, 0xBC, txcal_radio_regs[3]);
+ }
+ b43_nphy_rx_iq_coeffs(dev, true, rxcal_coeffs);
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalTxIqlo */
+static int b43_nphy_cal_tx_iq_lo(struct b43_wldev *dev,
+ struct nphy_txgains target,
+ bool full, bool mphase)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i;
+ int error = 0;
+ int freq;
+ bool avoid = false;
+ u8 length;
+ u16 tmp, core, type, count, max, numb, last, cmd;
+ const u16 *table;
+ bool phy6or5x;
+
+ u16 buffer[11];
+ u16 diq_start = 0;
+ u16 save[2];
+ u16 gain[2];
+ struct nphy_iqcal_params params[2];
+ bool updated[2] = { };
+
+ b43_nphy_stay_in_carrier_search(dev, true);
+
+ if (dev->phy.rev >= 4) {
+ avoid = nphy->hang_avoid;
+ nphy->hang_avoid = 0;
+ }
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+
+ for (i = 0; i < 2; i++) {
+ b43_nphy_iq_cal_gain_params(dev, i, target, &params[i]);
+ gain[i] = params[i].cal_gain;
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain);
+
+ b43_nphy_tx_cal_radio_setup(dev);
+ b43_nphy_tx_cal_phy_setup(dev);
+
+ phy6or5x = dev->phy.rev >= 6 ||
+ (dev->phy.rev == 5 && nphy->ipa2g_on &&
+ b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ);
+ if (phy6or5x) {
+ if (dev->phy.is_40mhz) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
+ tbl_tx_iqlo_cal_loft_ladder_40);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
+ tbl_tx_iqlo_cal_iqimb_ladder_40);
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 0), 18,
+ tbl_tx_iqlo_cal_loft_ladder_20);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 32), 18,
+ tbl_tx_iqlo_cal_iqimb_ladder_20);
+ }
+ }
+
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0x8AA9);
+
+ if (!dev->phy.is_40mhz)
+ freq = 2500;
+ else
+ freq = 5000;
+
+ if (nphy->mphase_cal_phase_id > 2)
+ b43_nphy_run_samples(dev, (dev->phy.is_40mhz ? 40 : 20) * 8,
+ 0xFFFF, 0, true, false);
+ else
+ error = b43_nphy_tx_tone(dev, freq, 250, true, false);
+
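+ /* Pick starting coefficients: best results from a previous (multi-phase) run if still valid, full-cal defaults otherwise */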
+ if (error == 0) {
+ if (nphy->mphase_cal_phase_id > 2) {
+ table = nphy->mphase_txcal_bestcoeffs;
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ } else {
+ if (!full && nphy->txiqlocal_coeffsvalid) {
+ table = nphy->txiqlocal_bestc;
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ } else {
+ full = true;
+ if (dev->phy.rev >= 3) {
+ table = tbl_tx_iqlo_cal_startcoefs_nphyrev3;
+ length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3;
+ } else {
+ table = tbl_tx_iqlo_cal_startcoefs;
+ length = B43_NTAB_TX_IQLO_CAL_STARTCOEFS;
+ }
+ }
+ }
+
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length, table);
+
+ if (full) {
+ if (dev->phy.rev >= 3)
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3;
+ else
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL;
+ } else {
+ if (dev->phy.rev >= 3)
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3;
+ else
+ max = B43_NTAB_TX_IQLO_CAL_CMDS_RECAL;
+ }
+
+ if (mphase) {
+ count = nphy->mphase_txcal_cmdidx;
+ numb = min(max,
+ (u16)(count + nphy->mphase_txcal_numcmds));
+ } else {
+ count = 0;
+ numb = max;
+ }
+
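+ /* Execute the calibration commands one by one, polling up to 20 ms for each to complete */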
+ for (; count < numb; count++) {
+ if (full) {
+ if (dev->phy.rev >= 3)
+ cmd = tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[count];
+ else
+ cmd = tbl_tx_iqlo_cal_cmds_fullcal[count];
+ } else {
+ if (dev->phy.rev >= 3)
+ cmd = tbl_tx_iqlo_cal_cmds_recal_nphyrev3[count];
+ else
+ cmd = tbl_tx_iqlo_cal_cmds_recal[count];
+ }
+
+ core = (cmd & 0x3000) >> 12;
+ type = (cmd & 0x0F00) >> 8;
+
+ if (phy6or5x && updated[core] == 0) {
+ b43_nphy_update_tx_cal_ladder(dev, core);
+ updated[core] = 1;
+ }
+
+ tmp = (params[core].ncorr[type] << 8) | 0x66;
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDNNUM, tmp);
+
+ if (type == 1 || type == 3 || type == 4) {
+ buffer[0] = b43_ntab_read(dev,
+ B43_NTAB16(15, 69 + core));
+ diq_start = buffer[0];
+ buffer[0] = 0;
+ b43_ntab_write(dev, B43_NTAB16(15, 69 + core),
+ 0);
+ }
+
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMD, cmd);
+ for (i = 0; i < 2000; i++) {
+ tmp = b43_phy_read(dev, B43_NPHY_IQLOCAL_CMD);
+ if (tmp & 0xC000)
+ break;
+ udelay(10);
+ }
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 64), length,
+ buffer);
+
+ if (type == 1 || type == 3 || type == 4)
+ buffer[0] = diq_start;
+ }
+
+ if (mphase)
+ nphy->mphase_txcal_cmdidx = (numb >= max) ? 0 : numb;
+
+ last = (dev->phy.rev < 3) ? 6 : 7;
+
+ if (!mphase || nphy->mphase_cal_phase_id == last) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 96), 4, buffer);
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 4, buffer);
+ if (dev->phy.rev < 3) {
+ buffer[0] = 0;
+ buffer[1] = 0;
+ buffer[2] = 0;
+ buffer[3] = 0;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 101), 2,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
+ buffer);
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ nphy->txiqlocal_bestc);
+ nphy->txiqlocal_coeffsvalid = true;
+ /* TODO: Set nphy->txiqlocal_chanspec to
+ the current channel */
+ } else {
+ length = 11;
+ if (dev->phy.rev < 3)
+ length -= 2;
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 96), length,
+ nphy->mphase_txcal_bestcoeffs);
+ }
+
+ b43_nphy_stop_playback(dev);
+ b43_phy_write(dev, B43_NPHY_IQLOCAL_CMDGCTL, 0);
+ }
+
+ b43_nphy_tx_cal_phy_cleanup(dev);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, save);
+
+ if (dev->phy.rev < 2 && (!mphase || nphy->mphase_cal_phase_id == last))
+ b43_nphy_tx_iq_workaround(dev);
+
+ if (dev->phy.rev >= 4)
+ nphy->hang_avoid = avoid;
+
+ b43_nphy_stay_in_carrier_search(dev, false);
+
+ return error;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ReapplyTxCalCoeffs */
+static void b43_nphy_reapply_tx_cal_coeffs(struct b43_wldev *dev)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ u8 i;
+ u16 buffer[7];
+ bool equal = true;
+
+ if (!nphy->txiqlocal_coeffsvalid || 1 /* FIXME */)
+ return;
+
+ b43_ntab_read_bulk(dev, B43_NTAB16(15, 80), 7, buffer);
+ for (i = 0; i < 4; i++) {
+ if (buffer[i] != nphy->txiqlocal_bestc[i]) {
+ equal = false;
+ break;
+ }
+ }
+
+ if (!equal) {
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 80), 4,
+ nphy->txiqlocal_bestc);
+ for (i = 0; i < 4; i++)
+ buffer[i] = 0;
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 88), 4,
+ buffer);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 85), 2,
+ &nphy->txiqlocal_bestc[5]);
+ b43_ntab_write_bulk(dev, B43_NTAB16(15, 93), 2,
+ &nphy->txiqlocal_bestc[5]);
+ }
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIqRev2 */
+static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ struct b43_phy_n *nphy = dev->phy.n;
+ int i, j, index;
+ u8 rfctl[2];
+ u8 afectl_core;
+ u16 tmp[6];
+ u16 cur_hpf1, cur_hpf2, cur_lna;
+ u32 real, imag;
+ enum ieee80211_band band;
+
+ u8 use;
+ u16 cur_hpf;
+ u16 lna[3] = { 3, 3, 1 };
+ u16 hpf1[3] = { 7, 2, 0 };
+ u16 hpf2[3] = { 2, 0, 0 };
+ u32 power[3] = { };
+ u16 gain_save[2];
+ u16 cal_gain[2];
+ struct nphy_iqcal_params cal_params[2];
+ struct nphy_iq_est est;
+ int ret = 0;
+ bool playtone = true;
+ int desired = 13;
+
+ b43_nphy_stay_in_carrier_search(dev, 1);
+
+ if (dev->phy.rev < 2)
+ b43_nphy_reapply_tx_cal_coeffs(dev);
+ b43_ntab_read_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
+ for (i = 0; i < 2; i++) {
+ b43_nphy_iq_cal_gain_params(dev, i, target, &cal_params[i]);
+ cal_gain[i] = cal_params[i].cal_gain;
+ }
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, cal_gain);
+
+ for (i = 0; i < 2; i++) {
+ if (i == 0) {
+ rfctl[0] = B43_NPHY_RFCTL_INTC1;
+ rfctl[1] = B43_NPHY_RFCTL_INTC2;
+ afectl_core = B43_NPHY_AFECTL_C1;
+ } else {
+ rfctl[0] = B43_NPHY_RFCTL_INTC2;
+ rfctl[1] = B43_NPHY_RFCTL_INTC1;
+ afectl_core = B43_NPHY_AFECTL_C2;
+ }
+
+ tmp[1] = b43_phy_read(dev, B43_NPHY_RFSEQCA);
+ tmp[2] = b43_phy_read(dev, afectl_core);
+ tmp[3] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
+ tmp[4] = b43_phy_read(dev, rfctl[0]);
+ tmp[5] = b43_phy_read(dev, rfctl[1]);
+
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA,
+ (u16)~B43_NPHY_RFSEQCA_RXDIS,
+ ((1 - i) << B43_NPHY_RFSEQCA_RXDIS_SHIFT));
+ b43_phy_maskset(dev, B43_NPHY_RFSEQCA, ~B43_NPHY_RFSEQCA_TXEN,
+ (1 - i));
+ b43_phy_set(dev, afectl_core, 0x0006);
+ b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0006);
+
+ band = b43_current_band(dev->wl);
+
+ if (nphy->rxcalparams & 0xFF000000) {
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[0], 0x140);
+ else
+ b43_phy_write(dev, rfctl[0], 0x110);
+ } else {
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[0], 0x180);
+ else
+ b43_phy_write(dev, rfctl[0], 0x120);
+ }
+
+ if (band == IEEE80211_BAND_5GHZ)
+ b43_phy_write(dev, rfctl[1], 0x148);
+ else
+ b43_phy_write(dev, rfctl[1], 0x114);
+
+ if (nphy->rxcalparams & 0x10000) {
+ b43_radio_maskset(dev, B2055_C1_GENSPARE2, 0xFC,
+ (i + 1));
+ b43_radio_maskset(dev, B2055_C2_GENSPARE2, 0xFC,
+ (2 - i));
+ }
+
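+ /* Three fixed LNA/HPF gain steps, then a final pass with a gain derived from the measured power */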
+ for (j = 0; j < 4; j++) {
+ if (j < 3) {
+ cur_lna = lna[j];
+ cur_hpf1 = hpf1[j];
+ cur_hpf2 = hpf2[j];
+ } else {
+ if (power[1] > 10000) {
+ use = 1;
+ cur_hpf = cur_hpf1;
+ index = 2;
+ } else {
+ if (power[0] > 10000) {
+ use = 1;
+ cur_hpf = cur_hpf1;
+ index = 1;
+ } else {
+ index = 0;
+ use = 2;
+ cur_hpf = cur_hpf2;
+ }
+ }
+ cur_lna = lna[index];
+ cur_hpf1 = hpf1[index];
+ cur_hpf2 = hpf2[index];
+ cur_hpf += desired - hweight32(power[index]);
+ cur_hpf = clamp_val(cur_hpf, 0, 10);
+ if (use == 1)
+ cur_hpf1 = cur_hpf;
+ else
+ cur_hpf2 = cur_hpf;
+ }
+
+ tmp[0] = ((cur_hpf2 << 8) | (cur_hpf1 << 4) |
+ (cur_lna << 2));
+ b43_nphy_rf_control_override(dev, 0x400, tmp[0], 3,
+ false);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_nphy_stop_playback(dev);
+
+ if (playtone) {
+ ret = b43_nphy_tx_tone(dev, 4000,
+ (nphy->rxcalparams & 0xFFFF),
+ false, false);
+ playtone = false;
+ } else {
+ b43_nphy_run_samples(dev, 160, 0xFFFF, 0,
+ false, false);
+ }
+
+ if (ret == 0) {
+ if (j < 3) {
+ b43_nphy_rx_iq_est(dev, &est, 1024, 32,
+ false);
+ if (i == 0) {
+ real = est.i0_pwr;
+ imag = est.q0_pwr;
+ } else {
+ real = est.i1_pwr;
+ imag = est.q1_pwr;
+ }
+ power[i] = ((real + imag) / 1024) + 1;
+ } else {
+ b43_nphy_calc_rx_iq_comp(dev, 1 << i);
+ }
+ b43_nphy_stop_playback(dev);
+ }
+
+ if (ret != 0)
+ break;
+ }
+
+ b43_radio_mask(dev, B2055_C1_GENSPARE2, 0xFC);
+ b43_radio_mask(dev, B2055_C2_GENSPARE2, 0xFC);
+ b43_phy_write(dev, rfctl[1], tmp[5]);
+ b43_phy_write(dev, rfctl[0], tmp[4]);
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER, tmp[3]);
+ b43_phy_write(dev, afectl_core, tmp[2]);
+ b43_phy_write(dev, B43_NPHY_RFSEQCA, tmp[1]);
+
+ if (ret != 0)
+ break;
+ }
+
+ b43_nphy_rf_control_override(dev, 0x400, 0, 3, true);
+ b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_ntab_write_bulk(dev, B43_NTAB16(7, 0x110), 2, gain_save);
+
+ b43_nphy_stay_in_carrier_search(dev, 0);
+
+ return ret;
+}
+
+static int b43_nphy_rev3_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ return -1;
+}
+
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalRxIq */
+static int b43_nphy_cal_rx_iq(struct b43_wldev *dev,
+ struct nphy_txgains target, u8 type, bool debug)
+{
+ if (dev->phy.rev >= 3)
+ return b43_nphy_rev3_cal_rx_iq(dev, target, type, debug);
+ else
+ return b43_nphy_rev2_cal_rx_iq(dev, target, type, debug);
+}
+
+/*
+ * Init N-PHY
+ * http://bcm-v4.sipsolutions.net/802.11/PHY/Init/N
+ */
int b43_phy_initn(struct b43_wldev *dev)
{
+ struct ssb_bus *bus = dev->dev->bus;
struct b43_phy *phy = &dev->phy;
+ struct b43_phy_n *nphy = phy->n;
+ u8 tx_pwr_state;
+ struct nphy_txgains target;
u16 tmp;
+ enum ieee80211_band tmp2;
+ bool do_rssi_cal;
- //TODO: Spectral management
+ u16 clip[2];
+ bool do_cal = false;
+
+ if ((dev->phy.rev >= 3) &&
+ (bus->sprom.boardflags_lo & B43_BFL_EXTLNA) &&
+ (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)) {
+ chipco_set32(&dev->dev->bus->chipco, SSB_CHIPCO_CHIPCTL, 0x40);
+ }
+ nphy->deaf_count = 0;
b43_nphy_tables_init(dev);
+ nphy->crsminpwr_adjusted = false;
+ nphy->noisevars_adjusted = false;
/* Clear all overrides */
- b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ if (dev->phy.rev >= 3) {
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S1, 0);
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, 0);
+ b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, 0);
+ } else {
+ b43_phy_write(dev, B43_NPHY_RFCTL_OVER, 0);
+ }
b43_phy_write(dev, B43_NPHY_RFCTL_INTC1, 0);
b43_phy_write(dev, B43_NPHY_RFCTL_INTC2, 0);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
- b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
+ if (dev->phy.rev < 6) {
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC3, 0);
+ b43_phy_write(dev, B43_NPHY_RFCTL_INTC4, 0);
+ }
b43_phy_mask(dev, B43_NPHY_RFSEQMODE,
~(B43_NPHY_RFSEQMODE_CAOVER |
B43_NPHY_RFSEQMODE_TROVER));
+ if (dev->phy.rev >= 3)
+ b43_phy_write(dev, B43_NPHY_AFECTL_OVER1, 0);
b43_phy_write(dev, B43_NPHY_AFECTL_OVER, 0);
- tmp = (phy->rev < 2) ? 64 : 59;
- b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
- ~B43_NPHY_BPHY_CTL3_SCALE,
- tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
-
+ if (dev->phy.rev <= 2) {
+ tmp = (dev->phy.rev == 2) ? 0x3B : 0x40;
+ b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
+ ~B43_NPHY_BPHY_CTL3_SCALE,
+ tmp << B43_NPHY_BPHY_CTL3_SCALE_SHIFT);
+ }
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_20M, 0x20);
b43_phy_write(dev, B43_NPHY_AFESEQ_TX2RX_PUD_40M, 0x20);
- b43_phy_write(dev, B43_NPHY_TXREALFD, 184);
- b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 200);
- b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 80);
- b43_phy_write(dev, B43_NPHY_C2_BCLIPBKOFF, 511);
+ if (bus->sprom.boardflags2_lo & 0x100 ||
+ (bus->boardinfo.vendor == PCI_VENDOR_ID_APPLE &&
+ bus->boardinfo.type == 0x8B))
+ b43_phy_write(dev, B43_NPHY_TXREALFD, 0xA0);
+ else
+ b43_phy_write(dev, B43_NPHY_TXREALFD, 0xB8);
+ b43_phy_write(dev, B43_NPHY_MIMO_CRSTXEXT, 0xC8);
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x50);
+ b43_phy_write(dev, B43_NPHY_TXRIFS_FRDEL, 0x30);
- //TODO MIMO-Config
- //TODO Update TX/RX chain
+ b43_nphy_update_mimo_config(dev, nphy->preamble_override);
+ b43_nphy_update_txrx_chain(dev);
if (phy->rev < 2) {
b43_phy_write(dev, B43_NPHY_DUP40_GFBL, 0xAA8);
b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4);
}
+
+ tmp2 = b43_current_band(dev->wl);
+ if ((nphy->ipa2g_on && tmp2 == IEEE80211_BAND_2GHZ) ||
+ (nphy->ipa5g_on && tmp2 == IEEE80211_BAND_5GHZ)) {
+ b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F,
+ nphy->papd_epsilon_offset[0] << 7);
+ b43_phy_set(dev, B43_NPHY_PAPD_EN1, 0x1);
+ b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ1, 0x007F,
+ nphy->papd_epsilon_offset[1] << 7);
+ b43_nphy_int_pa_set_tx_dig_filters(dev);
+ } else if (phy->rev >= 5) {
+ b43_nphy_ext_pa_set_tx_dig_filters(dev);
+ }
+
b43_nphy_workarounds(dev);
- b43_nphy_reset_cca(dev);
- ssb_write32(dev->dev, SSB_TMSLOW,
- ssb_read32(dev->dev, SSB_TMSLOW) | B43_TMSLOW_MACPHYCLKEN);
+ /* Reset CCA; during init this differs slightly from the standard reset sequence */
+ b43_nphy_bmac_clock_fgc(dev, 1);
+ tmp = b43_phy_read(dev, B43_NPHY_BBCFG);
+ b43_phy_write(dev, B43_NPHY_BBCFG, tmp | B43_NPHY_BBCFG_RSTCCA);
+ b43_phy_write(dev, B43_NPHY_BBCFG, tmp & ~B43_NPHY_BBCFG_RSTCCA);
+ b43_nphy_bmac_clock_fgc(dev, 0);
+
+ /* TODO N PHY MAC PHY Clock Set with argument 1 */
+
+ b43_nphy_pa_override(dev, false);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RX2TX);
b43_nphy_force_rf_sequence(dev, B43_RFSEQ_RESET2RX);
+ b43_nphy_pa_override(dev, true);
+
+ b43_nphy_classifier(dev, 0, 0);
+ b43_nphy_read_clip_detection(dev, clip);
+ tx_pwr_state = nphy->txpwrctrl;
+ /* TODO N PHY TX power control with argument 0
+ (turning off power control) */
+ /* TODO Fix the TX Power Settings */
+ /* TODO N PHY TX Power Control Idle TSSI */
+ /* TODO N PHY TX Power Control Setup */
+
+ if (phy->rev >= 3) {
+ /* TODO */
+ } else {
+ b43_ntab_write_bulk(dev, B43_NTAB32(26, 192), 128,
+ b43_ntab_tx_gain_rev0_1_2);
+ b43_ntab_write_bulk(dev, B43_NTAB32(27, 192), 128,
+ b43_ntab_tx_gain_rev0_1_2);
+ }
+
+ if (nphy->phyrxchain != 3)
+ ;/* TODO N PHY RX Core Set State with phyrxchain as argument */
+ if (nphy->mphase_cal_phase_id > 0)
+ ;/* TODO PHY Periodic Calibration Multi-Phase Restart */
+
+ do_rssi_cal = false;
+ if (phy->rev >= 3) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ do_rssi_cal = (nphy->rssical_chanspec_2G == 0);
+ else
+ do_rssi_cal = (nphy->rssical_chanspec_5G == 0);
+
+ if (do_rssi_cal)
+ b43_nphy_rssi_cal(dev);
+ else
+ b43_nphy_restore_rssi_cal(dev);
+ } else {
+ b43_nphy_rssi_cal(dev);
+ }
+
+ if (!(nphy->measure_hold & 0x6)) {
+ if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+ do_cal = (nphy->iqcal_chanspec_2G == 0);
+ else
+ do_cal = (nphy->iqcal_chanspec_5G == 0);
+
+ if (nphy->mute)
+ do_cal = false;
+
+ if (do_cal) {
+ target = b43_nphy_get_tx_gains(dev);
+
+ if (nphy->antsel_type == 2)
+ ;/*TODO NPHY Superswitch Init with argument 1*/
+ if (nphy->perical != 2) {
+ b43_nphy_rssi_cal(dev);
+ if (phy->rev >= 3) {
+ nphy->cal_orig_pwr_idx[0] =
+ nphy->txpwrindex[0].index_internal;
+ nphy->cal_orig_pwr_idx[1] =
+ nphy->txpwrindex[1].index_internal;
+ /* TODO N PHY Pre Calibrate TX Gain */
+ target = b43_nphy_get_tx_gains(dev);
+ }
+ }
+ }
+ }
+
+ if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
+ if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
+ b43_nphy_save_cal(dev);
+ else if (nphy->mphase_cal_phase_id == 0)
+ ;/* N PHY Periodic Calibration with argument 3 */
+ } else {
+ b43_nphy_restore_cal(dev);
+ }
- b43_phy_read(dev, B43_NPHY_CLASSCTL); /* dummy read */
- //TODO read core1/2 clip1 thres regs
-
- if (1 /* FIXME Band is 2.4GHz */)
- b43_nphy_bphy_init(dev);
- //TODO disable TX power control
- //TODO Fix the TX power settings
- //TODO Init periodic calibration with reason 3
- b43_nphy_rssi_cal(dev, 2);
- b43_nphy_rssi_cal(dev, 0);
- b43_nphy_rssi_cal(dev, 1);
- //TODO get TX gain
- //TODO init superswitch
- //TODO calibrate LO
- //TODO idle TSSI TX pctl
- //TODO TX power control power setup
- //TODO table writes
- //TODO TX power control coefficients
- //TODO enable TX power control
- //TODO control antenna selection
- //TODO init radar detection
- //TODO reset channel if changed
+ b43_nphy_tx_pwr_ctrl_coef_setup(dev);
+ /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */
+ b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
+ b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
+ if (phy->rev >= 3 && phy->rev <= 6)
+ b43_phy_write(dev, B43_NPHY_PLOAD_CSENSE_EXTLEN, 0x0014);
+ b43_nphy_tx_lp_fbw(dev);
+ if (phy->rev >= 3)
+ b43_nphy_spur_workaround(dev);
b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
return 0;
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index 1749aef4147..403aad3f894 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -231,6 +231,7 @@
#define B43_NPHY_C2_TXIQ_COMP_OFF B43_PHY_N(0x088) /* Core 2 TX I/Q comp offset */
#define B43_NPHY_C1_TXCTL B43_PHY_N(0x08B) /* Core 1 TX control */
#define B43_NPHY_C2_TXCTL B43_PHY_N(0x08C) /* Core 2 TX control */
+#define B43_NPHY_AFECTL_OVER1 B43_PHY_N(0x08F) /* AFE control override 1 */
#define B43_NPHY_SCRAM_SIGCTL B43_PHY_N(0x090) /* Scram signal control */
#define B43_NPHY_SCRAM_SIGCTL_INITST 0x007F /* Initial state value */
#define B43_NPHY_SCRAM_SIGCTL_INITST_SHIFT 0
@@ -705,6 +706,10 @@
#define B43_NPHY_TXPCTL_INIT B43_PHY_N(0x222) /* TX power controll init */
#define B43_NPHY_TXPCTL_INIT_PIDXI1 0x00FF /* Power index init 1 */
#define B43_NPHY_TXPCTL_INIT_PIDXI1_SHIFT 0
+#define B43_NPHY_PAPD_EN0 B43_PHY_N(0x297) /* PAPD Enable0 TBD */
+#define B43_NPHY_EPS_TABLE_ADJ0 B43_PHY_N(0x298) /* EPS Table Adj0 TBD */
+#define B43_NPHY_PAPD_EN1 B43_PHY_N(0x29B) /* PAPD Enable1 TBD */
+#define B43_NPHY_EPS_TABLE_ADJ1 B43_PHY_N(0x29C) /* EPS Table Adj1 TBD */
@@ -919,8 +924,99 @@
struct b43_wldev;
+struct b43_phy_n_iq_comp {
+ s16 a0;
+ s16 b0;
+ s16 a1;
+ s16 b1;
+};
+
+struct b43_phy_n_rssical_cache {
+ u16 rssical_radio_regs_2G[2];
+ u16 rssical_phy_regs_2G[12];
+
+ u16 rssical_radio_regs_5G[2];
+ u16 rssical_phy_regs_5G[12];
+};
+
+struct b43_phy_n_cal_cache {
+ u16 txcal_radio_regs_2G[8];
+ u16 txcal_coeffs_2G[8];
+ struct b43_phy_n_iq_comp rxcal_coeffs_2G;
+
+ u16 txcal_radio_regs_5G[8];
+ u16 txcal_coeffs_5G[8];
+ struct b43_phy_n_iq_comp rxcal_coeffs_5G;
+};
+
+struct b43_phy_n_txpwrindex {
+ s8 index;
+ s8 index_internal;
+ s8 index_internal_save;
+ u16 AfectrlOverride;
+ u16 AfeCtrlDacGain;
+ u16 rad_gain;
+ u8 bbmult;
+ u16 iqcomp_a;
+ u16 iqcomp_b;
+ u16 locomp;
+};
+
struct b43_phy_n {
- //TODO lots of missing stuff
+ u8 antsel_type;
+ u8 cal_orig_pwr_idx[2];
+ u8 measure_hold;
+ u8 phyrxchain;
+ u8 perical;
+ u32 deaf_count;
+ u32 rxcalparams;
+ bool hang_avoid;
+ bool mute;
+ u16 papd_epsilon_offset[2];
+ s32 preamble_override;
+ u32 bb_mult_save;
+ u16 radio_chanspec;
+
+ bool gain_boost;
+ bool elna_gain_config;
+ bool band5g_pwrgain;
+
+ u8 mphase_cal_phase_id;
+ u16 mphase_txcal_cmdidx;
+ u16 mphase_txcal_numcmds;
+ u16 mphase_txcal_bestcoeffs[11];
+
+ u8 txpwrctrl;
+ u16 txcal_bbmult;
+ u16 txiqlocal_bestc[11];
+ bool txiqlocal_coeffsvalid;
+ struct b43_phy_n_txpwrindex txpwrindex[2];
+
+ u8 txrx_chain;
+ u16 tx_rx_cal_phy_saveregs[11];
+ u16 tx_rx_cal_radio_saveregs[22];
+
+ u16 rfctrl_intc1_save;
+ u16 rfctrl_intc2_save;
+
+ u16 classifier_state;
+ u16 clip_state[2];
+
+ bool aband_spurwar_en;
+ bool gband_spurwar_en;
+
+ bool ipa2g_on;
+ u8 iqcal_chanspec_2G;
+ u8 rssical_chanspec_2G;
+
+ bool ipa5g_on;
+ u8 iqcal_chanspec_5G;
+ u8 rssical_chanspec_5G;
+
+ struct b43_phy_n_rssical_cache rssical_cache;
+ struct b43_phy_n_cal_cache cal_cache;
+ bool crsminpwr_adjusted;
+ bool noisevars_adjusted;
};
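
The filled-in state structure above replaces the old placeholder comment. For orientation only, a hedged sketch of how code typically allocates and reaches this state follows; it is not part of the patch, the function names are invented, and the dev->phy.n accessor is an assumption based on how phy_n.c dereferences nphy.

#include <linux/slab.h>	/* kzalloc(); also needs "b43.h" and "phy_n.h" */

/* Hypothetical sketch: allocate the per-device N-PHY state and hang it
 * off the PHY, assuming struct b43_phy carries an `n' pointer member. */
static int example_nphy_alloc(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy;

	nphy = kzalloc(sizeof(*nphy), GFP_KERNEL);
	if (!nphy)
		return -ENOMEM;
	dev->phy.n = nphy;
	return 0;
}

/* Later, e.g. from the init/calibration paths shown in phy_n.c: */
static void example_reset_mphase_cal(struct b43_wldev *dev)
{
	struct b43_phy_n *nphy = dev->phy.n;

	nphy->mphase_cal_phase_id = 0;
	nphy->mphase_txcal_cmdidx = 0;
}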
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index c01b8e02412..a6062c3e89a 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -559,7 +559,6 @@ int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb)
b43err(dev->wl, "PIO transmission failure\n");
goto out;
}
- q->nr_tx_packets++;
B43_WARN_ON(q->buffer_used > q->buffer_size);
if (((q->buffer_size - q->buffer_used) < roundup(2 + 2 + 6, 4)) ||
@@ -605,22 +604,6 @@ void b43_pio_handle_txstatus(struct b43_wldev *dev,
}
}
-void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43_pio_txqueue *q;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- q = select_queue_by_priority(dev, i);
-
- stats[i].len = B43_PIO_MAX_NR_TXPACKETS - q->free_packet_slots;
- stats[i].limit = B43_PIO_MAX_NR_TXPACKETS;
- stats[i].count = q->nr_tx_packets;
- }
-}
-
/* Returns whether we should fetch another frame. */
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
diff --git a/drivers/net/wireless/b43/pio.h b/drivers/net/wireless/b43/pio.h
index 7dd649c9dda..1e516147424 100644
--- a/drivers/net/wireless/b43/pio.h
+++ b/drivers/net/wireless/b43/pio.h
@@ -55,8 +55,6 @@
#define B43_PIO_MAX_NR_TXPACKETS 32
-#ifdef CONFIG_B43_PIO
-
struct b43_pio_txpacket {
/* Pointer to the TX queue we belong to. */
struct b43_pio_txqueue *queue;
@@ -92,9 +90,6 @@ struct b43_pio_txqueue {
struct b43_pio_txpacket packets[B43_PIO_MAX_NR_TXPACKETS];
struct list_head packets_list;
- /* Total number of transmitted packets. */
- unsigned int nr_tx_packets;
-
/* Shortcut to the 802.11 core revision. This is to
* avoid horrible pointer dereferencing in the fastpaths. */
u8 rev;
@@ -162,49 +157,9 @@ void b43_pio_free(struct b43_wldev *dev);
int b43_pio_tx(struct b43_wldev *dev, struct sk_buff *skb);
void b43_pio_handle_txstatus(struct b43_wldev *dev,
const struct b43_txstatus *status);
-void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
void b43_pio_rx(struct b43_pio_rxqueue *q);
void b43_pio_tx_suspend(struct b43_wldev *dev);
void b43_pio_tx_resume(struct b43_wldev *dev);
-
-#else /* CONFIG_B43_PIO */
-
-
-static inline int b43_pio_init(struct b43_wldev *dev)
-{
- return 0;
-}
-static inline void b43_pio_free(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_stop(struct b43_wldev *dev)
-{
-}
-static inline int b43_pio_tx(struct b43_wldev *dev,
- struct sk_buff *skb)
-{
- return 0;
-}
-static inline void b43_pio_handle_txstatus(struct b43_wldev *dev,
- const struct b43_txstatus *status)
-{
-}
-static inline void b43_pio_get_tx_stats(struct b43_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline void b43_pio_rx(struct b43_pio_rxqueue *q)
-{
-}
-static inline void b43_pio_tx_suspend(struct b43_wldev *dev)
-{
-}
-static inline void b43_pio_tx_resume(struct b43_wldev *dev)
-{
-}
-
-#endif /* CONFIG_B43_PIO */
#endif /* B43_PIO_H_ */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 4e233631554..a00d509150f 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -1336,7 +1336,7 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel)
}
-const u8 b43_ntab_adjustpower0[] = {
+static const u8 b43_ntab_adjustpower0[] = {
0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1355,7 +1355,7 @@ const u8 b43_ntab_adjustpower0[] = {
0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
};
-const u8 b43_ntab_adjustpower1[] = {
+static const u8 b43_ntab_adjustpower1[] = {
0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01,
0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03,
0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05,
@@ -1374,11 +1374,11 @@ const u8 b43_ntab_adjustpower1[] = {
0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F,
};
-const u16 b43_ntab_bdi[] = {
+static const u16 b43_ntab_bdi[] = {
0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2,
};
-const u32 b43_ntab_channelest[] = {
+static const u32 b43_ntab_channelest[] = {
0x44444444, 0x44444444, 0x44444444, 0x44444444,
0x44444444, 0x44444444, 0x44444444, 0x44444444,
0x10101010, 0x10101010, 0x10101010, 0x10101010,
@@ -1405,7 +1405,7 @@ const u32 b43_ntab_channelest[] = {
0x10101010, 0x10101010, 0x10101010, 0x10101010,
};
-const u8 b43_ntab_estimatepowerlt0[] = {
+static const u8 b43_ntab_estimatepowerlt0[] = {
0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1416,7 +1416,7 @@ const u8 b43_ntab_estimatepowerlt0[] = {
0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
};
-const u8 b43_ntab_estimatepowerlt1[] = {
+static const u8 b43_ntab_estimatepowerlt1[] = {
0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49,
0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41,
0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39,
@@ -1427,14 +1427,14 @@ const u8 b43_ntab_estimatepowerlt1[] = {
0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11,
};
-const u8 b43_ntab_framelookup[] = {
+static const u8 b43_ntab_framelookup[] = {
0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16,
0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E,
0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A,
0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A,
};
-const u32 b43_ntab_framestruct[] = {
+static const u32 b43_ntab_framestruct[] = {
0x08004A04, 0x00100000, 0x01000A05, 0x00100020,
0x09804506, 0x00100030, 0x09804507, 0x00100030,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -1645,7 +1645,7 @@ const u32 b43_ntab_framestruct[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_gainctl0[] = {
+static const u32 b43_ntab_gainctl0[] = {
0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1680,7 +1680,7 @@ const u32 b43_ntab_gainctl0[] = {
0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
};
-const u32 b43_ntab_gainctl1[] = {
+static const u32 b43_ntab_gainctl1[] = {
0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E,
0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C,
0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A,
@@ -1715,12 +1715,12 @@ const u32 b43_ntab_gainctl1[] = {
0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00,
};
-const u32 b43_ntab_intlevel[] = {
+static const u32 b43_ntab_intlevel[] = {
0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46,
0x00C1188D, 0x080024D2, 0x00000070,
};
-const u32 b43_ntab_iqlt0[] = {
+static const u32 b43_ntab_iqlt0[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1755,7 +1755,7 @@ const u32 b43_ntab_iqlt0[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
};
-const u32 b43_ntab_iqlt1[] = {
+static const u32 b43_ntab_iqlt1[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
@@ -1790,7 +1790,7 @@ const u32 b43_ntab_iqlt1[] = {
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F,
};
-const u16 b43_ntab_loftlt0[] = {
+static const u16 b43_ntab_loftlt0[] = {
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1815,7 +1815,7 @@ const u16 b43_ntab_loftlt0[] = {
0x0002, 0x0103,
};
-const u16 b43_ntab_loftlt1[] = {
+static const u16 b43_ntab_loftlt1[] = {
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103,
0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101,
@@ -1840,7 +1840,7 @@ const u16 b43_ntab_loftlt1[] = {
0x0002, 0x0103,
};
-const u8 b43_ntab_mcs[] = {
+static const u8 b43_ntab_mcs[] = {
0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C,
0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C,
0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C,
@@ -1859,7 +1859,7 @@ const u8 b43_ntab_mcs[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
-const u32 b43_ntab_noisevar10[] = {
+static const u32 b43_ntab_noisevar10[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1926,7 +1926,7 @@ const u32 b43_ntab_noisevar10[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
};
-const u32 b43_ntab_noisevar11[] = {
+static const u32 b43_ntab_noisevar11[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
@@ -1993,7 +1993,7 @@ const u32 b43_ntab_noisevar11[] = {
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D,
};
-const u16 b43_ntab_pilot[] = {
+static const u16 b43_ntab_pilot[] = {
0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08,
0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5,
0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82,
@@ -2011,12 +2011,12 @@ const u16 b43_ntab_pilot[] = {
0xF0A0, 0xF028, 0xFFFF, 0xFFFF,
};
-const u32 b43_ntab_pilotlt[] = {
+static const u32 b43_ntab_pilotlt[] = {
0x76540123, 0x62407351, 0x76543201, 0x76540213,
0x76540123, 0x76430521,
};
-const u32 b43_ntab_tdi20a0[] = {
+static const u32 b43_ntab_tdi20a0[] = {
0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0,
0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D,
0x00020301, 0x00030504, 0x00040708, 0x0005090B,
@@ -2033,7 +2033,7 @@ const u32 b43_ntab_tdi20a0[] = {
0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi20a1[] = {
+static const u32 b43_ntab_tdi20a1[] = {
0x00014B26, 0x00028D29, 0x000393AD, 0x00049630,
0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D,
0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B,
@@ -2050,7 +2050,7 @@ const u32 b43_ntab_tdi20a1[] = {
0x00000000, 0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi40a0[] = {
+static const u32 b43_ntab_tdi40a0[] = {
0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2,
0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C,
0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2,
@@ -2081,7 +2081,7 @@ const u32 b43_ntab_tdi40a0[] = {
0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdi40a1[] = {
+static const u32 b43_ntab_tdi40a1[] = {
0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD,
0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07,
0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D,
@@ -2112,7 +2112,7 @@ const u32 b43_ntab_tdi40a1[] = {
0x00000000, 0x00000000,
};
-const u32 b43_ntab_tdtrn[] = {
+static const u32 b43_ntab_tdtrn[] = {
0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6,
0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68,
0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52,
@@ -2291,7 +2291,7 @@ const u32 b43_ntab_tdtrn[] = {
0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE,
};
-const u32 b43_ntab_tmap[] = {
+static const u32 b43_ntab_tmap[] = {
0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888,
0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8,
0xF1111110, 0x11111111, 0x11F11111, 0x00000111,
@@ -2406,6 +2406,544 @@ const u32 b43_ntab_tmap[] = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
};
+const u32 b43_ntab_tx_gain_rev0_1_2[] = {
+ 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42,
+ 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44,
+ 0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844,
+ 0x03c82842, 0x03c42b44, 0x03c42b42, 0x03c42a44,
+ 0x03c42a42, 0x03c42944, 0x03c42942, 0x03c42844,
+ 0x03c42842, 0x03c42744, 0x03c42742, 0x03c42644,
+ 0x03c42642, 0x03c42544, 0x03c42542, 0x03c42444,
+ 0x03c42442, 0x03c02b44, 0x03c02b42, 0x03c02a44,
+ 0x03c02a42, 0x03c02944, 0x03c02942, 0x03c02844,
+ 0x03c02842, 0x03c02744, 0x03c02742, 0x03b02b44,
+ 0x03b02b42, 0x03b02a44, 0x03b02a42, 0x03b02944,
+ 0x03b02942, 0x03b02844, 0x03b02842, 0x03b02744,
+ 0x03b02742, 0x03b02644, 0x03b02642, 0x03b02544,
+ 0x03b02542, 0x03a02b44, 0x03a02b42, 0x03a02a44,
+ 0x03a02a42, 0x03a02944, 0x03a02942, 0x03a02844,
+ 0x03a02842, 0x03a02744, 0x03a02742, 0x03902b44,
+ 0x03902b42, 0x03902a44, 0x03902a42, 0x03902944,
+ 0x03902942, 0x03902844, 0x03902842, 0x03902744,
+ 0x03902742, 0x03902644, 0x03902642, 0x03902544,
+ 0x03902542, 0x03802b44, 0x03802b42, 0x03802a44,
+ 0x03802a42, 0x03802944, 0x03802942, 0x03802844,
+ 0x03802842, 0x03802744, 0x03802742, 0x03802644,
+ 0x03802642, 0x03802544, 0x03802542, 0x03802444,
+ 0x03802442, 0x03802344, 0x03802342, 0x03802244,
+ 0x03802242, 0x03802144, 0x03802142, 0x03802044,
+ 0x03802042, 0x03801f44, 0x03801f42, 0x03801e44,
+ 0x03801e42, 0x03801d44, 0x03801d42, 0x03801c44,
+ 0x03801c42, 0x03801b44, 0x03801b42, 0x03801a44,
+ 0x03801a42, 0x03801944, 0x03801942, 0x03801844,
+ 0x03801842, 0x03801744, 0x03801742, 0x03801644,
+ 0x03801642, 0x03801544, 0x03801542, 0x03801444,
+ 0x03801442, 0x03801344, 0x03801342, 0x00002b00,
+};
+
+const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = {
+ 0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e,
+ 0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037,
+ 0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e,
+ 0x1e41003c, 0x1e41003b, 0x1e410039, 0x1e410037,
+ 0x1d410044, 0x1d410042, 0x1d410040, 0x1d41003e,
+ 0x1d41003c, 0x1d41003b, 0x1d410039, 0x1d410037,
+ 0x1c410044, 0x1c410042, 0x1c410040, 0x1c41003e,
+ 0x1c41003c, 0x1c41003b, 0x1c410039, 0x1c410037,
+ 0x1b410044, 0x1b410042, 0x1b410040, 0x1b41003e,
+ 0x1b41003c, 0x1b41003b, 0x1b410039, 0x1b410037,
+ 0x1a410044, 0x1a410042, 0x1a410040, 0x1a41003e,
+ 0x1a41003c, 0x1a41003b, 0x1a410039, 0x1a410037,
+ 0x19410044, 0x19410042, 0x19410040, 0x1941003e,
+ 0x1941003c, 0x1941003b, 0x19410039, 0x19410037,
+ 0x18410044, 0x18410042, 0x18410040, 0x1841003e,
+ 0x1841003c, 0x1841003b, 0x18410039, 0x18410037,
+ 0x17410044, 0x17410042, 0x17410040, 0x1741003e,
+ 0x1741003c, 0x1741003b, 0x17410039, 0x17410037,
+ 0x16410044, 0x16410042, 0x16410040, 0x1641003e,
+ 0x1641003c, 0x1641003b, 0x16410039, 0x16410037,
+ 0x15410044, 0x15410042, 0x15410040, 0x1541003e,
+ 0x1541003c, 0x1541003b, 0x15410039, 0x15410037,
+ 0x14410044, 0x14410042, 0x14410040, 0x1441003e,
+ 0x1441003c, 0x1441003b, 0x14410039, 0x14410037,
+ 0x13410044, 0x13410042, 0x13410040, 0x1341003e,
+ 0x1341003c, 0x1341003b, 0x13410039, 0x13410037,
+ 0x12410044, 0x12410042, 0x12410040, 0x1241003e,
+ 0x1241003c, 0x1241003b, 0x12410039, 0x12410037,
+ 0x11410044, 0x11410042, 0x11410040, 0x1141003e,
+ 0x1141003c, 0x1141003b, 0x11410039, 0x11410037,
+ 0x10410044, 0x10410042, 0x10410040, 0x1041003e,
+ 0x1041003c, 0x1041003b, 0x10410039, 0x10410037,
+};
+
+const u32 b43_ntab_tx_gain_rev3_5ghz[] = {
+ 0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e,
+ 0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037,
+ 0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e,
+ 0xcef7003c, 0xcef7003b, 0xcef70039, 0xcef70037,
+ 0xcdf70044, 0xcdf70042, 0xcdf70040, 0xcdf7003e,
+ 0xcdf7003c, 0xcdf7003b, 0xcdf70039, 0xcdf70037,
+ 0xccf70044, 0xccf70042, 0xccf70040, 0xccf7003e,
+ 0xccf7003c, 0xccf7003b, 0xccf70039, 0xccf70037,
+ 0xcbf70044, 0xcbf70042, 0xcbf70040, 0xcbf7003e,
+ 0xcbf7003c, 0xcbf7003b, 0xcbf70039, 0xcbf70037,
+ 0xcaf70044, 0xcaf70042, 0xcaf70040, 0xcaf7003e,
+ 0xcaf7003c, 0xcaf7003b, 0xcaf70039, 0xcaf70037,
+ 0xc9f70044, 0xc9f70042, 0xc9f70040, 0xc9f7003e,
+ 0xc9f7003c, 0xc9f7003b, 0xc9f70039, 0xc9f70037,
+ 0xc8f70044, 0xc8f70042, 0xc8f70040, 0xc8f7003e,
+ 0xc8f7003c, 0xc8f7003b, 0xc8f70039, 0xc8f70037,
+ 0xc7f70044, 0xc7f70042, 0xc7f70040, 0xc7f7003e,
+ 0xc7f7003c, 0xc7f7003b, 0xc7f70039, 0xc7f70037,
+ 0xc6f70044, 0xc6f70042, 0xc6f70040, 0xc6f7003e,
+ 0xc6f7003c, 0xc6f7003b, 0xc6f70039, 0xc6f70037,
+ 0xc5f70044, 0xc5f70042, 0xc5f70040, 0xc5f7003e,
+ 0xc5f7003c, 0xc5f7003b, 0xc5f70039, 0xc5f70037,
+ 0xc4f70044, 0xc4f70042, 0xc4f70040, 0xc4f7003e,
+ 0xc4f7003c, 0xc4f7003b, 0xc4f70039, 0xc4f70037,
+ 0xc3f70044, 0xc3f70042, 0xc3f70040, 0xc3f7003e,
+ 0xc3f7003c, 0xc3f7003b, 0xc3f70039, 0xc3f70037,
+ 0xc2f70044, 0xc2f70042, 0xc2f70040, 0xc2f7003e,
+ 0xc2f7003c, 0xc2f7003b, 0xc2f70039, 0xc2f70037,
+ 0xc1f70044, 0xc1f70042, 0xc1f70040, 0xc1f7003e,
+ 0xc1f7003c, 0xc1f7003b, 0xc1f70039, 0xc1f70037,
+ 0xc0f70044, 0xc0f70042, 0xc0f70040, 0xc0f7003e,
+ 0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037,
+};
+
+const u32 b43_ntab_tx_gain_rev4_5ghz[] = {
+ 0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e,
+ 0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037,
+ 0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e,
+ 0x2ef2003c, 0x2ef2003b, 0x2ef20039, 0x2ef20037,
+ 0x2df20044, 0x2df20042, 0x2df20040, 0x2df2003e,
+ 0x2df2003c, 0x2df2003b, 0x2df20039, 0x2df20037,
+ 0x2cf20044, 0x2cf20042, 0x2cf20040, 0x2cf2003e,
+ 0x2cf2003c, 0x2cf2003b, 0x2cf20039, 0x2cf20037,
+ 0x2bf20044, 0x2bf20042, 0x2bf20040, 0x2bf2003e,
+ 0x2bf2003c, 0x2bf2003b, 0x2bf20039, 0x2bf20037,
+ 0x2af20044, 0x2af20042, 0x2af20040, 0x2af2003e,
+ 0x2af2003c, 0x2af2003b, 0x2af20039, 0x2af20037,
+ 0x29f20044, 0x29f20042, 0x29f20040, 0x29f2003e,
+ 0x29f2003c, 0x29f2003b, 0x29f20039, 0x29f20037,
+ 0x28f20044, 0x28f20042, 0x28f20040, 0x28f2003e,
+ 0x28f2003c, 0x28f2003b, 0x28f20039, 0x28f20037,
+ 0x27f20044, 0x27f20042, 0x27f20040, 0x27f2003e,
+ 0x27f2003c, 0x27f2003b, 0x27f20039, 0x27f20037,
+ 0x26f20044, 0x26f20042, 0x26f20040, 0x26f2003e,
+ 0x26f2003c, 0x26f2003b, 0x26f20039, 0x26f20037,
+ 0x25f20044, 0x25f20042, 0x25f20040, 0x25f2003e,
+ 0x25f2003c, 0x25f2003b, 0x25f20039, 0x25f20037,
+ 0x24f20044, 0x24f20042, 0x24f20040, 0x24f2003e,
+ 0x24f2003c, 0x24f2003b, 0x24f20039, 0x24f20038,
+ 0x23f20041, 0x23f20040, 0x23f2003f, 0x23f2003e,
+ 0x23f2003c, 0x23f2003b, 0x23f20039, 0x23f20037,
+ 0x22f20044, 0x22f20042, 0x22f20040, 0x22f2003e,
+ 0x22f2003c, 0x22f2003b, 0x22f20039, 0x22f20037,
+ 0x21f20044, 0x21f20042, 0x21f20040, 0x21f2003e,
+ 0x21f2003c, 0x21f2003b, 0x21f20039, 0x21f20037,
+ 0x20d20043, 0x20d20041, 0x20d2003e, 0x20d2003c,
+ 0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034,
+};
+
+const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = {
+ 0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044,
+ 0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c,
+ 0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e,
+ 0x0e62003c, 0x0e62003d, 0x0e62003b, 0x0e62003a,
+ 0x0d620043, 0x0d620041, 0x0d620040, 0x0d62003e,
+ 0x0d62003d, 0x0d62003c, 0x0d62003b, 0x0d62003a,
+ 0x0c620041, 0x0c620040, 0x0c62003f, 0x0c62003e,
+ 0x0c62003c, 0x0c62003b, 0x0c620039, 0x0c620037,
+ 0x0b620046, 0x0b620044, 0x0b620042, 0x0b620040,
+ 0x0b62003e, 0x0b62003c, 0x0b62003b, 0x0b62003a,
+ 0x0a620041, 0x0a620040, 0x0a62003e, 0x0a62003c,
+ 0x0a62003b, 0x0a62003a, 0x0a620039, 0x0a620038,
+ 0x0962003e, 0x0962003d, 0x0962003c, 0x0962003b,
+ 0x09620039, 0x09620037, 0x09620035, 0x09620033,
+ 0x08620044, 0x08620042, 0x08620040, 0x0862003e,
+ 0x0862003c, 0x0862003b, 0x0862003a, 0x08620039,
+ 0x07620043, 0x07620042, 0x07620040, 0x0762003f,
+ 0x0762003d, 0x0762003b, 0x0762003a, 0x07620039,
+ 0x0662003e, 0x0662003d, 0x0662003c, 0x0662003b,
+ 0x06620039, 0x06620037, 0x06620035, 0x06620033,
+ 0x05620046, 0x05620044, 0x05620042, 0x05620040,
+ 0x0562003e, 0x0562003c, 0x0562003b, 0x05620039,
+ 0x04620044, 0x04620042, 0x04620040, 0x0462003e,
+ 0x0462003c, 0x0462003b, 0x04620039, 0x04620038,
+ 0x0362003c, 0x0362003b, 0x0362003a, 0x03620039,
+ 0x03620038, 0x03620037, 0x03620035, 0x03620033,
+ 0x0262004c, 0x0262004a, 0x02620048, 0x02620047,
+ 0x02620046, 0x02620044, 0x02620043, 0x02620042,
+ 0x0162004a, 0x01620048, 0x01620046, 0x01620044,
+ 0x01620043, 0x01620042, 0x01620041, 0x01620040,
+ 0x00620042, 0x00620040, 0x0062003e, 0x0062003c,
+ 0x0062003b, 0x00620039, 0x00620037, 0x00620035,
+};
+
+const u32 txpwrctrl_tx_gain_ipa[] = {
+ 0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029,
+ 0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025,
+ 0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029,
+ 0x5ef70028, 0x5ef70027, 0x5ef70026, 0x5ef70025,
+ 0x5df7002d, 0x5df7002b, 0x5df7002a, 0x5df70029,
+ 0x5df70028, 0x5df70027, 0x5df70026, 0x5df70025,
+ 0x5cf7002d, 0x5cf7002b, 0x5cf7002a, 0x5cf70029,
+ 0x5cf70028, 0x5cf70027, 0x5cf70026, 0x5cf70025,
+ 0x5bf7002d, 0x5bf7002b, 0x5bf7002a, 0x5bf70029,
+ 0x5bf70028, 0x5bf70027, 0x5bf70026, 0x5bf70025,
+ 0x5af7002d, 0x5af7002b, 0x5af7002a, 0x5af70029,
+ 0x5af70028, 0x5af70027, 0x5af70026, 0x5af70025,
+ 0x59f7002d, 0x59f7002b, 0x59f7002a, 0x59f70029,
+ 0x59f70028, 0x59f70027, 0x59f70026, 0x59f70025,
+ 0x58f7002d, 0x58f7002b, 0x58f7002a, 0x58f70029,
+ 0x58f70028, 0x58f70027, 0x58f70026, 0x58f70025,
+ 0x57f7002d, 0x57f7002b, 0x57f7002a, 0x57f70029,
+ 0x57f70028, 0x57f70027, 0x57f70026, 0x57f70025,
+ 0x56f7002d, 0x56f7002b, 0x56f7002a, 0x56f70029,
+ 0x56f70028, 0x56f70027, 0x56f70026, 0x56f70025,
+ 0x55f7002d, 0x55f7002b, 0x55f7002a, 0x55f70029,
+ 0x55f70028, 0x55f70027, 0x55f70026, 0x55f70025,
+ 0x54f7002d, 0x54f7002b, 0x54f7002a, 0x54f70029,
+ 0x54f70028, 0x54f70027, 0x54f70026, 0x54f70025,
+ 0x53f7002d, 0x53f7002b, 0x53f7002a, 0x53f70029,
+ 0x53f70028, 0x53f70027, 0x53f70026, 0x53f70025,
+ 0x52f7002d, 0x52f7002b, 0x52f7002a, 0x52f70029,
+ 0x52f70028, 0x52f70027, 0x52f70026, 0x52f70025,
+ 0x51f7002d, 0x51f7002b, 0x51f7002a, 0x51f70029,
+ 0x51f70028, 0x51f70027, 0x51f70026, 0x51f70025,
+ 0x50f7002d, 0x50f7002b, 0x50f7002a, 0x50f70029,
+ 0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_rev5[] = {
+ 0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029,
+ 0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025,
+ 0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029,
+ 0x1ef70028, 0x1ef70027, 0x1ef70026, 0x1ef70025,
+ 0x1df7002d, 0x1df7002b, 0x1df7002a, 0x1df70029,
+ 0x1df70028, 0x1df70027, 0x1df70026, 0x1df70025,
+ 0x1cf7002d, 0x1cf7002b, 0x1cf7002a, 0x1cf70029,
+ 0x1cf70028, 0x1cf70027, 0x1cf70026, 0x1cf70025,
+ 0x1bf7002d, 0x1bf7002b, 0x1bf7002a, 0x1bf70029,
+ 0x1bf70028, 0x1bf70027, 0x1bf70026, 0x1bf70025,
+ 0x1af7002d, 0x1af7002b, 0x1af7002a, 0x1af70029,
+ 0x1af70028, 0x1af70027, 0x1af70026, 0x1af70025,
+ 0x19f7002d, 0x19f7002b, 0x19f7002a, 0x19f70029,
+ 0x19f70028, 0x19f70027, 0x19f70026, 0x19f70025,
+ 0x18f7002d, 0x18f7002b, 0x18f7002a, 0x18f70029,
+ 0x18f70028, 0x18f70027, 0x18f70026, 0x18f70025,
+ 0x17f7002d, 0x17f7002b, 0x17f7002a, 0x17f70029,
+ 0x17f70028, 0x17f70027, 0x17f70026, 0x17f70025,
+ 0x16f7002d, 0x16f7002b, 0x16f7002a, 0x16f70029,
+ 0x16f70028, 0x16f70027, 0x16f70026, 0x16f70025,
+ 0x15f7002d, 0x15f7002b, 0x15f7002a, 0x15f70029,
+ 0x15f70028, 0x15f70027, 0x15f70026, 0x15f70025,
+ 0x14f7002d, 0x14f7002b, 0x14f7002a, 0x14f70029,
+ 0x14f70028, 0x14f70027, 0x14f70026, 0x14f70025,
+ 0x13f7002d, 0x13f7002b, 0x13f7002a, 0x13f70029,
+ 0x13f70028, 0x13f70027, 0x13f70026, 0x13f70025,
+ 0x12f7002d, 0x12f7002b, 0x12f7002a, 0x12f70029,
+ 0x12f70028, 0x12f70027, 0x12f70026, 0x12f70025,
+ 0x11f7002d, 0x11f7002b, 0x11f7002a, 0x11f70029,
+ 0x11f70028, 0x11f70027, 0x11f70026, 0x11f70025,
+ 0x10f7002d, 0x10f7002b, 0x10f7002a, 0x10f70029,
+ 0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_rev6[] = {
+ 0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029,
+ 0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025,
+ 0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029,
+ 0x0ef70028, 0x0ef70027, 0x0ef70026, 0x0ef70025,
+ 0x0df7002d, 0x0df7002b, 0x0df7002a, 0x0df70029,
+ 0x0df70028, 0x0df70027, 0x0df70026, 0x0df70025,
+ 0x0cf7002d, 0x0cf7002b, 0x0cf7002a, 0x0cf70029,
+ 0x0cf70028, 0x0cf70027, 0x0cf70026, 0x0cf70025,
+ 0x0bf7002d, 0x0bf7002b, 0x0bf7002a, 0x0bf70029,
+ 0x0bf70028, 0x0bf70027, 0x0bf70026, 0x0bf70025,
+ 0x0af7002d, 0x0af7002b, 0x0af7002a, 0x0af70029,
+ 0x0af70028, 0x0af70027, 0x0af70026, 0x0af70025,
+ 0x09f7002d, 0x09f7002b, 0x09f7002a, 0x09f70029,
+ 0x09f70028, 0x09f70027, 0x09f70026, 0x09f70025,
+ 0x08f7002d, 0x08f7002b, 0x08f7002a, 0x08f70029,
+ 0x08f70028, 0x08f70027, 0x08f70026, 0x08f70025,
+ 0x07f7002d, 0x07f7002b, 0x07f7002a, 0x07f70029,
+ 0x07f70028, 0x07f70027, 0x07f70026, 0x07f70025,
+ 0x06f7002d, 0x06f7002b, 0x06f7002a, 0x06f70029,
+ 0x06f70028, 0x06f70027, 0x06f70026, 0x06f70025,
+ 0x05f7002d, 0x05f7002b, 0x05f7002a, 0x05f70029,
+ 0x05f70028, 0x05f70027, 0x05f70026, 0x05f70025,
+ 0x04f7002d, 0x04f7002b, 0x04f7002a, 0x04f70029,
+ 0x04f70028, 0x04f70027, 0x04f70026, 0x04f70025,
+ 0x03f7002d, 0x03f7002b, 0x03f7002a, 0x03f70029,
+ 0x03f70028, 0x03f70027, 0x03f70026, 0x03f70025,
+ 0x02f7002d, 0x02f7002b, 0x02f7002a, 0x02f70029,
+ 0x02f70028, 0x02f70027, 0x02f70026, 0x02f70025,
+ 0x01f7002d, 0x01f7002b, 0x01f7002a, 0x01f70029,
+ 0x01f70028, 0x01f70027, 0x01f70026, 0x01f70025,
+ 0x00f7002d, 0x00f7002b, 0x00f7002a, 0x00f70029,
+ 0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025,
+};
+
+const u32 txpwrctrl_tx_gain_ipa_5g[] = {
+ 0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031,
+ 0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b,
+ 0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027,
+ 0x7ff70026, 0x7ff70024, 0x7ff70023, 0x7ff70022,
+ 0x7ef70028, 0x7ef70027, 0x7ef70026, 0x7ef70025,
+ 0x7ef70024, 0x7ef70023, 0x7df70028, 0x7df70027,
+ 0x7df70026, 0x7df70025, 0x7df70024, 0x7df70023,
+ 0x7df70022, 0x7cf70029, 0x7cf70028, 0x7cf70027,
+ 0x7cf70026, 0x7cf70025, 0x7cf70023, 0x7cf70022,
+ 0x7bf70029, 0x7bf70028, 0x7bf70026, 0x7bf70025,
+ 0x7bf70024, 0x7bf70023, 0x7bf70022, 0x7bf70021,
+ 0x7af70029, 0x7af70028, 0x7af70027, 0x7af70026,
+ 0x7af70025, 0x7af70024, 0x7af70023, 0x7af70022,
+ 0x79f70029, 0x79f70028, 0x79f70027, 0x79f70026,
+ 0x79f70025, 0x79f70024, 0x79f70023, 0x79f70022,
+ 0x78f70029, 0x78f70028, 0x78f70027, 0x78f70026,
+ 0x78f70025, 0x78f70024, 0x78f70023, 0x78f70022,
+ 0x77f70029, 0x77f70028, 0x77f70027, 0x77f70026,
+ 0x77f70025, 0x77f70024, 0x77f70023, 0x77f70022,
+ 0x76f70029, 0x76f70028, 0x76f70027, 0x76f70026,
+ 0x76f70024, 0x76f70023, 0x76f70022, 0x76f70021,
+ 0x75f70029, 0x75f70028, 0x75f70027, 0x75f70026,
+ 0x75f70025, 0x75f70024, 0x75f70023, 0x74f70029,
+ 0x74f70028, 0x74f70026, 0x74f70025, 0x74f70024,
+ 0x74f70023, 0x74f70022, 0x73f70029, 0x73f70027,
+ 0x73f70026, 0x73f70025, 0x73f70024, 0x73f70023,
+ 0x73f70022, 0x72f70028, 0x72f70027, 0x72f70026,
+ 0x72f70025, 0x72f70024, 0x72f70023, 0x72f70022,
+ 0x71f70028, 0x71f70027, 0x71f70026, 0x71f70025,
+ 0x71f70024, 0x71f70023, 0x70f70028, 0x70f70027,
+ 0x70f70026, 0x70f70024, 0x70f70023, 0x70f70022,
+ 0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f,
+};
+
+const u16 tbl_iqcal_gainparams[2][9][8] = {
+ {
+ { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 },
+ { 0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69 },
+ { 0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68 },
+ { 0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67 },
+ { 0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66 },
+ { 0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65 },
+ { 0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65 },
+ { 0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65 },
+ { 0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65 }
+ },
+ {
+ { 0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79 },
+ { 0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78 },
+ { 0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78 },
+ { 0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78 },
+ { 0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78 }
+ }
+};
+
+const struct nphy_txiqcal_ladder ladder_lo[] = {
+ { 3, 0 },
+ { 4, 0 },
+ { 6, 0 },
+ { 9, 0 },
+ { 13, 0 },
+ { 18, 0 },
+ { 25, 0 },
+ { 25, 1 },
+ { 25, 2 },
+ { 25, 3 },
+ { 25, 4 },
+ { 25, 5 },
+ { 25, 6 },
+ { 25, 7 },
+ { 35, 7 },
+ { 50, 7 },
+ { 71, 7 },
+ { 100, 7 }
+};
+
+const struct nphy_txiqcal_ladder ladder_iq[] = {
+ { 3, 0 },
+ { 4, 0 },
+ { 6, 0 },
+ { 9, 0 },
+ { 13, 0 },
+ { 18, 0 },
+ { 25, 0 },
+ { 35, 0 },
+ { 50, 0 },
+ { 71, 0 },
+ { 100, 0 },
+ { 100, 1 },
+ { 100, 2 },
+ { 100, 3 },
+ { 100, 4 },
+ { 100, 5 },
+ { 100, 6 },
+ { 100, 7 }
+};
+
+const u16 loscale[] = {
+ 256, 256, 271, 271,
+ 287, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 341,
+ 341, 362, 362, 383,
+ 383, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 256,
+ 256, 271, 271, 287,
+ 287, 304, 304, 322,
+ 322, 341, 341, 362,
+ 362, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 256,
+ 256, 271, 271, 287,
+ 287, 304, 304, 322,
+ 322, 341, 341, 362,
+ 362, 256, 256, 271,
+ 271, 287, 287, 304,
+ 304, 322, 322, 341,
+ 341, 362, 362, 383,
+ 383, 406, 406, 430,
+ 430, 455, 455, 482,
+ 482, 511, 511, 541,
+ 541, 573, 573, 607,
+ 607, 643, 643, 681,
+ 681, 722, 722, 764,
+ 764, 810, 810, 858,
+ 858, 908, 908, 962,
+ 962, 1019, 1019, 256
+};
+
+const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = {
+ 0x0200, 0x0300, 0x0400, 0x0700,
+ 0x0900, 0x0c00, 0x1200, 0x1201,
+ 0x1202, 0x1203, 0x1204, 0x1205,
+ 0x1206, 0x1207, 0x1907, 0x2307,
+ 0x3207, 0x4707
+};
+
+const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = {
+ 0x0300, 0x0500, 0x0700, 0x0900,
+ 0x0d00, 0x1100, 0x1900, 0x1901,
+ 0x1902, 0x1903, 0x1904, 0x1905,
+ 0x1906, 0x1907, 0x2407, 0x3207,
+ 0x4607, 0x6407
+};
+
+const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = {
+ 0x0100, 0x0200, 0x0400, 0x0700,
+ 0x0900, 0x0c00, 0x1200, 0x1900,
+ 0x2300, 0x3200, 0x4700, 0x4701,
+ 0x4702, 0x4703, 0x4704, 0x4705,
+ 0x4706, 0x4707
+};
+
+const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = {
+ 0x0200, 0x0300, 0x0600, 0x0900,
+ 0x0d00, 0x1100, 0x1900, 0x2400,
+ 0x3200, 0x4600, 0x6400, 0x6401,
+ 0x6402, 0x6403, 0x6404, 0x6405,
+ 0x6406, 0x6407
+};
+
+const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3] = { };
+
+const u16 tbl_tx_iqlo_cal_startcoefs[B43_NTAB_TX_IQLO_CAL_STARTCOEFS] = { };
+
+const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = {
+ 0x8423, 0x8323, 0x8073, 0x8256,
+ 0x8045, 0x8223, 0x9423, 0x9323,
+ 0x9073, 0x9256, 0x9045, 0x9223
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_recal[] = {
+ 0x8101, 0x8253, 0x8053, 0x8234,
+ 0x8034, 0x9101, 0x9253, 0x9053,
+ 0x9234, 0x9034
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = {
+ 0x8123, 0x8264, 0x8086, 0x8245,
+ 0x8056, 0x9123, 0x9264, 0x9086,
+ 0x9245, 0x9056
+};
+
+const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = {
+ 0x8434, 0x8334, 0x8084, 0x8267,
+ 0x8056, 0x8234, 0x9434, 0x9334,
+ 0x9084, 0x9267, 0x9056, 0x9234
+};
+
+const s16 tbl_tx_filter_coef_rev4[7][15] = {
+ { -377, 137, -407, 208, -1527,
+ 956, 93, 186, 93, 230,
+ -44, 230, 20, -191, 201 },
+ { -77, 20, -98, 49, -93,
+ 60, 56, 111, 56, 26,
+ -5, 26, 34, -32, 34 },
+ { -360, 164, -376, 164, -1533,
+ 576, 308, -314, 308, 121,
+ -73, 121, 91, 124, 91 },
+ { -295, 200, -363, 142, -1391,
+ 826, 151, 301, 151, 151,
+ 301, 151, 602, -752, 602 },
+ { -92, 58, -96, 49, -104,
+ 44, 17, 35, 17, 12,
+ 25, 12, 13, 27, 13 },
+ { -375, 136, -399, 209, -1479,
+ 949, 130, 260, 130, 230,
+ -44, 230, 201, -191, 201 },
+ { 0xed9, 0xc8, 0xe95, 0x8e, 0xa91,
+ 0x33a, 0x97, 0x12d, 0x97, 0x97,
+ 0x12d, 0x97, 0x25a, 0xd10, 0x25a }
+};
+
+/* addr0, addr1, bmask, shift */
+const struct nphy_rf_control_override_rev2 tbl_rf_control_override_rev2[] = {
+ { 0x78, 0x78, 0x0038, 3 }, /* for field == 0x0002 (fls == 2) */
+ { 0x7A, 0x7D, 0x0001, 0 }, /* for field == 0x0004 (fls == 3) */
+ { 0x7A, 0x7D, 0x0002, 1 }, /* for field == 0x0008 (fls == 4) */
+ { 0x7A, 0x7D, 0x0004, 2 }, /* for field == 0x0010 (fls == 5) */
+ { 0x7A, 0x7D, 0x0030, 4 }, /* for field == 0x0020 (fls == 6) */
+ { 0x7A, 0x7D, 0x00C0, 6 }, /* for field == 0x0040 (fls == 7) */
+ { 0x7A, 0x7D, 0x0100, 8 }, /* for field == 0x0080 (fls == 8) */
+ { 0x7A, 0x7D, 0x0200, 9 }, /* for field == 0x0100 (fls == 9) */
+ { 0x78, 0x78, 0x0004, 2 }, /* for field == 0x0200 (fls == 10) */
+ { 0x7B, 0x7E, 0x01FF, 0 }, /* for field == 0x0400 (fls == 11) */
+ { 0x7C, 0x7F, 0x01FF, 0 }, /* for field == 0x0800 (fls == 12) */
+ { 0x78, 0x78, 0x0100, 8 }, /* for field == 0x1000 (fls == 13) */
+ { 0x78, 0x78, 0x0200, 9 }, /* for field == 0x2000 (fls == 14) */
+ { 0x78, 0x78, 0xF000, 12 } /* for field == 0x4000 (fls == 15) */
+};
+
+/* val_mask, val_shift, en_addr0, val_addr0, en_addr1, val_addr1 */
+const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = {
+ { 0x8000, 15, 0xE5, 0xF9, 0xE6, 0xFB }, /* field == 0x0001 (fls 1) */
+ { 0x0001, 0, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0002 (fls 2) */
+ { 0x0002, 1, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0004 (fls 3) */
+ { 0x0004, 2, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0008 (fls 4) */
+ { 0x0016, 4, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0010 (fls 5) */
+ { 0x0020, 5, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0020 (fls 6) */
+ { 0x0040, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0040 (fls 7) */
+ { 0x0080, 6, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0080 (fls 8) */
+ { 0x0100, 7, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0100 (fls 9) */
+ { 0x0007, 0, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0200 (fls 10) */
+ { 0x0070, 4, 0xE7, 0xF8, 0xEC, 0xFA }, /* field == 0x0400 (fls 11) */
+ { 0xE000, 13, 0xE7, 0x7A, 0xEC, 0x7D }, /* field == 0x0800 (fls 12) */
+ { 0xFFFF, 0, 0xE7, 0x7B, 0xEC, 0x7E }, /* field == 0x1000 (fls 13) */
+ { 0xFFFF, 0, 0xE7, 0x7C, 0xEC, 0x7F }, /* field == 0x2000 (fls 14) */
+ { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } /* field == 0x4000 (fls 15) */
+};
+
static inline void assert_ntab_array_sizes(void)
{
#undef check
@@ -2442,6 +2980,72 @@ static inline void assert_ntab_array_sizes(void)
#undef check
}
+u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
+{
+ u32 type, value;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ switch (type) {
+ case B43_NTAB_8BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
+ break;
+ case B43_NTAB_16BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ break;
+ case B43_NTAB_32BIT:
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+ value = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
+ value <<= 16;
+ value |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ break;
+ default:
+ B43_WARN_ON(1);
+ value = 0;
+ }
+
+ return value;
+}
+
+void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data)
+{
+ u32 type;
+ u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_NTAB_8BIT:
+ *data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
+ data++;
+ break;
+ case B43_NTAB_16BIT:
+ *((u16 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ data += 2;
+ break;
+ case B43_NTAB_32BIT:
+ *((u32 *)data) = b43_phy_read(dev, B43_NPHY_TABLE_DATAHI);
+ *((u32 *)data) <<= 16;
+ *((u32 *)data) |= b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
+ data += 4;
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
{
u32 type;
@@ -2474,3 +3078,91 @@ void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
/* Some compiletime assertions... */
assert_ntab_array_sizes();
}
+
+void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data)
+{
+ u32 type, value;
+ const u8 *data = _data;
+ unsigned int i;
+
+ type = offset & B43_NTAB_TYPEMASK;
+ offset &= ~B43_NTAB_TYPEMASK;
+ B43_WARN_ON(offset > 0xFFFF);
+
+ b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
+
+ for (i = 0; i < nr_elements; i++) {
+ switch (type) {
+ case B43_NTAB_8BIT:
+ value = *data;
+ data++;
+ B43_WARN_ON(value & ~0xFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
+ break;
+ case B43_NTAB_16BIT:
+ value = *((u16 *)data);
+ data += 2;
+ B43_WARN_ON(value & ~0xFFFF);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
+ break;
+ case B43_NTAB_32BIT:
+ value = *((u32 *)data);
+ data += 4;
+ b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, value >> 16);
+ b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
+ value & 0xFFFF);
+ break;
+ default:
+ B43_WARN_ON(1);
+ }
+ }
+}
+
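+/* Note: ntab_upload() below relies on every B43_NTAB_<name> table-ID macro
+ * passed to it having a matching B43_NTAB_<name>_SIZE define in
+ * tables_nphy.h, which supplies the element count for the loop. */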
+#define ntab_upload(dev, offset, data) do { \
+ unsigned int i; \
+ for (i = 0; i < (offset##_SIZE); i++) \
+ b43_ntab_write(dev, (offset) + i, (data)[i]); \
+ } while (0)
+
+void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
+{
+ /* Static tables */
+ ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
+ ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
+ ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
+ ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
+ ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
+ ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
+ ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
+ ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
+ ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
+ ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
+ ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
+ ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
+ ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
+ ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
+
+ /* Volatile tables */
+ ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
+ ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
+ ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
+ ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
+ ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
+ ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
+ ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
+ ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
+ ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
+ ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
+ ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
+ ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
+}
+
+void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
+{
+ /* Static tables */
+ /* TODO */
+
+ /* Volatile tables */
+ /* TODO */
+}
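
As a quick orientation for the new table accessors added above, here is a minimal, hypothetical usage sketch; it is not part of the patch. It assumes the offset encoding declared in tables_nphy.h (table ID, element offset and element-width bits packed into the single offset argument, e.g. via B43_NTAB16()), and that the PHY auto-increments the table address between data accesses, which is what the bulk helpers rely on. The example_ function name is invented.

/* Hypothetical sketch, illustrative only: read, tweak and write back the
 * 128-entry 16-bit core-0 LO feed-through table via the new accessors. */
static void example_touch_lofeedth(struct b43_wldev *dev)
{
	u16 buf[B43_NTAB_C0_LOFEEDTH_SIZE];
	u32 first;
	unsigned int i;

	/* Single-element read; the element width is decoded from the
	 * type bits packed into B43_NTAB_C0_LOFEEDTH. */
	first = b43_ntab_read(dev, B43_NTAB_C0_LOFEEDTH);

	/* Bulk read: one TABLE_ADDR write, then back-to-back data reads. */
	b43_ntab_read_bulk(dev, B43_NTAB_C0_LOFEEDTH,
			   B43_NTAB_C0_LOFEEDTH_SIZE, buf);
	if (buf[0] != (u16)first)
		return;	/* readback mismatch, bail out of the example */

	for (i = 0; i < B43_NTAB_C0_LOFEEDTH_SIZE; i++)
		buf[i] &= 0x01FF;	/* arbitrary illustrative change */

	/* Bulk write mirrors the read path via TABLE_DATAHI/DATALO. */
	b43_ntab_write_bulk(dev, B43_NTAB_C0_LOFEEDTH,
			    B43_NTAB_C0_LOFEEDTH_SIZE, buf);
}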
diff --git a/drivers/net/wireless/b43/tables_nphy.h b/drivers/net/wireless/b43/tables_nphy.h
index 4d498b053ec..9c1c6ecd367 100644
--- a/drivers/net/wireless/b43/tables_nphy.h
+++ b/drivers/net/wireless/b43/tables_nphy.h
@@ -46,6 +46,27 @@ struct b43_nphy_channeltab_entry {
struct b43_wldev;
+struct nphy_txiqcal_ladder {
+ u8 percent;
+ u8 g_env;
+};
+
+struct nphy_rf_control_override_rev2 {
+ u8 addr0;
+ u8 addr1;
+ u16 bmask;
+ u8 shift;
+};
+
+struct nphy_rf_control_override_rev3 {
+ u16 val_mask;
+ u8 val_shift;
+ u8 en_addr0;
+ u8 val_addr0;
+ u8 en_addr1;
+ u8 val_addr1;
+};
+
/* Upload the default register value table.
* If "ghz5" is true, we upload the 5Ghz table. Otherwise the 2.4Ghz
* table is uploaded. If "ignore_uploadflag" is true, we upload any value
@@ -126,34 +147,57 @@ b43_nphy_get_chantabent(struct b43_wldev *dev, u8 channel);
#define B43_NTAB_C1_LOFEEDTH B43_NTAB16(0x1B, 0x1C0) /* Local Oscillator Feed Through Lookup Table Core 1 */
#define B43_NTAB_C1_LOFEEDTH_SIZE 128
+#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_40_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_LOFT_LADDER_20_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_40_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_IQIMB_LADDER_20_SIZE 18
+#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3 11
+#define B43_NTAB_TX_IQLO_CAL_STARTCOEFS 9
+#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL_REV3 12
+#define B43_NTAB_TX_IQLO_CAL_CMDS_RECAL 10
+#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL 10
+#define B43_NTAB_TX_IQLO_CAL_CMDS_FULLCAL_REV3 12
+
+u32 b43_ntab_read(struct b43_wldev *dev, u32 offset);
+void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, void *_data);
void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value);
-
-extern const u8 b43_ntab_adjustpower0[];
-extern const u8 b43_ntab_adjustpower1[];
-extern const u16 b43_ntab_bdi[];
-extern const u32 b43_ntab_channelest[];
-extern const u8 b43_ntab_estimatepowerlt0[];
-extern const u8 b43_ntab_estimatepowerlt1[];
-extern const u8 b43_ntab_framelookup[];
-extern const u32 b43_ntab_framestruct[];
-extern const u32 b43_ntab_gainctl0[];
-extern const u32 b43_ntab_gainctl1[];
-extern const u32 b43_ntab_intlevel[];
-extern const u32 b43_ntab_iqlt0[];
-extern const u32 b43_ntab_iqlt1[];
-extern const u16 b43_ntab_loftlt0[];
-extern const u16 b43_ntab_loftlt1[];
-extern const u8 b43_ntab_mcs[];
-extern const u32 b43_ntab_noisevar10[];
-extern const u32 b43_ntab_noisevar11[];
-extern const u16 b43_ntab_pilot[];
-extern const u32 b43_ntab_pilotlt[];
-extern const u32 b43_ntab_tdi20a0[];
-extern const u32 b43_ntab_tdi20a1[];
-extern const u32 b43_ntab_tdi40a0[];
-extern const u32 b43_ntab_tdi40a1[];
-extern const u32 b43_ntab_tdtrn[];
-extern const u32 b43_ntab_tmap[];
-
+void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
+ unsigned int nr_elements, const void *_data);
+
+void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev);
+void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev);
+
+extern const u32 b43_ntab_tx_gain_rev0_1_2[];
+extern const u32 b43_ntab_tx_gain_rev3plus_2ghz[];
+extern const u32 b43_ntab_tx_gain_rev3_5ghz[];
+extern const u32 b43_ntab_tx_gain_rev4_5ghz[];
+extern const u32 b43_ntab_tx_gain_rev5plus_5ghz[];
+
+extern const u32 txpwrctrl_tx_gain_ipa[];
+extern const u32 txpwrctrl_tx_gain_ipa_rev5[];
+extern const u32 txpwrctrl_tx_gain_ipa_rev6[];
+extern const u32 txpwrctrl_tx_gain_ipa_5g[];
+extern const u16 tbl_iqcal_gainparams[2][9][8];
+extern const struct nphy_txiqcal_ladder ladder_lo[];
+extern const struct nphy_txiqcal_ladder ladder_iq[];
+extern const u16 loscale[];
+
+extern const u16 tbl_tx_iqlo_cal_loft_ladder_40[];
+extern const u16 tbl_tx_iqlo_cal_loft_ladder_20[];
+extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[];
+extern const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[];
+extern const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[];
+extern const u16 tbl_tx_iqlo_cal_startcoefs[];
+extern const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[];
+extern const u16 tbl_tx_iqlo_cal_cmds_recal[];
+extern const u16 tbl_tx_iqlo_cal_cmds_fullcal[];
+extern const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[];
+extern const s16 tbl_tx_filter_coef_rev4[7][15];
+
+extern const struct nphy_rf_control_override_rev2
+ tbl_rf_control_override_rev2[];
+extern const struct nphy_rf_control_override_rev3
+ tbl_rf_control_override_rev3[];
#endif /* B43_TABLES_NPHY_H_ */
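
The two RF-control override tables exported above are indexed by the position of the override field bit; the per-row comments in tables_nphy.c ("for field == 0x0002 (fls == 2)" and so on) imply the lookup sketched below. This is a hypothetical caller sketch under that assumption, not code from the patch, and example_rf_override_entry() is an invented name.

#include <linux/bitops.h>	/* fls(), BIT() */

/* Hypothetical: map a single-bit RF-override field to its rev2 table row,
 * assuming the rows are ordered by fls(field) starting at fls == 2. */
static const struct nphy_rf_control_override_rev2 *
example_rf_override_entry(u16 field)
{
	int bit = fls(field);

	/* Only single-bit fields from 0x0002 .. 0x4000 have a rev2 row. */
	if (bit < 2 || bit > 15 || field != BIT(bit - 1))
		return NULL;
	return &tbl_rf_control_override_rev2[bit - 2];
}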
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 0a86bdf5315..8b9387c6ff3 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1411,7 +1411,6 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
b43legacyerr(dev->wl, "DMA tx mapping failure\n");
goto out_unlock;
}
- ring->nr_tx_packets++;
if ((free_slots(ring) < SLOTS_PER_PACKET) ||
should_inject_overflow(ring)) {
/* This TX ring is full. */
@@ -1527,25 +1526,6 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
spin_unlock(&ring->lock);
}
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- const int nr_queues = dev->wl->hw->queues;
- struct b43legacy_dmaring *ring;
- unsigned long flags;
- int i;
-
- for (i = 0; i < nr_queues; i++) {
- ring = priority_to_txring(dev, i);
-
- spin_lock_irqsave(&ring->lock, flags);
- stats[i].len = ring->used_slots / SLOTS_PER_PACKET;
- stats[i].limit = ring->nr_slots / SLOTS_PER_PACKET;
- stats[i].count = ring->nr_tx_packets;
- spin_unlock_irqrestore(&ring->lock, flags);
- }
-}
-
static void dma_rx(struct b43legacy_dmaring *ring,
int *slot)
{
diff --git a/drivers/net/wireless/b43legacy/dma.h b/drivers/net/wireless/b43legacy/dma.h
index 2f186003c31..f9681041c2d 100644
--- a/drivers/net/wireless/b43legacy/dma.h
+++ b/drivers/net/wireless/b43legacy/dma.h
@@ -243,8 +243,6 @@ struct b43legacy_dmaring {
int used_slots;
/* Currently used slot in the ring. */
int current_slot;
- /* Total number of packets sent. Statistics only. */
- unsigned int nr_tx_packets;
/* Frameoffset in octets. */
u32 frameoffset;
/* Descriptor buffer size. */
@@ -292,9 +290,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev);
void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev);
void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev);
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
-
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb);
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
@@ -315,11 +310,6 @@ void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
}
static inline
-void b43legacy_dma_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb)
{
diff --git a/drivers/net/wireless/b43legacy/leds.h b/drivers/net/wireless/b43legacy/leds.h
index 82167a90088..9ff6750dc57 100644
--- a/drivers/net/wireless/b43legacy/leds.h
+++ b/drivers/net/wireless/b43legacy/leds.h
@@ -45,7 +45,7 @@ enum b43legacy_led_behaviour {
void b43legacy_leds_init(struct b43legacy_wldev *dev);
void b43legacy_leds_exit(struct b43legacy_wldev *dev);
-#else /* CONFIG_B43EGACY_LEDS */
+#else /* CONFIG_B43LEGACY_LEDS */
/* LED support disabled */
struct b43legacy_led {
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 4a905b6a886..1d070be5a67 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -61,6 +61,8 @@ MODULE_AUTHOR("Michael Buesch");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(B43legacy_SUPPORTED_FIRMWARE_ID);
+MODULE_FIRMWARE("b43legacy/ucode2.fw");
+MODULE_FIRMWARE("b43legacy/ucode4.fw");
#if defined(CONFIG_B43LEGACY_DMA) && defined(CONFIG_B43LEGACY_PIO)
static int modparam_pio;
@@ -2444,29 +2446,6 @@ static int b43legacy_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
return 0;
}
-static int b43legacy_op_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
- struct b43legacy_wldev *dev = wl->current_dev;
- unsigned long flags;
- int err = -ENODEV;
-
- if (!dev)
- goto out;
- spin_lock_irqsave(&wl->irq_lock, flags);
- if (likely(b43legacy_status(dev) >= B43legacy_STAT_STARTED)) {
- if (b43legacy_using_pio(dev))
- b43legacy_pio_get_tx_stats(dev, stats);
- else
- b43legacy_dma_get_tx_stats(dev, stats);
- err = 0;
- }
- spin_unlock_irqrestore(&wl->irq_lock, flags);
-out:
- return err;
-}
-
static int b43legacy_op_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
@@ -2921,6 +2900,7 @@ static int b43legacy_wireless_core_start(struct b43legacy_wldev *dev)
goto out;
}
/* We are ready to run. */
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_STARTED);
/* Start data flow (TX/RX) */
@@ -3341,6 +3321,7 @@ static int b43legacy_wireless_core_init(struct b43legacy_wldev *dev)
b43legacy_security_init(dev);
b43legacy_rng_init(wl);
+ ieee80211_wake_queues(dev->wl->hw);
b43legacy_set_status(dev, B43legacy_STAT_INITIALIZED);
b43legacy_leds_init(dev);
@@ -3361,7 +3342,7 @@ err_kfree_lo_control:
}
static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev;
@@ -3370,23 +3351,23 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
/* TODO: allow WDS/AP devices to coexist */
- if (conf->type != NL80211_IFTYPE_AP &&
- conf->type != NL80211_IFTYPE_STATION &&
- conf->type != NL80211_IFTYPE_WDS &&
- conf->type != NL80211_IFTYPE_ADHOC)
+ if (vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION &&
+ vif->type != NL80211_IFTYPE_WDS &&
+ vif->type != NL80211_IFTYPE_ADHOC)
return -EOPNOTSUPP;
mutex_lock(&wl->mutex);
if (wl->operating)
goto out_mutex_unlock;
- b43legacydbg(wl, "Adding Interface type %d\n", conf->type);
+ b43legacydbg(wl, "Adding Interface type %d\n", vif->type);
dev = wl->current_dev;
wl->operating = 1;
- wl->vif = conf->vif;
- wl->if_type = conf->type;
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ wl->vif = vif;
+ wl->if_type = vif->type;
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
spin_lock_irqsave(&wl->irq_lock, flags);
b43legacy_adjust_opmode(dev);
@@ -3403,18 +3384,18 @@ static int b43legacy_op_add_interface(struct ieee80211_hw *hw,
}
static void b43legacy_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw);
struct b43legacy_wldev *dev = wl->current_dev;
unsigned long flags;
- b43legacydbg(wl, "Removing Interface type %d\n", conf->type);
+ b43legacydbg(wl, "Removing Interface type %d\n", vif->type);
mutex_lock(&wl->mutex);
B43legacy_WARN_ON(!wl->operating);
- B43legacy_WARN_ON(wl->vif != conf->vif);
+ B43legacy_WARN_ON(wl->vif != vif);
wl->vif = NULL;
wl->operating = 0;
@@ -3509,7 +3490,6 @@ static const struct ieee80211_ops b43legacy_hw_ops = {
.bss_info_changed = b43legacy_op_bss_info_changed,
.configure_filter = b43legacy_op_configure_filter,
.get_stats = b43legacy_op_get_stats,
- .get_tx_stats = b43legacy_op_get_tx_stats,
.start = b43legacy_op_start,
.stop = b43legacy_op_stop,
.set_tim = b43legacy_op_beacon_set_tim,
@@ -3960,7 +3940,7 @@ static struct ssb_driver b43legacy_ssb_driver = {
static void b43legacy_print_driverinfo(void)
{
- const char *feat_pci = "", *feat_leds = "", *feat_rfkill = "",
+ const char *feat_pci = "", *feat_leds = "",
*feat_pio = "", *feat_dma = "";
#ifdef CONFIG_B43LEGACY_PCI_AUTOSELECT
@@ -3969,9 +3949,6 @@ static void b43legacy_print_driverinfo(void)
#ifdef CONFIG_B43LEGACY_LEDS
feat_leds = "L";
#endif
-#ifdef CONFIG_B43LEGACY_RFKILL
- feat_rfkill = "R";
-#endif
#ifdef CONFIG_B43LEGACY_PIO
feat_pio = "I";
#endif
@@ -3979,9 +3956,9 @@ static void b43legacy_print_driverinfo(void)
feat_dma = "D";
#endif
printk(KERN_INFO "Broadcom 43xx-legacy driver loaded "
- "[ Features: %s%s%s%s%s, Firmware-ID: "
+ "[ Features: %s%s%s%s, Firmware-ID: "
B43legacy_SUPPORTED_FIRMWARE_ID " ]\n",
- feat_pci, feat_leds, feat_rfkill, feat_pio, feat_dma);
+ feat_pci, feat_leds, feat_pio, feat_dma);
}
static int __init b43legacy_init(void)
diff --git a/drivers/net/wireless/b43legacy/pio.c b/drivers/net/wireless/b43legacy/pio.c
index 51866c9a276..017c0e9c37e 100644
--- a/drivers/net/wireless/b43legacy/pio.c
+++ b/drivers/net/wireless/b43legacy/pio.c
@@ -477,7 +477,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
list_move_tail(&packet->list, &queue->txqueue);
queue->nr_txfree--;
- queue->nr_tx_packets++;
B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);
tasklet_schedule(&queue->txtask);
@@ -546,18 +545,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
tasklet_schedule(&queue->txtask);
}
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct b43legacy_pio *pio = &dev->pio;
- struct b43legacy_pioqueue *queue;
-
- queue = pio->queue1;
- stats[0].len = B43legacy_PIO_MAXTXPACKETS - queue->nr_txfree;
- stats[0].limit = B43legacy_PIO_MAXTXPACKETS;
- stats[0].count = queue->nr_tx_packets;
-}
-
static void pio_rx_error(struct b43legacy_pioqueue *queue,
int clear_buffers,
const char *error)
diff --git a/drivers/net/wireless/b43legacy/pio.h b/drivers/net/wireless/b43legacy/pio.h
index 464fec05a06..8e6773ea6e7 100644
--- a/drivers/net/wireless/b43legacy/pio.h
+++ b/drivers/net/wireless/b43legacy/pio.h
@@ -74,10 +74,6 @@ struct b43legacy_pioqueue {
* posted to the device. We are waiting for the txstatus.
*/
struct list_head txrunning;
- /* Total number or packets sent.
- * (This counter can obviously wrap).
- */
- unsigned int nr_tx_packets;
struct tasklet_struct txtask;
struct b43legacy_pio_txpacket
tx_packets_cache[B43legacy_PIO_MAXTXPACKETS];
@@ -106,8 +102,6 @@ int b43legacy_pio_tx(struct b43legacy_wldev *dev,
struct sk_buff *skb);
void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
const struct b43legacy_txstatus *status);
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats);
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue);
/* Suspend TX queue in hardware. */
@@ -140,11 +134,6 @@ void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
{
}
static inline
-void b43legacy_pio_get_tx_stats(struct b43legacy_wldev *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
-}
-static inline
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
{
}
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index c9640a3e02c..d19748d90aa 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -794,13 +794,6 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus",
0x7a954bd9, 0x74be00c6),
PCMCIA_DEVICE_PROD_ID123(
- "Intersil", "PRISM 2_5 PCMCIA ADAPTER", "ISL37300P",
- 0x4b801a17, 0x6345a0bf, 0xc9049a39),
- /* D-Link DWL-650 Rev. P1; manfid 0x000b, 0x7110 */
- PCMCIA_DEVICE_PROD_ID123(
- "D-Link", "DWL-650 Wireless PC Card RevP", "ISL37101P-10",
- 0x1a424a1c, 0x6ea57632, 0xdd97a26b),
- PCMCIA_DEVICE_PROD_ID123(
"Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02",
0xe6ec52ce, 0x08649af2, 0x4b74baa0),
PCMCIA_DEVICE_PROD_ID123(
@@ -834,14 +827,12 @@ static struct pcmcia_device_id hostap_cs_ids[] = {
"Ver. 1.00",
0x5cd01705, 0x4271660f, 0x9d08ee12),
PCMCIA_DEVICE_PROD_ID123(
- "corega", "WL PCCL-11", "ISL37300P",
- 0xa21501a, 0x59868926, 0xc9049a39),
- PCMCIA_DEVICE_PROD_ID123(
- "The Linksys Group, Inc.", "Wireless Network CF Card", "ISL37300P",
- 0xa5f472c2, 0x9c05598d, 0xc9049a39),
- PCMCIA_DEVICE_PROD_ID123(
"Wireless LAN" , "11Mbps PC Card", "Version 01.02",
0x4b8870ff, 0x70e946d1, 0x4b74baa0),
+ PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
+ PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
+ PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
+ PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids);
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index ff9b5c88218..d7073281942 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -2618,6 +2618,15 @@ static irqreturn_t prism2_interrupt(int irq, void *dev_id)
int events = 0;
u16 ev;
+ /* Detect an early interrupt before the driver is fully configured */
+ if (!dev->base_addr) {
+ if (net_ratelimit()) {
+ printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n",
+ dev->name);
+ }
+ return IRQ_HANDLED;
+ }
+
iface = netdev_priv(dev);
local = iface->local;
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index 8fdd41f4b4f..4d97ae37499 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -39,7 +39,7 @@ struct hostap_pci_priv {
/* FIX: do we need mb/wmb/rmb with memory operations? */
-static struct pci_device_id prism2_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_pci_id_table) = {
/* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */
{ 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID },
/* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */
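
The hostap_pci hunk above and several hunks below make the same mechanical change to PCI ID tables, so a brief aside that is not part of the patch: DEFINE_PCI_DEVICE_TABLE() is the <linux/pci.h> helper for declaring such tables. Its definition around this kernel release is assumed to be roughly the sketch below, so the converted tables become const and land in the __devinitconst section rather than the writable __devinitdata one.

/* Assumed contemporary definition from <linux/pci.h>; verify against the
 * tree this series applies to. */
#define DEFINE_PCI_DEVICE_TABLE(_table) \
	const struct pci_device_id _table[] __devinitconst

The initializer lists and the MODULE_DEVICE_TABLE() uses are unchanged by the conversion, as the hostap and ipw hunks in this series show.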
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index 0e5d51086a4..fc04ccdc5be 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -60,7 +60,7 @@ struct hostap_plx_priv {
#define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID }
-static struct pci_device_id prism2_plx_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(prism2_plx_id_table) = {
PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"),
PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"),
PLXDEV(0x126c, 0x8030, "Nortel emobility"),
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 56afcf041f8..9b72c45a774 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -6585,7 +6585,7 @@ static void ipw2100_shutdown(struct pci_dev *pci_dev)
#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }
-static struct pci_device_id ipw2100_pci_id_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = {
IPW2100_DEV_ID(0x2520), /* IN 2100A mPCI 3A */
IPW2100_DEV_ID(0x2521), /* IN 2100A mPCI 3B */
IPW2100_DEV_ID(0x2524), /* IN 2100A mPCI 3B */
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 09ddd3e6bed..63c2a7ade5f 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11524,7 +11524,7 @@ out:
}
/* PCI driver stuff */
-static struct pci_device_id card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(card_ids) = {
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2701, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2702, 0, 0, 0},
{PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, 0x2711, 0, 0, 0},
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b16b06c2031..dc8ed152766 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,14 +1,8 @@
config IWLWIFI
tristate "Intel Wireless Wifi"
- depends on PCI && MAC80211 && EXPERIMENTAL
+ depends on PCI && MAC80211
select FW_LOADER
-config IWLWIFI_SPECTRUM_MEASUREMENT
- bool "Enable Spectrum Measurement in iwlagn driver"
- depends on IWLWIFI
- ---help---
- This option will enable spectrum measurement for the iwlagn driver.
-
config IWLWIFI_DEBUG
bool "Enable full debugging output in iwlagn and iwl3945 drivers"
depends on IWLWIFI
@@ -120,9 +114,3 @@ config IWL3945
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The
module will be called iwl3945.
-
-config IWL3945_SPECTRUM_MEASUREMENT
- bool "Enable Spectrum Measurement in iwl3945 driver"
- depends on IWL3945
- ---help---
- This option will enable spectrum measurement for the iwl3945 driver.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 7f82044af24..4e378faee65 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -3,7 +3,6 @@ iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
iwlcore-objs += iwl-scan.o iwl-led.o
iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
-iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
CFLAGS_iwl-devtrace.o := -I$(src)
@@ -20,3 +19,5 @@ iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
# 3945
obj-$(CONFIG_IWL3945) += iwl3945.o
iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
+
+ccflags-y += -D__CHECK_ENDIAN__
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 8414178bcff..3bf2e6e9b2d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -89,8 +89,78 @@ static void iwl1000_nic_config(struct iwl_priv *priv)
~APMG_SVR_VOLTAGE_CONFIG_BIT_MSK);
}
+static struct iwl_sensitivity_ranges iwl1000_sensitivity = {
+ .min_nrg_cck = 95,
+ .max_nrg_cck = 0, /* not used, set to 0 */
+ .auto_corr_min_ofdm = 90,
+ .auto_corr_min_ofdm_mrc = 170,
+ .auto_corr_min_ofdm_x1 = 120,
+ .auto_corr_min_ofdm_mrc_x1 = 240,
+
+ .auto_corr_max_ofdm = 120,
+ .auto_corr_max_ofdm_mrc = 210,
+ .auto_corr_max_ofdm_x1 = 155,
+ .auto_corr_max_ofdm_mrc_x1 = 290,
+
+ .auto_corr_min_cck = 125,
+ .auto_corr_max_cck = 200,
+ .auto_corr_min_cck_mrc = 170,
+ .auto_corr_max_cck_mrc = 400,
+ .nrg_th_cck = 95,
+ .nrg_th_ofdm = 95,
+
+ .barker_corr_th_min = 190,
+ .barker_corr_th_min_mrc = 390,
+ .nrg_th_cca = 62,
+};
+
+static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
+{
+ if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
+ priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
+ priv->cfg->num_of_queues =
+ priv->cfg->mod_params->num_of_queues;
+
+ priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
+ priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
+ priv->hw_params.scd_bc_tbls_size =
+ priv->cfg->num_of_queues *
+ sizeof(struct iwl5000_scd_bc_tbl);
+ priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
+ priv->hw_params.max_stations = IWL5000_STATION_COUNT;
+ priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
+
+ priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
+ priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
+
+ priv->hw_params.max_bsm_size = 0;
+ priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
+ BIT(IEEE80211_BAND_5GHZ);
+ priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
+
+ priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
+ priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
+ priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
+ priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
+
+ if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
+ priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
+
+ /* Set initial sensitivity parameters */
+ /* Set initial calibration set */
+ priv->hw_params.sens = &iwl1000_sensitivity;
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_TX_IQ_PERD) |
+ BIT(IWL_CALIB_BASE_BAND);
+
+ return 0;
+}
+
static struct iwl_lib_ops iwl1000_lib = {
- .set_hw_params = iwl5000_hw_set_hw_params,
+ .set_hw_params = iwl1000_hw_set_hw_params,
.txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
.txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
.txq_set_sched = iwl5000_txq_set_sched,
@@ -105,6 +175,8 @@ static struct iwl_lib_ops iwl1000_lib = {
.load_ucode = iwl5000_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -138,9 +210,10 @@ static struct iwl_lib_ops iwl1000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl1000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl1000_ops = {
+static const struct iwl_ops iwl1000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl1000_lib,
.hcmd = &iwl5000_hcmd,
@@ -173,7 +246,8 @@ struct iwl_cfg iwl1000_bgn_cfg = {
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl1000_bg_cfg = {
@@ -200,6 +274,8 @@ struct iwl_cfg iwl1000_bg_cfg = {
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
index 08ce259a0e6..042f6bc0df1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 6fd10d443ba..3a876a8ece3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index a871d09d598..abe2b739c4d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 5a1033ca7aa..ce990adc51e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index d4b49883b30..47909f94271 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 234891d8cc1..303cc8193ad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -45,8 +45,8 @@
#include "iwl-sta.h"
#include "iwl-3945.h"
#include "iwl-eeprom.h"
-#include "iwl-helpers.h"
#include "iwl-core.h"
+#include "iwl-helpers.h"
#include "iwl-led.h"
#include "iwl-3945-led.h"
@@ -1951,11 +1951,7 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
}
/* Add the broadcast address so we can send broadcast frames */
- if (iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL) ==
- IWL_INVALID_STATION) {
- IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
- return -EIO;
- }
+ priv->cfg->ops->lib->add_bcast_station(priv);
/* If we have set the ASSOC_MSK and we are in BSS mode then
* add the IWL_AP_ID to the station rate table */
@@ -2474,11 +2470,9 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
memset((void *)&priv->hw_params, 0,
sizeof(struct iwl_hw_params));
- priv->shared_virt =
- pci_alloc_consistent(priv->pci_dev,
- sizeof(struct iwl3945_shared),
- &priv->shared_phys);
-
+ priv->shared_virt = dma_alloc_coherent(&priv->pci_dev->dev,
+ sizeof(struct iwl3945_shared),
+ &priv->shared_phys, GFP_KERNEL);
if (!priv->shared_virt) {
IWL_ERR(priv, "failed to allocate pci memory\n");
mutex_unlock(&priv->mutex);
@@ -2796,6 +2790,7 @@ static struct iwl_lib_ops iwl3945_lib = {
.post_associate = iwl3945_post_associate,
.isr = iwl_isr_legacy,
.config_ap = iwl3945_config_ap,
+ .add_bcast_station = iwl3945_add_bcast_station,
};
static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
@@ -2804,7 +2799,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
.rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
};
-static struct iwl_ops iwl3945_ops = {
+static const struct iwl_ops iwl3945_ops = {
.ucode = &iwl3945_ucode,
.lib = &iwl3945_lib,
.hcmd = &iwl3945_hcmd,
@@ -2830,6 +2825,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2847,9 +2843,10 @@ static struct iwl_cfg iwl3945_abg_cfg = {
.ht_greenfield_support = false,
.led_compensation = 64,
.broken_powersave = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
-struct pci_device_id iwl3945_hw_card_ids[] = {
+DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
{IWL_PCI_DEVICE(0x4222, 0x1005, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1034, iwl3945_bg_cfg)},
{IWL_PCI_DEVICE(0x4222, 0x1044, iwl3945_bg_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 531fa125f5a..452dfd5456c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -37,7 +37,7 @@
#include <net/ieee80211_radiotap.h>
/* Hardware specific file defines the PCI IDs table for that hardware module */
-extern struct pci_device_id iwl3945_hw_card_ids[];
+extern const struct pci_device_id iwl3945_hw_card_ids[];
#include "iwl-csr.h"
#include "iwl-prph.h"
@@ -171,24 +171,6 @@ struct iwl3945_frame {
#define SCAN_INTERVAL 100
-#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
-#define STATUS_INT_ENABLED 2
-#define STATUS_RF_KILL_HW 3
-#define STATUS_INIT 5
-#define STATUS_ALIVE 6
-#define STATUS_READY 7
-#define STATUS_TEMPERATURE 8
-#define STATUS_GEO_CONFIGURED 9
-#define STATUS_EXIT_PENDING 10
-#define STATUS_STATISTICS 12
-#define STATUS_SCANNING 13
-#define STATUS_SCAN_ABORTING 14
-#define STATUS_SCAN_HW 15
-#define STATUS_POWER_PMI 16
-#define STATUS_FW_ERROR 17
-#define STATUS_CONF_PENDING 18
-
#define MAX_TID_COUNT 9
#define IWL_INVALID_RATE 0xFF
@@ -226,7 +208,8 @@ extern void iwl3945_rx_replenish(void *data);
extern void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
extern unsigned int iwl3945_fill_beacon_frame(struct iwl_priv *priv,
struct ieee80211_hdr *hdr,int left);
-extern void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
+extern int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display);
extern void iwl3945_dump_nic_error_log(struct iwl_priv *priv);
/*
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index c606366b582..67ef562e8db 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 31462813bac..1bd2cd83602 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -581,6 +581,13 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
iwl4965_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
/* Map each Tx/cmd queue to its corresponding fifo */
for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
int ac = default_queue_to_tx_fifo[i];
@@ -2206,9 +2213,10 @@ static struct iwl_lib_ops iwl4965_lib = {
.temperature = iwl4965_temperature_calib,
.set_ct_kill = iwl4965_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl4965_ops = {
+static const struct iwl_ops iwl4965_ops = {
.ucode = &iwl4965_ucode,
.lib = &iwl4965_lib,
.hcmd = &iwl4965_hcmd,
@@ -2239,7 +2247,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
.broken_powersave = true,
.led_compensation = 61,
.chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
};
/* Module firmware */
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
index bc056e9ab85..714e032f621 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index cffaae772d5..e476acb53aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -179,14 +179,24 @@ static void iwl5000_gain_computation(struct iwl_priv *priv,
data->delta_gain_code[i] = 0;
continue;
}
- delta_g = (1000 * ((s32)average_noise[default_chain] -
+
+ delta_g = (priv->cfg->chain_noise_scale *
+ ((s32)average_noise[default_chain] -
(s32)average_noise[i])) / 1500;
+
/* bound gain by 2 bits value max, 3rd bit is sign */
data->delta_gain_code[i] =
min(abs(delta_g), (long) CHAIN_NOISE_MAX_DELTA_GAIN_CODE);
if (delta_g < 0)
- /* set negative sign */
+ /*
+ * set negative sign ...
+ * note to Intel developers: This is uCode API format,
+ * not the format of any internal device registers.
+ * Do not change this format for e.g. 6050 or similar
+ * devices. Change format only if more resolution
+ * (i.e. more than 2 bits magnitude) is needed.
+ */
data->delta_gain_code[i] |= (1 << 2);
}
@@ -263,8 +273,8 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
.auto_corr_max_ofdm = 120,
.auto_corr_max_ofdm_mrc = 210,
- .auto_corr_max_ofdm_x1 = 155,
- .auto_corr_max_ofdm_mrc_x1 = 290,
+ .auto_corr_max_ofdm_x1 = 120,
+ .auto_corr_max_ofdm_mrc_x1 = 240,
.auto_corr_min_cck = 125,
.auto_corr_max_cck = 200,
@@ -412,12 +422,14 @@ static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
/*
* ucode
*/
-static int iwl5000_load_section(struct iwl_priv *priv,
- struct fw_desc *image,
- u32 dst_addr)
+static int iwl5000_load_section(struct iwl_priv *priv, const char *name,
+ struct fw_desc *image, u32 dst_addr)
{
dma_addr_t phy_addr = image->p_addr;
u32 byte_cnt = image->len;
+ int ret;
+
+ priv->ucode_write_complete = 0;
iwl_write_direct32(priv,
FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
@@ -447,57 +459,36 @@ static int iwl5000_load_section(struct iwl_priv *priv,
FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
- return 0;
-}
-
-static int iwl5000_load_given_ucode(struct iwl_priv *priv,
- struct fw_desc *inst_image,
- struct fw_desc *data_image)
-{
- int ret = 0;
-
- ret = iwl5000_load_section(priv, inst_image,
- IWL50_RTC_INST_LOWER_BOUND);
- if (ret)
- return ret;
-
- IWL_DEBUG_INFO(priv, "INST uCode section being loaded...\n");
+ IWL_DEBUG_INFO(priv, "%s uCode section being loaded...\n", name);
ret = wait_event_interruptible_timeout(priv->wait_command_queue,
priv->ucode_write_complete, 5 * HZ);
if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the INST uCode section due "
- "to interrupt\n");
+ IWL_ERR(priv, "Could not load the %s uCode section due "
+ "to interrupt\n", name);
return ret;
}
if (!ret) {
- IWL_ERR(priv, "Could not load the INST uCode section\n");
+ IWL_ERR(priv, "Could not load the %s uCode section\n",
+ name);
return -ETIMEDOUT;
}
- priv->ucode_write_complete = 0;
-
- ret = iwl5000_load_section(
- priv, data_image, IWL50_RTC_DATA_LOWER_BOUND);
- if (ret)
- return ret;
+ return 0;
+}
- IWL_DEBUG_INFO(priv, "DATA uCode section being loaded...\n");
+static int iwl5000_load_given_ucode(struct iwl_priv *priv,
+ struct fw_desc *inst_image,
+ struct fw_desc *data_image)
+{
+ int ret = 0;
- ret = wait_event_interruptible_timeout(priv->wait_command_queue,
- priv->ucode_write_complete, 5 * HZ);
- if (ret == -ERESTARTSYS) {
- IWL_ERR(priv, "Could not load the INST uCode section due "
- "to interrupt\n");
+ ret = iwl5000_load_section(priv, "INST", inst_image,
+ IWL50_RTC_INST_LOWER_BOUND);
+ if (ret)
return ret;
- } else if (!ret) {
- IWL_ERR(priv, "Could not load the DATA uCode section\n");
- return -ETIMEDOUT;
- } else
- ret = 0;
- priv->ucode_write_complete = 0;
-
- return ret;
+ return iwl5000_load_section(priv, "DATA", data_image,
+ IWL50_RTC_DATA_LOWER_BOUND);
}
int iwl5000_load_ucode(struct iwl_priv *priv)
@@ -657,6 +648,13 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
iwl5000_set_wr_ptrs(priv, IWL_CMD_QUEUE_NUM, 0);
+ /* make sure all queues are not stopped */
+ memset(&priv->queue_stopped[0], 0, sizeof(priv->queue_stopped));
+ for (i = 0; i < 4; i++)
+ atomic_set(&priv->queue_stop_count[i], 0);
+
+ /* reset to 0 to enable all the queues first */
+ priv->txq_ctx_active_msk = 0;
/* map qos queues to fifos one-to-one */
for (i = 0; i < ARRAY_SIZE(iwl5000_default_queue_to_tx_fifo); i++) {
int ac = iwl5000_default_queue_to_tx_fifo[i];
@@ -781,7 +779,7 @@ void iwl5000_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
@@ -800,12 +798,12 @@ void iwl5000_txq_inval_byte_cnt_tbl(struct iwl_priv *priv,
if (txq_id != IWL_CMD_QUEUE_NUM)
sta_id = txq->cmd[read_ptr]->cmd.tx.sta_id;
- bc_ent = cpu_to_le16(1 | (sta_id << 12));
+ bc_ent = cpu_to_le16(1 | (sta_id << 12));
scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
- if (txq->q.write_ptr < TFD_QUEUE_SIZE_BC_DUP)
+ if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
scd_bc_tbl[txq_id].
- tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
+ tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}
static int iwl5000_tx_queue_set_q2ratid(struct iwl_priv *priv, u16 ra_tid,
@@ -1464,6 +1462,8 @@ struct iwl_lib_ops iwl5000_lib = {
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1499,6 +1499,7 @@ struct iwl_lib_ops iwl5000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl5000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
static struct iwl_lib_ops iwl5150_lib = {
@@ -1516,6 +1517,7 @@ static struct iwl_lib_ops iwl5150_lib = {
.is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
.load_ucode = iwl5000_load_ucode,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
@@ -1551,9 +1553,10 @@ static struct iwl_lib_ops iwl5150_lib = {
.temperature = iwl5150_temperature,
.set_ct_kill = iwl5150_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl5000_ops = {
+static const struct iwl_ops iwl5000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl5000_lib,
.hcmd = &iwl5000_hcmd,
@@ -1561,7 +1564,7 @@ static struct iwl_ops iwl5000_ops = {
.led = &iwlagn_led_ops,
};
-static struct iwl_ops iwl5150_ops = {
+static const struct iwl_ops iwl5150_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl5150_lib,
.hcmd = &iwl5000_hcmd,
@@ -1598,7 +1601,8 @@ struct iwl_cfg iwl5300_agn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_bgn_cfg = {
@@ -1623,6 +1627,8 @@ struct iwl_cfg iwl5100_bgn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_abg_cfg = {
@@ -1645,6 +1651,8 @@ struct iwl_cfg iwl5100_abg_cfg = {
.use_bsm = false,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5100_agn_cfg = {
@@ -1669,7 +1677,8 @@ struct iwl_cfg iwl5100_agn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5350_agn_cfg = {
@@ -1694,7 +1703,8 @@ struct iwl_cfg iwl5350_agn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5150_agn_cfg = {
@@ -1719,7 +1729,8 @@ struct iwl_cfg iwl5150_agn_cfg = {
.led_compensation = 51,
.use_rts_for_ht = true, /* use rts/cts protection */
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl5150_abg_cfg = {
@@ -1742,6 +1753,8 @@ struct iwl_cfg iwl5150_abg_cfg = {
.use_bsm = false,
.led_compensation = 51,
.chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
index 90185777d98..ddba3999999 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-6000-hw.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 74e57104927..c4844adff92 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2008-2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -70,6 +70,14 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
priv->hw_params.ct_kill_exit_threshold = CT_KILL_EXIT_THRESHOLD;
}
+/* Indicate calibration version to uCode. */
+static void iwl6050_set_calib_version(struct iwl_priv *priv)
+{
+ if (priv->cfg->ops->lib->eeprom_ops.calib_version(priv) >= 6)
+ iwl_set_bit(priv, CSR_GP_DRIVER_REG,
+ CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6);
+}
+
/* NIC configuration for 6000 series */
static void iwl6000_nic_config(struct iwl_priv *priv)
{
@@ -96,6 +104,8 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA);
}
/* else do nothing, uCode configured */
+ if (priv->cfg->ops->lib->temp_ops.set_calib_version)
+ priv->cfg->ops->lib->temp_ops.set_calib_version(priv);
}
static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
@@ -108,7 +118,7 @@ static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
.auto_corr_max_ofdm = 145,
.auto_corr_max_ofdm_mrc = 232,
- .auto_corr_max_ofdm_x1 = 145,
+ .auto_corr_max_ofdm_x1 = 110,
.auto_corr_max_ofdm_mrc_x1 = 232,
.auto_corr_min_cck = 125,
@@ -158,11 +168,25 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
/* Set initial sensitivity parameters */
/* Set initial calibration set */
priv->hw_params.sens = &iwl6000_sensitivity;
- priv->hw_params.calib_init_cfg =
+ switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) {
+ case CSR_HW_REV_TYPE_6x50:
+ priv->hw_params.calib_init_cfg =
+ BIT(IWL_CALIB_XTAL) |
+ BIT(IWL_CALIB_DC) |
+ BIT(IWL_CALIB_LO) |
+ BIT(IWL_CALIB_TX_IQ) |
+ BIT(IWL_CALIB_BASE_BAND);
+
+ break;
+ default:
+ priv->hw_params.calib_init_cfg =
BIT(IWL_CALIB_XTAL) |
BIT(IWL_CALIB_LO) |
BIT(IWL_CALIB_TX_IQ) |
BIT(IWL_CALIB_BASE_BAND);
+ break;
+ }
+
return 0;
}
@@ -215,6 +239,8 @@ static struct iwl_lib_ops iwl6000_lib = {
.load_ucode = iwl5000_load_ucode,
.dump_nic_event_log = iwl_dump_nic_event_log,
.dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
.init_alive_start = iwl5000_init_alive_start,
.alive_notify = iwl5000_alive_notify,
.send_tx_power = iwl5000_send_tx_power,
@@ -250,9 +276,10 @@ static struct iwl_lib_ops iwl6000_lib = {
.temperature = iwl5000_temperature,
.set_ct_kill = iwl6000_set_ct_threshold,
},
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl6000_ops = {
+static const struct iwl_ops iwl6000_ops = {
.ucode = &iwl5000_ucode,
.lib = &iwl6000_lib,
.hcmd = &iwl5000_hcmd,
@@ -260,18 +287,68 @@ static struct iwl_ops iwl6000_ops = {
.led = &iwlagn_led_ops,
};
-static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
- .get_hcmd_size = iwl5000_get_hcmd_size,
- .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
- .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
- .calc_rssi = iwl5000_calc_rssi,
+static struct iwl_lib_ops iwl6050_lib = {
+ .set_hw_params = iwl6000_hw_set_hw_params,
+ .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
+ .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
+ .txq_set_sched = iwl5000_txq_set_sched,
+ .txq_agg_enable = iwl5000_txq_agg_enable,
+ .txq_agg_disable = iwl5000_txq_agg_disable,
+ .txq_attach_buf_to_tfd = iwl_hw_txq_attach_buf_to_tfd,
+ .txq_free_tfd = iwl_hw_txq_free_tfd,
+ .txq_init = iwl_hw_tx_queue_init,
+ .rx_handler_setup = iwl5000_rx_handler_setup,
+ .setup_deferred_work = iwl5000_setup_deferred_work,
+ .is_valid_rtc_data_addr = iwl5000_hw_valid_rtc_data_addr,
+ .load_ucode = iwl5000_load_ucode,
+ .dump_nic_event_log = iwl_dump_nic_event_log,
+ .dump_nic_error_log = iwl_dump_nic_error_log,
+ .dump_csr = iwl_dump_csr,
+ .dump_fh = iwl_dump_fh,
+ .init_alive_start = iwl5000_init_alive_start,
+ .alive_notify = iwl5000_alive_notify,
+ .send_tx_power = iwl5000_send_tx_power,
+ .update_chain_flags = iwl_update_chain_flags,
+ .set_channel_switch = iwl6000_hw_channel_switch,
+ .apm_ops = {
+ .init = iwl_apm_init,
+ .stop = iwl_apm_stop,
+ .config = iwl6000_nic_config,
+ .set_pwr_src = iwl_set_pwr_src,
+ },
+ .eeprom_ops = {
+ .regulatory_bands = {
+ EEPROM_5000_REG_BAND_1_CHANNELS,
+ EEPROM_5000_REG_BAND_2_CHANNELS,
+ EEPROM_5000_REG_BAND_3_CHANNELS,
+ EEPROM_5000_REG_BAND_4_CHANNELS,
+ EEPROM_5000_REG_BAND_5_CHANNELS,
+ EEPROM_5000_REG_BAND_24_HT40_CHANNELS,
+ EEPROM_5000_REG_BAND_52_HT40_CHANNELS
+ },
+ .verify_signature = iwlcore_eeprom_verify_signature,
+ .acquire_semaphore = iwlcore_eeprom_acquire_semaphore,
+ .release_semaphore = iwlcore_eeprom_release_semaphore,
+ .calib_version = iwl5000_eeprom_calib_version,
+ .query_addr = iwl5000_eeprom_query_addr,
+ .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
+ },
+ .post_associate = iwl_post_associate,
+ .isr = iwl_isr_ict,
+ .config_ap = iwl_config_ap,
+ .temp_ops = {
+ .temperature = iwl5000_temperature,
+ .set_ct_kill = iwl6000_set_ct_threshold,
+ .set_calib_version = iwl6050_set_calib_version,
+ },
+ .add_bcast_station = iwl_add_bcast_station,
};
-static struct iwl_ops iwl6050_ops = {
+static const struct iwl_ops iwl6050_ops = {
.ucode = &iwl5000_ucode,
- .lib = &iwl6000_lib,
+ .lib = &iwl6050_lib,
.hcmd = &iwl5000_hcmd,
- .utils = &iwl6050_hcmd_utils,
+ .utils = &iwl5000_hcmd_utils,
.led = &iwlagn_led_ops,
};
@@ -306,7 +383,8 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6000i_2abg_cfg = {
@@ -336,6 +414,8 @@ struct iwl_cfg iwl6000i_2abg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6000i_2bg_cfg = {
@@ -365,6 +445,8 @@ struct iwl_cfg iwl6000i_2bg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
struct iwl_cfg iwl6050_2agn_cfg = {
@@ -395,7 +477,8 @@ struct iwl_cfg iwl6050_2agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DYNAMIC,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
};
struct iwl_cfg iwl6050_2abg_cfg = {
@@ -425,6 +508,8 @@ struct iwl_cfg iwl6050_2abg_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1500,
};
struct iwl_cfg iwl6000_3agn_cfg = {
@@ -455,7 +540,8 @@ struct iwl_cfg iwl6000_3agn_cfg = {
.supports_idle = true,
.adv_thermal_throttle = true,
.support_ct_kill_exit = true,
- .sm_ps_mode = WLAN_HT_CAP_SM_PS_DISABLED,
+ .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
+ .chain_noise_scale = 1000,
};
MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
index 3bccba20f6d..1a24946bc20 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
index ab55f92a161..a594e4fdc6b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index b93e4915819..8bf7c20b9d3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -298,10 +298,23 @@ static void rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
struct iwl_lq_sta *lq_data, u8 tid,
struct ieee80211_sta *sta)
{
+ int ret;
+
if (rs_tl_get_load(lq_data, tid) > IWL_AGG_LOAD_THRESHOLD) {
IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
sta->addr, tid);
- ieee80211_start_tx_ba_session(sta, tid);
+ ret = ieee80211_start_tx_ba_session(sta, tid);
+ if (ret == -EAGAIN) {
+ /*
+ * driver and mac80211 are out of sync;
+ * this might be caused by reloading firmware.
+ * Stop the Tx BA session here.
+ */
+ IWL_DEBUG_HT(priv, "Fail start Tx agg on tid: %d\n",
+ tid);
+ ret = ieee80211_stop_tx_ba_session(sta, tid,
+ WLAN_BACK_INITIATOR);
+ }
}
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index affc0c5a2f2..e71923961e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -191,7 +191,7 @@ enum {
IWL_RATE_2M_MASK)
#define IWL_CCK_RATES_MASK \
- (IWL_BASIC_RATES_MASK | \
+ (IWL_CCK_BASIC_RATES_MASK | \
IWL_RATE_5M_MASK | \
IWL_RATE_11M_MASK)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 1c9866daf81..6aeb82b6992 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -73,13 +73,7 @@
#define VD
#endif
-#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
-#define VS "s"
-#else
-#define VS
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD VS
+#define DRV_VERSION IWLWIFI_VERSION VD
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -203,7 +197,8 @@ int iwl_commit_rxon(struct iwl_priv *priv)
priv->start_calib = 0;
/* Add the broadcast address so we can send broadcast frames */
- iwl_add_bcast_station(priv);
+ priv->cfg->ops->lib->add_bcast_station(priv);
+
/* If we have set the ASSOC_MSK and we are in BSS mode then
* add the IWL_AP_ID to the station rate table */
@@ -657,6 +652,131 @@ static void iwl_bg_statistics_periodic(unsigned long data)
iwl_send_statistics_request(priv, CMD_ASYNC, false);
}
+
+static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
+ u32 start_idx, u32 num_events,
+ u32 mode)
+{
+ u32 i;
+ u32 ptr; /* SRAM byte address of log data */
+ u32 ev, time, data; /* event log data */
+ unsigned long reg_flags;
+
+ if (mode == 0)
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 2 * sizeof(u32));
+ else
+ ptr = base + (4 * sizeof(u32)) + (start_idx * 3 * sizeof(u32));
+
+ /* Make sure device is powered up for SRAM reads */
+ spin_lock_irqsave(&priv->reg_lock, reg_flags);
+ if (iwl_grab_nic_access(priv)) {
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return;
+ }
+
+ /* Set starting address; reads will auto-increment */
+ _iwl_write_direct32(priv, HBUS_TARG_MEM_RADDR, ptr);
+ rmb();
+
+ /*
+ * "time" is actually "data" for mode 0 (no timestamp).
+ * place event id # at far right for easier visual parsing.
+ */
+ for (i = 0; i < num_events; i++) {
+ ev = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ if (mode == 0) {
+ trace_iwlwifi_dev_ucode_cont_event(priv,
+ 0, time, ev);
+ } else {
+ data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
+ trace_iwlwifi_dev_ucode_cont_event(priv,
+ time, data, ev);
+ }
+ }
+ /* Allow device to power down */
+ iwl_release_nic_access(priv);
+ spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+}
+
+static void iwl_continuous_event_trace(struct iwl_priv *priv)
+{
+ u32 capacity; /* event log capacity in # entries */
+ u32 base; /* SRAM byte address of event log header */
+ u32 mode; /* 0 - no timestamp, 1 - timestamp recorded */
+ u32 num_wraps; /* # times uCode wrapped to top of log */
+ u32 next_entry; /* index of next entry to be written by uCode */
+
+ if (priv->ucode_type == UCODE_INIT)
+ base = le32_to_cpu(priv->card_alive_init.error_event_table_ptr);
+ else
+ base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
+ if (priv->cfg->ops->lib->is_valid_rtc_data_addr(base)) {
+ capacity = iwl_read_targ_mem(priv, base);
+ num_wraps = iwl_read_targ_mem(priv, base + (2 * sizeof(u32)));
+ mode = iwl_read_targ_mem(priv, base + (1 * sizeof(u32)));
+ next_entry = iwl_read_targ_mem(priv, base + (3 * sizeof(u32)));
+ } else
+ return;
+
+ if (num_wraps == priv->event_log.num_wraps) {
+ iwl_print_cont_event_trace(priv,
+ base, priv->event_log.next_entry,
+ next_entry - priv->event_log.next_entry,
+ mode);
+ priv->event_log.non_wraps_count++;
+ } else {
+ if ((num_wraps - priv->event_log.num_wraps) > 1)
+ priv->event_log.wraps_more_count++;
+ else
+ priv->event_log.wraps_once_count++;
+ trace_iwlwifi_dev_ucode_wrap_event(priv,
+ num_wraps - priv->event_log.num_wraps,
+ next_entry, priv->event_log.next_entry);
+ if (next_entry < priv->event_log.next_entry) {
+ iwl_print_cont_event_trace(priv, base,
+ priv->event_log.next_entry,
+ capacity - priv->event_log.next_entry,
+ mode);
+
+ iwl_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ } else {
+ iwl_print_cont_event_trace(priv, base,
+ next_entry, capacity - next_entry,
+ mode);
+
+ iwl_print_cont_event_trace(priv, base, 0,
+ next_entry, mode);
+ }
+ }
+ priv->event_log.num_wraps = num_wraps;
+ priv->event_log.next_entry = next_entry;
+}
+
+/**
+ * iwl_bg_ucode_trace - Timer callback to log ucode event
+ *
+ * The timer is continually set to execute every
+ * UCODE_TRACE_PERIOD milliseconds after the last timer expired;
+ * this function performs continuous uCode event logging
+ * if it is enabled.
+ */
+static void iwl_bg_ucode_trace(unsigned long data)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)data;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (priv->event_log.ucode_trace) {
+ iwl_continuous_event_trace(priv);
+ /* Reschedule the timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ }
+}
+
static void iwl_rx_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
@@ -689,12 +809,14 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
unsigned long status = priv->status;
- IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s\n",
+ IWL_DEBUG_RF_KILL(priv, "Card state received: HW:%s SW:%s CT:%s\n",
(flags & HW_CARD_DISABLED) ? "Kill" : "On",
- (flags & SW_CARD_DISABLED) ? "Kill" : "On");
+ (flags & SW_CARD_DISABLED) ? "Kill" : "On",
+ (flags & CT_CARD_DISABLED) ?
+ "Reached" : "Not reached");
if (flags & (SW_CARD_DISABLED | HW_CARD_DISABLED |
- RF_CARD_DISABLED)) {
+ CT_CARD_DISABLED)) {
iwl_write32(priv, CSR_UCODE_DRV_GP1_SET,
CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
@@ -708,10 +830,10 @@ static void iwl_rx_card_state_notif(struct iwl_priv *priv,
iwl_write_direct32(priv, HBUS_TARG_MBX_C,
HBUS_TARG_MBX_C_REG_BIT_CMD_BLOCKED);
}
- if (flags & RF_CARD_DISABLED)
+ if (flags & CT_CARD_DISABLED)
iwl_tt_enter_ct_kill(priv);
}
- if (!(flags & RF_CARD_DISABLED))
+ if (!(flags & CT_CARD_DISABLED))
iwl_tt_exit_ct_kill(priv);
if (flags & HW_CARD_DISABLED)
@@ -761,6 +883,8 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_ALIVE] = iwl_rx_reply_alive;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
@@ -774,7 +898,6 @@ static void iwl_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl_reply_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl_rx_statistics;
- iwl_setup_spectrum_handlers(priv);
iwl_setup_rx_scan_handlers(priv);
/* status change handler */
@@ -1634,7 +1757,7 @@ static const char *desc_lookup_text[] = {
"DEBUG_1",
"DEBUG_2",
"DEBUG_3",
- "UNKNOWN"
+ "ADVANCED SYSASSERT"
};
static const char *desc_lookup(int i)
@@ -1705,8 +1828,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
* iwl_print_event_log - Dump error event log to syslog
*
*/
-static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode)
+static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
u32 i;
u32 base; /* SRAM byte address of event log header */
@@ -1716,7 +1840,7 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
unsigned long reg_flags;
if (num_events == 0)
- return;
+ return pos;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
else
@@ -1744,27 +1868,44 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
- trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
- IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ } else {
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n",
+ time, ev);
+ }
} else {
data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "EVT_LOGT:%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
}
}
/* Allow device to power down */
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
}
/**
* iwl_print_last_event_logs - Dump the newest # of event log to syslog
*/
-static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
- u32 num_wraps, u32 next_entry,
- u32 size, u32 mode)
+static int iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+ u32 num_wraps, u32 next_entry,
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
/*
* display the newest DEFAULT_LOG_ENTRIES entries
@@ -1772,21 +1913,26 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
*/
if (num_wraps) {
if (next_entry < size) {
- iwl_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode);
- iwl_print_event_log(priv, 0,
- next_entry, mode);
+ pos = iwl_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
} else {
- if (next_entry < size)
- iwl_print_event_log(priv, 0, next_entry, mode);
- else
- iwl_print_event_log(priv, next_entry - size,
- size, mode);
+ if (next_entry < size) {
+ pos = iwl_print_event_log(priv, 0, next_entry,
+ mode, pos, buf, bufsz);
+ } else {
+ pos = iwl_print_event_log(priv, next_entry - size,
+ size, mode, pos, buf, bufsz);
+ }
}
+ return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1794,7 +1940,8 @@ static void iwl_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
#define DEFAULT_DUMP_EVENT_LOG_ENTRIES (20)
-void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
+int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
{
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
@@ -1802,6 +1949,8 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
if (priv->ucode_type == UCODE_INIT)
base = le32_to_cpu(priv->card_alive_init.log_event_table_ptr);
@@ -1812,7 +1961,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
IWL_ERR(priv,
"Invalid event log pointer 0x%08X for %s uCode\n",
base, (priv->ucode_type == UCODE_INIT) ? "Init" : "RT");
- return;
+ return -EINVAL;
}
/* event log header */
@@ -1838,7 +1987,7 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return;
+ return pos;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1853,6 +2002,15 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
size);
#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/*
* if uCode has wrapped back to top of log,
@@ -1860,17 +2018,22 @@ void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
* i.e the next one that uCode would fill.
*/
if (num_wraps)
- iwl_print_event_log(priv, next_entry,
- capacity - next_entry, mode);
+ pos = iwl_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
/* (then/else) start at top of log */
- iwl_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl_print_event_log(priv, 0,
+ next_entry, mode, pos, buf, bufsz);
} else
- iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#else
- iwl_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#endif
+ return pos;
}
/**
@@ -2276,18 +2439,6 @@ static void iwl_bg_run_time_calib_work(struct work_struct *work)
return;
}
-static void iwl_bg_up(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- __iwl_up(priv);
- mutex_unlock(&priv->mutex);
-}
-
static void iwl_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -2304,7 +2455,13 @@ static void iwl_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
} else {
iwl_down(priv);
- queue_work(priv->workqueue, &priv->up);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl_up(priv);
+ mutex_unlock(&priv->mutex);
}
}
@@ -2440,7 +2597,7 @@ void iwl_post_associate(struct iwl_priv *priv)
* Not a mac80211 entry point function, but it fits in with all the
* other mac80211 functions grouped here.
*/
-static int iwl_setup_mac(struct iwl_priv *priv)
+static int iwl_mac_setup_register(struct iwl_priv *priv)
{
int ret;
struct ieee80211_hw *hw = priv->hw;
@@ -2456,6 +2613,10 @@ static int iwl_setup_mac(struct iwl_priv *priv)
hw->flags |= IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
+ if (priv->cfg->sku & IWL_SKU_N)
+ hw->flags |= IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS;
+
hw->sta_data_size = sizeof(struct iwl_station_priv);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
@@ -2470,7 +2631,7 @@ static int iwl_setup_mac(struct iwl_priv *priv)
*/
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
+ hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX + 1;
/* we create the 802.11 header and a zero-length SSID element */
hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
@@ -2668,14 +2829,18 @@ void iwl_config_ap(struct iwl_priv *priv)
}
static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
- struct ieee80211_key_conf *keyconf, const u8 *addr,
- u32 iv32, u16 *phase1key)
+ struct ieee80211_vif *vif,
+ struct ieee80211_key_conf *keyconf,
+ struct ieee80211_sta *sta,
+ u32 iv32, u16 *phase1key)
{
struct iwl_priv *priv = hw->priv;
IWL_DEBUG_MAC80211(priv, "enter\n");
- iwl_update_tkip_key(priv, keyconf, addr, iv32, phase1key);
+ iwl_update_tkip_key(priv, keyconf,
+ sta ? sta->addr : iwl_bcast_addr,
+ iv32, phase1key);
IWL_DEBUG_MAC80211(priv, "leave\n");
}
@@ -2784,6 +2949,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
return 0;
else
return ret;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ /* do nothing */
+ return -EOPNOTSUPP;
default:
IWL_DEBUG_HT(priv, "unknown\n");
return -EINVAL;
@@ -2833,6 +3001,8 @@ static void iwl_mac_sta_notify(struct ieee80211_hw *hw,
break;
case STA_NOTIFY_AWAKE:
WARN_ON(!sta_priv->client);
+ if (!sta_priv->asleep)
+ break;
sta_priv->asleep = false;
sta_id = iwl_find_station(priv, sta->addr);
if (sta_id != IWL_INVALID_STATION)
@@ -3109,7 +3279,6 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
init_waitqueue_head(&priv->wait_command_queue);
- INIT_WORK(&priv->up, iwl_bg_up);
INIT_WORK(&priv->restart, iwl_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl_bg_rx_replenish);
INIT_WORK(&priv->beacon_update, iwl_bg_beacon_update);
@@ -3126,6 +3295,10 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
priv->statistics_periodic.data = (unsigned long)priv;
priv->statistics_periodic.function = iwl_bg_statistics_periodic;
+ init_timer(&priv->ucode_trace);
+ priv->ucode_trace.data = (unsigned long)priv;
+ priv->ucode_trace.function = iwl_bg_ucode_trace;
+
if (!priv->cfg->use_isr_legacy)
tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
iwl_irq_tasklet, (unsigned long)priv);
@@ -3144,6 +3317,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
cancel_delayed_work(&priv->alive_start);
cancel_work_sync(&priv->beacon_update);
del_timer_sync(&priv->statistics_periodic);
+ del_timer_sync(&priv->ucode_trace);
}
static void iwl_init_hw_rates(struct iwl_priv *priv,
@@ -3179,6 +3353,7 @@ static int iwl_init_drv(struct iwl_priv *priv)
INIT_LIST_HEAD(&priv->free_frames);
mutex_init(&priv->mutex);
+ mutex_init(&priv->sync_cmd_mutex);
/* Clear the driver's (not device's) station table */
iwl_clear_stations_table(priv);
@@ -3188,6 +3363,14 @@ static int iwl_init_drv(struct iwl_priv *priv)
priv->band = IEEE80211_BAND_2GHZ;
priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
+
+ /* initialize force reset */
+ priv->force_reset[IWL_RF_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_RF_RESET;
+ priv->force_reset[IWL_FW_RESET].reset_duration =
+ IWL_DELAY_NEXT_FORCE_FW_RELOAD;
/* Choose which receivers/antennas to use */
if (priv->cfg->ops->hcmd->set_rxon_chain)
@@ -3264,7 +3447,6 @@ static struct ieee80211_ops iwl_hw_ops = {
.set_key = iwl_mac_set_key,
.update_tkip_key = iwl_mac_update_tkip_key,
.get_stats = iwl_mac_get_stats,
- .get_tx_stats = iwl_mac_get_tx_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
@@ -3365,6 +3547,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
+
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
iwl_hw_detect(priv);
IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
priv->cfg->name, priv->hw_rev);
@@ -3439,9 +3629,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_setup_deferred_work(priv);
iwl_setup_rx_handlers(priv);
- /**********************************
- * 8. Setup and register mac80211
- **********************************/
+ /*********************************************
+ * 8. Enable interrupts and read RFKILL state
+ *********************************************/
/* enable interrupts if needed: hw bug w/a */
pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
@@ -3452,14 +3642,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_enable_interrupts(priv);
- err = iwl_setup_mac(priv);
- if (err)
- goto out_remove_sysfs;
-
- err = iwl_dbgfs_register(priv, DRV_NAME);
- if (err)
- IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
-
/* If platform's RF_KILL switch is NOT set to KILL */
if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
@@ -3471,6 +3653,18 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
iwl_power_initialize(priv);
iwl_tt_initialize(priv);
+
+ /**************************************************
+ * 9. Setup and register with mac80211 and debugfs
+ **************************************************/
+ err = iwl_mac_setup_register(priv);
+ if (err)
+ goto out_remove_sysfs;
+
+ err = iwl_dbgfs_register(priv, DRV_NAME);
+ if (err)
+ IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
+
return 0;
out_remove_sysfs:
@@ -3589,7 +3783,7 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
*****************************************************************************/
/* Hardware specific file defines the PCI IDs table for that hardware module */
-static struct pci_device_id iwl_hw_card_ids[] = {
+static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
#ifdef CONFIG_IWL4965
{IWL_PCI_DEVICE(0x4229, PCI_ANY_ID, iwl4965_agn_cfg)},
{IWL_PCI_DEVICE(0x4230, PCI_ANY_ID, iwl4965_agn_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index 95a57b36a7e..845831ac053 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -414,7 +414,6 @@ static int iwl_sens_auto_corr_ofdm(struct iwl_priv *priv,
/* Prepare a SENSITIVITY_CMD, send to uCode if values have changed */
static int iwl_sensitivity_write(struct iwl_priv *priv)
{
- int ret = 0;
struct iwl_sensitivity_cmd cmd ;
struct iwl_sensitivity_data *data = NULL;
struct iwl_host_cmd cmd_out = {
@@ -477,11 +476,7 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
memcpy(&(priv->sensitivity_tbl[0]), &(cmd.table[0]),
sizeof(u16)*HD_TABLE_SIZE);
- ret = iwl_send_cmd(priv, &cmd_out);
- if (ret)
- IWL_ERR(priv, "SENSITIVITY_CMD failed\n");
-
- return ret;
+ return iwl_send_cmd(priv, &cmd_out);
}
void iwl_init_sensitivity(struct iwl_priv *priv)
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.h b/drivers/net/wireless/iwlwifi/iwl-calib.h
index b6cef989a79..2b7b1df83ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.h
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index e9150753192..6383d9f8c9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -120,7 +120,6 @@ enum {
CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
/* 802.11h related */
- RADAR_NOTIFICATION = 0x70, /* not used */
REPLY_QUIET_CMD = 0x71, /* not used */
REPLY_CHANNEL_SWITCH = 0x72,
CHANNEL_SWITCH_NOTIFICATION = 0x73,
@@ -2248,10 +2247,22 @@ struct iwl_link_quality_cmd {
__le32 reserved2;
} __attribute__ ((packed));
+/*
+ * BT configuration enable flags:
+ * bit 0 - 1: BT channel announcement enabled
+ * 0: disable
+ * bit 1 - 1: priority of BT device enabled
+ * 0: disable
+ * bit 2 - 1: BT 2 wire support enabled
+ * 0: disable
+ */
+#define BT_COEX_DISABLE (0x0)
+#define BT_ENABLE_CHANNEL_ANNOUNCE BIT(0)
+#define BT_ENABLE_PRIORITY BIT(1)
+#define BT_ENABLE_2_WIRE BIT(2)
+
#define BT_COEX_DISABLE (0x0)
-#define BT_COEX_MODE_2W (0x1)
-#define BT_COEX_MODE_3W (0x2)
-#define BT_COEX_MODE_4W (0x3)
+#define BT_COEX_ENABLE (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
#define BT_LEAD_TIME_MIN (0x0)
#define BT_LEAD_TIME_DEF (0x1E)
@@ -2510,7 +2521,7 @@ struct iwl_card_state_notif {
#define HW_CARD_DISABLED 0x01
#define SW_CARD_DISABLED 0x02
-#define RF_CARD_DISABLED 0x04
+#define CT_CARD_DISABLED 0x04
#define RXON_CARD_DISABLED 0x10
struct iwl_ct_kill_config {
@@ -2612,6 +2623,7 @@ struct iwl_ssid_ie {
#define TX_CMD_LIFE_TIME_INFINITE cpu_to_le32(0xFFFFFFFF)
#define IWL_GOOD_CRC_TH cpu_to_le16(1)
#define IWL_MAX_SCAN_SIZE 1024
+#define IWL_MAX_CMD_SIZE 4096
#define IWL_MAX_PROBE_REQUEST 200
/*
@@ -2984,7 +2996,7 @@ struct statistics_rx_ht_phy {
__le32 agg_crc32_good;
__le32 agg_mpdu_cnt;
__le32 agg_cnt;
- __le32 reserved2;
+ __le32 unsupport_mcs;
} __attribute__ ((packed));
#define INTERFERENCE_DATA_AVAILABLE cpu_to_le32(1)
@@ -3087,8 +3099,8 @@ struct statistics_div {
} __attribute__ ((packed));
struct statistics_general {
- __le32 temperature;
- __le32 temperature_m;
+ __le32 temperature; /* radio temperature */
+ __le32 temperature_m; /* for 5000 and up, this is radio voltage */
struct statistics_dbg dbg;
__le32 sleep_time;
__le32 slots_out;
@@ -3096,7 +3108,12 @@ struct statistics_general {
__le32 ttl_timestamp;
struct statistics_div div;
__le32 rx_enable_counter;
- __le32 reserved1;
+ /*
+ * num_of_sos_states:
+ * count the number of times we have to re-tune
+ * in order to get out of bad PHY status
+ */
+ __le32 num_of_sos_states;
__le32 reserved2;
__le32 reserved3;
} __attribute__ ((packed));
@@ -3161,13 +3178,30 @@ struct iwl_notif_statistics {
/*
* MISSED_BEACONS_NOTIFICATION = 0xa2 (notification only, not a command)
+ *
+ * uCode sends MISSED_BEACONS_NOTIFICATION to the driver whenever it detects
+ * missed beacons, regardless of how many were missed. Inside the
+ * notification the driver can find all the beacon information: the total
+ * number of missed beacons, the number of consecutive missed beacons, the
+ * number of beacons received and the number of beacons expected to be
+ * received.
+ *
+ * If uCode detects consecutive_missed_beacons > 5, it resets the radio
+ * to bring the radio/PHY back to a working state; this has no relation
+ * to when the driver performs sensitivity calibration.
+ *
+ * The driver sets its own missed_beacon_threshold to decide when to perform
+ * sensitivity calibration based on the number of consecutive missed beacons,
+ * in order to improve overall performance, especially in noisy environments.
+ *
*/
-/* if ucode missed CONSECUTIVE_MISSED_BCONS_TH beacons in a row,
- * then this notification will be sent. */
-#define CONSECUTIVE_MISSED_BCONS_TH 20
+
+#define IWL_MISSED_BEACON_THRESHOLD_MIN (1)
+#define IWL_MISSED_BEACON_THRESHOLD_DEF (5)
+#define IWL_MISSED_BEACON_THRESHOLD_MAX IWL_MISSED_BEACON_THRESHOLD_DEF
struct iwl_missed_beacon_notif {
- __le32 consequtive_missed_beacons;
+ __le32 consecutive_missed_beacons;
__le32 total_missed_becons;
__le32 num_expected_beacons;
__le32 num_recvd_beacons;
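
The comment above describes the policy the new IWL_MISSED_BEACON_THRESHOLD_* defines support: the driver compares the consecutive missed-beacon count against its own threshold to decide when to run sensitivity calibration. A standalone sketch of that decision, with hypothetical names (handle_missed_beacons is not a driver function):

#include <stdio.h>

#define MISSED_BEACON_THRESHOLD_DEF 5   /* mirrors IWL_MISSED_BEACON_THRESHOLD_DEF */

static void handle_missed_beacons(unsigned consecutive, unsigned threshold)
{
        if (consecutive > threshold)
                printf("%u consecutive missed beacons > %u: run sensitivity calibration\n",
                       consecutive, threshold);
        else
                printf("%u consecutive missed beacons: below threshold, no action\n",
                       consecutive);
}

int main(void)
{
        handle_missed_beacons(2, MISSED_BEACON_THRESHOLD_DEF);
        handle_missed_beacons(7, MISSED_BEACON_THRESHOLD_DEF);
        return 0;
}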
@@ -3437,11 +3471,7 @@ enum {
IWL_PHY_CALIBRATE_DIFF_GAIN_CMD = 7,
IWL_PHY_CALIBRATE_DC_CMD = 8,
IWL_PHY_CALIBRATE_LO_CMD = 9,
- IWL_PHY_CALIBRATE_RX_BB_CMD = 10,
IWL_PHY_CALIBRATE_TX_IQ_CMD = 11,
- IWL_PHY_CALIBRATE_RX_IQ_CMD = 12,
- IWL_PHY_CALIBRATION_NOISE_CMD = 13,
- IWL_PHY_CALIBRATE_AGC_TABLE_CMD = 14,
IWL_PHY_CALIBRATE_CRYSTAL_FRQ_CMD = 15,
IWL_PHY_CALIBRATE_BASE_BAND_CMD = 16,
IWL_PHY_CALIBRATE_TX_IQ_PERD_CMD = 17,
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index f36f804804f..112149e9b31 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -47,6 +47,26 @@ MODULE_VERSION(IWLWIFI_VERSION);
MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
MODULE_LICENSE("GPL");
+/*
+ * If bt_coex_active is true, uCode will do kill/defer every time the
+ * priority line is asserted (BT is sending signals on the priority line
+ * in the PCIx).
+ * If bt_coex_active is false, uCode will ignore BT activity and perform
+ * normal operation.
+ *
+ * Users might experience transmit issues on some platforms due to a
+ * WiFi/BT co-existence problem. The typical symptoms are that scanning
+ * works and all available APs are found, but it is not possible to
+ * associate with any of them. On those platforms, WiFi communication can
+ * be restored by setting the "bt_coex_active" module parameter to
+ * "false".
+ *
+ * default: bt_coex_active = true (BT_COEX_ENABLE)
+ */
+static bool bt_coex_active = true;
+module_param(bt_coex_active, bool, S_IRUGO);
+MODULE_PARM_DESC(bt_coex_active, "enable wifi/bluetooth co-exist\n");
+
static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
{COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
0, COEX_UNASSOC_IDLE_FLAGS},
@@ -257,8 +277,8 @@ int iwl_hw_nic_init(struct iwl_priv *priv)
spin_lock_irqsave(&priv->lock, flags);
priv->cfg->ops->lib->apm_ops.init(priv);
- /* Set interrupt coalescing timer to 512 usecs */
- iwl_write8(priv, CSR_INT_COALESCING, 512 / 32);
+ /* Set interrupt coalescing calibration timer to default (512 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
spin_unlock_irqrestore(&priv->lock, flags);
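
The replaced line encoded the 512-usec default by hand: the CSR_INT_COALESCING field counts in 32-usec units, so 512 usecs corresponds to the register value 16. A tiny sketch of that arithmetic (IWL_HOST_INT_CALIB_TIMEOUT_DEF presumably encodes the same default):

#include <stdio.h>

static unsigned char coalescing_reg_value(unsigned usecs)
{
        return (unsigned char)(usecs / 32);     /* register counts 32-usec units */
}

int main(void)
{
        printf("512 usecs -> 0x%02x\n", coalescing_reg_value(512));     /* 0x10 */
        return 0;
}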
@@ -450,8 +470,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
if (priv->cfg->ht_greenfield_support)
ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
- ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
- (priv->cfg->sm_ps_mode << 2));
max_bit_rate = MAX_BIT_RATE_20_MHZ;
if (priv->hw_params.ht40_channel & BIT(band)) {
ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
@@ -636,7 +654,7 @@ EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
static bool is_single_rx_stream(struct iwl_priv *priv)
{
- return !priv->current_ht_config.is_ht ||
+ return priv->current_ht_config.smps == IEEE80211_SMPS_STATIC ||
priv->current_ht_config.single_chain_sufficient;
}
@@ -1003,28 +1021,18 @@ static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
*/
static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
{
- int idle_cnt = active_cnt;
- bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
-
- /* # Rx chains when idling and maybe trying to save power */
- switch (priv->cfg->sm_ps_mode) {
- case WLAN_HT_CAP_SM_PS_STATIC:
- idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE;
- break;
- case WLAN_HT_CAP_SM_PS_DYNAMIC:
- idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
- IWL_NUM_IDLE_CHAINS_SINGLE;
- break;
- case WLAN_HT_CAP_SM_PS_DISABLED:
- break;
- case WLAN_HT_CAP_SM_PS_INVALID:
+ /* # Rx chains when idling, depending on SMPS mode */
+ switch (priv->current_ht_config.smps) {
+ case IEEE80211_SMPS_STATIC:
+ case IEEE80211_SMPS_DYNAMIC:
+ return IWL_NUM_IDLE_CHAINS_SINGLE;
+ case IEEE80211_SMPS_OFF:
+ return active_cnt;
default:
- IWL_ERR(priv, "invalid sm_ps mode %u\n",
- priv->cfg->sm_ps_mode);
- WARN_ON(1);
- break;
+ WARN(1, "invalid SMPS mode %d",
+ priv->current_ht_config.smps);
+ return active_cnt;
}
- return idle_cnt;
}
/* up to 4 chains */
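
The rewritten function above maps the SMPS mode straight to an idle RX chain count. A userspace sketch of that mapping, with enum names that only mirror the mac80211 ones and IWL_NUM_IDLE_CHAINS_SINGLE assumed to be 1:

#include <stdio.h>

enum smps_mode { SMPS_OFF, SMPS_STATIC, SMPS_DYNAMIC };

static int idle_rx_chains(enum smps_mode smps, int active_cnt)
{
        switch (smps) {
        case SMPS_STATIC:
        case SMPS_DYNAMIC:
                return 1;               /* single idle chain saves power */
        case SMPS_OFF:
        default:
                return active_cnt;      /* keep every active chain awake */
        }
}

int main(void)
{
        printf("static SMPS, 3 active -> %d idle\n", idle_rx_chains(SMPS_STATIC, 3));
        printf("SMPS off,    3 active -> %d idle\n", idle_rx_chains(SMPS_OFF, 3));
        return 0;
}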
@@ -1363,7 +1371,11 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
priv->cfg->ops->lib->dump_nic_error_log(priv);
- priv->cfg->ops->lib->dump_nic_event_log(priv, false);
+ if (priv->cfg->ops->lib->dump_csr)
+ priv->cfg->ops->lib->dump_csr(priv);
+ if (priv->cfg->ops->lib->dump_fh)
+ priv->cfg->ops->lib->dump_fh(priv, NULL, false);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, false, NULL, false);
#ifdef CONFIG_IWLWIFI_DEBUG
if (iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS)
iwl_print_rx_config_cmd(priv);
@@ -1658,9 +1670,9 @@ EXPORT_SYMBOL(iwl_set_tx_power);
void iwl_free_isr_ict(struct iwl_priv *priv)
{
if (priv->ict_tbl_vir) {
- pci_free_consistent(priv->pci_dev, (sizeof(u32) * ICT_COUNT) +
- PAGE_SIZE, priv->ict_tbl_vir,
- priv->ict_tbl_dma);
+ dma_free_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ priv->ict_tbl_vir, priv->ict_tbl_dma);
priv->ict_tbl_vir = NULL;
}
}
@@ -1676,9 +1688,9 @@ int iwl_alloc_isr_ict(struct iwl_priv *priv)
if (priv->cfg->use_isr_legacy)
return 0;
/* allocate shared data table */
- priv->ict_tbl_vir = pci_alloc_consistent(priv->pci_dev, (sizeof(u32) *
- ICT_COUNT) + PAGE_SIZE,
- &priv->ict_tbl_dma);
+ priv->ict_tbl_vir = dma_alloc_coherent(&priv->pci_dev->dev,
+ (sizeof(u32) * ICT_COUNT) + PAGE_SIZE,
+ &priv->ict_tbl_dma, GFP_KERNEL);
if (!priv->ict_tbl_vir)
return -ENOMEM;
@@ -1813,6 +1825,16 @@ irqreturn_t iwl_isr_ict(int irq, void *data)
if (val == 0xffffffff)
val = 0;
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 is set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
inta = (0xff & val) | ((0xff00 & val) << 16);
IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x ict 0x%08x\n",
inta, inta_mask, val);
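
The two fixups above first restore the Rx bit from bits 18/19 and then fold the ICT value into the 32-bit interrupt register layout. A standalone sketch of the same bit manipulation (the input value is made up):

#include <stdio.h>

static unsigned int fixup_ict_value(unsigned int val)
{
        if (val == 0xffffffff)          /* all-ones means "no interrupt" */
                val = 0;
        if (val & 0xC0000)              /* bits 18/19 imply the Rx bit */
                val |= 0x8000;
        /* fold the low/high bytes into the 32-bit interrupt register layout */
        return (0xff & val) | ((0xff00 & val) << 16);
}

int main(void)
{
        printf("inta = 0x%08x\n", fixup_ict_value(0x00040001));
        return 0;
}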
@@ -1975,13 +1997,20 @@ EXPORT_SYMBOL(iwl_isr_legacy);
int iwl_send_bt_config(struct iwl_priv *priv)
{
struct iwl_bt_cmd bt_cmd = {
- .flags = BT_COEX_MODE_4W,
.lead_time = BT_LEAD_TIME_DEF,
.max_kill = BT_MAX_KILL_DEF,
.kill_ack_mask = 0,
.kill_cts_mask = 0,
};
+ if (!bt_coex_active)
+ bt_cmd.flags = BT_COEX_DISABLE;
+ else
+ bt_cmd.flags = BT_COEX_ENABLE;
+
+ IWL_DEBUG_INFO(priv, "BT coex %s\n",
+ (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
+
return iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
sizeof(struct iwl_bt_cmd), &bt_cmd);
}
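
With the old 2W/3W/4W modes gone, the BT flags now come from the bt_coex_active module parameter introduced in this patch. A small sketch of the flag selection, with BIT() expanded by hand for a userspace build:

#include <stdio.h>
#include <stdbool.h>

#define BT_ENABLE_CHANNEL_ANNOUNCE (1u << 0)
#define BT_ENABLE_PRIORITY         (1u << 1)
#define BT_COEX_ENABLE  (BT_ENABLE_CHANNEL_ANNOUNCE | BT_ENABLE_PRIORITY)
#define BT_COEX_DISABLE 0u

int main(void)
{
        bool bt_coex_active = true;     /* stands in for the module parameter */
        unsigned int flags = bt_coex_active ? BT_COEX_ENABLE : BT_COEX_DISABLE;

        printf("BT coex flags = 0x%x (%s)\n", flags,
               flags == BT_COEX_DISABLE ? "disable" : "active");
        return 0;
}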
@@ -2599,44 +2628,43 @@ int iwl_set_mode(struct iwl_priv *priv, int mode)
EXPORT_SYMBOL(iwl_set_mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
- unsigned long flags;
+ int err = 0;
- IWL_DEBUG_MAC80211(priv, "enter: type %d\n", conf->type);
+ IWL_DEBUG_MAC80211(priv, "enter: type %d\n", vif->type);
+
+ mutex_lock(&priv->mutex);
if (priv->vif) {
IWL_DEBUG_MAC80211(priv, "leave - vif != NULL\n");
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ goto out;
}
- spin_lock_irqsave(&priv->lock, flags);
- priv->vif = conf->vif;
- priv->iw_mode = conf->type;
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- mutex_lock(&priv->mutex);
+ priv->vif = vif;
+ priv->iw_mode = vif->type;
- if (conf->mac_addr) {
- IWL_DEBUG_MAC80211(priv, "Set %pM\n", conf->mac_addr);
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
+ if (vif->addr) {
+ IWL_DEBUG_MAC80211(priv, "Set %pM\n", vif->addr);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
}
- if (iwl_set_mode(priv, conf->type) == -EAGAIN)
+ if (iwl_set_mode(priv, vif->type) == -EAGAIN)
/* we are not ready, will run again when ready */
set_bit(STATUS_MODE_PENDING, &priv->status);
+ out:
mutex_unlock(&priv->mutex);
IWL_DEBUG_MAC80211(priv, "leave\n");
- return 0;
+ return err;
}
EXPORT_SYMBOL(iwl_mac_add_interface);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct iwl_priv *priv = hw->priv;
@@ -2649,7 +2677,7 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
priv->staging_rxon.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
iwlcore_commit_rxon(priv);
}
- if (priv->vif == conf->vif) {
+ if (priv->vif == vif) {
priv->vif = NULL;
memset(priv->bssid, 0, ETH_ALEN);
}
@@ -2689,6 +2717,21 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
}
+ if (changed & (IEEE80211_CONF_CHANGE_SMPS |
+ IEEE80211_CONF_CHANGE_CHANNEL)) {
+ /* mac80211 uses static for non-HT which is what we want */
+ priv->current_ht_config.smps = conf->smps_mode;
+
+ /*
+ * Recalculate chain counts.
+ *
+ * If monitor mode is enabled then mac80211 will
+ * set up the SM PS mode to OFF if an HT channel is
+ * configured.
+ */
+ if (priv->cfg->ops->hcmd->set_rxon_chain)
+ priv->cfg->ops->hcmd->set_rxon_chain(priv);
+ }
/* during scanning mac80211 will delay channel setting until
* scan finish with changed = 0
@@ -2786,10 +2829,6 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
iwl_set_tx_power(priv, conf->power_level, false);
}
- /* call to ensure that 4965 rx_chain is set properly in monitor mode */
- if (priv->cfg->ops->hcmd->set_rxon_chain)
- priv->cfg->ops->hcmd->set_rxon_chain(priv);
-
if (!iwl_is_ready(priv)) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
goto out;
@@ -2812,42 +2851,6 @@ out:
}
EXPORT_SYMBOL(iwl_mac_config);
-int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct iwl_priv *priv = hw->priv;
- int i, avail;
- struct iwl_tx_queue *txq;
- struct iwl_queue *q;
- unsigned long flags;
-
- IWL_DEBUG_MAC80211(priv, "enter\n");
-
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
- return -EIO;
- }
-
- spin_lock_irqsave(&priv->lock, flags);
-
- for (i = 0; i < AC_NUM; i++) {
- txq = &priv->txq[i];
- q = &txq->q;
- avail = iwl_queue_space(q);
-
- stats[i].len = q->n_window - avail;
- stats[i].limit = q->n_window - q->high_mark;
- stats[i].count = q->n_window;
-
- }
- spin_unlock_irqrestore(&priv->lock, flags);
-
- IWL_DEBUG_MAC80211(priv, "leave\n");
-
- return 0;
-}
-EXPORT_SYMBOL(iwl_mac_get_tx_stats);
-
void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
{
struct iwl_priv *priv = hw->priv;
@@ -3197,6 +3200,207 @@ void iwl_update_stats(struct iwl_priv *priv, bool is_tx, __le16 fc, u16 len)
EXPORT_SYMBOL(iwl_update_stats);
#endif
+const static char *get_csr_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(CSR_HW_IF_CONFIG_REG);
+ IWL_CMD(CSR_INT_COALESCING);
+ IWL_CMD(CSR_INT);
+ IWL_CMD(CSR_INT_MASK);
+ IWL_CMD(CSR_FH_INT_STATUS);
+ IWL_CMD(CSR_GPIO_IN);
+ IWL_CMD(CSR_RESET);
+ IWL_CMD(CSR_GP_CNTRL);
+ IWL_CMD(CSR_HW_REV);
+ IWL_CMD(CSR_EEPROM_REG);
+ IWL_CMD(CSR_EEPROM_GP);
+ IWL_CMD(CSR_OTP_GP_REG);
+ IWL_CMD(CSR_GIO_REG);
+ IWL_CMD(CSR_GP_UCODE_REG);
+ IWL_CMD(CSR_GP_DRIVER_REG);
+ IWL_CMD(CSR_UCODE_DRV_GP1);
+ IWL_CMD(CSR_UCODE_DRV_GP2);
+ IWL_CMD(CSR_LED_REG);
+ IWL_CMD(CSR_DRAM_INT_TBL_REG);
+ IWL_CMD(CSR_GIO_CHICKEN_BITS);
+ IWL_CMD(CSR_ANA_PLL_CFG);
+ IWL_CMD(CSR_HW_REV_WA_REG);
+ IWL_CMD(CSR_DBG_HPET_MEM_REG);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+void iwl_dump_csr(struct iwl_priv *priv)
+{
+ int i;
+ u32 csr_tbl[] = {
+ CSR_HW_IF_CONFIG_REG,
+ CSR_INT_COALESCING,
+ CSR_INT,
+ CSR_INT_MASK,
+ CSR_FH_INT_STATUS,
+ CSR_GPIO_IN,
+ CSR_RESET,
+ CSR_GP_CNTRL,
+ CSR_HW_REV,
+ CSR_EEPROM_REG,
+ CSR_EEPROM_GP,
+ CSR_OTP_GP_REG,
+ CSR_GIO_REG,
+ CSR_GP_UCODE_REG,
+ CSR_GP_DRIVER_REG,
+ CSR_UCODE_DRV_GP1,
+ CSR_UCODE_DRV_GP2,
+ CSR_LED_REG,
+ CSR_DRAM_INT_TBL_REG,
+ CSR_GIO_CHICKEN_BITS,
+ CSR_ANA_PLL_CFG,
+ CSR_HW_REV_WA_REG,
+ CSR_DBG_HPET_MEM_REG
+ };
+ IWL_ERR(priv, "CSR values:\n");
+ IWL_ERR(priv, "(2nd byte of CSR_INT_COALESCING is "
+ "CSR_INT_PERIODIC_REG)\n");
+ for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
+ IWL_ERR(priv, " %25s: 0X%08x\n",
+ get_csr_string(csr_tbl[i]),
+ iwl_read32(priv, csr_tbl[i]));
+ }
+}
+EXPORT_SYMBOL(iwl_dump_csr);
+
+const static char *get_fh_string(int cmd)
+{
+ switch (cmd) {
+ IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
+ IWL_CMD(FH_RSCSR_CHNL0_WPTR);
+ IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
+ IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
+ IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
+ IWL_CMD(FH_TSSR_TX_STATUS_REG);
+ IWL_CMD(FH_TSSR_TX_ERROR_REG);
+ default:
+ return "UNKNOWN";
+
+ }
+}
+
+int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
+{
+ int i;
+#ifdef CONFIG_IWLWIFI_DEBUG
+ int pos = 0;
+ size_t bufsz = 0;
+#endif
+ u32 fh_tbl[] = {
+ FH_RSCSR_CHNL0_STTS_WPTR_REG,
+ FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+ FH_RSCSR_CHNL0_WPTR,
+ FH_MEM_RCSR_CHNL0_CONFIG_REG,
+ FH_MEM_RSSR_SHARED_CTRL_REG,
+ FH_MEM_RSSR_RX_STATUS_REG,
+ FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
+ FH_TSSR_TX_STATUS_REG,
+ FH_TSSR_TX_ERROR_REG
+ };
+#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(priv, fh_tbl[i]));
+ }
+ return pos;
+ }
+#endif
+ IWL_ERR(priv, "FH register values:\n");
+ for (i = 0; i < ARRAY_SIZE(fh_tbl); i++) {
+ IWL_ERR(priv, " %34s: 0X%08x\n",
+ get_fh_string(fh_tbl[i]),
+ iwl_read_direct32(priv, fh_tbl[i]));
+ }
+ return 0;
+}
+EXPORT_SYMBOL(iwl_dump_fh);
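
iwl_dump_fh() above either formats the registers into a freshly allocated buffer (returning its length for the debugfs reader) or prints them to the error log. A rough userspace sketch of that dual-mode pattern; the register names and values are invented and only the structure mirrors the driver function:

#include <stdio.h>
#include <stdlib.h>

static int dump_regs(char **buf, int display)
{
        static const char *names[] = { "REG_A", "REG_B", "REG_C" };
        static const unsigned int vals[] = { 0x1, 0x20, 0x300 };
        size_t bufsz = 3 * 48 + 40;
        int i, pos = 0;

        if (display) {
                *buf = malloc(bufsz);
                if (!*buf)
                        return -1;
                pos += snprintf(*buf + pos, bufsz - pos, "register values:\n");
                for (i = 0; i < 3; i++)
                        pos += snprintf(*buf + pos, bufsz - pos,
                                        "  %8s: 0x%08x\n", names[i], vals[i]);
                return pos;             /* caller reads pos bytes, then frees */
        }
        for (i = 0; i < 3; i++)
                fprintf(stderr, "  %8s: 0x%08x\n", names[i], vals[i]);
        return 0;
}

int main(void)
{
        char *buf = NULL;
        int len = dump_regs(&buf, 1);

        if (len > 0) {
                fwrite(buf, 1, len, stdout);
                free(buf);
        }
        return 0;
}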
+
+static void iwl_force_rf_reset(struct iwl_priv *priv)
+{
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ if (!iwl_is_associated(priv)) {
+ IWL_DEBUG_SCAN(priv, "force reset rejected: not associated\n");
+ return;
+ }
+ /*
+ * There is no easy and better way to force a radio reset; the only known
+ * method is switching channels, which forces the radio to reset and
+ * retune. An internal short scan (single channel) operation should
+ * achieve this objective.
+ * The driver should reset the radio when the consecutive missed-beacon
+ * count exceeds the threshold, or when any other uCode error condition
+ * is detected.
+ */
+ IWL_DEBUG_INFO(priv, "perform radio reset.\n");
+ iwl_internal_short_hw_scan(priv);
+ return;
+}
+
+
+int iwl_force_reset(struct iwl_priv *priv, int mode)
+{
+ struct iwl_force_reset *force_reset;
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return -EINVAL;
+
+ if (mode >= IWL_MAX_FORCE_RESET) {
+ IWL_DEBUG_INFO(priv, "invalid reset request.\n");
+ return -EINVAL;
+ }
+ force_reset = &priv->force_reset[mode];
+ force_reset->reset_request_count++;
+ if (force_reset->last_force_reset_jiffies &&
+ time_after(force_reset->last_force_reset_jiffies +
+ force_reset->reset_duration, jiffies)) {
+ IWL_DEBUG_INFO(priv, "force reset rejected\n");
+ force_reset->reset_reject_count++;
+ return -EAGAIN;
+ }
+ force_reset->reset_success_count++;
+ force_reset->last_force_reset_jiffies = jiffies;
+ IWL_DEBUG_INFO(priv, "perform force reset (%d)\n", mode);
+ switch (mode) {
+ case IWL_RF_RESET:
+ iwl_force_rf_reset(priv);
+ break;
+ case IWL_FW_RESET:
+ IWL_ERR(priv, "On demand firmware reload\n");
+ /* Set the FW error flag -- cleared on iwl_down */
+ set_bit(STATUS_FW_ERROR, &priv->status);
+ wake_up_interruptible(&priv->wait_command_queue);
+ /*
+ * Keep the restart process from trying to send host
+ * commands by clearing the INIT status bit
+ */
+ clear_bit(STATUS_READY, &priv->status);
+ queue_work(priv->workqueue, &priv->restart);
+ break;
+ }
+ return 0;
+}
+
#ifdef CONFIG_PM
int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
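
iwl_force_reset() above rate-limits reset requests with last_force_reset_jiffies and time_after(). A standalone sketch of the same throttling using a plain counter in place of jiffies; all names are illustrative:

#include <stdio.h>
#include <stdbool.h>

struct force_reset_state {
        unsigned long last_reset;       /* tick of the last accepted reset */
        unsigned long duration;         /* minimum ticks between resets */
        bool armed;                     /* has a reset ever been accepted? */
};

static bool try_force_reset(struct force_reset_state *s, unsigned long now)
{
        if (s->armed && now < s->last_reset + s->duration)
                return false;           /* too soon: reject the request */
        s->last_reset = now;
        s->armed = true;
        return true;
}

int main(void)
{
        struct force_reset_state s = { .duration = 100 };

        printf("t=0:   %s\n", try_force_reset(&s, 0) ? "reset" : "rejected");
        printf("t=50:  %s\n", try_force_reset(&s, 50) ? "reset" : "rejected");
        printf("t=150: %s\n", try_force_reset(&s, 150) ? "reset" : "rejected");
        return 0;
}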
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index b69e972671b..4ef7739f9e8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -63,8 +63,6 @@
#ifndef __iwl_core_h__
#define __iwl_core_h__
-#include <generated/utsrelease.h>
-
/************************
* forward declarations *
************************/
@@ -72,8 +70,8 @@ struct iwl_host_cmd;
struct iwl_cmd;
-#define IWLWIFI_VERSION UTS_RELEASE "-k"
-#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
+#define IWLWIFI_VERSION "in-tree:"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
#define IWL_PCI_DEVICE(dev, subdev, cfg) \
@@ -119,6 +117,7 @@ struct iwl_apm_ops {
struct iwl_temp_ops {
void (*temperature)(struct iwl_priv *priv);
void (*set_ct_kill)(struct iwl_priv *priv);
+ void (*set_calib_version)(struct iwl_priv *priv);
};
struct iwl_ucode_ops {
@@ -169,8 +168,11 @@ struct iwl_lib_ops {
int (*is_valid_rtc_data_addr)(u32 addr);
/* 1st ucode load */
int (*load_ucode)(struct iwl_priv *priv);
- void (*dump_nic_event_log)(struct iwl_priv *priv, bool full_log);
+ int (*dump_nic_event_log)(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
void (*dump_nic_error_log)(struct iwl_priv *priv);
+ void (*dump_csr)(struct iwl_priv *priv);
+ int (*dump_fh)(struct iwl_priv *priv, char **buf, bool display);
int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
/* power management */
struct iwl_apm_ops apm_ops;
@@ -187,6 +189,8 @@ struct iwl_lib_ops {
/* temperature */
struct iwl_temp_ops temp_ops;
+ /* station management */
+ void (*add_bcast_station)(struct iwl_priv *priv);
};
struct iwl_led_ops {
@@ -230,8 +234,9 @@ struct iwl_mod_params {
* @chain_noise_num_beacons: number of beacons used to compute chain noise
* @adv_thermal_throttle: support advance thermal throttle
* @support_ct_kill_exit: support ct kill exit condition
- * @sm_ps_mode: spatial multiplexing power save mode
* @support_wimax_coexist: support wimax/wifi co-exist
+ * @plcp_delta_threshold: plcp error rate threshold used to trigger
+ * radio tuning when the received plcp error rate is too high
*
* We enable the driver to be backward compatible wrt API version. The
* driver specifies which APIs it supports (with @ucode_api_max being the
@@ -287,8 +292,9 @@ struct iwl_cfg {
const bool supports_idle;
bool adv_thermal_throttle;
bool support_ct_kill_exit;
- u8 sm_ps_mode;
const bool support_wimax_coexist;
+ u8 plcp_delta_threshold;
+ s32 chain_noise_scale;
};
/***************************
@@ -332,13 +338,11 @@ int iwl_mac_beacon_update(struct ieee80211_hw *hw, struct sk_buff *skb);
int iwl_commit_rxon(struct iwl_priv *priv);
int iwl_set_mode(struct iwl_priv *priv, int mode);
int iwl_mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
void iwl_mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
int iwl_mac_config(struct ieee80211_hw *hw, u32 changed);
void iwl_config_ap(struct iwl_priv *priv);
-int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
int iwl_alloc_txq_mem(struct iwl_priv *priv);
void iwl_free_txq_mem(struct iwl_priv *priv);
@@ -411,13 +415,13 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_cmd_queue_free(struct iwl_priv *priv);
int iwl_rx_queue_alloc(struct iwl_priv *priv);
void iwl_rx_handle(struct iwl_priv *priv);
-int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
+void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv,
struct iwl_rx_queue *q);
void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
void iwl_rx_replenish(struct iwl_priv *priv);
void iwl_rx_replenish_now(struct iwl_priv *priv);
int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq);
-int iwl_rx_queue_restock(struct iwl_priv *priv);
+void iwl_rx_queue_restock(struct iwl_priv *priv);
int iwl_rx_queue_space(const struct iwl_rx_queue *q);
void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority);
void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb);
@@ -425,6 +429,8 @@ int iwl_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index);
/* Handlers */
void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
+void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb);
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb);
void iwl_reply_statistics(struct iwl_priv *priv,
@@ -445,9 +451,9 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb);
void iwl_hw_txq_ctx_free(struct iwl_priv *priv);
int iwl_hw_tx_queue_init(struct iwl_priv *priv,
struct iwl_tx_queue *txq);
-int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
void iwl_free_tfds_in_queue(struct iwl_priv *priv,
int sta_id, int tid, int freed);
+void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq);
int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
int slots_num, u32 txq_id);
void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
@@ -497,6 +503,8 @@ void iwl_init_scan_params(struct iwl_priv *priv);
int iwl_scan_cancel(struct iwl_priv *priv);
int iwl_scan_cancel_timeout(struct iwl_priv *priv, unsigned long ms);
int iwl_mac_hw_scan(struct ieee80211_hw *hw, struct cfg80211_scan_request *req);
+int iwl_internal_short_hw_scan(struct iwl_priv *priv);
+int iwl_force_reset(struct iwl_priv *priv, int mode);
u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
const u8 *ie, int ie_len, int left);
void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
@@ -527,14 +535,6 @@ int iwl_send_calib_results(struct iwl_priv *priv);
int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
void iwl_calib_free_results(struct iwl_priv *priv);
-/*******************************************************************************
- * Spectrum Measureemtns in iwl-spectrum.c
- ******************************************************************************/
-#ifdef CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT
-void iwl_setup_spectrum_handlers(struct iwl_priv *priv);
-#else
-static inline void iwl_setup_spectrum_handlers(struct iwl_priv *priv) {}
-#endif
/*****************************************************
* S e n d i n g H o s t C o m m a n d s *
*****************************************************/
@@ -583,7 +583,10 @@ int iwl_pci_resume(struct pci_dev *pdev);
* Error Handling Debugging
******************************************************/
void iwl_dump_nic_error_log(struct iwl_priv *priv);
-void iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log);
+int iwl_dump_nic_event_log(struct iwl_priv *priv,
+ bool full_log, char **buf, bool display);
+void iwl_dump_csr(struct iwl_priv *priv);
+int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display);
#ifdef CONFIG_IWLWIFI_DEBUG
void iwl_print_rx_config_cmd(struct iwl_priv *priv);
#else
@@ -603,7 +606,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
/*************** DRIVER STATUS FUNCTIONS *****/
#define STATUS_HCMD_ACTIVE 0 /* host command in progress */
-#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
+/* 1 is unused (used to be STATUS_HCMD_SYNC_ACTIVE) */
#define STATUS_INT_ENABLED 2
#define STATUS_RF_KILL_HW 3
#define STATUS_CT_KILL 4
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 1ec8cb4d5ea..808b7146bea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -369,7 +369,7 @@
#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_3x3_HYB (0x00000000)
#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_HYB (0x00000001)
#define CSR_GP_DRIVER_REG_BIT_RADIO_SKU_2x2_IPA (0x00000002)
-
+#define CSR_GP_DRIVER_REG_BIT_CALIB_VERSION6 (0x00000004)
/* GIO Chicken Bits (PCI Express bus link power management) */
#define CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX (0x00800000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index d61293ab67c..1c7b53d511c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
@@ -67,57 +67,6 @@ do { \
DUMP_PREFIX_OFFSET, 16, 1, p, len, 1); \
} while (0)
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-struct iwl_debugfs {
- const char *name;
- struct dentry *dir_drv;
- struct dentry *dir_data;
- struct dentry *dir_debug;
- struct dentry *dir_rf;
- struct dir_data_files {
- struct dentry *file_sram;
- struct dentry *file_nvm;
- struct dentry *file_stations;
- struct dentry *file_log_event;
- struct dentry *file_channels;
- struct dentry *file_status;
- struct dentry *file_interrupt;
- struct dentry *file_qos;
- struct dentry *file_thermal_throttling;
- struct dentry *file_led;
- struct dentry *file_disable_ht40;
- struct dentry *file_sleep_level_override;
- struct dentry *file_current_sleep_command;
- } dbgfs_data_files;
- struct dir_rf_files {
- struct dentry *file_disable_sensitivity;
- struct dentry *file_disable_chain_noise;
- struct dentry *file_disable_tx_power;
- } dbgfs_rf_files;
- struct dir_debug_files {
- struct dentry *file_rx_statistics;
- struct dentry *file_tx_statistics;
- struct dentry *file_traffic_log;
- struct dentry *file_rx_queue;
- struct dentry *file_tx_queue;
- struct dentry *file_ucode_rx_stats;
- struct dentry *file_ucode_tx_stats;
- struct dentry *file_ucode_general_stats;
- struct dentry *file_sensitivity;
- struct dentry *file_chain_noise;
- struct dentry *file_tx_power;
- struct dentry *file_power_save_status;
- struct dentry *file_clear_ucode_statistics;
- struct dentry *file_clear_traffic_statistics;
- } dbgfs_debug_files;
- u32 sram_offset;
- u32 sram_len;
-};
-
-int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
-void iwl_dbgfs_unregister(struct iwl_priv *priv);
-#endif
-
#else
#define IWL_DEBUG(__priv, level, fmt, args...)
#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
@@ -126,9 +75,10 @@ static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
{}
#endif /* CONFIG_IWLWIFI_DEBUG */
-
-
-#ifndef CONFIG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+int iwl_dbgfs_register(struct iwl_priv *priv, const char *name);
+void iwl_dbgfs_unregister(struct iwl_priv *priv);
+#else
static inline int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 21e0f6699da..7bf44f14679 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -41,43 +41,28 @@
#include "iwl-calib.h"
/* create and remove of files */
-#define DEBUGFS_ADD_DIR(name, parent) do { \
- dbgfs->dir_##name = debugfs_create_dir(#name, parent); \
- if (!(dbgfs->dir_##name)) \
- goto err; \
+#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
+ if (!debugfs_create_file(#name, mode, parent, priv, \
+ &iwl_dbgfs_##name##_ops)) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_FILE(name, parent, mode) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_file(#name, mode, \
- dbgfs->dir_##parent, priv, \
- &iwl_dbgfs_##name##_ops); \
- if (!(dbgfs->dbgfs_##parent##_files.file_##name)) \
- goto err; \
+#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_BOOL(name, parent, ptr) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_bool(#name, S_IWUSR | S_IRUSR, \
- dbgfs->dir_##parent, ptr); \
- if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
- || !dbgfs->dbgfs_##parent##_files.file_##name) \
- goto err; \
+#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
+ struct dentry *__tmp; \
+ __tmp = debugfs_create_x32(#name, S_IWUSR | S_IRUSR, \
+ parent, ptr); \
+ if (IS_ERR(__tmp) || !__tmp) \
+ goto err; \
} while (0)
-#define DEBUGFS_ADD_X32(name, parent, ptr) do { \
- dbgfs->dbgfs_##parent##_files.file_##name = \
- debugfs_create_x32(#name, S_IRUSR, dbgfs->dir_##parent, ptr); \
- if (IS_ERR(dbgfs->dbgfs_##parent##_files.file_##name) \
- || !dbgfs->dbgfs_##parent##_files.file_##name) \
- goto err; \
-} while (0)
-
-#define DEBUGFS_REMOVE(name) do { \
- debugfs_remove(name); \
- name = NULL; \
-} while (0);
-
/* file operation */
#define DEBUGFS_READ_FUNC(name) \
static ssize_t iwl_dbgfs_##name##_read(struct file *file, \
@@ -125,7 +110,7 @@ static ssize_t iwl_dbgfs_tx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
@@ -184,7 +169,7 @@ static ssize_t iwl_dbgfs_rx_statistics_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char *buf;
int pos = 0;
int cnt;
@@ -232,28 +217,28 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
ssize_t ret;
int i;
int pos = 0;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
size_t bufsz;
/* default is to dump the entire data segment */
- if (!priv->dbgfs->sram_offset && !priv->dbgfs->sram_len) {
- priv->dbgfs->sram_offset = 0x800000;
+ if (!priv->dbgfs_sram_offset && !priv->dbgfs_sram_len) {
+ priv->dbgfs_sram_offset = 0x800000;
if (priv->ucode_type == UCODE_INIT)
- priv->dbgfs->sram_len = priv->ucode_init_data.len;
+ priv->dbgfs_sram_len = priv->ucode_init_data.len;
else
- priv->dbgfs->sram_len = priv->ucode_data.len;
+ priv->dbgfs_sram_len = priv->ucode_data.len;
}
- bufsz = 30 + priv->dbgfs->sram_len * sizeof(char) * 10;
+ bufsz = 30 + priv->dbgfs_sram_len * sizeof(char) * 10;
buf = kmalloc(bufsz, GFP_KERNEL);
if (!buf)
return -ENOMEM;
pos += scnprintf(buf + pos, bufsz - pos, "sram_len: 0x%x\n",
- priv->dbgfs->sram_len);
+ priv->dbgfs_sram_len);
pos += scnprintf(buf + pos, bufsz - pos, "sram_offset: 0x%x\n",
- priv->dbgfs->sram_offset);
- for (i = priv->dbgfs->sram_len; i > 0; i -= 4) {
- val = iwl_read_targ_mem(priv, priv->dbgfs->sram_offset + \
- priv->dbgfs->sram_len - i);
+ priv->dbgfs_sram_offset);
+ for (i = priv->dbgfs_sram_len; i > 0; i -= 4) {
+ val = iwl_read_targ_mem(priv, priv->dbgfs_sram_offset + \
+ priv->dbgfs_sram_len - i);
if (i < 4) {
switch (i) {
case 1:
@@ -293,11 +278,11 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
return -EFAULT;
if (sscanf(buf, "%x,%x", &offset, &len) == 2) {
- priv->dbgfs->sram_offset = offset;
- priv->dbgfs->sram_len = len;
+ priv->dbgfs_sram_offset = offset;
+ priv->dbgfs_sram_len = len;
} else {
- priv->dbgfs->sram_offset = 0;
- priv->dbgfs->sram_len = 0;
+ priv->dbgfs_sram_offset = 0;
+ priv->dbgfs_sram_len = 0;
}
return count;
@@ -306,7 +291,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
static ssize_t iwl_dbgfs_stations_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_station_entry *station;
int max_sta = priv->hw_params.max_stations;
char *buf;
@@ -376,7 +361,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
loff_t *ppos)
{
ssize_t ret;
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, ofs = 0, buf_size = 0;
const u8 *ptr;
char *buf;
@@ -420,6 +405,24 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
return ret;
}
+static ssize_t iwl_dbgfs_log_event_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -ENOMEM;
+
+ ret = pos = priv->cfg->ops->lib->dump_nic_event_log(
+ priv, true, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ }
+ return ret;
+}
+
static ssize_t iwl_dbgfs_log_event_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
@@ -436,7 +439,8 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
if (sscanf(buf, "%d", &event_log_flag) != 1)
return -EFAULT;
if (event_log_flag == 1)
- priv->cfg->ops->lib->dump_nic_event_log(priv, true);
+ priv->cfg->ops->lib->dump_nic_event_log(priv, true,
+ NULL, false);
return count;
}
@@ -446,7 +450,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
static ssize_t iwl_dbgfs_channels_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct ieee80211_channel *channels = NULL;
const struct ieee80211_supported_band *supp_band = NULL;
int pos = 0, i, bufsz = PAGE_SIZE;
@@ -519,15 +523,13 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[512];
int pos = 0;
const size_t bufsz = sizeof(buf);
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
test_bit(STATUS_HCMD_ACTIVE, &priv->status));
- pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
- test_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
test_bit(STATUS_INT_ENABLED, &priv->status));
pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
@@ -567,7 +569,7 @@ static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -654,7 +656,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0, i;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -677,7 +679,7 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char buf[256];
const size_t bufsz = sizeof(buf);
@@ -703,7 +705,7 @@ static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
struct iwl_tt_restriction *restriction;
char buf[100];
@@ -763,7 +765,7 @@ static ssize_t iwl_dbgfs_disable_ht40_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[100];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -811,7 +813,9 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
priv->power_data.debug_sleep_level_override = value;
+ mutex_lock(&priv->mutex);
iwl_power_update_mode(priv, true);
+ mutex_unlock(&priv->mutex);
return count;
}
@@ -820,7 +824,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[10];
int pos, value;
const size_t bufsz = sizeof(buf);
@@ -838,7 +842,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[200];
int pos = 0, i;
const size_t bufsz = sizeof(buf);
@@ -859,7 +863,7 @@ static ssize_t iwl_dbgfs_current_sleep_command_read(struct file *file,
}
DEBUGFS_READ_WRITE_FILE_OPS(sram);
-DEBUGFS_WRITE_FILE_OPS(log_event);
+DEBUGFS_READ_WRITE_FILE_OPS(log_event);
DEBUGFS_READ_FILE_OPS(nvm);
DEBUGFS_READ_FILE_OPS(stations);
DEBUGFS_READ_FILE_OPS(channels);
@@ -976,7 +980,7 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_tx_queue *txq;
struct iwl_queue *q;
char *buf;
@@ -1022,7 +1026,7 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
struct iwl_rx_queue *rxq = &priv->rxq;
char buf[256];
int pos = 0;
@@ -1063,36 +1067,33 @@ static int iwl_dbgfs_statistics_flag(struct iwl_priv *priv, char *buf,
return p;
}
+static const char ucode_stats_header[] =
+ "%-32s current acumulative delta max\n";
+static const char ucode_stats_short_format[] =
+ " %-30s %10u\n";
+static const char ucode_stats_format[] =
+ " %-30s %10u %10u %10u %10u\n";
static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = sizeof(struct statistics_rx_phy) * 20 +
- sizeof(struct statistics_rx_non_phy) * 20 +
- sizeof(struct statistics_rx_ht_phy) * 20 + 400;
+ int bufsz = sizeof(struct statistics_rx_phy) * 40 +
+ sizeof(struct statistics_rx_non_phy) * 40 +
+ sizeof(struct statistics_rx_ht_phy) * 40 + 400;
ssize_t ret;
- struct statistics_rx_phy *ofdm, *accum_ofdm;
- struct statistics_rx_phy *cck, *accum_cck;
+ struct statistics_rx_phy *ofdm, *accum_ofdm, *delta_ofdm, *max_ofdm;
+ struct statistics_rx_phy *cck, *accum_cck, *delta_cck, *max_cck;
struct statistics_rx_non_phy *general, *accum_general;
- struct statistics_rx_ht_phy *ht, *accum_ht;
+ struct statistics_rx_non_phy *delta_general, *max_general;
+ struct statistics_rx_ht_phy *ht, *accum_ht, *delta_ht, *max_ht;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1111,264 +1112,401 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
accum_cck = &priv->accum_statistics.rx.cck;
accum_general = &priv->accum_statistics.rx.general;
accum_ht = &priv->accum_statistics.rx.ofdm_ht;
+ delta_ofdm = &priv->delta_statistics.rx.ofdm;
+ delta_cck = &priv->delta_statistics.rx.cck;
+ delta_general = &priv->delta_statistics.rx.general;
+ delta_ht = &priv->delta_statistics.rx.ofdm_ht;
+ max_ofdm = &priv->max_delta.rx.ofdm;
+ max_cck = &priv->max_delta.rx.cck;
+ max_general = &priv->max_delta.rx.general;
+ max_ht = &priv->max_delta.rx.ofdm_ht;
+
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - OFDM:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_cnt:", le32_to_cpu(ofdm->ina_cnt),
+ accum_ofdm->ina_cnt,
+ delta_ofdm->ina_cnt, max_ofdm->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_cnt:",
+ le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt,
+ delta_ofdm->fina_cnt, max_ofdm->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err,
+ delta_ofdm->plcp_err, max_ofdm->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err,
+ delta_ofdm->crc32_err, max_ofdm->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
le32_to_cpu(ofdm->overrun_err),
- accum_ofdm->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ accum_ofdm->overrun_err,
+ delta_ofdm->overrun_err, max_ofdm->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(ofdm->early_overrun_err),
- accum_ofdm->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
+ accum_ofdm->early_overrun_err,
+ delta_ofdm->early_overrun_err,
+ max_ofdm->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
le32_to_cpu(ofdm->crc32_good),
- accum_ofdm->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "false_alarm_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->crc32_good,
+ delta_ofdm->crc32_good, max_ofdm->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "false_alarm_cnt:",
le32_to_cpu(ofdm->false_alarm_cnt),
- accum_ofdm->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->false_alarm_cnt,
+ delta_ofdm->false_alarm_cnt,
+ max_ofdm->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_sync_err_cnt:",
le32_to_cpu(ofdm->fina_sync_err_cnt),
- accum_ofdm->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sfd_timeout:\t\t%u\t\t\t%u\n",
+ accum_ofdm->fina_sync_err_cnt,
+ delta_ofdm->fina_sync_err_cnt,
+ max_ofdm->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sfd_timeout:",
le32_to_cpu(ofdm->sfd_timeout),
- accum_ofdm->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_timeout:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sfd_timeout,
+ delta_ofdm->sfd_timeout,
+ max_ofdm->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_timeout:",
le32_to_cpu(ofdm->fina_timeout),
- accum_ofdm->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "unresponded_rts:\t%u\t\t\t%u\n",
+ accum_ofdm->fina_timeout,
+ delta_ofdm->fina_timeout,
+ max_ofdm->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unresponded_rts:",
le32_to_cpu(ofdm->unresponded_rts),
- accum_ofdm->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+ accum_ofdm->unresponded_rts,
+ delta_ofdm->unresponded_rts,
+ max_ofdm->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rxe_frame_lmt_ovrun:",
le32_to_cpu(ofdm->rxe_frame_limit_overrun),
- accum_ofdm->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_ofdm->rxe_frame_limit_overrun,
+ delta_ofdm->rxe_frame_limit_overrun,
+ max_ofdm->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ack_cnt:",
le32_to_cpu(ofdm->sent_ack_cnt),
- accum_ofdm->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sent_ack_cnt,
+ delta_ofdm->sent_ack_cnt,
+ max_ofdm->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_cts_cnt:",
le32_to_cpu(ofdm->sent_cts_cnt),
- accum_ofdm->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+ accum_ofdm->sent_cts_cnt,
+ delta_ofdm->sent_cts_cnt, max_ofdm->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ba_rsp_cnt:",
le32_to_cpu(ofdm->sent_ba_rsp_cnt),
- accum_ofdm->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_self_kill:\t\t%u\t\t\t%u\n",
+ accum_ofdm->sent_ba_rsp_cnt,
+ delta_ofdm->sent_ba_rsp_cnt,
+ max_ofdm->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_self_kill:",
le32_to_cpu(ofdm->dsp_self_kill),
- accum_ofdm->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_ofdm->dsp_self_kill,
+ delta_ofdm->dsp_self_kill,
+ max_ofdm->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(ofdm->mh_format_err),
- accum_ofdm->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+ accum_ofdm->mh_format_err,
+ delta_ofdm->mh_format_err,
+ max_ofdm->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "re_acq_main_rssi_sum:",
le32_to_cpu(ofdm->re_acq_main_rssi_sum),
- accum_ofdm->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->plcp_err), accum_cck->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->crc32_err), accum_cck->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
+ accum_ofdm->re_acq_main_rssi_sum,
+ delta_ofdm->re_acq_main_rssi_sum,
+ max_ofdm->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - CCK:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_cnt:",
+ le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt,
+ delta_cck->ina_cnt, max_cck->ina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_cnt:",
+ le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt,
+ delta_cck->fina_cnt, max_cck->fina_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(cck->plcp_err), accum_cck->plcp_err,
+ delta_cck->plcp_err, max_cck->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(cck->crc32_err), accum_cck->crc32_err,
+ delta_cck->crc32_err, max_cck->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
le32_to_cpu(cck->overrun_err),
- accum_cck->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ accum_cck->overrun_err,
+ delta_cck->overrun_err, max_cck->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(cck->early_overrun_err),
- accum_cck->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
- le32_to_cpu(cck->crc32_good), accum_cck->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "false_alarm_cnt:\t%u\t\t\t%u\n",
+ accum_cck->early_overrun_err,
+ delta_cck->early_overrun_err,
+ max_cck->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
+ le32_to_cpu(cck->crc32_good), accum_cck->crc32_good,
+ delta_cck->crc32_good,
+ max_cck->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "false_alarm_cnt:",
le32_to_cpu(cck->false_alarm_cnt),
- accum_cck->false_alarm_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_sync_err_cnt:\t%u\t\t\t%u\n",
+ accum_cck->false_alarm_cnt,
+ delta_cck->false_alarm_cnt, max_cck->false_alarm_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_sync_err_cnt:",
le32_to_cpu(cck->fina_sync_err_cnt),
- accum_cck->fina_sync_err_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sfd_timeout:\t\t%u\t\t\t%u\n",
+ accum_cck->fina_sync_err_cnt,
+ delta_cck->fina_sync_err_cnt,
+ max_cck->fina_sync_err_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sfd_timeout:",
le32_to_cpu(cck->sfd_timeout),
- accum_cck->sfd_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "fina_timeout:\t\t%u\t\t\t%u\n",
+ accum_cck->sfd_timeout,
+ delta_cck->sfd_timeout, max_cck->sfd_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "fina_timeout:",
le32_to_cpu(cck->fina_timeout),
- accum_cck->fina_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "unresponded_rts:\t%u\t\t\t%u\n",
+ accum_cck->fina_timeout,
+ delta_cck->fina_timeout, max_cck->fina_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unresponded_rts:",
le32_to_cpu(cck->unresponded_rts),
- accum_cck->unresponded_rts);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
+ accum_cck->unresponded_rts,
+ delta_cck->unresponded_rts,
+ max_cck->unresponded_rts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rxe_frame_lmt_ovrun:",
le32_to_cpu(cck->rxe_frame_limit_overrun),
- accum_cck->rxe_frame_limit_overrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_cck->rxe_frame_limit_overrun,
+ delta_cck->rxe_frame_limit_overrun,
+ max_cck->rxe_frame_limit_overrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ack_cnt:",
le32_to_cpu(cck->sent_ack_cnt),
- accum_cck->sent_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_cts_cnt:\t\t%u\t\t\t%u\n",
+ accum_cck->sent_ack_cnt,
+ delta_cck->sent_ack_cnt,
+ max_cck->sent_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_cts_cnt:",
le32_to_cpu(cck->sent_cts_cnt),
- accum_cck->sent_cts_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
+ accum_cck->sent_cts_cnt,
+ delta_cck->sent_cts_cnt,
+ max_cck->sent_cts_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sent_ba_rsp_cnt:",
le32_to_cpu(cck->sent_ba_rsp_cnt),
- accum_cck->sent_ba_rsp_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_self_kill:\t\t%u\t\t\t%u\n",
+ accum_cck->sent_ba_rsp_cnt,
+ delta_cck->sent_ba_rsp_cnt,
+ max_cck->sent_ba_rsp_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_self_kill:",
le32_to_cpu(cck->dsp_self_kill),
- accum_cck->dsp_self_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_cck->dsp_self_kill,
+ delta_cck->dsp_self_kill,
+ max_cck->dsp_self_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(cck->mh_format_err),
- accum_cck->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
+ accum_cck->mh_format_err,
+ delta_cck->mh_format_err, max_cck->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "re_acq_main_rssi_sum:",
le32_to_cpu(cck->re_acq_main_rssi_sum),
- accum_cck->re_acq_main_rssi_sum);
-
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n",
+ accum_cck->re_acq_main_rssi_sum,
+ delta_cck->re_acq_main_rssi_sum,
+ max_cck->re_acq_main_rssi_sum);
+
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - GENERAL:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bogus_cts:",
le32_to_cpu(general->bogus_cts),
- accum_general->bogus_cts);
- pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n",
+ accum_general->bogus_cts,
+ delta_general->bogus_cts, max_general->bogus_cts);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bogus_ack:",
le32_to_cpu(general->bogus_ack),
- accum_general->bogus_ack);
- pos += scnprintf(buf + pos, bufsz - pos,
- "non_bssid_frames:\t%u\t\t\t%u\n",
+ accum_general->bogus_ack,
+ delta_general->bogus_ack, max_general->bogus_ack);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "non_bssid_frames:",
le32_to_cpu(general->non_bssid_frames),
- accum_general->non_bssid_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "filtered_frames:\t%u\t\t\t%u\n",
+ accum_general->non_bssid_frames,
+ delta_general->non_bssid_frames,
+ max_general->non_bssid_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "filtered_frames:",
le32_to_cpu(general->filtered_frames),
- accum_general->filtered_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "non_channel_beacons:\t%u\t\t\t%u\n",
+ accum_general->filtered_frames,
+ delta_general->filtered_frames,
+ max_general->filtered_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "non_channel_beacons:",
le32_to_cpu(general->non_channel_beacons),
- accum_general->non_channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- "channel_beacons:\t%u\t\t\t%u\n",
+ accum_general->non_channel_beacons,
+ delta_general->non_channel_beacons,
+ max_general->non_channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "channel_beacons:",
le32_to_cpu(general->channel_beacons),
- accum_general->channel_beacons);
- pos += scnprintf(buf + pos, bufsz - pos,
- "num_missed_bcon:\t%u\t\t\t%u\n",
+ accum_general->channel_beacons,
+ delta_general->channel_beacons,
+ max_general->channel_beacons);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "num_missed_bcon:",
le32_to_cpu(general->num_missed_bcon),
- accum_general->num_missed_bcon);
- pos += scnprintf(buf + pos, bufsz - pos,
- "adc_rx_saturation_time:\t%u\t\t\t%u\n",
+ accum_general->num_missed_bcon,
+ delta_general->num_missed_bcon,
+ max_general->num_missed_bcon);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "adc_rx_saturation_time:",
le32_to_cpu(general->adc_rx_saturation_time),
- accum_general->adc_rx_saturation_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ina_detect_search_tm:\t%u\t\t\t%u\n",
+ accum_general->adc_rx_saturation_time,
+ delta_general->adc_rx_saturation_time,
+ max_general->adc_rx_saturation_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ina_detect_search_tm:",
le32_to_cpu(general->ina_detection_search_time),
- accum_general->ina_detection_search_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_a:\t%u\t\t\t%u\n",
+ accum_general->ina_detection_search_time,
+ delta_general->ina_detection_search_time,
+ max_general->ina_detection_search_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_a:",
le32_to_cpu(general->beacon_silence_rssi_a),
- accum_general->beacon_silence_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_b:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_a,
+ delta_general->beacon_silence_rssi_a,
+ max_general->beacon_silence_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_b:",
le32_to_cpu(general->beacon_silence_rssi_b),
- accum_general->beacon_silence_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_silence_rssi_c:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_b,
+ delta_general->beacon_silence_rssi_b,
+ max_general->beacon_silence_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_silence_rssi_c:",
le32_to_cpu(general->beacon_silence_rssi_c),
- accum_general->beacon_silence_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- "interference_data_flag:\t%u\t\t\t%u\n",
+ accum_general->beacon_silence_rssi_c,
+ delta_general->beacon_silence_rssi_c,
+ max_general->beacon_silence_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "interference_data_flag:",
le32_to_cpu(general->interference_data_flag),
- accum_general->interference_data_flag);
- pos += scnprintf(buf + pos, bufsz - pos,
- "channel_load:\t\t%u\t\t\t%u\n",
+ accum_general->interference_data_flag,
+ delta_general->interference_data_flag,
+ max_general->interference_data_flag);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "channel_load:",
le32_to_cpu(general->channel_load),
- accum_general->channel_load);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dsp_false_alarms:\t%u\t\t\t%u\n",
+ accum_general->channel_load,
+ delta_general->channel_load,
+ max_general->channel_load);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dsp_false_alarms:",
le32_to_cpu(general->dsp_false_alarms),
- accum_general->dsp_false_alarms);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_a:\t\t%u\t\t\t%u\n",
+ accum_general->dsp_false_alarms,
+ delta_general->dsp_false_alarms,
+ max_general->dsp_false_alarms);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_a:",
le32_to_cpu(general->beacon_rssi_a),
- accum_general->beacon_rssi_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_b:\t\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_a,
+ delta_general->beacon_rssi_a,
+ max_general->beacon_rssi_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_b:",
le32_to_cpu(general->beacon_rssi_b),
- accum_general->beacon_rssi_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_rssi_c:\t\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_b,
+ delta_general->beacon_rssi_b,
+ max_general->beacon_rssi_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_rssi_c:",
le32_to_cpu(general->beacon_rssi_c),
- accum_general->beacon_rssi_c);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_a:\t%u\t\t\t%u\n",
+ accum_general->beacon_rssi_c,
+ delta_general->beacon_rssi_c,
+ max_general->beacon_rssi_c);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_a:",
le32_to_cpu(general->beacon_energy_a),
- accum_general->beacon_energy_a);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_b:\t%u\t\t\t%u\n",
+ accum_general->beacon_energy_a,
+ delta_general->beacon_energy_a,
+ max_general->beacon_energy_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_b:",
le32_to_cpu(general->beacon_energy_b),
- accum_general->beacon_energy_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "beacon_energy_c:\t%u\t\t\t%u\n",
+ accum_general->beacon_energy_b,
+ delta_general->beacon_energy_b,
+ max_general->beacon_energy_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "beacon_energy_c:",
le32_to_cpu(general->beacon_energy_c),
- accum_general->beacon_energy_c);
+ accum_general->beacon_energy_c,
+ delta_general->beacon_energy_c,
+ max_general->beacon_energy_c);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->plcp_err), accum_ht->plcp_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "overrun_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->overrun_err), accum_ht->overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "early_overrun_err:\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Rx - OFDM_HT:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "plcp_err:",
+ le32_to_cpu(ht->plcp_err), accum_ht->plcp_err,
+ delta_ht->plcp_err, max_ht->plcp_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "overrun_err:",
+ le32_to_cpu(ht->overrun_err), accum_ht->overrun_err,
+ delta_ht->overrun_err, max_ht->overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "early_overrun_err:",
le32_to_cpu(ht->early_overrun_err),
- accum_ht->early_overrun_err);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->crc32_good), accum_ht->crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->crc32_err), accum_ht->crc32_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "mh_format_err:\t\t%u\t\t\t%u\n",
+ accum_ht->early_overrun_err,
+ delta_ht->early_overrun_err,
+ max_ht->early_overrun_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_good:",
+ le32_to_cpu(ht->crc32_good), accum_ht->crc32_good,
+ delta_ht->crc32_good, max_ht->crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "crc32_err:",
+ le32_to_cpu(ht->crc32_err), accum_ht->crc32_err,
+ delta_ht->crc32_err, max_ht->crc32_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "mh_format_err:",
le32_to_cpu(ht->mh_format_err),
- accum_ht->mh_format_err);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg_crc32_good:\t\t%u\t\t\t%u\n",
+ accum_ht->mh_format_err,
+ delta_ht->mh_format_err, max_ht->mh_format_err);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_crc32_good:",
le32_to_cpu(ht->agg_crc32_good),
- accum_ht->agg_crc32_good);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg_mpdu_cnt:\t\t%u\t\t\t%u\n",
+ accum_ht->agg_crc32_good,
+ delta_ht->agg_crc32_good, max_ht->agg_crc32_good);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_mpdu_cnt:",
le32_to_cpu(ht->agg_mpdu_cnt),
- accum_ht->agg_mpdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
- le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
+ accum_ht->agg_mpdu_cnt,
+ delta_ht->agg_mpdu_cnt, max_ht->agg_mpdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg_cnt:",
+ le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt,
+ delta_ht->agg_cnt, max_ht->agg_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "unsupport_mcs:",
+ le32_to_cpu(ht->unsupport_mcs),
+ accum_ht->unsupport_mcs,
+ delta_ht->unsupport_mcs, max_ht->unsupport_mcs);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -1379,26 +1517,16 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
+ int bufsz = (sizeof(struct statistics_tx) * 48) + 250;
ssize_t ret;
- struct statistics_tx *tx, *accum_tx;
+ struct statistics_tx *tx, *accum_tx, *delta_tx, *max_tx;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1411,106 +1539,148 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
*/
tx = &priv->statistics.tx;
accum_tx = &priv->accum_statistics.tx;
+ delta_tx = &priv->delta_statistics.tx;
+ max_tx = &priv->max_delta.tx;
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_Tx:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "preamble:",
le32_to_cpu(tx->preamble_cnt),
- accum_tx->preamble_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rx_detected_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->preamble_cnt,
+ delta_tx->preamble_cnt, max_tx->preamble_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rx_detected_cnt:",
le32_to_cpu(tx->rx_detected_cnt),
- accum_tx->rx_detected_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->rx_detected_cnt,
+ delta_tx->rx_detected_cnt, max_tx->rx_detected_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bt_prio_defer_cnt:",
le32_to_cpu(tx->bt_prio_defer_cnt),
- accum_tx->bt_prio_defer_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->bt_prio_defer_cnt,
+ delta_tx->bt_prio_defer_cnt,
+ max_tx->bt_prio_defer_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "bt_prio_kill_cnt:",
le32_to_cpu(tx->bt_prio_kill_cnt),
- accum_tx->bt_prio_kill_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "few_bytes_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->bt_prio_kill_cnt,
+ delta_tx->bt_prio_kill_cnt,
+ max_tx->bt_prio_kill_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "few_bytes_cnt:",
le32_to_cpu(tx->few_bytes_cnt),
- accum_tx->few_bytes_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "cts_timeout:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ack_timeout:\t\t\t%u\t\t\t%u\n",
+ accum_tx->few_bytes_cnt,
+ delta_tx->few_bytes_cnt, max_tx->few_bytes_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "cts_timeout:",
+ le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout,
+ delta_tx->cts_timeout, max_tx->cts_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ack_timeout:",
le32_to_cpu(tx->ack_timeout),
- accum_tx->ack_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "expected_ack_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->ack_timeout,
+ delta_tx->ack_timeout, max_tx->ack_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "expected_ack_cnt:",
le32_to_cpu(tx->expected_ack_cnt),
- accum_tx->expected_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "actual_ack_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->expected_ack_cnt,
+ delta_tx->expected_ack_cnt,
+ max_tx->expected_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "actual_ack_cnt:",
le32_to_cpu(tx->actual_ack_cnt),
- accum_tx->actual_ack_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n",
+ accum_tx->actual_ack_cnt,
+ delta_tx->actual_ack_cnt,
+ max_tx->actual_ack_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "dump_msdu_cnt:",
le32_to_cpu(tx->dump_msdu_cnt),
- accum_tx->dump_msdu_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "abort_nxt_frame_mismatch:"
- "\t%u\t\t\t%u\n",
+ accum_tx->dump_msdu_cnt,
+ delta_tx->dump_msdu_cnt,
+ max_tx->dump_msdu_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "abort_nxt_frame_mismatch:",
le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
- accum_tx->burst_abort_next_frame_mismatch_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "abort_missing_nxt_frame:"
- "\t%u\t\t\t%u\n",
+ accum_tx->burst_abort_next_frame_mismatch_cnt,
+ delta_tx->burst_abort_next_frame_mismatch_cnt,
+ max_tx->burst_abort_next_frame_mismatch_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "abort_missing_nxt_frame:",
le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
- accum_tx->burst_abort_missing_next_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "cts_timeout_collision:\t\t%u\t\t\t%u\n",
+ accum_tx->burst_abort_missing_next_frame_cnt,
+ delta_tx->burst_abort_missing_next_frame_cnt,
+ max_tx->burst_abort_missing_next_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "cts_timeout_collision:",
le32_to_cpu(tx->cts_timeout_collision),
- accum_tx->cts_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- "ack_ba_timeout_collision:\t%u\t\t\t%u\n",
+ accum_tx->cts_timeout_collision,
+ delta_tx->cts_timeout_collision,
+ max_tx->cts_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "ack_ba_timeout_collision:",
le32_to_cpu(tx->ack_or_ba_timeout_collision),
- accum_tx->ack_or_ba_timeout_collision);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg ba_timeout:\t\t\t%u\t\t\t%u\n",
+ accum_tx->ack_or_ba_timeout_collision,
+ delta_tx->ack_or_ba_timeout_collision,
+ max_tx->ack_or_ba_timeout_collision);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg ba_timeout:",
le32_to_cpu(tx->agg.ba_timeout),
- accum_tx->agg.ba_timeout);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg ba_resched_frames:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.ba_timeout,
+ delta_tx->agg.ba_timeout,
+ max_tx->agg.ba_timeout);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg ba_resched_frames:",
le32_to_cpu(tx->agg.ba_reschedule_frames),
- accum_tx->agg.ba_reschedule_frames);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_agg_frame:\t%u\t\t\t%u\n",
+ accum_tx->agg.ba_reschedule_frames,
+ delta_tx->agg.ba_reschedule_frames,
+ max_tx->agg.ba_reschedule_frames);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_agg_frame:",
le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
- accum_tx->agg.scd_query_agg_frame_cnt);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_no_agg:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_agg_frame_cnt,
+ delta_tx->agg.scd_query_agg_frame_cnt,
+ max_tx->agg.scd_query_agg_frame_cnt);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_no_agg:",
le32_to_cpu(tx->agg.scd_query_no_agg),
- accum_tx->agg.scd_query_no_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_agg:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_no_agg,
+ delta_tx->agg.scd_query_no_agg,
+ max_tx->agg.scd_query_no_agg);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_agg:",
le32_to_cpu(tx->agg.scd_query_agg),
- accum_tx->agg.scd_query_agg);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg scd_query_mismatch:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_agg,
+ delta_tx->agg.scd_query_agg,
+ max_tx->agg.scd_query_agg);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg scd_query_mismatch:",
le32_to_cpu(tx->agg.scd_query_mismatch),
- accum_tx->agg.scd_query_mismatch);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg frame_not_ready:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.scd_query_mismatch,
+ delta_tx->agg.scd_query_mismatch,
+ max_tx->agg.scd_query_mismatch);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg frame_not_ready:",
le32_to_cpu(tx->agg.frame_not_ready),
- accum_tx->agg.frame_not_ready);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg underrun:\t\t\t%u\t\t\t%u\n",
+ accum_tx->agg.frame_not_ready,
+ delta_tx->agg.frame_not_ready,
+ max_tx->agg.frame_not_ready);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg underrun:",
le32_to_cpu(tx->agg.underrun),
- accum_tx->agg.underrun);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg bt_prio_kill:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.underrun,
+ delta_tx->agg.underrun, max_tx->agg.underrun);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg bt_prio_kill:",
le32_to_cpu(tx->agg.bt_prio_kill),
- accum_tx->agg.bt_prio_kill);
- pos += scnprintf(buf + pos, bufsz - pos,
- "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n",
+ accum_tx->agg.bt_prio_kill,
+ delta_tx->agg.bt_prio_kill,
+ max_tx->agg.bt_prio_kill);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "agg rx_ba_rsp_cnt:",
le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
- accum_tx->agg.rx_ba_rsp_cnt);
+ accum_tx->agg.rx_ba_rsp_cnt,
+ delta_tx->agg.rx_ba_rsp_cnt,
+ max_tx->agg.rx_ba_rsp_cnt);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
@@ -1521,28 +1691,19 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
char *buf;
- int bufsz = sizeof(struct statistics_general) * 4 + 250;
+ int bufsz = sizeof(struct statistics_general) * 10 + 300;
ssize_t ret;
struct statistics_general *general, *accum_general;
- struct statistics_dbg *dbg, *accum_dbg;
- struct statistics_div *div, *accum_div;
+ struct statistics_general *delta_general, *max_general;
+ struct statistics_dbg *dbg, *accum_dbg, *delta_dbg, *max_dbg;
+ struct statistics_div *div, *accum_div, *delta_div, *max_div;
if (!iwl_is_alive(priv))
return -EAGAIN;
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv,
- "Error sending statistics request: %zd\n", ret);
- return -EAGAIN;
- }
buf = kzalloc(bufsz, GFP_KERNEL);
if (!buf) {
IWL_ERR(priv, "Can not allocate Buffer\n");
@@ -1557,52 +1718,78 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
dbg = &priv->statistics.general.dbg;
div = &priv->statistics.general.div;
accum_general = &priv->accum_statistics.general;
+ delta_general = &priv->delta_statistics.general;
+ max_general = &priv->max_delta.general;
accum_dbg = &priv->accum_statistics.general.dbg;
+ delta_dbg = &priv->delta_statistics.general.dbg;
+ max_dbg = &priv->max_delta.general.dbg;
accum_div = &priv->accum_statistics.general.div;
+ delta_div = &priv->delta_statistics.general.div;
+ max_div = &priv->max_delta.general.div;
pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
- pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n");
- pos += scnprintf(buf + pos, bufsz - pos,
- "\t\t\tcurrent\t\t\taccumulative\n");
- pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_header,
+ "Statistics_General:");
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
+ "temperature:",
le32_to_cpu(general->temperature));
- pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_short_format,
+ "temperature_m:",
le32_to_cpu(general->temperature_m));
- pos += scnprintf(buf + pos, bufsz - pos,
- "burst_check:\t\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "burst_check:",
le32_to_cpu(dbg->burst_check),
- accum_dbg->burst_check);
- pos += scnprintf(buf + pos, bufsz - pos,
- "burst_count:\t\t\t%u\t\t\t%u\n",
+ accum_dbg->burst_check,
+ delta_dbg->burst_check, max_dbg->burst_check);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "burst_count:",
le32_to_cpu(dbg->burst_count),
- accum_dbg->burst_count);
- pos += scnprintf(buf + pos, bufsz - pos,
- "sleep_time:\t\t\t%u\t\t\t%u\n",
+ accum_dbg->burst_count,
+ delta_dbg->burst_count, max_dbg->burst_count);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "sleep_time:",
le32_to_cpu(general->sleep_time),
- accum_general->sleep_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "slots_out:\t\t\t%u\t\t\t%u\n",
+ accum_general->sleep_time,
+ delta_general->sleep_time, max_general->sleep_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "slots_out:",
le32_to_cpu(general->slots_out),
- accum_general->slots_out);
- pos += scnprintf(buf + pos, bufsz - pos,
- "slots_idle:\t\t\t%u\t\t\t%u\n",
+ accum_general->slots_out,
+ delta_general->slots_out, max_general->slots_out);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "slots_idle:",
le32_to_cpu(general->slots_idle),
- accum_general->slots_idle);
+ accum_general->slots_idle,
+ delta_general->slots_idle, max_general->slots_idle);
pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
le32_to_cpu(general->ttl_timestamp));
- pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->tx_on_a), accum_div->tx_on_a);
- pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->tx_on_b), accum_div->tx_on_b);
- pos += scnprintf(buf + pos, bufsz - pos,
- "exec_time:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->exec_time), accum_div->exec_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "probe_time:\t\t\t%u\t\t\t%u\n",
- le32_to_cpu(div->probe_time), accum_div->probe_time);
- pos += scnprintf(buf + pos, bufsz - pos,
- "rx_enable_counter:\t\t%u\t\t\t%u\n",
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "tx_on_a:",
+ le32_to_cpu(div->tx_on_a), accum_div->tx_on_a,
+ delta_div->tx_on_a, max_div->tx_on_a);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "tx_on_b:",
+ le32_to_cpu(div->tx_on_b), accum_div->tx_on_b,
+ delta_div->tx_on_b, max_div->tx_on_b);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "exec_time:",
+ le32_to_cpu(div->exec_time), accum_div->exec_time,
+ delta_div->exec_time, max_div->exec_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "probe_time:",
+ le32_to_cpu(div->probe_time), accum_div->probe_time,
+ delta_div->probe_time, max_div->probe_time);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "rx_enable_counter:",
le32_to_cpu(general->rx_enable_counter),
- accum_general->rx_enable_counter);
+ accum_general->rx_enable_counter,
+ delta_general->rx_enable_counter,
+ max_general->rx_enable_counter);
+ pos += scnprintf(buf + pos, bufsz - pos, ucode_stats_format,
+ "num_of_sos_states:",
+ le32_to_cpu(general->num_of_sos_states),
+ accum_general->num_of_sos_states,
+ delta_general->num_of_sos_states,
+ max_general->num_of_sos_states);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
kfree(buf);
return ret;
@@ -1612,7 +1799,7 @@ static ssize_t iwl_dbgfs_sensitivity_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1693,7 +1880,7 @@ static ssize_t iwl_dbgfs_chain_noise_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
int pos = 0;
int cnt = 0;
char *buf;
@@ -1751,26 +1938,15 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos) {
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[128];
int pos = 0;
- ssize_t ret;
const size_t bufsz = sizeof(buf);
struct statistics_tx *tx;
if (!iwl_is_alive(priv))
pos += scnprintf(buf + pos, bufsz - pos, "N/A\n");
else {
- /* make request to uCode to retrieve statistics information */
- mutex_lock(&priv->mutex);
- ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
- mutex_unlock(&priv->mutex);
-
- if (ret) {
- IWL_ERR(priv, "Error sending statistics request: %zd\n",
- ret);
- return -EAGAIN;
- }
tx = &priv->statistics.tx;
if (tx->tx_power.ant_a ||
tx->tx_power.ant_b ||
@@ -1802,7 +1978,7 @@ static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
- struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ struct iwl_priv *priv = file->private_data;
char buf[60];
int pos = 0;
const size_t bufsz = sizeof(buf);
@@ -1845,6 +2021,262 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
return count;
}
+static ssize_t iwl_dbgfs_csr_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int csr;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &csr) != 1)
+ return -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_csr)
+ priv->cfg->ops->lib->dump_csr(priv);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int pos = 0;
+ char buf[128];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "ucode trace timer is %s\n",
+ priv->event_log.ucode_trace ? "On" : "Off");
+ pos += scnprintf(buf + pos, bufsz - pos, "non_wraps_count:\t\t %u\n",
+ priv->event_log.non_wraps_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_once_count:\t\t %u\n",
+ priv->event_log.wraps_once_count);
+ pos += scnprintf(buf + pos, bufsz - pos, "wraps_more_count:\t\t %u\n",
+ priv->event_log.wraps_more_count);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int trace;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &trace) != 1)
+ return -EFAULT;
+
+ if (trace) {
+ priv->event_log.ucode_trace = true;
+ /* schedule the ucode timer to occur in UCODE_TRACE_PERIOD */
+ mod_timer(&priv->ucode_trace,
+ jiffies + msecs_to_jiffies(UCODE_TRACE_PERIOD));
+ } else {
+ priv->event_log.ucode_trace = false;
+ del_timer_sync(&priv->ucode_trace);
+ }
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ char *buf;
+ int pos = 0;
+ ssize_t ret = -EFAULT;
+
+ if (priv->cfg->ops->lib->dump_fh) {
+ ret = pos = priv->cfg->ops->lib->dump_fh(priv, &buf, true);
+ if (buf) {
+ ret = simple_read_from_buffer(user_buf,
+ count, ppos, buf, pos);
+ kfree(buf);
+ }
+ }
+
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%d\n",
+ priv->missed_beacon_threshold);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int missed;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &missed) != 1)
+ return -EINVAL;
+
+ if (missed < IWL_MISSED_BEACON_THRESHOLD_MIN ||
+ missed > IWL_MISSED_BEACON_THRESHOLD_MAX)
+ priv->missed_beacon_threshold =
+ IWL_MISSED_BEACON_THRESHOLD_DEF;
+ else
+ priv->missed_beacon_threshold = missed;
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_internal_scan_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int scan;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &scan) != 1)
+ return -EINVAL;
+
+ iwl_internal_short_hw_scan(priv);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
+ int pos = 0;
+ char buf[12];
+ const size_t bufsz = sizeof(buf);
+ ssize_t ret;
+
+ pos += scnprintf(buf + pos, bufsz - pos, "%u\n",
+ priv->cfg->plcp_delta_threshold);
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int plcp;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &plcp) != 1)
+ return -EINVAL;
+ if ((plcp <= IWL_MAX_PLCP_ERR_THRESHOLD_MIN) ||
+ (plcp > IWL_MAX_PLCP_ERR_THRESHOLD_MAX))
+ priv->cfg->plcp_delta_threshold =
+ IWL_MAX_PLCP_ERR_THRESHOLD_DEF;
+ else
+ priv->cfg->plcp_delta_threshold = plcp;
+ return count;
+}
+
+static ssize_t iwl_dbgfs_force_reset_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ int i, pos = 0;
+ char buf[300];
+ const size_t bufsz = sizeof(buf);
+ struct iwl_force_reset *force_reset;
+
+ for (i = 0; i < IWL_MAX_FORCE_RESET; i++) {
+ force_reset = &priv->force_reset[i];
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "Force reset method %d\n", i);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request: %d\n",
+ force_reset->reset_request_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request success: %d\n",
+ force_reset->reset_success_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\tnumber of reset request reject: %d\n",
+ force_reset->reset_reject_count);
+ pos += scnprintf(buf + pos, bufsz - pos,
+ "\treset duration: %lu\n",
+ force_reset->reset_duration);
+ }
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_force_reset_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos) {
+
+ struct iwl_priv *priv = file->private_data;
+ char buf[8];
+ int buf_size;
+ int reset, ret;
+
+ memset(buf, 0, sizeof(buf));
+ buf_size = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+ if (sscanf(buf, "%d", &reset) != 1)
+ return -EINVAL;
+ switch (reset) {
+ case IWL_RF_RESET:
+ case IWL_FW_RESET:
+ ret = iwl_force_reset(priv, reset);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ret ? ret : count;
+}
+
DEBUGFS_READ_FILE_OPS(rx_statistics);
DEBUGFS_READ_FILE_OPS(tx_statistics);
DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1859,6 +2291,13 @@ DEBUGFS_READ_FILE_OPS(tx_power);
DEBUGFS_READ_FILE_OPS(power_save_status);
DEBUGFS_WRITE_FILE_OPS(clear_ucode_statistics);
DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
+DEBUGFS_WRITE_FILE_OPS(csr);
+DEBUGFS_READ_WRITE_FILE_OPS(ucode_tracing);
+DEBUGFS_READ_FILE_OPS(fh_reg);
+DEBUGFS_READ_WRITE_FILE_OPS(missed_beacon);
+DEBUGFS_WRITE_FILE_OPS(internal_scan);
+DEBUGFS_READ_WRITE_FILE_OPS(plcp_delta);
+DEBUGFS_READ_WRITE_FILE_OPS(force_reset);
/*
* Create the debugfs files and directories
@@ -1866,69 +2305,74 @@ DEBUGFS_WRITE_FILE_OPS(clear_traffic_statistics);
*/
int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
{
- struct iwl_debugfs *dbgfs;
struct dentry *phyd = priv->hw->wiphy->debugfsdir;
- int ret = 0;
+ struct dentry *dir_drv, *dir_data, *dir_rf, *dir_debug;
- dbgfs = kzalloc(sizeof(struct iwl_debugfs), GFP_KERNEL);
- if (!dbgfs) {
- ret = -ENOMEM;
- goto err;
- }
+ dir_drv = debugfs_create_dir(name, phyd);
+ if (!dir_drv)
+ return -ENOMEM;
+
+ priv->debugfs_dir = dir_drv;
- priv->dbgfs = dbgfs;
- dbgfs->name = name;
- dbgfs->dir_drv = debugfs_create_dir(name, phyd);
- if (!dbgfs->dir_drv || IS_ERR(dbgfs->dir_drv)) {
- ret = -ENOENT;
+ dir_data = debugfs_create_dir("data", dir_drv);
+ if (!dir_data)
+ goto err;
+ dir_rf = debugfs_create_dir("rf", dir_drv);
+ if (!dir_rf)
+ goto err;
+ dir_debug = debugfs_create_dir("debug", dir_drv);
+ if (!dir_debug)
goto err;
- }
- DEBUGFS_ADD_DIR(data, dbgfs->dir_drv);
- DEBUGFS_ADD_DIR(rf, dbgfs->dir_drv);
- DEBUGFS_ADD_DIR(debug, dbgfs->dir_drv);
- DEBUGFS_ADD_FILE(nvm, data, S_IRUSR);
- DEBUGFS_ADD_FILE(sram, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(log_event, data, S_IWUSR);
- DEBUGFS_ADD_FILE(stations, data, S_IRUSR);
- DEBUGFS_ADD_FILE(channels, data, S_IRUSR);
- DEBUGFS_ADD_FILE(status, data, S_IRUSR);
- DEBUGFS_ADD_FILE(interrupt, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(qos, data, S_IRUSR);
- DEBUGFS_ADD_FILE(led, data, S_IRUSR);
- DEBUGFS_ADD_FILE(sleep_level_override, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(current_sleep_command, data, S_IRUSR);
- DEBUGFS_ADD_FILE(thermal_throttling, data, S_IRUSR);
- DEBUGFS_ADD_FILE(disable_ht40, data, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_statistics, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_statistics, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(traffic_log, debug, S_IWUSR | S_IRUSR);
- DEBUGFS_ADD_FILE(rx_queue, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_queue, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(tx_power, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(power_save_status, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(clear_ucode_statistics, debug, S_IWUSR);
- DEBUGFS_ADD_FILE(clear_traffic_statistics, debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(nvm, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sram, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(log_event, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(stations, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(channels, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(status, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(interrupt, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(qos, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(led, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(sleep_level_override, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(current_sleep_command, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(thermal_throttling, dir_data, S_IRUSR);
+ DEBUGFS_ADD_FILE(disable_ht40, dir_data, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_statistics, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(traffic_log, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(rx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_queue, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(tx_power, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(power_save_status, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(clear_ucode_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(clear_traffic_statistics, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(csr, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(fh_reg, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(missed_beacon, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(internal_scan, dir_debug, S_IWUSR);
+ DEBUGFS_ADD_FILE(plcp_delta, dir_debug, S_IWUSR | S_IRUSR);
+ DEBUGFS_ADD_FILE(force_reset, dir_debug, S_IWUSR | S_IRUSR);
if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_ADD_FILE(ucode_rx_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_tx_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(ucode_general_stats, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(sensitivity, debug, S_IRUSR);
- DEBUGFS_ADD_FILE(chain_noise, debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_rx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tx_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_general_stats, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(sensitivity, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(chain_noise, dir_debug, S_IRUSR);
+ DEBUGFS_ADD_FILE(ucode_tracing, dir_debug, S_IWUSR | S_IRUSR);
}
- DEBUGFS_ADD_BOOL(disable_sensitivity, rf, &priv->disable_sens_cal);
- DEBUGFS_ADD_BOOL(disable_chain_noise, rf,
+ DEBUGFS_ADD_BOOL(disable_sensitivity, dir_rf, &priv->disable_sens_cal);
+ DEBUGFS_ADD_BOOL(disable_chain_noise, dir_rf,
&priv->disable_chain_noise_cal);
if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
- DEBUGFS_ADD_BOOL(disable_tx_power, rf,
+ DEBUGFS_ADD_BOOL(disable_tx_power, dir_rf,
&priv->disable_tx_power_cal);
return 0;
err:
- IWL_ERR(priv, "Can't open the debugfs directory\n");
+ IWL_ERR(priv, "Can't create the debugfs directory\n");
iwl_dbgfs_unregister(priv);
- return ret;
+ return -ENOMEM;
}
EXPORT_SYMBOL(iwl_dbgfs_register);
@@ -1938,56 +2382,11 @@ EXPORT_SYMBOL(iwl_dbgfs_register);
*/
void iwl_dbgfs_unregister(struct iwl_priv *priv)
{
- if (!priv->dbgfs)
+ if (!priv->debugfs_dir)
return;
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sleep_level_override);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_current_sleep_command);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_nvm);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_sram);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_log_event);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_stations);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_channels);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40);
- DEBUGFS_REMOVE(priv->dbgfs->dir_data);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_traffic_log);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_clear_ucode_statistics);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_clear_traffic_statistics);
- if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_rx_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_tx_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_ucode_general_stats);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_sensitivity);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
- file_chain_noise);
- }
- DEBUGFS_REMOVE(priv->dbgfs->dir_debug);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_sensitivity);
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_chain_noise);
- if (((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965) ||
- ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_3945))
- DEBUGFS_REMOVE(priv->dbgfs->dbgfs_rf_files.file_disable_tx_power);
- DEBUGFS_REMOVE(priv->dbgfs->dir_rf);
- DEBUGFS_REMOVE(priv->dbgfs->dir_drv);
- kfree(priv->dbgfs);
- priv->dbgfs = NULL;
+ debugfs_remove_recursive(priv->debugfs_dir);
+ priv->debugfs_dir = NULL;
}
EXPORT_SYMBOL(iwl_dbgfs_unregister);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 3822cf53e36..ab891b95804 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
@@ -512,6 +512,7 @@ struct iwl_ht_config {
bool is_ht;
bool is_40mhz;
bool single_chain_sufficient;
+ enum ieee80211_smps_mode smps; /* current smps mode */
/* BSS related data */
u8 extension_chan_offset;
u8 ht_protection;
@@ -984,6 +985,74 @@ struct iwl_switch_rxon {
__le16 channel;
};
+/*
+ * schedule the timer to wake up every UCODE_TRACE_PERIOD milliseconds
+ * to perform continuous uCode event logging operation if enabled
+ */
+#define UCODE_TRACE_PERIOD (100)
+
+/*
+ * iwl_event_log: current uCode event log position
+ *
+ * @ucode_trace: enable/disable ucode continuous trace timer
+ * @num_wraps: how many times the event buffer wraps
+ * @next_entry: the entry just before the next one that uCode would fill
+ * @non_wraps_count: counter for no wrap detected when dump ucode events
+ * @wraps_once_count: counter for wrap once detected when dump ucode events
+ * @wraps_more_count: counter for wrap more than once detected
+ * when dump ucode events
+ */
+struct iwl_event_log {
+ bool ucode_trace;
+ u32 num_wraps;
+ u32 next_entry;
+ int non_wraps_count;
+ int wraps_once_count;
+ int wraps_more_count;
+};
+
+/*
+ * host interrupt timeout value
+ * used with setting interrupt coalescing timer
+ * the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
+ *
+ * default interrupt coalescing timer is 64 x 32 = 2048 usecs
+ * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
+ */
+#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
+#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
+#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
+#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+
+/*
+ * This is the threshold value of plcp error rate per 100mSecs. It is
+ * used to set and check for the validity of plcp_delta.
+ */
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MIN (0)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_DEF (50)
+#define IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF (100)
+#define IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF (200)
+#define IWL_MAX_PLCP_ERR_THRESHOLD_MAX (255)
+
+#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
+#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
+
+enum iwl_reset {
+ IWL_RF_RESET = 0,
+ IWL_FW_RESET,
+ IWL_MAX_FORCE_RESET,
+};
+
+struct iwl_force_reset {
+ int reset_request_count;
+ int reset_success_count;
+ int reset_reject_count;
+ unsigned long reset_duration;
+ unsigned long last_force_reset_jiffies;
+};
+
struct iwl_priv {
/* ieee device used by generic ieee processing code */
@@ -1004,13 +1073,19 @@ struct iwl_priv {
struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
-#if defined(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) || defined(CONFIG_IWL3945_SPECTRUM_MEASUREMENT)
/* spectrum measurement report caching */
struct iwl_spectrum_notification measure_report;
u8 measurement_status;
-#endif
+
/* ucode beacon time */
u32 ucode_beacon_time;
+ int missed_beacon_threshold;
+
+ /* storing the jiffies when the plcp error rate is received */
+ unsigned long plcp_jiffies;
+
+ /* force reset */
+ struct iwl_force_reset force_reset[IWL_MAX_FORCE_RESET];
/* we allocate array of iwl4965_channel_info for NIC's valid channels.
* Access via channel # using indirect index array */
@@ -1029,7 +1104,6 @@ struct iwl_priv {
struct iwl_calib_result calib_results[IWL_CALIB_MAX];
/* Scan related variables */
- unsigned long last_scan_jiffies;
unsigned long next_scan_jiffies;
unsigned long scan_start;
unsigned long scan_pass_start;
@@ -1037,6 +1111,7 @@ struct iwl_priv {
void *scan;
int scan_bands;
struct cfg80211_scan_request *scan_request;
+ bool is_internal_short_scan;
u8 scan_tx_ant[IEEE80211_NUM_BANDS];
u8 mgmt_tx_ant;
@@ -1045,6 +1120,7 @@ struct iwl_priv {
spinlock_t hcmd_lock; /* protect hcmd */
spinlock_t reg_lock; /* protect hw register access */
struct mutex mutex;
+ struct mutex sync_cmd_mutex; /* enable serialization of sync commands */
/* basic pci-network driver stuff */
struct pci_dev *pci_dev;
@@ -1135,6 +1211,8 @@ struct iwl_priv {
struct iwl_notif_statistics statistics;
#ifdef CONFIG_IWLWIFI_DEBUG
struct iwl_notif_statistics accum_statistics;
+ struct iwl_notif_statistics delta_statistics;
+ struct iwl_notif_statistics max_delta;
#endif
/* context information */
@@ -1207,15 +1285,10 @@ struct iwl_priv {
struct workqueue_struct *workqueue;
- struct work_struct up;
struct work_struct restart;
- struct work_struct calibrated_work;
struct work_struct scan_completed;
struct work_struct rx_replenish;
struct work_struct abort_scan;
- struct work_struct update_link_led;
- struct work_struct auth_work;
- struct work_struct report_work;
struct work_struct request_scan;
struct work_struct beacon_update;
struct work_struct tt_work;
@@ -1251,7 +1324,8 @@ struct iwl_priv {
u16 rx_traffic_idx;
u8 *tx_traffic;
u8 *rx_traffic;
- struct iwl_debugfs *dbgfs;
+ struct dentry *debugfs_dir;
+ u32 dbgfs_sram_offset, dbgfs_sram_len;
#endif /* CONFIG_IWLWIFI_DEBUGFS */
#endif /* CONFIG_IWLWIFI_DEBUG */
@@ -1261,6 +1335,7 @@ struct iwl_priv {
u32 disable_tx_power_cal;
struct work_struct run_time_calib_work;
struct timer_list statistics_periodic;
+ struct timer_list ucode_trace;
bool hw_ready;
/*For 3945*/
#define IWL_DEFAULT_TX_POWER 0x0F
@@ -1268,6 +1343,8 @@ struct iwl_priv {
struct iwl3945_notif_statistics statistics_39;
u32 sta_supp_rates;
+
+ struct iwl_event_log event_log;
}; /*iwl_priv */
static inline void iwl_txq_ctx_activate(struct iwl_priv *priv, int txq_id)
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
index 83cc4e500a9..36580d8d8b8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.c
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -37,4 +37,6 @@ EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_cont_event);
+EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_wrap_event);
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
index d9c7363b1bb..ff4d012ce26 100644
--- a/drivers/net/wireless/iwlwifi/iwl-devtrace.h
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -91,6 +91,50 @@ TRACE_EVENT(iwlwifi_dev_iowrite32,
);
#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iwlwifi_ucode
+
+TRACE_EVENT(iwlwifi_dev_ucode_cont_event,
+ TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
+ TP_ARGS(priv, time, data, ev),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, time)
+ __field(u32, data)
+ __field(u32, ev)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->time = time;
+ __entry->data = data;
+ __entry->ev = ev;
+ ),
+ TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
+ __entry->priv, __entry->time, __entry->data, __entry->ev)
+);
+
+TRACE_EVENT(iwlwifi_dev_ucode_wrap_event,
+ TP_PROTO(struct iwl_priv *priv, u32 wraps, u32 n_entry, u32 p_entry),
+ TP_ARGS(priv, wraps, n_entry, p_entry),
+ TP_STRUCT__entry(
+ PRIV_ENTRY
+
+ __field(u32, wraps)
+ __field(u32, n_entry)
+ __field(u32, p_entry)
+ ),
+ TP_fast_assign(
+ PRIV_ASSIGN;
+ __entry->wraps = wraps;
+ __entry->n_entry = n_entry;
+ __entry->p_entry = p_entry;
+ ),
+ TP_printk("[%p] wraps=#%02d n=0x%X p=0x%X",
+ __entry->priv, __entry->wraps, __entry->n_entry,
+ __entry->p_entry)
+);
+
+#undef TRACE_SYSTEM
#define TRACE_SYSTEM iwlwifi
TRACE_EVENT(iwlwifi_dev_hcmd,
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 4a30969689f..fd37152abae 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 0cd9c02ee04..4e1ba824dc5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 65fa8a69fd5..113c3669b9c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -379,6 +379,25 @@
#define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010)
+/**
+ * Bit fields for TSSR(Tx Shared Status & Control) error status register:
+ * 31: Indicates an address error when accessed to internal memory
+ * uCode/driver must write "1" in order to clear this flag
+ * 30: Indicates that Host did not send the expected number of dwords to FH
+ * uCode/driver must write "1" in order to clear this flag
+ * 16-9: Each status bit is for one channel. Indicates that an (Error) ActDMA
+ * command was received from the scheduler while the TRB was already full
+ * with previous command
+ * uCode/driver must write "1" in order to clear this flag
+ * 7-0: Each status bit indicates a channel's TxCredit error. When an error
+ * bit is set, it indicates that the FH has received a full indication
+ * from the RTC TxFIFO and the current value of the TxCredit counter was
+ * not equal to zero. This means that the credit mechanism was not
+ * synchronized to the TxFIFO status
+ * uCode/driver must write "1" in order to clear this flag
+ */
+#define FH_TSSR_TX_ERROR_REG (FH_TSSR_LOWER_BOUND + 0x018)
+
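A hedged sketch of the write-1-to-clear behaviour described above; the
iwl_read_direct32()/iwl_write_direct32() accessors exist in this driver, but
this helper itself is illustrative and not part of the patch.

static void iwl_clear_tssr_tx_errors(struct iwl_priv *priv)
{
	u32 err = iwl_read_direct32(priv, FH_TSSR_TX_ERROR_REG);

	/* every error flag (address error, dword count, TRB full,
	 * per-channel TxCredit) is cleared by writing "1" back */
	if (err)
		iwl_write_direct32(priv, FH_TSSR_TX_ERROR_REG, err);
}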
#define FH_TSSR_TX_STATUS_REG_BIT_BUFS_EMPTY(_chnl) ((1 << (_chnl)) << 24)
#define FH_TSSR_TX_STATUS_REG_BIT_NO_PEND_REQ(_chnl) ((1 << (_chnl)) << 16)
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 30e9ea6d54e..73681c4fefe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -58,7 +58,6 @@ const char *get_cmd_string(u8 cmd)
IWL_CMD(COEX_PRIORITY_TABLE_CMD);
IWL_CMD(COEX_MEDIUM_NOTIFICATION);
IWL_CMD(COEX_EVENT_CMD);
- IWL_CMD(RADAR_NOTIFICATION);
IWL_CMD(REPLY_QUIET_CMD);
IWL_CMD(REPLY_CHANNEL_SWITCH);
IWL_CMD(CHANNEL_SWITCH_NOTIFICATION);
@@ -165,15 +164,13 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* A synchronous command can not have a callback set. */
BUG_ON(cmd->callback);
- if (test_and_set_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status)) {
- IWL_ERR(priv,
- "Error sending %s: Already sending a host command\n",
+ IWL_DEBUG_INFO(priv, "Attempting to send sync command %s\n",
get_cmd_string(cmd->id));
- ret = -EBUSY;
- goto out;
- }
+ mutex_lock(&priv->sync_cmd_mutex);
set_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Setting HCMD_ACTIVE for command %s \n",
+ get_cmd_string(cmd->id));
cmd_idx = iwl_enqueue_hcmd(priv, cmd);
if (cmd_idx < 0) {
@@ -194,6 +191,8 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s \n",
+ get_cmd_string(cmd->id));
ret = -ETIMEDOUT;
goto cancel;
}
@@ -238,7 +237,7 @@ fail:
cmd->reply_page = 0;
}
out:
- clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
+ mutex_unlock(&priv->sync_cmd_mutex);
return ret;
}
EXPORT_SYMBOL(iwl_send_cmd_sync);
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index bd0b12efb5c..51a67fb2e18 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -80,8 +80,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
if (desc->v_addr)
- pci_free_consistent(pci_dev, desc->len,
- desc->v_addr, desc->p_addr);
+ dma_free_coherent(&pci_dev->dev, desc->len,
+ desc->v_addr, desc->p_addr);
desc->v_addr = NULL;
desc->len = 0;
}
@@ -89,7 +89,8 @@ static inline void iwl_free_fw_desc(struct pci_dev *pci_dev,
static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
struct fw_desc *desc)
{
- desc->v_addr = pci_alloc_consistent(pci_dev, desc->len, &desc->p_addr);
+ desc->v_addr = dma_alloc_coherent(&pci_dev->dev, desc->len,
+ &desc->p_addr, GFP_KERNEL);
return (desc->v_addr != NULL) ? 0 : -ENOMEM;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index e552d4c4bdb..c719baf2585 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 46c7a95b88f..a6f9c918aab 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index f47f053f02e..49a70baa3fb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 8ccc0bb1d9e..1a1a9f081cc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -303,13 +303,12 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
sizeof(struct iwl_powertable_cmd), cmd);
}
-
+/* priv->mutex must be held */
int iwl_power_update_mode(struct iwl_priv *priv, bool force)
{
int ret = 0;
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
- bool enabled = (priv->iw_mode == NL80211_IFTYPE_STATION) &&
- (priv->hw->conf.flags & IEEE80211_CONF_PS);
+ bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
bool update_chains;
struct iwl_powertable_cmd cmd;
int dtimper;
@@ -319,7 +318,7 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
if (priv->vif)
- dtimper = priv->vif->bss_conf.dtim_period;
+ dtimper = priv->hw->conf.ps_dtim_period;
else
dtimper = 1;
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index 310c32e8f69..5db91c10dcc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 6d95832db06..d2d2a917490 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 2dbce85404a..df257bc15f4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -123,12 +123,11 @@ EXPORT_SYMBOL(iwl_rx_queue_space);
/**
* iwl_rx_queue_update_write_ptr - Update the write pointer for the RX queue
*/
-int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
+void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
{
unsigned long flags;
u32 rx_wrt_ptr_reg = priv->hw_params.rx_wrt_ptr_reg;
u32 reg;
- int ret = 0;
spin_lock_irqsave(&q->lock, flags);
@@ -161,7 +160,6 @@ int iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q)
exit_unlock:
spin_unlock_irqrestore(&q->lock, flags);
- return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_update_write_ptr);
/**
@@ -184,14 +182,13 @@ static inline __le32 iwl_dma_addr2rbd_ptr(struct iwl_priv *priv,
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
-int iwl_rx_queue_restock(struct iwl_priv *priv)
+void iwl_rx_queue_restock(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
int write;
- int ret = 0;
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write & ~0x7;
@@ -220,10 +217,8 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
- ret = iwl_rx_queue_update_write_ptr(priv, rxq);
+ iwl_rx_queue_update_write_ptr(priv, rxq);
}
-
- return ret;
}
EXPORT_SYMBOL(iwl_rx_queue_restock);
@@ -350,10 +345,10 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
}
}
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
@@ -362,7 +357,7 @@ EXPORT_SYMBOL(iwl_rx_queue_free);
int iwl_rx_queue_alloc(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
int i;
spin_lock_init(&rxq->lock);
@@ -370,12 +365,13 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
INIT_LIST_HEAD(&rxq->rx_used);
/* Alloc the circular buffer of Read Buffer Descriptors (RBDs) */
- rxq->bd = pci_alloc_consistent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr);
+ rxq->bd = dma_alloc_coherent(dev, 4 * RX_QUEUE_SIZE, &rxq->dma_addr,
+ GFP_KERNEL);
if (!rxq->bd)
goto err_bd;
- rxq->rb_stts = pci_alloc_consistent(dev, sizeof(struct iwl_rb_status),
- &rxq->rb_stts_dma);
+ rxq->rb_stts = dma_alloc_coherent(dev, sizeof(struct iwl_rb_status),
+ &rxq->rb_stts_dma, GFP_KERNEL);
if (!rxq->rb_stts)
goto err_rb;
@@ -392,8 +388,8 @@ int iwl_rx_queue_alloc(struct iwl_priv *priv)
return 0;
err_rb:
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
err_bd:
return -ENOMEM;
}
@@ -473,8 +469,8 @@ int iwl_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
(rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
(rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
- /* Set interrupt coalescing timer to 64 x 32 = 2048 usecs */
- iwl_write8(priv, CSR_INT_COALESCING, 0x40);
+ /* Set interrupt coalescing timer to default (2048 usecs) */
+ iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
return 0;
}
@@ -499,9 +495,10 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
struct iwl_missed_beacon_notif *missed_beacon;
missed_beacon = &pkt->u.missed_beacon;
- if (le32_to_cpu(missed_beacon->consequtive_missed_beacons) > 5) {
+ if (le32_to_cpu(missed_beacon->consecutive_missed_beacons) >
+ priv->missed_beacon_threshold) {
IWL_DEBUG_CALIB(priv, "missed bcn cnsq %d totl %d rcd %d expctd %d\n",
- le32_to_cpu(missed_beacon->consequtive_missed_beacons),
+ le32_to_cpu(missed_beacon->consecutive_missed_beacons),
le32_to_cpu(missed_beacon->total_missed_becons),
le32_to_cpu(missed_beacon->num_recvd_beacons),
le32_to_cpu(missed_beacon->num_expected_beacons));
@@ -511,6 +508,24 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_rx_missed_beacon_notif);
+void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
+ struct iwl_rx_mem_buffer *rxb)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
+
+ if (!report->state) {
+ IWL_DEBUG_11H(priv,
+ "Spectrum Measure Notification: Start\n");
+ return;
+ }
+
+ memcpy(&priv->measure_report, report, sizeof(*report));
+ priv->measurement_status |= MEASUREMENT_READY;
+}
+EXPORT_SYMBOL(iwl_rx_spectrum_measure_notif);
+
+
/* Calculate noise level, based on measurements during network silence just
* before arriving beacon. This measurement can be done only if we know
@@ -564,15 +579,24 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
int i;
__le32 *prev_stats;
u32 *accum_stats;
+ u32 *delta, *max_delta;
prev_stats = (__le32 *)&priv->statistics;
accum_stats = (u32 *)&priv->accum_statistics;
+ delta = (u32 *)&priv->delta_statistics;
+ max_delta = (u32 *)&priv->max_delta;
for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
- i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
- if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
- *accum_stats += (le32_to_cpu(*stats) -
+ i += sizeof(__le32), stats++, prev_stats++, delta++,
+ max_delta++, accum_stats++) {
+ if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats)) {
+ *delta = (le32_to_cpu(*stats) -
le32_to_cpu(*prev_stats));
+ *accum_stats += *delta;
+ if (*delta > *max_delta)
+ *max_delta = *delta;
+ }
+ }
/* reset accumulative statistics for "no-counter" type statistics */
priv->accum_statistics.general.temperature =
@@ -592,11 +616,15 @@ static void iwl_accumulative_statistics(struct iwl_priv *priv,
#define REG_RECALIB_PERIOD (60)
+#define PLCP_MSG "plcp_err exceeded %u, %u, %u, %u, %u, %d, %u mSecs\n"
void iwl_rx_statistics(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
{
int change;
struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ int combined_plcp_delta;
+ unsigned int plcp_msec;
+ unsigned long plcp_received_jiffies;
IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
(int)sizeof(priv->statistics),
@@ -611,6 +639,56 @@ void iwl_rx_statistics(struct iwl_priv *priv,
#ifdef CONFIG_IWLWIFI_DEBUG
iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
#endif
+ /*
+ * check for plcp_err and trigger radio reset if it exceeds
+ * the plcp error threshold plcp_delta.
+ */
+ plcp_received_jiffies = jiffies;
+ plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies -
+ (long) priv->plcp_jiffies);
+ priv->plcp_jiffies = plcp_received_jiffies;
+ /*
+ * check to make sure plcp_msec is not 0 to prevent division
+ * by zero.
+ */
+ if (plcp_msec) {
+ combined_plcp_delta =
+ (le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err) -
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err)) +
+ (le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err) -
+ le32_to_cpu(priv->statistics.rx.ofdm_ht.plcp_err));
+
+ if ((combined_plcp_delta > 0) &&
+ ((combined_plcp_delta * 100) / plcp_msec) >
+ priv->cfg->plcp_delta_threshold) {
+ /*
+ * if plcp_err exceeds the threshold, the following
+ * data is printed in csv format:
+ * Text: plcp_err exceeded %d,
+ * Received ofdm.plcp_err,
+ * Current ofdm.plcp_err,
+ * Received ofdm_ht.plcp_err,
+ * Current ofdm_ht.plcp_err,
+ * combined_plcp_delta,
+ * plcp_msec
+ */
+ IWL_DEBUG_RADIO(priv, PLCP_MSG,
+ priv->cfg->plcp_delta_threshold,
+ le32_to_cpu(pkt->u.stats.rx.ofdm.plcp_err),
+ le32_to_cpu(priv->statistics.rx.ofdm.plcp_err),
+ le32_to_cpu(pkt->u.stats.rx.ofdm_ht.plcp_err),
+ le32_to_cpu(
+ priv->statistics.rx.ofdm_ht.plcp_err),
+ combined_plcp_delta, plcp_msec);
+
+ /*
+ * Reset the RF radio due to the high plcp
+ * error rate
+ */
+ iwl_force_reset(priv, IWL_RF_RESET);
+ }
+ }
+
memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
set_bit(STATUS_STATISTICS, &priv->status);
@@ -638,11 +716,13 @@ void iwl_reply_statistics(struct iwl_priv *priv,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
if (le32_to_cpu(pkt->u.stats.flag) & UCODE_STATISTICS_CLEAR_MSK) {
- memset(&priv->statistics, 0,
- sizeof(struct iwl_notif_statistics));
#ifdef CONFIG_IWLWIFI_DEBUG
memset(&priv->accum_statistics, 0,
sizeof(struct iwl_notif_statistics));
+ memset(&priv->delta_statistics, 0,
+ sizeof(struct iwl_notif_statistics));
+ memset(&priv->max_delta, 0,
+ sizeof(struct iwl_notif_statistics));
#endif
IWL_DEBUG_RX(priv, "Statistics have been cleared\n");
}
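Worked example of the PLCP check added above, assuming the default threshold of
50 (IWL_MAX_PLCP_ERR_THRESHOLD_DEF): with 60 combined OFDM + OFDM-HT PLCP
errors accumulated over plcp_msec = 100 ms, (60 * 100) / 100 = 60 exceeds 50,
so iwl_force_reset(priv, IWL_RF_RESET) is requested; 40 errors over the same
interval give 40, which stays below the threshold and triggers nothing.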
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index fa1c89ba645..dd9ff2ed645 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -2,7 +2,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2008 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -192,19 +192,17 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
IWL_DEBUG_SCAN(priv, "Scan ch.res: "
"%d [802.11%s] "
"(TSF: 0x%08X:%08X) - %d "
- "elapsed=%lu usec (%dms since last)\n",
+ "elapsed=%lu usec\n",
notif->channel,
notif->band ? "bg" : "a",
le32_to_cpu(notif->tsf_high),
le32_to_cpu(notif->tsf_low),
le32_to_cpu(notif->statistics[0]),
- le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf,
- jiffies_to_msecs(elapsed_jiffies
- (priv->last_scan_jiffies, jiffies)));
+ le32_to_cpu(notif->tsf_low) - priv->scan_start_tsf);
#endif
- priv->last_scan_jiffies = jiffies;
- priv->next_scan_jiffies = 0;
+ if (!priv->is_internal_short_scan)
+ priv->next_scan_jiffies = 0;
}
/* Service SCAN_COMPLETE_NOTIFICATION (0x84) */
@@ -250,8 +248,9 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
goto reschedule;
}
- priv->last_scan_jiffies = jiffies;
- priv->next_scan_jiffies = 0;
+ if (!priv->is_internal_short_scan)
+ priv->next_scan_jiffies = 0;
+
IWL_DEBUG_INFO(priv, "Setting scan to off\n");
clear_bit(STATUS_SCANNING, &priv->status);
@@ -314,6 +313,72 @@ u16 iwl_get_passive_dwell_time(struct iwl_priv *priv,
}
EXPORT_SYMBOL(iwl_get_passive_dwell_time);
+static int iwl_get_single_channel_for_scan(struct iwl_priv *priv,
+ enum ieee80211_band band,
+ struct iwl_scan_channel *scan_ch)
+{
+ const struct ieee80211_supported_band *sband;
+ const struct iwl_channel_info *ch_info;
+ u16 passive_dwell = 0;
+ u16 active_dwell = 0;
+ int i, added = 0;
+ u16 channel = 0;
+
+ sband = iwl_get_hw_mode(priv, band);
+ if (!sband) {
+ IWL_ERR(priv, "invalid band\n");
+ return added;
+ }
+
+ active_dwell = iwl_get_active_dwell_time(priv, band, 0);
+ passive_dwell = iwl_get_passive_dwell_time(priv, band);
+
+ if (passive_dwell <= active_dwell)
+ passive_dwell = active_dwell + 1;
+
+ /* only scan a single channel; that is enough to reset the RF */
+ /* pick the first valid channel that is not currently in use */
+ if (band == IEEE80211_BAND_5GHZ) {
+ for (i = 14; i < priv->channel_count; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel = priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ } else {
+ for (i = 0; i < 14; i++) {
+ if (priv->channel_info[i].channel !=
+ le16_to_cpu(priv->staging_rxon.channel)) {
+ channel =
+ priv->channel_info[i].channel;
+ ch_info = iwl_get_channel_info(priv,
+ band, channel);
+ if (is_channel_valid(ch_info))
+ break;
+ }
+ }
+ }
+ if (channel) {
+ scan_ch->channel = cpu_to_le16(channel);
+ scan_ch->type = SCAN_CHANNEL_TYPE_PASSIVE;
+ scan_ch->active_dwell = cpu_to_le16(active_dwell);
+ scan_ch->passive_dwell = cpu_to_le16(passive_dwell);
+ /* Set txpower levels to defaults */
+ scan_ch->dsp_atten = 110;
+ if (band == IEEE80211_BAND_5GHZ)
+ scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
+ else
+ scan_ch->tx_gain = ((1 << 5) | (5 << 3));
+ added++;
+ } else
+ IWL_ERR(priv, "no valid channel found\n");
+ return added;
+}
+
static int iwl_get_channels_for_scan(struct iwl_priv *priv,
enum ieee80211_band band,
u8 is_active, u8 n_probes,
@@ -404,23 +469,9 @@ EXPORT_SYMBOL(iwl_init_scan_params);
static int iwl_scan_initiate(struct iwl_priv *priv)
{
- if (!iwl_is_ready_rf(priv)) {
- IWL_DEBUG_SCAN(priv, "Aborting scan due to not ready.\n");
- return -EIO;
- }
-
- if (test_bit(STATUS_SCANNING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
- return -EAGAIN;
- }
-
- if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
- IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
- return -EAGAIN;
- }
-
IWL_DEBUG_INFO(priv, "Starting scan...\n");
set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = false;
priv->scan_start = jiffies;
priv->scan_pass_start = priv->scan_start;
@@ -449,6 +500,18 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
/* We don't schedule scan within next_scan_jiffies period.
* Avoid scanning during possible EAPOL exchange, return
* success immediately.
@@ -461,15 +524,6 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw,
goto out_unlock;
}
- /* if we just finished scan ask for delay */
- if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
- time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
- IWL_DEBUG_SCAN(priv, "scan rejected: within previous scan period\n");
- queue_work(priv->workqueue, &priv->scan_completed);
- ret = 0;
- goto out_unlock;
- }
-
priv->scan_bands = 0;
for (i = 0; i < req->n_channels; i++)
priv->scan_bands |= BIT(req->channels[i]->band);
@@ -488,6 +542,46 @@ out_unlock:
}
EXPORT_SYMBOL(iwl_mac_hw_scan);
+/*
+ * Internal short scan: this function should only be called while associated.
+ * It will reset and tune the radio to prevent a possible RF related problem.
+ */
+int iwl_internal_short_hw_scan(struct iwl_priv *priv)
+{
+ int ret = 0;
+
+ if (!iwl_is_ready_rf(priv)) {
+ ret = -EIO;
+ IWL_DEBUG_SCAN(priv, "not ready or exit pending\n");
+ goto out;
+ }
+ if (test_bit(STATUS_SCANNING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan already in progress.\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+ if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
+ IWL_DEBUG_SCAN(priv, "Scan request while abort pending\n");
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ priv->scan_bands = 0;
+ if (priv->band == IEEE80211_BAND_5GHZ)
+ priv->scan_bands |= BIT(IEEE80211_BAND_5GHZ);
+ else
+ priv->scan_bands |= BIT(IEEE80211_BAND_2GHZ);
+
+ IWL_DEBUG_SCAN(priv, "Start internal short scan...\n");
+ set_bit(STATUS_SCANNING, &priv->status);
+ priv->is_internal_short_scan = true;
+ queue_work(priv->workqueue, &priv->request_scan);
+
+out:
+ return ret;
+}
+EXPORT_SYMBOL(iwl_internal_short_hw_scan);
+
#define IWL_SCAN_CHECK_WATCHDOG (7 * HZ)
void iwl_bg_scan_check(struct work_struct *data)
@@ -544,14 +638,26 @@ u16 iwl_fill_probe_req(struct iwl_priv *priv, struct ieee80211_mgmt *frame,
if (left < 0)
return 0;
*pos++ = WLAN_EID_SSID;
- *pos++ = 0;
-
- len += 2;
+ if (!priv->is_internal_short_scan &&
+ priv->scan_request->n_ssids) {
+ struct cfg80211_ssid *ssid =
+ priv->scan_request->ssids;
+
+ /* Broadcast if ssid_len is 0 */
+ *pos++ = ssid->ssid_len;
+ memcpy(pos, ssid->ssid, ssid->ssid_len);
+ pos += ssid->ssid_len;
+ len += 2 + ssid->ssid_len;
+ } else {
+ *pos++ = 0;
+ len += 2;
+ }
if (WARN_ON(left < ie_len))
return len;
- memcpy(pos, ies, ie_len);
+ if (ies)
+ memcpy(pos, ies, ie_len);
len += ie_len;
left -= ie_len;
@@ -654,7 +760,6 @@ static void iwl_bg_request_scan(struct work_struct *data)
unsigned long flags;
IWL_DEBUG_INFO(priv, "Scanning while associated...\n");
-
spin_lock_irqsave(&priv->lock, flags);
interval = priv->beacon_int;
spin_unlock_irqrestore(&priv->lock, flags);
@@ -672,21 +777,29 @@ static void iwl_bg_request_scan(struct work_struct *data)
scan_suspend_time, interval);
}
- if (priv->scan_request->n_ssids) {
- int i, p = 0;
+ if (priv->is_internal_short_scan) {
+ IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
+ } else if (priv->scan_request->n_ssids) {
IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
- for (i = 0; i < priv->scan_request->n_ssids; i++) {
- /* always does wildcard anyway */
- if (!priv->scan_request->ssids[i].ssid_len)
- continue;
- scan->direct_scan[p].id = WLAN_EID_SSID;
- scan->direct_scan[p].len =
- priv->scan_request->ssids[i].ssid_len;
- memcpy(scan->direct_scan[p].ssid,
- priv->scan_request->ssids[i].ssid,
- priv->scan_request->ssids[i].ssid_len);
- n_probes++;
- p++;
+ /*
+ * The first SSID to scan is stuffed into the probe request
+ * template and the remaining ones are handled through the
+ * direct_scan array.
+ */
+ if (priv->scan_request->n_ssids > 1) {
+ int i, p = 0;
+ for (i = 1; i < priv->scan_request->n_ssids; i++) {
+ if (!priv->scan_request->ssids[i].ssid_len)
+ continue;
+ scan->direct_scan[p].id = WLAN_EID_SSID;
+ scan->direct_scan[p].len =
+ priv->scan_request->ssids[i].ssid_len;
+ memcpy(scan->direct_scan[p].ssid,
+ priv->scan_request->ssids[i].ssid,
+ priv->scan_request->ssids[i].ssid_len);
+ n_probes++;
+ p++;
+ }
}
is_active = true;
} else
@@ -753,24 +866,38 @@ static void iwl_bg_request_scan(struct work_struct *data)
rx_chain |= rx_ant << RXON_RX_CHAIN_FORCE_SEL_POS;
rx_chain |= 0x1 << RXON_RX_CHAIN_DRIVER_FORCE_POS;
scan->rx_chain = cpu_to_le16(rx_chain);
- cmd_len = iwl_fill_probe_req(priv,
- (struct ieee80211_mgmt *)scan->data,
- priv->scan_request->ie,
- priv->scan_request->ie_len,
- IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ if (!priv->is_internal_short_scan) {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ priv->scan_request->ie,
+ priv->scan_request->ie_len,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ } else {
+ cmd_len = iwl_fill_probe_req(priv,
+ (struct ieee80211_mgmt *)scan->data,
+ NULL, 0,
+ IWL_MAX_SCAN_SIZE - sizeof(*scan));
+ }
scan->tx_cmd.len = cpu_to_le16(cmd_len);
-
if (iwl_is_monitor_mode(priv))
scan->filter_flags = RXON_FILTER_PROMISC_MSK;
scan->filter_flags |= (RXON_FILTER_ACCEPT_GRP_MSK |
RXON_FILTER_BCON_AWARE_MSK);
- scan->channel_count =
- iwl_get_channels_for_scan(priv, band, is_active, n_probes,
- (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
-
+ if (priv->is_internal_short_scan) {
+ scan->channel_count =
+ iwl_get_single_channel_for_scan(priv, band,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ } else {
+ scan->channel_count =
+ iwl_get_channels_for_scan(priv, band,
+ is_active, n_probes,
+ (void *)&scan->data[le16_to_cpu(
+ scan->tx_cmd.len)]);
+ }
if (scan->channel_count == 0) {
IWL_DEBUG_SCAN(priv, "channel count %d\n", scan->channel_count);
goto done;
@@ -831,7 +958,12 @@ void iwl_bg_scan_completed(struct work_struct *work)
cancel_delayed_work(&priv->scan_check);
- ieee80211_scan_completed(priv->hw, false);
+ if (!priv->is_internal_short_scan)
+ ieee80211_scan_completed(priv->hw, false);
+ else {
+ priv->is_internal_short_scan = false;
+ IWL_DEBUG_SCAN(priv, "internal short scan completed\n");
+ }
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
deleted file mode 100644
index 1ea5cd345fe..00000000000
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/******************************************************************************
- *
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
- *
- * Portions of this file are derived from the ipw3945 project, as well
- * as portions of the ieee80211 subsystem header files.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- * The full GNU General Public License is included in this distribution in the
- * file called LICENSE.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/skbuff.h>
-#include <linux/netdevice.h>
-#include <linux/wireless.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-eeprom.h"
-#include "iwl-dev.h"
-#include "iwl-core.h"
-#include "iwl-io.h"
-#include "iwl-spectrum.h"
-
-#define BEACON_TIME_MASK_LOW 0x00FFFFFF
-#define BEACON_TIME_MASK_HIGH 0xFF000000
-#define TIME_UNIT 1024
-
-/*
- * extended beacon time format
- * time in usec will be changed into a 32-bit value in 8:24 format
- * the high 1 byte is the beacon counts
- * the lower 3 bytes is the time in usec within one beacon interval
- */
-
-/* TOOD: was used in sysfs debug interface need to add to mac */
-#if 0
-static u32 iwl_usecs_to_beacons(u32 usec, u32 beacon_interval)
-{
- u32 quot;
- u32 rem;
- u32 interval = beacon_interval * 1024;
-
- if (!interval || !usec)
- return 0;
-
- quot = (usec / interval) & (BEACON_TIME_MASK_HIGH >> 24);
- rem = (usec % interval) & BEACON_TIME_MASK_LOW;
-
- return (quot << 24) + rem;
-}
-
-/* base is usually what we get from ucode with each received frame,
- * the same as HW timer counter counting down
- */
-
-static __le32 iwl_add_beacon_time(u32 base, u32 addon, u32 beacon_interval)
-{
- u32 base_low = base & BEACON_TIME_MASK_LOW;
- u32 addon_low = addon & BEACON_TIME_MASK_LOW;
- u32 interval = beacon_interval * TIME_UNIT;
- u32 res = (base & BEACON_TIME_MASK_HIGH) +
- (addon & BEACON_TIME_MASK_HIGH);
-
- if (base_low > addon_low)
- res += base_low - addon_low;
- else if (base_low < addon_low) {
- res += interval + base_low - addon_low;
- res += (1 << 24);
- } else
- res += (1 << 24);
-
- return cpu_to_le32(res);
-}
-static int iwl_get_measurement(struct iwl_priv *priv,
- struct ieee80211_measurement_params *params,
- u8 type)
-{
- struct iwl4965_spectrum_cmd spectrum;
- struct iwl_rx_packet *res;
- struct iwl_host_cmd cmd = {
- .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
- .data = (void *)&spectrum,
- .meta.flags = CMD_WANT_SKB,
- };
- u32 add_time = le64_to_cpu(params->start_time);
- int rc;
- int spectrum_resp_status;
- int duration = le16_to_cpu(params->duration);
-
- if (iwl_is_associated(priv))
- add_time =
- iwl_usecs_to_beacons(
- le64_to_cpu(params->start_time) - priv->last_tsf,
- le16_to_cpu(priv->rxon_timing.beacon_interval));
-
- memset(&spectrum, 0, sizeof(spectrum));
-
- spectrum.channel_count = cpu_to_le16(1);
- spectrum.flags =
- RXON_FLG_TSF2HOST_MSK | RXON_FLG_ANT_A_MSK | RXON_FLG_DIS_DIV_MSK;
- spectrum.filter_flags = MEASUREMENT_FILTER_FLAG;
- cmd.len = sizeof(spectrum);
- spectrum.len = cpu_to_le16(cmd.len - sizeof(spectrum.len));
-
- if (iwl_is_associated(priv))
- spectrum.start_time =
- iwl_add_beacon_time(priv->last_beacon_time,
- add_time,
- le16_to_cpu(priv->rxon_timing.beacon_interval));
- else
- spectrum.start_time = 0;
-
- spectrum.channels[0].duration = cpu_to_le32(duration * TIME_UNIT);
- spectrum.channels[0].channel = params->channel;
- spectrum.channels[0].type = type;
- if (priv->active_rxon.flags & RXON_FLG_BAND_24G_MSK)
- spectrum.flags |= RXON_FLG_BAND_24G_MSK |
- RXON_FLG_AUTO_DETECT_MSK | RXON_FLG_TGG_PROTECT_MSK;
-
- rc = iwl_send_cmd_sync(priv, &cmd);
- if (rc)
- return rc;
-
- res = (struct iwl_rx_packet *)cmd.meta.u.skb->data;
- if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
- rc = -EIO;
- }
-
- spectrum_resp_status = le16_to_cpu(res->u.spectrum.status);
- switch (spectrum_resp_status) {
- case 0: /* Command will be handled */
- if (res->u.spectrum.id != 0xff) {
- IWL_DEBUG_INFO(priv,
- "Replaced existing measurement: %d\n",
- res->u.spectrum.id);
- priv->measurement_status &= ~MEASUREMENT_READY;
- }
- priv->measurement_status |= MEASUREMENT_ACTIVE;
- rc = 0;
- break;
-
- case 1: /* Command will not be handled */
- rc = -EAGAIN;
- break;
- }
-
- dev_kfree_skb_any(cmd.meta.u.skb);
-
- return rc;
-}
-#endif
-
-static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
-{
- struct iwl_rx_packet *pkt = rxb_addr(rxb);
- struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
-
- if (!report->state) {
- IWL_DEBUG_11H(priv,
- "Spectrum Measure Notification: Start\n");
- return;
- }
-
- memcpy(&priv->measure_report, report, sizeof(*report));
- priv->measurement_status |= MEASUREMENT_READY;
-}
-
-void iwl_setup_spectrum_handlers(struct iwl_priv *priv)
-{
- priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
- iwl_rx_spectrum_measure_notif;
-}
-EXPORT_SYMBOL(iwl_setup_spectrum_handlers);
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.h b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
index a77c1e61906..af6babee289 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.h
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ieee80211 subsystem header files.
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 90fbdb25399..4a6686fa6b3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -80,46 +80,103 @@ int iwl_get_ra_sta_id(struct iwl_priv *priv, struct ieee80211_hdr *hdr)
}
EXPORT_SYMBOL(iwl_get_ra_sta_id);
+/* priv->sta_lock must be held */
static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
{
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_lock, flags);
if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE))
- IWL_ERR(priv, "ACTIVATE a non DRIVER active station %d\n",
- sta_id);
-
- priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
- IWL_DEBUG_ASSOC(priv, "Added STA to Ucode: %pM\n",
- priv->stations[sta_id].sta.sta.addr);
+ IWL_ERR(priv, "ACTIVATE a non DRIVER active station id %u addr %pM\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
- spin_unlock_irqrestore(&priv->sta_lock, flags);
+ if (priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE) {
+ IWL_DEBUG_ASSOC(priv,
+ "STA id %u addr %pM already present in uCode (according to driver)\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ } else {
+ priv->stations[sta_id].used |= IWL_STA_UCODE_ACTIVE;
+ IWL_DEBUG_ASSOC(priv, "Added STA id %u addr %pM to uCode\n",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+ }
}
-static void iwl_add_sta_callback(struct iwl_priv *priv,
- struct iwl_device_cmd *cmd,
- struct iwl_rx_packet *pkt)
+static void iwl_process_add_sta_resp(struct iwl_priv *priv,
+ struct iwl_addsta_cmd *addsta,
+ struct iwl_rx_packet *pkt,
+ bool sync)
{
- struct iwl_addsta_cmd *addsta =
- (struct iwl_addsta_cmd *)cmd->cmd.payload;
u8 sta_id = addsta->sta.sta_id;
+ unsigned long flags;
if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
+ pkt->hdr.flags);
return;
}
+ IWL_DEBUG_INFO(priv, "Processing response for adding station %u\n",
+ sta_id);
+
+ spin_lock_irqsave(&priv->sta_lock, flags);
+
switch (pkt->u.add_sta.status) {
case ADD_STA_SUCCESS_MSK:
+ IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
iwl_sta_ucode_activate(priv, sta_id);
- /* fall through */
+ break;
+ case ADD_STA_NO_ROOM_IN_TABLE:
+ IWL_ERR(priv, "Adding station %d failed, no room in table.\n",
+ sta_id);
+ break;
+ case ADD_STA_NO_BLOCK_ACK_RESOURCE:
+ IWL_ERR(priv, "Adding station %d failed, no block ack resource.\n",
+ sta_id);
+ break;
+ case ADD_STA_MODIFY_NON_EXIST_STA:
+ IWL_ERR(priv, "Attempting to modify non-existing station %d \n",
+ sta_id);
+ break;
default:
- IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
- pkt->u.add_sta.status);
+ IWL_DEBUG_ASSOC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
+ pkt->u.add_sta.status);
break;
}
+
+ IWL_DEBUG_INFO(priv, "%s station id %u addr %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ sta_id, priv->stations[sta_id].sta.sta.addr);
+
+ /*
+ * XXX: The MAC address in the command buffer is often changed from
+ * the original sent to the device. That is, the MAC address
+ * written to the command buffer often is not the same MAC address
+ * read from the command buffer when the command returns. This
+ * issue has not yet been resolved and this debugging is left to
+ * observe the problem.
+ */
+ IWL_DEBUG_INFO(priv, "%s station according to cmd buffer %pM\n",
+ priv->stations[sta_id].sta.mode ==
+ STA_CONTROL_MODIFY_MSK ? "Modified" : "Added",
+ addsta->sta.addr);
+
+ /*
+ * Determine if we wanted to modify or add a station;
+ * if adding a station succeeded, we have some more initialization
+ * to do when using station notification. TODO
+ */
+
+ spin_unlock_irqrestore(&priv->sta_lock, flags);
+}
+
+static void iwl_add_sta_callback(struct iwl_priv *priv,
+ struct iwl_device_cmd *cmd,
+ struct iwl_rx_packet *pkt)
+{
+ struct iwl_addsta_cmd *addsta =
+ (struct iwl_addsta_cmd *)cmd->cmd.payload;
+
+ iwl_process_add_sta_resp(priv, addsta, pkt, false);
+
}
int iwl_send_add_sta(struct iwl_priv *priv,
@@ -145,24 +202,9 @@ int iwl_send_add_sta(struct iwl_priv *priv,
if (ret || (flags & CMD_ASYNC))
return ret;
- pkt = (struct iwl_rx_packet *)cmd.reply_page;
- if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
- IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
- pkt->hdr.flags);
- ret = -EIO;
- }
-
if (ret == 0) {
- switch (pkt->u.add_sta.status) {
- case ADD_STA_SUCCESS_MSK:
- iwl_sta_ucode_activate(priv, sta->sta.sta_id);
- IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
- break;
- default:
- ret = -EIO;
- IWL_WARN(priv, "REPLY_ADD_STA failed\n");
- break;
- }
+ pkt = (struct iwl_rx_packet *)cmd.reply_page;
+ iwl_process_add_sta_resp(priv, sta, pkt, true);
}
iwl_free_pages(priv, cmd.reply_page);
@@ -1003,24 +1045,19 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
struct ieee80211_sta_ht_cap *cur_ht_config = NULL;
u8 sta_id;
- /* Add station to device's station table */
-
/*
- * XXX: This check is definitely not correct, if we're an AP
- * it'll always be false which is not what we want, but
- * it doesn't look like iwlagn is prepared to be an HT
- * AP anyway.
+ * Set HT capabilities. It is ok to set this struct even if not using
+ * HT config: the priv->current_ht_config.is_ht flag will just be false
*/
- if (priv->current_ht_config.is_ht) {
- rcu_read_lock();
- sta = ieee80211_find_sta(priv->vif, addr);
- if (sta) {
- memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
- cur_ht_config = &ht_config;
- }
- rcu_read_unlock();
+ rcu_read_lock();
+ sta = ieee80211_find_sta(priv->vif, addr);
+ if (sta) {
+ memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
+ cur_ht_config = &ht_config;
}
+ rcu_read_unlock();
+ /* Add station to device's station table */
sta_id = iwl_add_station(priv, addr, is_ap, CMD_SYNC, cur_ht_config);
/* Set up default rate scaling table in device's station table */
@@ -1085,6 +1122,7 @@ static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
*/
void iwl_add_bcast_station(struct iwl_priv *priv)
{
+ IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
/* Set up default rate scaling table in device's station table */
@@ -1093,6 +1131,16 @@ void iwl_add_bcast_station(struct iwl_priv *priv)
EXPORT_SYMBOL(iwl_add_bcast_station);
/**
+ * iwl3945_add_bcast_station - add broadcast station into station table.
+ */
+void iwl3945_add_bcast_station(struct iwl_priv *priv)
+{
+ IWL_DEBUG_INFO(priv, "Adding broadcast station to station table\n");
+ iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
+}
+EXPORT_SYMBOL(iwl3945_add_bcast_station);
+
+/**
* iwl_get_sta_id - Find station's index within station table
*
* If new IBSS station, create new entry in station table
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 8d052de2d40..2dc35fe28f5 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -53,6 +53,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
void iwl_add_bcast_station(struct iwl_priv *priv);
+void iwl3945_add_bcast_station(struct iwl_priv *priv);
int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
void iwl_clear_stations_table(struct iwl_priv *priv);
int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 8f407156285..1ed5206721e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -60,7 +60,8 @@ static const u16 default_tid_to_tx_fifo[] = {
static inline int iwl_alloc_dma_ptr(struct iwl_priv *priv,
struct iwl_dma_ptr *ptr, size_t size)
{
- ptr->addr = pci_alloc_consistent(priv->pci_dev, size, &ptr->dma);
+ ptr->addr = dma_alloc_coherent(&priv->pci_dev->dev, size, &ptr->dma,
+ GFP_KERNEL);
if (!ptr->addr)
return -ENOMEM;
ptr->size = size;
@@ -73,21 +74,20 @@ static inline void iwl_free_dma_ptr(struct iwl_priv *priv,
if (unlikely(!ptr->addr))
return;
- pci_free_consistent(priv->pci_dev, ptr->size, ptr->addr, ptr->dma);
+ dma_free_coherent(&priv->pci_dev->dev, ptr->size, ptr->addr, ptr->dma);
memset(ptr, 0, sizeof(*ptr));
}
/**
* iwl_txq_update_write_ptr - Send new write index to hardware
*/
-int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
+void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
{
u32 reg = 0;
- int ret = 0;
int txq_id = txq->q.id;
if (txq->need_update == 0)
- return ret;
+ return;
/* if we're trying to save power */
if (test_bit(STATUS_POWER_PMI, &priv->status)) {
@@ -101,7 +101,7 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
txq_id, reg);
iwl_set_bit(priv, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- return ret;
+ return;
}
iwl_write_direct32(priv, HBUS_TARG_WRPTR,
@@ -114,8 +114,6 @@ int iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
txq->q.write_ptr | (txq_id << 8));
txq->need_update = 0;
-
- return ret;
}
EXPORT_SYMBOL(iwl_txq_update_write_ptr);
@@ -146,7 +144,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
{
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct iwl_queue *q = &txq->q;
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
int i;
if (q->n_bd == 0)
@@ -163,8 +161,8 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
- pci_free_consistent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(dev, priv->hw_params.tfd_size *
+ txq->q.n_bd, txq->tfds, txq->q.dma_addr);
/* De-alloc array of per-TFD driver data */
kfree(txq->txb);
@@ -193,7 +191,7 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
{
struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
struct iwl_queue *q = &txq->q;
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
int i;
if (q->n_bd == 0)
@@ -205,8 +203,8 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
/* De-alloc circular buffer of TFDs */
if (txq->q.n_bd)
- pci_free_consistent(dev, priv->hw_params.tfd_size *
- txq->q.n_bd, txq->tfds, txq->q.dma_addr);
+ dma_free_coherent(dev, priv->hw_params.tfd_size * txq->q.n_bd,
+ txq->tfds, txq->q.dma_addr);
/* deallocate arrays */
kfree(txq->cmd);
@@ -297,7 +295,7 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
static int iwl_tx_queue_alloc(struct iwl_priv *priv,
struct iwl_tx_queue *txq, u32 id)
{
- struct pci_dev *dev = priv->pci_dev;
+ struct device *dev = &priv->pci_dev->dev;
size_t tfd_sz = priv->hw_params.tfd_size * TFD_QUEUE_SIZE_MAX;
/* Driver private data, only for Tx (not command) queues,
@@ -316,8 +314,8 @@ static int iwl_tx_queue_alloc(struct iwl_priv *priv,
/* Circular buffer of transmit frame descriptors (TFDs),
* shared with device */
- txq->tfds = pci_alloc_consistent(dev, tfd_sz, &txq->q.dma_addr);
-
+ txq->tfds = dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr,
+ GFP_KERNEL);
if (!txq->tfds) {
IWL_ERR(priv, "pci_alloc_consistent(%zd) failed\n", tfd_sz);
goto error;
@@ -366,7 +364,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
for (i = 0; i < actual_slots; i++) {
/* only happens for cmd queue */
if (i == slots_num)
- len += IWL_MAX_SCAN_SIZE;
+ len = IWL_MAX_CMD_SIZE;
txq->cmd[i] = kmalloc(len, GFP_KERNEL);
if (!txq->cmd[i])
@@ -745,7 +743,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
u8 tid = 0;
u8 *qc = NULL;
unsigned long flags;
- int ret;
spin_lock_irqsave(&priv->lock, flags);
if (iwl_is_rfkill(priv)) {
@@ -820,8 +817,10 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
hdr->seq_ctrl |= cpu_to_le16(seq_number);
seq_number += 0x10;
/* aggregation is on for this <sta,tid> */
- if (info->flags & IEEE80211_TX_CTL_AMPDU)
+ if (info->flags & IEEE80211_TX_CTL_AMPDU &&
+ priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
+ }
}
txq = &priv->txq[txq_id];
@@ -963,7 +962,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- ret = iwl_txq_update_write_ptr(priv, txq);
+ iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
/*
@@ -977,9 +976,6 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
if (sta_priv && sta_priv->client)
atomic_inc(&sta_priv->pending_frames);
- if (ret)
- return ret;
-
if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
if (wait_write_ptr) {
spin_lock_irqsave(&priv->lock, flags);
@@ -1018,7 +1014,7 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
struct iwl_cmd_meta *out_meta;
dma_addr_t phys_addr;
unsigned long flags;
- int len, ret;
+ int len;
u32 idx;
u16 fix_size;
@@ -1027,9 +1023,12 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* If any of the command structures end up being larger than
* the TFD_MAX_PAYLOAD_SIZE, and it sent as a 'small' command then
- * we will need to increase the size of the TFD entries */
+ * we will need to increase the size of the TFD entries
+ * Also, check that the command buffer does not exceed the size
+ * of device_cmd and max_cmd_size. */
BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
!(cmd->flags & CMD_SIZE_HUGE));
+ BUG_ON(fix_size > IWL_MAX_CMD_SIZE);
if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
IWL_WARN(priv, "Not sending command - %s KILL\n",
@@ -1073,8 +1072,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
if (cmd->flags & CMD_SIZE_HUGE)
out_cmd->hdr.sequence |= SEQ_HUGE_FRAME;
len = sizeof(struct iwl_device_cmd);
- len += (idx == TFD_CMD_SLOTS) ? IWL_MAX_SCAN_SIZE : 0;
-
+ if (idx == TFD_CMD_SLOTS)
+ len = IWL_MAX_CMD_SIZE;
#ifdef CONFIG_IWLWIFI_DEBUG
switch (out_cmd->hdr.cmd) {
@@ -1115,10 +1114,10 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- ret = iwl_txq_update_write_ptr(priv, txq);
+ iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->hcmd_lock, flags);
- return ret ? ret : idx;
+ return idx;
}
static void iwl_tx_status(struct iwl_priv *priv, struct sk_buff *skb)
@@ -1260,6 +1259,8 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
if (!(meta->flags & CMD_ASYNC)) {
clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
+ IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n",
+ get_cmd_string(cmd->hdr.cmd));
wake_up_interruptible(&priv->wait_command_queue);
}
}
@@ -1346,7 +1347,7 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
{
int tx_fifo_id, txq_id, sta_id, ssn = -1;
struct iwl_tid_data *tid_data;
- int ret, write_ptr, read_ptr;
+ int write_ptr, read_ptr;
unsigned long flags;
if (!ra) {
@@ -1398,13 +1399,17 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
spin_lock_irqsave(&priv->lock, flags);
- ret = priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
+ /*
+ * The only reason this call can fail is a queue number out of range,
+ * which can happen if uCode is reloaded and all the station
+ * information is lost. If it is outside the range, there is no need
+ * to deactivate the uCode queue; just return "success" to allow
+ * mac80211 to clean up its own data.
+ */
+ priv->cfg->ops->lib->txq_agg_disable(priv, txq_id, ssn,
tx_fifo_id);
spin_unlock_irqrestore(&priv->lock, flags);
- if (ret)
- return ret;
-
ieee80211_stop_tx_ba_cb_irqsafe(priv->vif, ra, tid);
return 0;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index f8e4e4b18d0..54daa38ecba 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -53,9 +53,10 @@
#include "iwl-commands.h"
#include "iwl-sta.h"
#include "iwl-3945.h"
-#include "iwl-helpers.h"
#include "iwl-core.h"
+#include "iwl-helpers.h"
#include "iwl-dev.h"
+#include "iwl-spectrum.h"
/*
* module name, copyright, version, etc.
@@ -70,14 +71,13 @@
#define VD
#endif
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-#define VS "s"
-#else
-#define VS
-#endif
-
-#define DRV_VERSION IWLWIFI_VERSION VD VS
-#define DRV_COPYRIGHT "Copyright(c) 2003-2009 Intel Corporation"
+/*
+ * add "s" to indicate spectrum measurement included.
+ * we add it here to be consistent with previous releases in which
+ * this was configurable.
+ */
+#define DRV_VERSION IWLWIFI_VERSION VD "s"
+#define DRV_COPYRIGHT "Copyright(c) 2003-2010 Intel Corporation"
#define DRV_AUTHOR "<ilw@linux.intel.com>"
MODULE_DESCRIPTION(DRV_DESCRIPTION);
@@ -352,10 +352,10 @@ static int iwl3945_send_beacon_cmd(struct iwl_priv *priv)
static void iwl3945_unset_hw_params(struct iwl_priv *priv)
{
if (priv->shared_virt)
- pci_free_consistent(priv->pci_dev,
- sizeof(struct iwl3945_shared),
- priv->shared_virt,
- priv->shared_phys);
+ dma_free_coherent(&priv->pci_dev->dev,
+ sizeof(struct iwl3945_shared),
+ priv->shared_virt,
+ priv->shared_phys);
}
static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@ -478,7 +478,6 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
u8 wait_write_ptr = 0;
u8 *qc = NULL;
unsigned long flags;
- int rc;
spin_lock_irqsave(&priv->lock, flags);
if (iwl_is_rfkill(priv)) {
@@ -663,12 +662,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
/* Tell device the write index *just past* this latest filled TFD */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
- rc = iwl_txq_update_write_ptr(priv, txq);
+ iwl_txq_update_write_ptr(priv, txq);
spin_unlock_irqrestore(&priv->lock, flags);
- if (rc)
- return rc;
-
if ((iwl_queue_space(q) < q->high_mark)
&& priv->mac80211_registered) {
if (wait_write_ptr) {
@@ -689,10 +685,6 @@ drop:
return -1;
}
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-
-#include "iwl-spectrum.h"
-
#define BEACON_TIME_MASK_LOW 0x00FFFFFF
#define BEACON_TIME_MASK_HIGH 0xFF000000
#define TIME_UNIT 1024
@@ -819,7 +811,6 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
return rc;
}
-#endif
static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
struct iwl_rx_mem_buffer *rxb)
@@ -962,6 +953,8 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_ADD_STA] = iwl3945_rx_reply_add_sta;
priv->rx_handlers[REPLY_ERROR] = iwl_rx_reply_error;
priv->rx_handlers[CHANNEL_SWITCH_NOTIFICATION] = iwl_rx_csa;
+ priv->rx_handlers[SPECTRUM_MEASURE_NOTIFICATION] =
+ iwl_rx_spectrum_measure_notif;
priv->rx_handlers[PM_SLEEP_NOTIFICATION] = iwl_rx_pm_sleep_notif;
priv->rx_handlers[PM_DEBUG_STATISTIC_NOTIFIC] =
iwl_rx_pm_debug_statistics_notif;
@@ -975,7 +968,6 @@ static void iwl3945_setup_rx_handlers(struct iwl_priv *priv)
priv->rx_handlers[REPLY_STATISTICS_CMD] = iwl3945_hw_rx_statistics;
priv->rx_handlers[STATISTICS_NOTIFICATION] = iwl3945_hw_rx_statistics;
- iwl_setup_spectrum_handlers(priv);
iwl_setup_rx_scan_handlers(priv);
priv->rx_handlers[CARD_STATE_NOTIFICATION] = iwl3945_rx_card_state_notif;
@@ -1067,13 +1059,13 @@ static inline __le32 iwl3945_dma_addr2rbd_ptr(struct iwl_priv *priv,
* also updates the memory address in the firmware to reference the new
* target buffer.
*/
-static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
+static void iwl3945_rx_queue_restock(struct iwl_priv *priv)
{
struct iwl_rx_queue *rxq = &priv->rxq;
struct list_head *element;
struct iwl_rx_mem_buffer *rxb;
unsigned long flags;
- int write, rc;
+ int write;
spin_lock_irqsave(&rxq->lock, flags);
write = rxq->write & ~0x7;
@@ -1103,12 +1095,8 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
spin_lock_irqsave(&rxq->lock, flags);
rxq->need_update = 1;
spin_unlock_irqrestore(&rxq->lock, flags);
- rc = iwl_rx_queue_update_write_ptr(priv, rxq);
- if (rc)
- return rc;
+ iwl_rx_queue_update_write_ptr(priv, rxq);
}
-
- return 0;
}
/**
@@ -1253,10 +1241,10 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
}
}
- pci_free_consistent(priv->pci_dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->dma_addr);
- pci_free_consistent(priv->pci_dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
+ dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
+ rxq->dma_addr);
+ dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
+ rxq->rb_stts, rxq->rb_stts_dma);
rxq->bd = NULL;
rxq->rb_stts = NULL;
}
@@ -1518,8 +1506,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
* iwl3945_print_event_log - Dump error event log to syslog
*
*/
-static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
- u32 num_events, u32 mode)
+static int iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
+ u32 num_events, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
u32 i;
u32 base; /* SRAM byte address of event log header */
@@ -1529,7 +1518,7 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
unsigned long reg_flags;
if (num_events == 0)
- return;
+ return pos;
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
@@ -1555,26 +1544,43 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
time = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
if (mode == 0) {
/* data, ev */
- IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
- trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "0x%08x:%04u\n",
+ time, ev);
+ } else {
+ IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
+ trace_iwlwifi_dev_ucode_event(priv, 0,
+ time, ev);
+ }
} else {
data = _iwl_read_direct32(priv, HBUS_TARG_MEM_RDAT);
- IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
- trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
+ if (bufsz) {
+ pos += scnprintf(*buf + pos, bufsz - pos,
+ "%010u:0x%08x:%04u\n",
+ time, data, ev);
+ } else {
+ IWL_ERR(priv, "%010u\t0x%08x\t%04u\n",
+ time, data, ev);
+ trace_iwlwifi_dev_ucode_event(priv, time,
+ data, ev);
+ }
}
}
/* Allow device to power down */
iwl_release_nic_access(priv);
spin_unlock_irqrestore(&priv->reg_lock, reg_flags);
+ return pos;
}
/**
* iwl3945_print_last_event_logs - Dump the newest # of event log to syslog
*/
-static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
+static int iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
u32 num_wraps, u32 next_entry,
- u32 size, u32 mode)
+ u32 size, u32 mode,
+ int pos, char **buf, size_t bufsz)
{
/*
* display the newest DEFAULT_LOG_ENTRIES entries
@@ -1582,21 +1588,28 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
*/
if (num_wraps) {
if (next_entry < size) {
- iwl3945_print_event_log(priv,
- capacity - (size - next_entry),
- size - next_entry, mode);
- iwl3945_print_event_log(priv, 0,
- next_entry, mode);
+ pos = iwl3945_print_event_log(priv,
+ capacity - (size - next_entry),
+ size - next_entry, mode,
+ pos, buf, bufsz);
+ pos = iwl3945_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl3945_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl3945_print_event_log(priv, next_entry - size,
+ size, mode,
+ pos, buf, bufsz);
} else {
if (next_entry < size)
- iwl3945_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl3945_print_event_log(priv, 0,
+ next_entry, mode,
+ pos, buf, bufsz);
else
- iwl3945_print_event_log(priv, next_entry - size,
- size, mode);
+ pos = iwl3945_print_event_log(priv, next_entry - size,
+ size, mode,
+ pos, buf, bufsz);
}
+ return pos;
}
/* For sanity check only. Actual size is determined by uCode, typ. 512 */
@@ -1604,7 +1617,8 @@ static void iwl3945_print_last_event_logs(struct iwl_priv *priv, u32 capacity,
#define DEFAULT_IWL3945_DUMP_EVENT_LOG_ENTRIES (20)
-void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
+int iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
+ char **buf, bool display)
{
u32 base; /* SRAM byte address of event log header */
u32 capacity; /* event log capacity in # entries */
@@ -1612,11 +1626,13 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
u32 num_wraps; /* # times uCode wrapped to top of log */
u32 next_entry; /* index of next entry to be written by uCode */
u32 size; /* # entries that we'll print */
+ int pos = 0;
+ size_t bufsz = 0;
base = le32_to_cpu(priv->card_alive.log_event_table_ptr);
if (!iwl3945_hw_valid_rtc_data_addr(base)) {
IWL_ERR(priv, "Invalid event log pointer 0x%08X\n", base);
- return;
+ return -EINVAL;
}
/* event log header */
@@ -1642,7 +1658,7 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
/* bail out if nothing in log */
if (size == 0) {
IWL_ERR(priv, "Start IWL Event Log Dump: nothing in log\n");
- return;
+ return pos;
}
#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1658,25 +1674,38 @@ void iwl3945_dump_nic_event_log(struct iwl_priv *priv, bool full_log)
size);
#ifdef CONFIG_IWLWIFI_DEBUG
+ if (display) {
+ if (full_log)
+ bufsz = capacity * 48;
+ else
+ bufsz = size * 48;
+ *buf = kmalloc(bufsz, GFP_KERNEL);
+ if (!*buf)
+ return -ENOMEM;
+ }
if ((iwl_get_debug_level(priv) & IWL_DL_FW_ERRORS) || full_log) {
/* if uCode has wrapped back to top of log,
* start at the oldest entry,
* i.e the next one that uCode would fill.
*/
if (num_wraps)
- iwl3945_print_event_log(priv, next_entry,
- capacity - next_entry, mode);
+ pos = iwl3945_print_event_log(priv, next_entry,
+ capacity - next_entry, mode,
+ pos, buf, bufsz);
/* (then/else) start at top of log */
- iwl3945_print_event_log(priv, 0, next_entry, mode);
+ pos = iwl3945_print_event_log(priv, 0, next_entry, mode,
+ pos, buf, bufsz);
} else
- iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#else
- iwl3945_print_last_event_logs(priv, capacity, num_wraps,
- next_entry, size, mode);
+ pos = iwl3945_print_last_event_logs(priv, capacity, num_wraps,
+ next_entry, size, mode,
+ pos, buf, bufsz);
#endif
-
+ return pos;
}
static void iwl3945_irq_tasklet(struct iwl_priv *priv)
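iwl3945_print_event_log() and its callers are reworked so the same code path can either print to the kernel log or fill a caller-supplied buffer, threading a write position through the calls. A minimal sketch of that dual-sink pattern, not part of the patch; dump_entry(), time and ev are hypothetical:

    /* When bufsz is non-zero, append to *buf at offset pos and return
     * the new offset; otherwise fall back to the kernel log. */
    static int dump_entry(char **buf, size_t bufsz, int pos, u32 time, u32 ev)
    {
            if (bufsz)
                    pos += scnprintf(*buf + pos, bufsz - pos,
                                     "0x%08x:%04u\n", time, ev);
            else
                    pr_err("0x%08x\t%04u\n", time, ev);
            return pos;
    }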
@@ -2996,18 +3025,6 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
mutex_unlock(&priv->mutex);
}
-static void iwl3945_bg_up(struct work_struct *data)
-{
- struct iwl_priv *priv = container_of(data, struct iwl_priv, up);
-
- if (test_bit(STATUS_EXIT_PENDING, &priv->status))
- return;
-
- mutex_lock(&priv->mutex);
- __iwl3945_up(priv);
- mutex_unlock(&priv->mutex);
-}
-
static void iwl3945_bg_restart(struct work_struct *data)
{
struct iwl_priv *priv = container_of(data, struct iwl_priv, restart);
@@ -3024,7 +3041,13 @@ static void iwl3945_bg_restart(struct work_struct *data)
ieee80211_restart_hw(priv->hw);
} else {
iwl3945_down(priv);
- queue_work(priv->workqueue, &priv->up);
+
+ if (test_bit(STATUS_EXIT_PENDING, &priv->status))
+ return;
+
+ mutex_lock(&priv->mutex);
+ __iwl3945_up(priv);
+ mutex_unlock(&priv->mutex);
}
}
@@ -3528,8 +3551,6 @@ static ssize_t store_filter_flags(struct device *d,
static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
store_filter_flags);
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
-
static ssize_t show_measurement(struct device *d,
struct device_attribute *attr, char *buf)
{
@@ -3599,7 +3620,6 @@ static ssize_t store_measurement(struct device *d,
static DEVICE_ATTR(measurement, S_IRUSR | S_IWUSR,
show_measurement, store_measurement);
-#endif /* CONFIG_IWL3945_SPECTRUM_MEASUREMENT */
static ssize_t store_retry_rate(struct device *d,
struct device_attribute *attr,
@@ -3748,7 +3768,6 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
init_waitqueue_head(&priv->wait_command_queue);
- INIT_WORK(&priv->up, iwl3945_bg_up);
INIT_WORK(&priv->restart, iwl3945_bg_restart);
INIT_WORK(&priv->rx_replenish, iwl3945_bg_rx_replenish);
INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
@@ -3782,9 +3801,7 @@ static struct attribute *iwl3945_sysfs_entries[] = {
&dev_attr_dump_errors.attr,
&dev_attr_flags.attr,
&dev_attr_filter_flags.attr,
-#ifdef CONFIG_IWL3945_SPECTRUM_MEASUREMENT
&dev_attr_measurement.attr,
-#endif
&dev_attr_retry_rate.attr,
&dev_attr_statistics.attr,
&dev_attr_status.attr,
@@ -3810,7 +3827,6 @@ static struct ieee80211_ops iwl3945_hw_ops = {
.config = iwl_mac_config,
.configure_filter = iwl_configure_filter,
.set_key = iwl3945_mac_set_key,
- .get_tx_stats = iwl_mac_get_tx_stats,
.conf_tx = iwl_mac_conf_tx,
.reset_tsf = iwl_mac_reset_tsf,
.bss_info_changed = iwl_bss_info_changed,
@@ -3831,6 +3847,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
INIT_LIST_HEAD(&priv->free_frames);
mutex_init(&priv->mutex);
+ mutex_init(&priv->sync_cmd_mutex);
/* Clear the driver's (not device's) station table */
iwl_clear_stations_table(priv);
@@ -3840,6 +3857,7 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
priv->band = IEEE80211_BAND_2GHZ;
priv->iw_mode = NL80211_IFTYPE_STATION;
+ priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
iwl_reset_qos(priv);
@@ -4022,6 +4040,13 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
spin_lock_init(&priv->reg_lock);
spin_lock_init(&priv->lock);
+ /*
+ * stop and reset the on-board processor just in case it is in a
+ * strange state ... like being left stranded by a primary kernel
+ * and this is now the kdump kernel trying to start up
+ */
+ iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
+
/***********************
* 4. Read EEPROM
* ********************/
diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
index be992ca41cf..c29c994de0e 100644
--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
@@ -89,7 +89,7 @@ static int iwm_debugfs_dbg_modules_write(void *data, u64 val)
for (i = 0; i < __IWM_DM_NR; i++)
iwm->dbg.dbg_module[i] = 0;
- for_each_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
+ for_each_set_bit(bit, &iwm->dbg.dbg_modules, __IWM_DM_NR)
iwm->dbg.dbg_module[bit] = iwm->dbg.dbg_level;
return 0;
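for_each_bit() was renamed to for_each_set_bit() in linux/bitops.h; only the name changes, the arguments (bit index, bitmap address, bitmap size in bits) stay the same. A tiny usage sketch with hypothetical mask values:

    unsigned long mask = 0x2d;      /* hypothetical bitmap */
    int bit;

    for_each_set_bit(bit, &mask, BITS_PER_LONG)
            pr_info("bit %d is set\n", bit);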
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 842811142be..79ffa3b98d7 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -268,7 +268,7 @@ struct iwm_priv {
struct sk_buff_head rx_list;
struct list_head rx_tickets;
- struct list_head rx_packets[IWM_RX_ID_HASH + 1];
+ struct list_head rx_packets[IWM_RX_ID_HASH];
struct workqueue_struct *rx_wq;
struct work_struct rx_worker;
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index f727b4a8319..8456b4dbd14 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -868,36 +868,35 @@ static int iwm_mlme_mgt_frame(struct iwm_priv *iwm, u8 *buf,
struct iwm_umac_notif_mgt_frame *mgt_frame =
(struct iwm_umac_notif_mgt_frame *)buf;
struct ieee80211_mgmt *mgt = (struct ieee80211_mgmt *)mgt_frame->frame;
- u8 *ie;
IWM_HEXDUMP(iwm, DBG, MLME, "MGT: ", mgt_frame->frame,
le16_to_cpu(mgt_frame->len));
if (ieee80211_is_assoc_req(mgt->frame_control)) {
- ie = mgt->u.assoc_req.variable;;
- iwm->req_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.assoc_req.variable);
kfree(iwm->req_ie);
iwm->req_ie = kmemdup(mgt->u.assoc_req.variable,
iwm->req_ie_len, GFP_KERNEL);
} else if (ieee80211_is_reassoc_req(mgt->frame_control)) {
- ie = mgt->u.reassoc_req.variable;;
- iwm->req_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->req_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.reassoc_req.variable);
kfree(iwm->req_ie);
iwm->req_ie = kmemdup(mgt->u.reassoc_req.variable,
iwm->req_ie_len, GFP_KERNEL);
} else if (ieee80211_is_assoc_resp(mgt->frame_control)) {
- ie = mgt->u.assoc_resp.variable;;
- iwm->resp_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.assoc_resp.variable);
kfree(iwm->resp_ie);
iwm->resp_ie = kmemdup(mgt->u.assoc_resp.variable,
iwm->resp_ie_len, GFP_KERNEL);
} else if (ieee80211_is_reassoc_resp(mgt->frame_control)) {
- ie = mgt->u.reassoc_resp.variable;;
- iwm->resp_ie_len =
- le16_to_cpu(mgt_frame->len) - (ie - (u8 *)mgt);
+ iwm->resp_ie_len = le16_to_cpu(mgt_frame->len)
+ - offsetof(struct ieee80211_mgmt,
+ u.reassoc_resp.variable);
kfree(iwm->resp_ie);
iwm->resp_ie = kmemdup(mgt->u.reassoc_resp.variable,
iwm->resp_ie_len, GFP_KERNEL);
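Instead of taking the address of the flexible 'variable' member and doing pointer arithmetic (which also carried the stray double semicolons removed above), the IE length is now derived with offsetof() against struct ieee80211_mgmt. A minimal sketch, where frame_len stands in for the total management-frame length:

    /* Length of the information elements that follow the fixed part of
     * an association request; "frame_len" is hypothetical. */
    size_t ie_len = frame_len -
                    offsetof(struct ieee80211_mgmt, u.assoc_req.variable);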
@@ -1117,7 +1116,7 @@ static int iwm_ntf_stop_resume_tx(struct iwm_priv *iwm, u8 *buf,
return -EINVAL;
}
- for_each_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
+ for_each_set_bit(bit, (unsigned long *)&tid_msk, IWM_UMAC_TID_NR) {
tid_info = &sta_info->tid_info[bit];
mutex_lock(&tid_info->mutex);
@@ -1534,6 +1533,33 @@ static void classify8023(struct sk_buff *skb)
}
}
+static void iwm_rx_process_amsdu(struct iwm_priv *iwm, struct sk_buff *skb)
+{
+ struct wireless_dev *wdev = iwm_to_wdev(iwm);
+ struct net_device *ndev = iwm_to_ndev(iwm);
+ struct sk_buff_head list;
+ struct sk_buff *frame;
+
+ IWM_HEXDUMP(iwm, DBG, RX, "A-MSDU: ", skb->data, skb->len);
+
+ __skb_queue_head_init(&list);
+ ieee80211_amsdu_to_8023s(skb, &list, ndev->dev_addr, wdev->iftype, 0);
+
+ while ((frame = __skb_dequeue(&list))) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += frame->len;
+
+ frame->protocol = eth_type_trans(frame, ndev);
+ frame->ip_summed = CHECKSUM_NONE;
+ memset(frame->cb, 0, sizeof(frame->cb));
+
+ if (netif_rx_ni(frame) == NET_RX_DROP) {
+ IWM_ERR(iwm, "Packet dropped\n");
+ ndev->stats.rx_dropped++;
+ }
+ }
+}
+
static void iwm_rx_process_packet(struct iwm_priv *iwm,
struct iwm_rx_packet *packet,
struct iwm_rx_ticket_node *ticket_node)
@@ -1548,25 +1574,34 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
switch (le16_to_cpu(ticket_node->ticket->action)) {
case IWM_RX_TICKET_RELEASE:
IWM_DBG_RX(iwm, DBG, "RELEASE packet\n");
- classify8023(skb);
+
iwm_rx_adjust_packet(iwm, packet, ticket_node);
+ skb->dev = iwm_to_ndev(iwm);
+ classify8023(skb);
+
+ if (le16_to_cpu(ticket_node->ticket->flags) &
+ IWM_RX_TICKET_AMSDU_MSK) {
+ iwm_rx_process_amsdu(iwm, skb);
+ break;
+ }
+
ret = ieee80211_data_to_8023(skb, ndev->dev_addr, wdev->iftype);
if (ret < 0) {
IWM_DBG_RX(iwm, DBG, "Couldn't convert 802.11 header - "
"%d\n", ret);
+ kfree_skb(packet->skb);
break;
}
IWM_HEXDUMP(iwm, DBG, RX, "802.3: ", skb->data, skb->len);
- skb->dev = iwm_to_ndev(iwm);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+
skb->protocol = eth_type_trans(skb, ndev);
skb->ip_summed = CHECKSUM_NONE;
memset(skb->cb, 0, sizeof(skb->cb));
- ndev->stats.rx_packets++;
- ndev->stats.rx_bytes += skb->len;
-
if (netif_rx_ni(skb) == NET_RX_DROP) {
IWM_ERR(iwm, "Packet dropped\n");
ndev->stats.rx_dropped++;
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
index 30aa9d48d67..0485c995757 100644
--- a/drivers/net/wireless/libertas/Kconfig
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -37,3 +37,9 @@ config LIBERTAS_DEBUG
depends on LIBERTAS
---help---
Debugging support.
+
+config LIBERTAS_MESH
+ bool "Enable mesh support"
+ depends on LIBERTAS
+ help
+ This enables Libertas' MESH support, used by e.g. the OLPC people.
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index b188cd97a05..45e870e3311 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -5,11 +5,11 @@ libertas-y += cmdresp.o
libertas-y += debugfs.o
libertas-y += ethtool.o
libertas-y += main.o
-libertas-y += mesh.o
libertas-y += rx.o
libertas-y += scan.o
libertas-y += tx.o
libertas-y += wext.o
+libertas-$(CONFIG_LIBERTAS_MESH) += mesh.o
usb8xxx-objs += if_usb.o
libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index 751067369ba..f03d5e4e59c 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -390,10 +390,8 @@ int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
- if (!ret && cmd_action == CMD_ACT_GET) {
- priv->ratebitmap = le16_to_cpu(cmd.bitmap);
+ if (!ret && cmd_action == CMD_ACT_GET)
priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
- }
lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
return ret;
@@ -807,8 +805,7 @@ static int lbs_try_associate(struct lbs_private *priv,
}
/* Use short preamble only when both the BSS and firmware support it */
- if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
- (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
+ if (assoc_req->bss.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
preamble = RADIO_PREAMBLE_SHORT;
ret = lbs_set_radio(priv, preamble, 1);
@@ -939,8 +936,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
}
/* Use short preamble only when both the BSS and firmware support it */
- if ((priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) &&
- (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE)) {
+ if (bss->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
lbs_deb_join("AdhocJoin: Short preamble\n");
preamble = RADIO_PREAMBLE_SHORT;
}
@@ -1049,7 +1045,7 @@ static int lbs_adhoc_start(struct lbs_private *priv,
struct assoc_request *assoc_req)
{
struct cmd_ds_802_11_ad_hoc_start cmd;
- u8 preamble = RADIO_PREAMBLE_LONG;
+ u8 preamble = RADIO_PREAMBLE_SHORT;
size_t ratesize = 0;
u16 tmpcap = 0;
int ret = 0;
@@ -1057,11 +1053,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
lbs_deb_enter(LBS_DEB_ASSOC);
- if (priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE) {
- lbs_deb_join("ADHOC_START: Will use short preamble\n");
- preamble = RADIO_PREAMBLE_SHORT;
- }
-
ret = lbs_set_radio(priv, preamble, 1);
if (ret)
goto out;
@@ -1169,11 +1160,11 @@ int lbs_adhoc_stop(struct lbs_private *priv)
static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && match_bss->wpa_ie[0] != WLAN_EID_GENERIC
- && match_bss->rsn_ie[0] != WLAN_EID_RSN
- && !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (!secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ match_bss->wpa_ie[0] != WLAN_EID_GENERIC &&
+ match_bss->rsn_ie[0] != WLAN_EID_RSN &&
+ !(match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1182,9 +1173,9 @@ static inline int match_bss_no_security(struct lbs_802_11_security *secinfo,
static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1193,8 +1184,8 @@ static inline int match_bss_static_wep(struct lbs_802_11_security *secinfo,
static inline int match_bss_wpa(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && secinfo->WPAenabled
- && (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
+ if (!secinfo->wep_enabled && secinfo->WPAenabled &&
+ (match_bss->wpa_ie[0] == WLAN_EID_GENERIC)
/* privacy bit may NOT be set in some APs like LinkSys WRT54G
&& (match_bss->capability & WLAN_CAPABILITY_PRIVACY) */
)
@@ -1219,11 +1210,11 @@ static inline int match_bss_wpa2(struct lbs_802_11_security *secinfo,
static inline int match_bss_dynamic_wep(struct lbs_802_11_security *secinfo,
struct bss_descriptor *match_bss)
{
- if (!secinfo->wep_enabled && !secinfo->WPAenabled
- && !secinfo->WPA2enabled
- && (match_bss->wpa_ie[0] != WLAN_EID_GENERIC)
- && (match_bss->rsn_ie[0] != WLAN_EID_RSN)
- && (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
+ if (!secinfo->wep_enabled &&
+ !secinfo->WPAenabled && !secinfo->WPA2enabled &&
+ (match_bss->wpa_ie[0] != WLAN_EID_GENERIC) &&
+ (match_bss->rsn_ie[0] != WLAN_EID_RSN) &&
+ (match_bss->capability & WLAN_CAPABILITY_PRIVACY))
return 1;
else
return 0;
@@ -1534,8 +1525,8 @@ static int assoc_helper_associate(struct lbs_private *priv,
/* If we're given and 'any' BSSID, try associating based on SSID */
if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(bssid_any, assoc_req->bssid)
- && compare_ether_addr(bssid_off, assoc_req->bssid)) {
+ if (compare_ether_addr(bssid_any, assoc_req->bssid) &&
+ compare_ether_addr(bssid_off, assoc_req->bssid)) {
ret = assoc_helper_bssid(priv, assoc_req);
done = 1;
}
@@ -1621,11 +1612,9 @@ static int assoc_helper_channel(struct lbs_private *priv,
goto restore_mesh;
}
- if ( assoc_req->secinfo.wep_enabled
- && (assoc_req->wep_keys[0].len
- || assoc_req->wep_keys[1].len
- || assoc_req->wep_keys[2].len
- || assoc_req->wep_keys[3].len)) {
+ if (assoc_req->secinfo.wep_enabled &&
+ (assoc_req->wep_keys[0].len || assoc_req->wep_keys[1].len ||
+ assoc_req->wep_keys[2].len || assoc_req->wep_keys[3].len)) {
/* Make sure WEP keys are re-sent to firmware */
set_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags);
}
@@ -1992,14 +1981,14 @@ void lbs_association_worker(struct work_struct *work)
assoc_req->secinfo.auth_mode);
/* If 'any' SSID was specified, find an SSID to associate with */
- if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)
- && !assoc_req->ssid_len)
+ if (test_bit(ASSOC_FLAG_SSID, &assoc_req->flags) &&
+ !assoc_req->ssid_len)
find_any_ssid = 1;
/* But don't use 'any' SSID if there's a valid locked BSSID to use */
if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)) {
- if (compare_ether_addr(assoc_req->bssid, bssid_any)
- && compare_ether_addr(assoc_req->bssid, bssid_off))
+ if (compare_ether_addr(assoc_req->bssid, bssid_any) &&
+ compare_ether_addr(assoc_req->bssid, bssid_off))
find_any_ssid = 0;
}
@@ -2061,13 +2050,6 @@ void lbs_association_worker(struct work_struct *work)
goto out;
}
- if ( test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
- ret = assoc_helper_wep_keys(priv, assoc_req);
- if (ret)
- goto out;
- }
-
if (test_bit(ASSOC_FLAG_SECINFO, &assoc_req->flags)) {
ret = assoc_helper_secinfo(priv, assoc_req);
if (ret)
@@ -2080,18 +2062,31 @@ void lbs_association_worker(struct work_struct *work)
goto out;
}
- if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
+ /*
+ * v10 FW wants WPA keys to be set/cleared before WEP key operations,
+ * otherwise it will fail to correctly associate to WEP networks.
+ * Other firmware versions don't appear to care.
+ */
+ if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc_req->flags)) {
ret = assoc_helper_wpa_keys(priv, assoc_req);
if (ret)
goto out;
}
+ if (test_bit(ASSOC_FLAG_WEP_KEYS, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_WEP_TX_KEYIDX, &assoc_req->flags)) {
+ ret = assoc_helper_wep_keys(priv, assoc_req);
+ if (ret)
+ goto out;
+ }
+
+
/* SSID/BSSID should be the _last_ config option set, because they
* trigger the association attempt.
*/
- if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags)
- || test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
+ if (test_bit(ASSOC_FLAG_BSSID, &assoc_req->flags) ||
+ test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
int success = 1;
ret = assoc_helper_associate(priv, assoc_req);
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 42611bea76a..82371ef3952 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -143,19 +143,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
lbs_deb_cmd("GET_HW_SPEC: hardware interface 0x%x, hardware spec 0x%04x\n",
cmd.hwifversion, cmd.version);
- /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
- /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
- /* 5.110.22 have mesh command with 0xa3 command id */
- /* 10.0.0.p0 FW brings in mesh config command with different id */
- /* Check FW version MSB and initialize mesh_fw_ver */
- if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
- priv->mesh_fw_ver = MESH_FW_OLD;
- else if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
- (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK))
- priv->mesh_fw_ver = MESH_FW_NEW;
- else
- priv->mesh_fw_ver = MESH_NONE;
-
/* Clamp region code to 8-bit since FW spec indicates that it should
* only ever be 8-bit, even though the field size is 16-bit. Some firmware
* returns non-zero high 8 bits here.
@@ -855,9 +842,6 @@ int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on)
if (priv->fwrelease < 0x09000000) {
switch (preamble) {
case RADIO_PREAMBLE_SHORT:
- if (!(priv->capability & WLAN_CAPABILITY_SHORT_PREAMBLE))
- goto out;
- /* Fall through */
case RADIO_PREAMBLE_AUTO:
case RADIO_PREAMBLE_LONG:
cmd.control = cpu_to_le16(preamble);
@@ -1011,6 +995,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = 0;
break;
+#ifdef CONFIG_LIBERTAS_MESH
+
case CMD_BT_ACCESS:
ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
break;
@@ -1019,6 +1005,8 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
break;
+#endif
+
case CMD_802_11_BEACON_CTRL:
ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
break;
@@ -1317,7 +1305,7 @@ int lbs_execute_next_command(struct lbs_private *priv)
if ((priv->psmode != LBS802_11POWERMODECAM) &&
(priv->psstate == PS_STATE_FULL_POWER) &&
((priv->connect_status == LBS_CONNECTED) ||
- (priv->mesh_connect_status == LBS_CONNECTED))) {
+ lbs_mesh_connected(priv))) {
if (priv->secinfo.WPAenabled ||
priv->secinfo.WPA2enabled) {
/* check for valid WPA group keys */
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 2862748aef7..cb4138a55fd 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -110,18 +110,6 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
-/* Mesh related */
-
-int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
- struct cmd_ds_mesh_access *cmd);
-
-int lbs_mesh_config_send(struct lbs_private *priv,
- struct cmd_ds_mesh_config *cmd,
- uint16_t action, uint16_t type);
-
-int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
-
-
/* Commands only used in wext.c, assoc. and scan.c */
int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 21d57690c20..e7470442f76 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -240,11 +240,6 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len)
/* Now we got response from FW, cancel the command timer */
del_timer(&priv->command_timer);
priv->cmd_timed_out = 0;
- if (priv->nr_retries) {
- lbs_pr_info("Received result %x to command %x after %d retries\n",
- result, curcmd, priv->nr_retries);
- priv->nr_retries = 0;
- }
/* Store the response code to cur_cmd_retcode. */
priv->cur_cmd_retcode = result;
@@ -485,20 +480,8 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
break;
case MACREG_INT_CODE_MESH_AUTO_STARTED:
- /* Ignore spurious autostart events if autostart is disabled */
- if (!priv->mesh_autostart_enabled) {
- lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
- break;
- }
- lbs_pr_info("EVENT: MESH_AUTO_STARTED\n");
- priv->mesh_connect_status = LBS_CONNECTED;
- if (priv->mesh_open) {
- netif_carrier_on(priv->mesh_dev);
- if (!priv->tx_pending_len)
- netif_wake_queue(priv->mesh_dev);
- }
- priv->mode = IW_MODE_ADHOC;
- schedule_work(&priv->sync_channel);
+ /* Ignore spurious autostart events */
+ lbs_pr_info("EVENT: MESH_AUTO_STARTED (ignoring)\n");
break;
default:
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 6b6ea9f7bf5..ea3f10ef4e0 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -397,13 +397,6 @@ enum KEY_INFO_WPA {
KEY_INFO_WPA_ENABLED = 0x04
};
-/** mesh_fw_ver */
-enum _mesh_fw_ver {
- MESH_NONE = 0, /* MESH is not supported */
- MESH_FW_OLD, /* MESH is supported in FW V5 */
- MESH_FW_NEW, /* MESH is supported in FW V10 and newer */
-};
-
/* Default values for fwt commands. */
#define FWT_DEFAULT_METRIC 0
#define FWT_DEFAULT_DIR 1
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 05bb298dfae..6977ee82021 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -39,15 +39,14 @@ struct lbs_private {
/* Mesh */
struct net_device *mesh_dev; /* Virtual device */
+#ifdef CONFIG_LIBERTAS_MESH
u32 mesh_connect_status;
struct lbs_mesh_stats mstats;
int mesh_open;
- int mesh_fw_ver;
- int mesh_autostart_enabled;
uint16_t mesh_tlv;
u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 mesh_ssid_len;
- struct work_struct sync_channel;
+#endif
/* Monitor mode */
struct net_device *rtap_net_dev;
@@ -110,7 +109,6 @@ struct lbs_private {
struct list_head cmdpendingq; /* pending command buffers */
wait_queue_head_t cmd_pending;
struct timer_list command_timer;
- int nr_retries;
int cmd_timed_out;
/* Command responses sent from the hardware to the driver */
@@ -176,9 +174,7 @@ struct lbs_private {
struct bss_descriptor *networks;
struct assoc_request * pending_assoc_req;
struct assoc_request * in_progress_assoc_req;
- u16 capability;
uint16_t enablehwauto;
- uint16_t ratebitmap;
/* ADHOC */
u16 beacon_period;
diff --git a/drivers/net/wireless/libertas/ethtool.c b/drivers/net/wireless/libertas/ethtool.c
index 63d020374c2..3804a58d7f4 100644
--- a/drivers/net/wireless/libertas/ethtool.c
+++ b/drivers/net/wireless/libertas/ethtool.c
@@ -114,9 +114,11 @@ const struct ethtool_ops lbs_ethtool_ops = {
.get_drvinfo = lbs_ethtool_get_drvinfo,
.get_eeprom = lbs_ethtool_get_eeprom,
.get_eeprom_len = lbs_ethtool_get_eeprom_len,
+#ifdef CONFIG_LIBERTAS_MESH
.get_sset_count = lbs_mesh_ethtool_get_sset_count,
.get_ethtool_stats = lbs_mesh_ethtool_get_stats,
.get_strings = lbs_mesh_ethtool_get_strings,
+#endif
.get_wol = lbs_ethtool_get_wol,
.set_wol = lbs_ethtool_set_wol,
};
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index bf4bfbae622..3ea03f259ee 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -23,6 +23,7 @@
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
+#include <linux/semaphore.h>
#include <linux/spi/libertas_spi.h>
#include <linux/spi/spi.h>
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index c2975c8e2f2..28a1c9d1627 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -123,7 +123,7 @@ static ssize_t lbs_rtap_set(struct device *dev,
if (priv->monitormode == monitor_mode)
return strlen(buf);
if (!priv->monitormode) {
- if (priv->infra_open || priv->mesh_open)
+ if (priv->infra_open || lbs_mesh_open(priv))
return -EBUSY;
if (priv->mode == IW_MODE_INFRA)
lbs_cmd_80211_deauthenticate(priv,
@@ -319,15 +319,18 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
{
int i = nr_addrs;
struct dev_mc_list *mc_list;
+ int cnt;
if ((dev->flags & (IFF_UP|IFF_MULTICAST)) != (IFF_UP|IFF_MULTICAST))
return nr_addrs;
netif_addr_lock_bh(dev);
- for (mc_list = dev->mc_list; mc_list; mc_list = mc_list->next) {
+ cnt = netdev_mc_count(dev);
+ netdev_for_each_mc_addr(mc_list, dev) {
if (mac_in_list(cmd->maclist, nr_addrs, mc_list->dmi_addr)) {
lbs_deb_net("mcast address %s:%pM skipped\n", dev->name,
mc_list->dmi_addr);
+ cnt--;
continue;
}
@@ -337,9 +340,10 @@ static int lbs_add_mcast_addrs(struct cmd_ds_mac_multicast_adr *cmd,
lbs_deb_net("mcast address %s:%pM added to filter\n", dev->name,
mc_list->dmi_addr);
i++;
+ cnt--;
}
netif_addr_unlock_bh(dev);
- if (mc_list)
+ if (cnt)
return -EOVERFLOW;
return i;
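The open-coded walk of dev->mc_list is converted to the netdev_mc_count()/netdev_for_each_mc_addr() helpers, and overflow is detected by counting addresses down rather than by checking whether the loop cursor is non-NULL. A minimal sketch of the new iteration, not part of the patch; the printout is illustrative only:

    struct dev_mc_list *mc;
    int remaining = netdev_mc_count(dev);

    netif_addr_lock_bh(dev);
    netdev_for_each_mc_addr(mc, dev) {
            pr_debug("mcast addr %pM\n", mc->dmi_addr);
            remaining--;
    }
    netif_addr_unlock_bh(dev);
    /* In the real code a non-zero remainder here means the filter
     * could not hold every address. */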
@@ -536,31 +540,14 @@ static int lbs_thread(void *data)
if (priv->cmd_timed_out && priv->cur_cmd) {
struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
- if (++priv->nr_retries > 3) {
- lbs_pr_info("Excessive timeouts submitting "
- "command 0x%04x\n",
- le16_to_cpu(cmdnode->cmdbuf->command));
- lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
- priv->nr_retries = 0;
- if (priv->reset_card)
- priv->reset_card(priv);
- } else {
- priv->cur_cmd = NULL;
- priv->dnld_sent = DNLD_RES_RECEIVED;
- lbs_pr_info("requeueing command 0x%04x due "
- "to timeout (#%d)\n",
- le16_to_cpu(cmdnode->cmdbuf->command),
- priv->nr_retries);
-
- /* Stick it back at the _top_ of the pending queue
- for immediate resubmission */
- list_add(&cmdnode->list, &priv->cmdpendingq);
- }
+ lbs_pr_info("Timeout submitting command 0x%04x\n",
+ le16_to_cpu(cmdnode->cmdbuf->command));
+ lbs_complete_command(priv, cmdnode, -ETIMEDOUT);
+ if (priv->reset_card)
+ priv->reset_card(priv);
}
priv->cmd_timed_out = 0;
-
-
if (!priv->fw_ready)
continue;
@@ -622,7 +609,7 @@ static int lbs_thread(void *data)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
if (priv->mesh_dev &&
- priv->mesh_connect_status == LBS_CONNECTED)
+ lbs_mesh_connected(priv))
netif_wake_queue(priv->mesh_dev);
}
}
@@ -732,7 +719,7 @@ done:
* This function handles the timeout of command sending.
* It will re-send the same command again.
*/
-static void command_timer_fn(unsigned long data)
+static void lbs_cmd_timeout_handler(unsigned long data)
{
struct lbs_private *priv = (struct lbs_private *)data;
unsigned long flags;
@@ -809,18 +796,6 @@ int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
return 0;
}
-static void lbs_sync_channel_worker(struct work_struct *work)
-{
- struct lbs_private *priv = container_of(work, struct lbs_private,
- sync_channel);
-
- lbs_deb_enter(LBS_DEB_MAIN);
- if (lbs_update_channel(priv))
- lbs_pr_info("Channel synchronization failed.");
- lbs_deb_leave(LBS_DEB_MAIN);
-}
-
-
static int lbs_init_adapter(struct lbs_private *priv)
{
size_t bufsize;
@@ -848,14 +823,12 @@ static int lbs_init_adapter(struct lbs_private *priv)
memset(priv->current_addr, 0xff, ETH_ALEN);
priv->connect_status = LBS_DISCONNECTED;
- priv->mesh_connect_status = LBS_DISCONNECTED;
priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
priv->mode = IW_MODE_INFRA;
priv->channel = DEFAULT_AD_HOC_CHANNEL;
priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
priv->radio_on = 1;
priv->enablehwauto = 1;
- priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
priv->psmode = LBS802_11POWERMODECAM;
priv->psstate = PS_STATE_FULL_POWER;
priv->is_deep_sleep = 0;
@@ -865,7 +838,7 @@ static int lbs_init_adapter(struct lbs_private *priv)
mutex_init(&priv->lock);
- setup_timer(&priv->command_timer, command_timer_fn,
+ setup_timer(&priv->command_timer, lbs_cmd_timeout_handler,
(unsigned long)priv);
setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
(unsigned long)priv);
@@ -998,11 +971,6 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
INIT_DELAYED_WORK(&priv->assoc_work, lbs_association_worker);
INIT_DELAYED_WORK(&priv->scan_work, lbs_scan_worker);
INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
- INIT_WORK(&priv->sync_channel, lbs_sync_channel_worker);
-
- priv->mesh_open = 0;
- sprintf(priv->mesh_ssid, "mesh");
- priv->mesh_ssid_len = 4;
priv->wol_criteria = 0xffffffff;
priv->wol_gpio = 0xff;
@@ -1076,6 +1044,17 @@ void lbs_remove_card(struct lbs_private *priv)
EXPORT_SYMBOL_GPL(lbs_remove_card);
+static int lbs_rtap_supported(struct lbs_private *priv)
+{
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5)
+ return 1;
+
+ /* newer firmware use a capability mask */
+ return ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK));
+}
+
+
int lbs_start_card(struct lbs_private *priv)
{
struct net_device *dev = priv->dev;
@@ -1095,12 +1074,14 @@ int lbs_start_card(struct lbs_private *priv)
lbs_update_channel(priv);
+ lbs_init_mesh(priv);
+
/*
* While rtap isn't related to mesh, only mesh-enabled
* firmware implements the rtap functionality via
* CMD_802_11_MONITOR_MODE.
*/
- if (lbs_init_mesh(priv)) {
+ if (lbs_rtap_supported(priv)) {
if (device_create_file(&dev->dev, &dev_attr_lbs_rtap))
lbs_pr_err("cannot register lbs_rtap attribute\n");
}
@@ -1134,7 +1115,9 @@ void lbs_stop_card(struct lbs_private *priv)
netif_carrier_off(dev);
lbs_debugfs_remove_one(priv);
- if (lbs_deinit_mesh(priv))
+ lbs_deinit_mesh(priv);
+
+ if (lbs_rtap_supported(priv))
device_remove_file(&dev->dev, &dev_attr_lbs_rtap);
/* Delete the timeout of the currently processing command */
diff --git a/drivers/net/wireless/libertas/mesh.c b/drivers/net/wireless/libertas/mesh.c
index 92b7a357a5e..e385af1f458 100644
--- a/drivers/net/wireless/libertas/mesh.c
+++ b/drivers/net/wireless/libertas/mesh.c
@@ -1,4 +1,3 @@
-#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
@@ -197,7 +196,14 @@ int lbs_init_mesh(struct lbs_private *priv)
lbs_deb_enter(LBS_DEB_MESH);
- if (priv->mesh_fw_ver == MESH_FW_OLD) {
+ priv->mesh_connect_status = LBS_DISCONNECTED;
+
+ /* Determine mesh_fw_ver from fwrelease and fwcapinfo */
+ /* 5.0.16p0 9.0.0.p0 is known to NOT support any mesh */
+ /* 5.110.22 have mesh command with 0xa3 command id */
+ /* 10.0.0.p0 FW brings in mesh config command with different id */
+ /* Check FW version MSB and initialize mesh_fw_ver */
+ if (MRVL_FW_MAJOR_REV(priv->fwrelease) == MRVL_FW_V5) {
/* Enable mesh, if supported, and work out which TLV it uses.
0x100 + 291 is an unofficial value used in 5.110.20.pXX
0x100 + 37 is the official value used in 5.110.21.pXX
@@ -219,7 +225,9 @@ int lbs_init_mesh(struct lbs_private *priv)
priv->channel))
priv->mesh_tlv = 0;
}
- } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ } else
+ if ((MRVL_FW_MAJOR_REV(priv->fwrelease) >= MRVL_FW_V10) &&
+ (priv->fwcapinfo & MESH_CAPINFO_ENABLE_MASK)) {
/* 10.0.0.pXX new firmwares should succeed with TLV
* 0x100+37; Do not invoke command with old TLV.
*/
@@ -228,7 +236,12 @@ int lbs_init_mesh(struct lbs_private *priv)
priv->channel))
priv->mesh_tlv = 0;
}
+
+
if (priv->mesh_tlv) {
+ sprintf(priv->mesh_ssid, "mesh");
+ priv->mesh_ssid_len = 4;
+
lbs_add_mesh(priv);
if (device_create_file(&dev->dev, &dev_attr_lbs_mesh))
@@ -416,10 +429,10 @@ struct net_device *lbs_mesh_set_dev(struct lbs_private *priv,
struct net_device *dev, struct rxpd *rxpd)
{
if (priv->mesh_dev) {
- if (priv->mesh_fw_ver == MESH_FW_OLD) {
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID) {
if (rxpd->rx_control & RxPD_MESH_FRAME)
dev = priv->mesh_dev;
- } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
+ } else if (priv->mesh_tlv == TLV_TYPE_MESH_ID) {
if (rxpd->u.bss.bss_num == MESH_IFACE_ID)
dev = priv->mesh_dev;
}
@@ -432,9 +445,9 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
struct net_device *dev, struct txpd *txpd)
{
if (dev == priv->mesh_dev) {
- if (priv->mesh_fw_ver == MESH_FW_OLD)
+ if (priv->mesh_tlv == TLV_TYPE_OLD_MESH_ID)
txpd->tx_control |= cpu_to_le32(TxPD_MESH_FRAME);
- else if (priv->mesh_fw_ver == MESH_FW_NEW)
+ else if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
txpd->u.bss.bss_num = MESH_IFACE_ID;
}
}
@@ -538,7 +551,7 @@ static int __lbs_mesh_config_send(struct lbs_private *priv,
* Command id is 0xac for v10 FW along with mesh interface
* id in bits 14-13-12.
*/
- if (priv->mesh_fw_ver == MESH_FW_NEW)
+ if (priv->mesh_tlv == TLV_TYPE_MESH_ID)
command = CMD_MESH_CONFIG |
(MESH_IFACE_ID << MESH_IFACE_BIT_OFFSET);
diff --git a/drivers/net/wireless/libertas/mesh.h b/drivers/net/wireless/libertas/mesh.h
index fea9b5d005f..e2573303a32 100644
--- a/drivers/net/wireless/libertas/mesh.h
+++ b/drivers/net/wireless/libertas/mesh.h
@@ -9,6 +9,8 @@
#include <net/lib80211.h>
+#ifdef CONFIG_LIBERTAS_MESH
+
/* Mesh statistics */
struct lbs_mesh_stats {
u32 fwd_bcast_cnt; /* Fwd: Broadcast counter */
@@ -46,11 +48,20 @@ void lbs_mesh_set_txpd(struct lbs_private *priv,
/* Command handling */
struct cmd_ds_command;
+struct cmd_ds_mesh_access;
+struct cmd_ds_mesh_config;
int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
u16 cmd_action, void *pdata_buf);
int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
u16 cmd_action, void *pdata_buf);
+int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
+ struct cmd_ds_mesh_access *cmd);
+int lbs_mesh_config_send(struct lbs_private *priv,
+ struct cmd_ds_mesh_config *cmd,
+ uint16_t action, uint16_t type);
+int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
+
/* Persistent configuration */
@@ -75,4 +86,25 @@ void lbs_mesh_ethtool_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *s);
+/* Accessors */
+
+#define lbs_mesh_open(priv) (priv->mesh_open)
+#define lbs_mesh_connected(priv) (priv->mesh_connect_status == LBS_CONNECTED)
+
+#else
+
+#define lbs_init_mesh(priv)
+#define lbs_deinit_mesh(priv)
+#define lbs_add_mesh(priv)
+#define lbs_remove_mesh(priv)
+#define lbs_mesh_set_dev(priv, dev, rxpd) (dev)
+#define lbs_mesh_set_txpd(priv, dev, txpd)
+#define lbs_mesh_config(priv, enable, chan)
+#define lbs_mesh_open(priv) (0)
+#define lbs_mesh_connected(priv) (0)
+
+#endif
+
+
+
#endif
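When CONFIG_LIBERTAS_MESH is disabled, mesh.h supplies no-op stand-ins so that callers such as cmd.c, main.c and wext.c compile without scattering #ifdefs. A generic sketch of this compile-out pattern, with hypothetical CONFIG_FOO and foo_*() names:

    #ifdef CONFIG_FOO
    int foo_init(struct foo_priv *priv);
    #define foo_ready(priv) ((priv)->foo_state == FOO_READY)
    #else
    /* Feature compiled out: calls collapse to harmless constants. */
    #define foo_init(priv)  (0)
    #define foo_ready(priv) (0)
    #endif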
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index b0b1c784150..220361e69cd 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -635,7 +635,7 @@ out:
if (priv->connect_status == LBS_CONNECTED && !priv->tx_pending_len)
netif_wake_queue(priv->dev);
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED) &&
+ if (priv->mesh_dev && lbs_mesh_connected(priv) &&
!priv->tx_pending_len)
netif_wake_queue(priv->mesh_dev);
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 315d1ce286c..52d244ea3d9 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -198,7 +198,7 @@ void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count)
if (priv->connect_status == LBS_CONNECTED)
netif_wake_queue(priv->dev);
- if (priv->mesh_dev && (priv->mesh_connect_status == LBS_CONNECTED))
+ if (priv->mesh_dev && lbs_mesh_connected(priv))
netif_wake_queue(priv->mesh_dev);
}
EXPORT_SYMBOL_GPL(lbs_send_tx_feedback);
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index 4b1aab593a8..71f88a08e09 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -192,7 +192,7 @@ static void copy_active_data_rates(struct lbs_private *priv, u8 *rates)
lbs_deb_enter(LBS_DEB_WEXT);
if ((priv->connect_status != LBS_CONNECTED) &&
- (priv->mesh_connect_status != LBS_CONNECTED))
+ !lbs_mesh_connected(priv))
memcpy(rates, lbs_bg_rates, MAX_RATES);
else
memcpy(rates, priv->curbssparams.rates, MAX_RATES);
@@ -298,6 +298,7 @@ static int lbs_get_nick(struct net_device *dev, struct iw_request_info *info,
return 0;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
@@ -307,7 +308,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
/* Use nickname to indicate that mesh is on */
- if (priv->mesh_connect_status == LBS_CONNECTED) {
+ if (lbs_mesh_connected(priv)) {
strncpy(extra, "Mesh", 12);
extra[12] = '\0';
dwrq->length = strlen(extra);
@@ -321,6 +322,7 @@ static int mesh_get_nick(struct net_device *dev, struct iw_request_info *info,
lbs_deb_leave(LBS_DEB_WEXT);
return 0;
}
+#endif
static int lbs_set_rts(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -422,6 +424,7 @@ static int lbs_get_mode(struct net_device *dev,
return 0;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int mesh_wlan_get_mode(struct net_device *dev,
struct iw_request_info *info, u32 * uwrq,
char *extra)
@@ -433,6 +436,7 @@ static int mesh_wlan_get_mode(struct net_device *dev,
lbs_deb_leave(LBS_DEB_WEXT);
return 0;
}
+#endif
static int lbs_get_txpow(struct net_device *dev,
struct iw_request_info *info,
@@ -863,7 +867,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
/* If we're not associated, all quality values are meaningless */
if ((priv->connect_status != LBS_CONNECTED) &&
- (priv->mesh_connect_status != LBS_CONNECTED))
+ !lbs_mesh_connected(priv))
goto out;
/* Quality by RSSI */
@@ -1010,6 +1014,7 @@ out:
return ret;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int lbs_mesh_set_freq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *fwrq, char *extra)
@@ -1061,6 +1066,7 @@ out:
lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
return ret;
}
+#endif
static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
struct iw_param *vwrq, char *extra)
@@ -2108,6 +2114,7 @@ out:
return ret;
}
+#ifdef CONFIG_LIBERTAS_MESH
static int lbs_mesh_get_essid(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
@@ -2161,6 +2168,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
return ret;
}
+#endif
/**
* @brief Connect to the AP or Ad-hoc Network with specific bssid
@@ -2267,7 +2275,13 @@ static const iw_handler lbs_handler[] = {
(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
};
+struct iw_handler_def lbs_handler_def = {
+ .num_standard = ARRAY_SIZE(lbs_handler),
+ .standard = (iw_handler *) lbs_handler,
+ .get_wireless_stats = lbs_get_wireless_stats,
+};
+#ifdef CONFIG_LIBERTAS_MESH
static const iw_handler mesh_wlan_handler[] = {
(iw_handler) NULL, /* SIOCSIWCOMMIT */
(iw_handler) lbs_get_name, /* SIOCGIWNAME */
@@ -2325,14 +2339,10 @@ static const iw_handler mesh_wlan_handler[] = {
(iw_handler) lbs_get_encodeext,/* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
};
-struct iw_handler_def lbs_handler_def = {
- .num_standard = ARRAY_SIZE(lbs_handler),
- .standard = (iw_handler *) lbs_handler,
- .get_wireless_stats = lbs_get_wireless_stats,
-};
struct iw_handler_def mesh_handler_def = {
.num_standard = ARRAY_SIZE(mesh_wlan_handler),
.standard = (iw_handler *) mesh_wlan_handler,
.get_wireless_stats = lbs_get_wireless_stats,
};
+#endif
diff --git a/drivers/net/wireless/libertas_tf/main.c b/drivers/net/wireless/libertas_tf/main.c
index 26a1abd5bb0..6ab30033c26 100644
--- a/drivers/net/wireless/libertas_tf/main.c
+++ b/drivers/net/wireless/libertas_tf/main.c
@@ -318,14 +318,14 @@ static void lbtf_op_stop(struct ieee80211_hw *hw)
}
static int lbtf_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
if (priv->vif != NULL)
return -EOPNOTSUPP;
- priv->vif = conf->vif;
- switch (conf->type) {
+ priv->vif = vif;
+ switch (vif->type) {
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_AP:
lbtf_set_mode(priv, LBTF_AP_MODE);
@@ -337,12 +337,12 @@ static int lbtf_op_add_interface(struct ieee80211_hw *hw,
priv->vif = NULL;
return -EOPNOTSUPP;
}
- lbtf_set_mac_address(priv, (u8 *) conf->mac_addr);
+ lbtf_set_mac_address(priv, (u8 *) vif->addr);
return 0;
}
static void lbtf_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct lbtf_private *priv = hw->priv;
@@ -555,6 +555,9 @@ struct lbtf_private *lbtf_add_card(void *card, struct device *dmdev)
priv->band.n_channels = ARRAY_SIZE(lbtf_channels);
priv->band.channels = priv->channels;
hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
+ hw->wiphy->interface_modes =
+ BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
skb_queue_head_init(&priv->bc_ps_buf);
SET_IEEE80211_DEV(hw, dmdev);
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 88e41176e7f..6ea77e95277 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -32,6 +32,10 @@ static int radios = 2;
module_param(radios, int, 0444);
MODULE_PARM_DESC(radios, "Number of simulated radios");
+static bool fake_hw_scan;
+module_param(fake_hw_scan, bool, 0444);
+MODULE_PARM_DESC(fake_hw_scan, "Install fake (no-op) hw-scan handler");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -281,6 +285,8 @@ struct mac80211_hwsim_data {
struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)];
struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)];
+ struct mac_address addresses[2];
+
struct ieee80211_channel *channel;
unsigned long beacon_int; /* in jiffies unit */
unsigned int rx_filter;
@@ -436,6 +442,38 @@ static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
}
+struct mac80211_hwsim_addr_match_data {
+ bool ret;
+ const u8 *addr;
+};
+
+static void mac80211_hwsim_addr_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct mac80211_hwsim_addr_match_data *md = data;
+ if (memcmp(mac, md->addr, ETH_ALEN) == 0)
+ md->ret = true;
+}
+
+
+static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data,
+ const u8 *addr)
+{
+ struct mac80211_hwsim_addr_match_data md;
+
+ if (memcmp(addr, data->hw->wiphy->perm_addr, ETH_ALEN) == 0)
+ return true;
+
+ md.ret = false;
+ md.addr = addr;
+ ieee80211_iterate_active_interfaces_atomic(data->hw,
+ mac80211_hwsim_addr_iter,
+ &md);
+
+ return md.ret;
+}
+
+
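mac80211_hwsim_addr_match() now accepts a frame addressed to any active virtual interface, not just the radio's permanent address, by running a callback over all active vifs. The callback-plus-cookie shape generalises; a small sketch that merely counts active interfaces, with count_iter() and count_active_vifs() being hypothetical names:

    static void count_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
    {
            unsigned int *count = data;

            (*count)++;
    }

    static unsigned int count_active_vifs(struct ieee80211_hw *hw)
    {
            unsigned int count = 0;

            ieee80211_iterate_active_interfaces_atomic(hw, count_iter,
                                                       &count);
            return count;
    }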
static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
@@ -488,8 +526,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
if (nskb == NULL)
continue;
- if (memcmp(hdr->addr1, data2->hw->wiphy->perm_addr,
- ETH_ALEN) == 0)
+ if (mac80211_hwsim_addr_match(data2, hdr->addr1))
ack = true;
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
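
The new mac80211_hwsim_addr_match() above leans on ieee80211_iterate_active_interfaces_atomic(), which (in this kernel generation) walks every active vif on a hw and invokes the iterator with each interface's MAC address; that is what lets hwsim ack frames addressed to any of a radio's interfaces rather than only its permanent address. A minimal sketch of the same iterator pattern put to a different use, counting active interfaces (count_active_interfaces is a hypothetical helper, not part of the patch):

#include <net/mac80211.h>

static void count_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
	unsigned int *count = data;

	(*count)++;		/* invoked once per active interface */
}

static unsigned int count_active_interfaces(struct ieee80211_hw *hw)
{
	unsigned int count = 0;

	ieee80211_iterate_active_interfaces_atomic(hw, count_iter, &count);
	return count;
}
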
@@ -553,24 +590,24 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
- wiphy_name(hw->wiphy), __func__, conf->type,
- conf->mac_addr);
- hwsim_set_magic(conf->vif);
+ wiphy_name(hw->wiphy), __func__, vif->type,
+ vif->addr);
+ hwsim_set_magic(vif);
return 0;
}
static void mac80211_hwsim_remove_interface(
- struct ieee80211_hw *hw, struct ieee80211_if_init_conf *conf)
+ struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
printk(KERN_DEBUG "%s:%s (type=%d mac_addr=%pM)\n",
- wiphy_name(hw->wiphy), __func__, conf->type,
- conf->mac_addr);
- hwsim_check_magic(conf->vif);
- hwsim_clear_magic(conf->vif);
+ wiphy_name(hw->wiphy), __func__, vif->type,
+ vif->addr);
+ hwsim_check_magic(vif);
+ hwsim_clear_magic(vif);
}
@@ -618,12 +655,26 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
{
struct mac80211_hwsim_data *data = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
-
- printk(KERN_DEBUG "%s:%s (freq=%d idle=%d ps=%d)\n",
+ static const char *chantypes[4] = {
+ [NL80211_CHAN_NO_HT] = "noht",
+ [NL80211_CHAN_HT20] = "ht20",
+ [NL80211_CHAN_HT40MINUS] = "ht40-",
+ [NL80211_CHAN_HT40PLUS] = "ht40+",
+ };
+ static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = {
+ [IEEE80211_SMPS_AUTOMATIC] = "auto",
+ [IEEE80211_SMPS_OFF] = "off",
+ [IEEE80211_SMPS_STATIC] = "static",
+ [IEEE80211_SMPS_DYNAMIC] = "dynamic",
+ };
+
+ printk(KERN_DEBUG "%s:%s (freq=%d/%s idle=%d ps=%d smps=%s)\n",
wiphy_name(hw->wiphy), __func__,
conf->channel->center_freq,
+ chantypes[conf->channel_type],
!!(conf->flags & IEEE80211_CONF_IDLE),
- !!(conf->flags & IEEE80211_CONF_PS));
+ !!(conf->flags & IEEE80211_CONF_PS),
+ smps_modes[conf->smps_mode]);
data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
@@ -720,23 +771,41 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
}
}
+static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ hwsim_check_magic(vif);
+ hwsim_set_sta_magic(sta);
+
+ return 0;
+}
+
+static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ hwsim_check_magic(vif);
+ hwsim_clear_sta_magic(sta);
+
+ return 0;
+}
+
static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum sta_notify_cmd cmd,
struct ieee80211_sta *sta)
{
hwsim_check_magic(vif);
+
switch (cmd) {
- case STA_NOTIFY_ADD:
- hwsim_set_sta_magic(sta);
- break;
- case STA_NOTIFY_REMOVE:
- hwsim_clear_sta_magic(sta);
- break;
case STA_NOTIFY_SLEEP:
case STA_NOTIFY_AWAKE:
/* TODO: make good use of these flags */
break;
+ default:
+ WARN(1, "Invalid sta notify: %d\n", cmd);
+ break;
}
}
@@ -827,7 +896,77 @@ static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw,
}
#endif
-static const struct ieee80211_ops mac80211_hwsim_ops =
+static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
+{
+ switch (action) {
+ case IEEE80211_AMPDU_TX_START:
+ ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_STOP:
+ ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
+ break;
+ case IEEE80211_AMPDU_TX_OPERATIONAL:
+ break;
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static void mac80211_hwsim_flush(struct ieee80211_hw *hw, bool drop)
+{
+ /*
+ * In this special case, there's nothing we need to
+ * do because hwsim does transmission synchronously.
+ * In the future, when it does transmissions via
+ * userspace, we may need to do something.
+ */
+}
+
+struct hw_scan_done {
+ struct delayed_work w;
+ struct ieee80211_hw *hw;
+};
+
+static void hw_scan_done(struct work_struct *work)
+{
+ struct hw_scan_done *hsd =
+ container_of(work, struct hw_scan_done, w.work);
+
+ ieee80211_scan_completed(hsd->hw, false);
+ kfree(hsd);
+}
+
+static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
+{
+ struct hw_scan_done *hsd = kzalloc(sizeof(*hsd), GFP_KERNEL);
+ int i;
+
+ if (!hsd)
+ return -ENOMEM;
+
+ hsd->hw = hw;
+ INIT_DELAYED_WORK(&hsd->w, hw_scan_done);
+
+ printk(KERN_DEBUG "hwsim scan request\n");
+ for (i = 0; i < req->n_channels; i++)
+ printk(KERN_DEBUG "hwsim scan freq %d\n",
+ req->channels[i]->center_freq);
+
+ ieee80211_queue_delayed_work(hw, &hsd->w, 2 * HZ);
+
+ return 0;
+}
+
+static struct ieee80211_ops mac80211_hwsim_ops =
{
.tx = mac80211_hwsim_tx,
.start = mac80211_hwsim_start,
@@ -837,10 +976,14 @@ static const struct ieee80211_ops mac80211_hwsim_ops =
.config = mac80211_hwsim_config,
.configure_filter = mac80211_hwsim_configure_filter,
.bss_info_changed = mac80211_hwsim_bss_info_changed,
+ .sta_add = mac80211_hwsim_sta_add,
+ .sta_remove = mac80211_hwsim_sta_remove,
.sta_notify = mac80211_hwsim_sta_notify,
.set_tim = mac80211_hwsim_set_tim,
.conf_tx = mac80211_hwsim_conf_tx,
CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd)
+ .ampdu_action = mac80211_hwsim_ampdu_action,
+ .flush = mac80211_hwsim_flush,
};
@@ -1035,6 +1178,9 @@ static int __init init_mac80211_hwsim(void)
if (radios < 1 || radios > 100)
return -EINVAL;
+ if (fake_hw_scan)
+ mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
+
spin_lock_init(&hwsim_radio_lock);
INIT_LIST_HEAD(&hwsim_radios);
@@ -1072,7 +1218,11 @@ static int __init init_mac80211_hwsim(void)
SET_IEEE80211_DEV(hw, data->dev);
addr[3] = i >> 8;
addr[4] = i;
- SET_IEEE80211_PERM_ADDR(hw, addr);
+ memcpy(data->addresses[0].addr, addr, ETH_ALEN);
+ memcpy(data->addresses[1].addr, addr, ETH_ALEN);
+ data->addresses[1].addr[0] |= 0x40;
+ hw->wiphy->n_addresses = 2;
+ hw->wiphy->addresses = data->addresses;
hw->channel_change_time = 1;
hw->queues = 4;
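
Registering a second, locally-administered address per radio (first octet ORed with 0x40) and exporting both through hw->wiphy->addresses / n_addresses lets mac80211 hand out distinct MAC addresses when more than one interface is created on the same radio. A small illustration of the address pair this produces, assuming hwsim's usual 02:00:00:00:00:00-style base address (the base is set earlier in the file, outside this hunk):

#include <linux/if_ether.h>
#include <linux/string.h>

static void example_hwsim_addresses(void)
{
	u8 base[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 };
	u8 second[ETH_ALEN];

	memcpy(second, base, ETH_ALEN);
	second[0] |= 0x40;	/* 42:00:00:00:00:00 for radio 0 */
	/* wiphy->addresses points at { base, second }, n_addresses = 2 */
}
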
@@ -1082,7 +1232,9 @@ static int __init init_mac80211_hwsim(void)
BIT(NL80211_IFTYPE_MESH_POINT);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
- IEEE80211_HW_SIGNAL_DBM;
+ IEEE80211_HW_SIGNAL_DBM |
+ IEEE80211_HW_SUPPORTS_STATIC_SMPS |
+ IEEE80211_HW_SUPPORTS_DYNAMIC_SMPS;
/* ask mac80211 to reserve space for magic */
hw->vif_data_size = sizeof(struct hwsim_vif_priv);
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 59f92105b0c..ac65e13eb0d 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -2,7 +2,7 @@
* drivers/net/wireless/mwl8k.c
* Driver for Marvell TOPDOG 802.11 Wireless cards
*
- * Copyright (C) 2008-2009 Marvell Semiconductor Inc.
+ * Copyright (C) 2008, 2009, 2010 Marvell Semiconductor Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
@@ -26,7 +26,7 @@
#define MWL8K_DESC "Marvell TOPDOG(R) 802.11 Wireless Network Driver"
#define MWL8K_NAME KBUILD_MODNAME
-#define MWL8K_VERSION "0.10"
+#define MWL8K_VERSION "0.12"
/* Register definitions */
#define MWL8K_HIU_GEN_PTR 0x00000c10
@@ -92,8 +92,7 @@ struct mwl8k_device_info {
char *part_name;
char *helper_image;
char *fw_image;
- struct rxd_ops *rxd_ops;
- u16 modes;
+ struct rxd_ops *ap_rxd_ops;
};
struct mwl8k_rx_queue {
@@ -120,34 +119,36 @@ struct mwl8k_tx_queue {
/* sw appends here */
int tail;
- struct ieee80211_tx_queue_stats stats;
+ unsigned int len;
struct mwl8k_tx_desc *txd;
dma_addr_t txd_dma;
struct sk_buff **skb;
};
-/* Pointers to the firmware data and meta information about it. */
-struct mwl8k_firmware {
- /* Boot helper code */
- struct firmware *helper;
+struct mwl8k_priv {
+ struct ieee80211_hw *hw;
+ struct pci_dev *pdev;
- /* Microcode */
- struct firmware *ucode;
-};
+ struct mwl8k_device_info *device_info;
-struct mwl8k_priv {
void __iomem *sram;
void __iomem *regs;
- struct ieee80211_hw *hw;
- struct pci_dev *pdev;
+ /* firmware */
+ struct firmware *fw_helper;
+ struct firmware *fw_ucode;
- struct mwl8k_device_info *device_info;
+ /* hardware/firmware parameters */
bool ap_fw;
struct rxd_ops *rxd_ops;
-
- /* firmware files and meta data */
- struct mwl8k_firmware fw;
+ struct ieee80211_supported_band band_24;
+ struct ieee80211_channel channels_24[14];
+ struct ieee80211_rate rates_24[14];
+ struct ieee80211_supported_band band_50;
+ struct ieee80211_channel channels_50[4];
+ struct ieee80211_rate rates_50[9];
+ u32 ap_macids_supported;
+ u32 sta_macids_supported;
/* firmware access */
struct mutex fw_mutex;
@@ -161,9 +162,9 @@ struct mwl8k_priv {
/* TX quiesce completion, protected by fw_mutex and tx_lock */
struct completion *tx_wait;
- struct ieee80211_vif *vif;
-
- struct ieee80211_channel *current_channel;
+ /* List of interfaces. */
+ u32 macids_used;
+ struct list_head vif_list;
/* power management status cookie from firmware */
u32 *cookie;
@@ -182,11 +183,6 @@ struct mwl8k_priv {
struct mwl8k_rx_queue rxq[MWL8K_RX_QUEUES];
struct mwl8k_tx_queue txq[MWL8K_TX_QUEUES];
- /* PHY parameters */
- struct ieee80211_supported_band band;
- struct ieee80211_channel channels[14];
- struct ieee80211_rate rates[14];
-
bool radio_on;
bool radio_short_preamble;
bool sniffer_enabled;
@@ -205,32 +201,33 @@ struct mwl8k_priv {
*/
struct work_struct finalize_join_worker;
- /* Tasklet to reclaim TX descriptors and buffers after tx */
- struct tasklet_struct tx_reclaim_task;
+ /* Tasklet to perform TX reclaim. */
+ struct tasklet_struct poll_tx_task;
+
+ /* Tasklet to perform RX. */
+ struct tasklet_struct poll_rx_task;
};
/* Per interface specific private data */
struct mwl8k_vif {
- /* backpointer to parent config block */
- struct mwl8k_priv *priv;
-
- /* BSS config of AP or IBSS from mac80211*/
- struct ieee80211_bss_conf bss_info;
-
- /* BSSID of AP or IBSS */
- u8 bssid[ETH_ALEN];
- u8 mac_addr[ETH_ALEN];
+ struct list_head list;
+ struct ieee80211_vif *vif;
- /* Index into station database.Returned by update_sta_db call */
- u8 peer_id;
+ /* Firmware macid for this vif. */
+ int macid;
- /* Non AMPDU sequence number assigned by driver */
- u16 seqno;
+ /* Non AMPDU sequence number assigned by driver. */
+ u16 seqno;
};
-
#define MWL8K_VIF(_vif) ((struct mwl8k_vif *)&((_vif)->drv_priv))
-static const struct ieee80211_channel mwl8k_channels[] = {
+struct mwl8k_sta {
+ /* Index into station database. Returned by UPDATE_STADB. */
+ u8 peer_id;
+};
+#define MWL8K_STA(_sta) ((struct mwl8k_sta *)&((_sta)->drv_priv))
+
+static const struct ieee80211_channel mwl8k_channels_24[] = {
{ .center_freq = 2412, .hw_value = 1, },
{ .center_freq = 2417, .hw_value = 2, },
{ .center_freq = 2422, .hw_value = 3, },
@@ -242,9 +239,12 @@ static const struct ieee80211_channel mwl8k_channels[] = {
{ .center_freq = 2452, .hw_value = 9, },
{ .center_freq = 2457, .hw_value = 10, },
{ .center_freq = 2462, .hw_value = 11, },
+ { .center_freq = 2467, .hw_value = 12, },
+ { .center_freq = 2472, .hw_value = 13, },
+ { .center_freq = 2484, .hw_value = 14, },
};
-static const struct ieee80211_rate mwl8k_rates[] = {
+static const struct ieee80211_rate mwl8k_rates_24[] = {
{ .bitrate = 10, .hw_value = 2, },
{ .bitrate = 20, .hw_value = 4, },
{ .bitrate = 55, .hw_value = 11, },
@@ -261,8 +261,23 @@ static const struct ieee80211_rate mwl8k_rates[] = {
{ .bitrate = 720, .hw_value = 144, },
};
-static const u8 mwl8k_rateids[12] = {
- 2, 4, 11, 22, 12, 18, 24, 36, 48, 72, 96, 108,
+static const struct ieee80211_channel mwl8k_channels_50[] = {
+ { .center_freq = 5180, .hw_value = 36, },
+ { .center_freq = 5200, .hw_value = 40, },
+ { .center_freq = 5220, .hw_value = 44, },
+ { .center_freq = 5240, .hw_value = 48, },
+};
+
+static const struct ieee80211_rate mwl8k_rates_50[] = {
+ { .bitrate = 60, .hw_value = 12, },
+ { .bitrate = 90, .hw_value = 18, },
+ { .bitrate = 120, .hw_value = 24, },
+ { .bitrate = 180, .hw_value = 36, },
+ { .bitrate = 240, .hw_value = 48, },
+ { .bitrate = 360, .hw_value = 72, },
+ { .bitrate = 480, .hw_value = 96, },
+ { .bitrate = 540, .hw_value = 108, },
+ { .bitrate = 720, .hw_value = 144, },
};
/* Set or get info from Firmware */
@@ -278,6 +293,7 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_RADIO_CONTROL 0x001c
#define MWL8K_CMD_RF_TX_POWER 0x001e
#define MWL8K_CMD_RF_ANTENNA 0x0020
+#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
#define MWL8K_CMD_SET_PRE_SCAN 0x0107
#define MWL8K_CMD_SET_POST_SCAN 0x0108
#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -291,8 +307,10 @@ static const u8 mwl8k_rateids[12] = {
#define MWL8K_CMD_MIMO_CONFIG 0x0125
#define MWL8K_CMD_USE_FIXED_RATE 0x0126
#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
-#define MWL8K_CMD_SET_MAC_ADDR 0x0202
+#define MWL8K_CMD_SET_MAC_ADDR 0x0202 /* per-vif */
#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
+#define MWL8K_CMD_BSS_START 0x1100 /* per-vif */
+#define MWL8K_CMD_SET_NEW_STN 0x1111 /* per-vif */
#define MWL8K_CMD_UPDATE_STADB 0x1123
static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
@@ -310,6 +328,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(RADIO_CONTROL);
MWL8K_CMDNAME(RF_TX_POWER);
MWL8K_CMDNAME(RF_ANTENNA);
+ MWL8K_CMDNAME(SET_BEACON);
MWL8K_CMDNAME(SET_PRE_SCAN);
MWL8K_CMDNAME(SET_POST_SCAN);
MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -325,6 +344,8 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
MWL8K_CMDNAME(ENABLE_SNIFFER);
MWL8K_CMDNAME(SET_MAC_ADDR);
MWL8K_CMDNAME(SET_RATEADAPT_MODE);
+ MWL8K_CMDNAME(BSS_START);
+ MWL8K_CMDNAME(SET_NEW_STN);
MWL8K_CMDNAME(UPDATE_STADB);
default:
snprintf(buf, bufsize, "0x%x", cmd);
@@ -355,8 +376,8 @@ static void mwl8k_release_fw(struct firmware **fw)
static void mwl8k_release_firmware(struct mwl8k_priv *priv)
{
- mwl8k_release_fw(&priv->fw.ucode);
- mwl8k_release_fw(&priv->fw.helper);
+ mwl8k_release_fw(&priv->fw_ucode);
+ mwl8k_release_fw(&priv->fw_helper);
}
/* Request fw image */
@@ -377,7 +398,7 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
int rc;
if (di->helper_image != NULL) {
- rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper);
+ rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper);
if (rc) {
printk(KERN_ERR "%s: Error requesting helper "
"firmware file %s\n", pci_name(priv->pdev),
@@ -386,24 +407,22 @@ static int mwl8k_request_firmware(struct mwl8k_priv *priv)
}
}
- rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode);
+ rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode);
if (rc) {
printk(KERN_ERR "%s: Error requesting firmware file %s\n",
pci_name(priv->pdev), di->fw_image);
- mwl8k_release_fw(&priv->fw.helper);
+ mwl8k_release_fw(&priv->fw_helper);
return rc;
}
return 0;
}
-MODULE_FIRMWARE("mwl8k/helper_8687.fw");
-MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
-
struct mwl8k_cmd_pkt {
__le16 code;
__le16 length;
- __le16 seq_num;
+ __u8 seq_num;
+ __u8 macid;
__le16 result;
char payload[0];
} __attribute__((packed));
@@ -461,6 +480,7 @@ static int mwl8k_load_fw_image(struct mwl8k_priv *priv,
cmd->code = cpu_to_le16(MWL8K_CMD_CODE_DNLD);
cmd->seq_num = 0;
+ cmd->macid = 0;
cmd->result = 0;
done = 0;
@@ -551,13 +571,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
static int mwl8k_load_firmware(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
- struct firmware *fw = priv->fw.ucode;
- struct mwl8k_device_info *di = priv->device_info;
+ struct firmware *fw = priv->fw_ucode;
int rc;
int loops;
if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
- struct firmware *helper = priv->fw.helper;
+ struct firmware *helper = priv->fw_helper;
if (helper == NULL) {
printk(KERN_ERR "%s: helper image needed but none "
@@ -584,10 +603,7 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
return rc;
}
- if (di->modes & BIT(NL80211_IFTYPE_AP))
- iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
- else
- iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
+ iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
loops = 500000;
do {
@@ -610,91 +626,6 @@ static int mwl8k_load_firmware(struct ieee80211_hw *hw)
}
-/*
- * Defines shared between transmission and reception.
- */
-/* HT control fields for firmware */
-struct ewc_ht_info {
- __le16 control1;
- __le16 control2;
- __le16 control3;
-} __attribute__((packed));
-
-/* Firmware Station database operations */
-#define MWL8K_STA_DB_ADD_ENTRY 0
-#define MWL8K_STA_DB_MODIFY_ENTRY 1
-#define MWL8K_STA_DB_DEL_ENTRY 2
-#define MWL8K_STA_DB_FLUSH 3
-
-/* Peer Entry flags - used to define the type of the peer node */
-#define MWL8K_PEER_TYPE_ACCESSPOINT 2
-
-struct peer_capability_info {
- /* Peer type - AP vs. STA. */
- __u8 peer_type;
-
- /* Basic 802.11 capabilities from assoc resp. */
- __le16 basic_caps;
-
- /* Set if peer supports 802.11n high throughput (HT). */
- __u8 ht_support;
-
- /* Valid if HT is supported. */
- __le16 ht_caps;
- __u8 extended_ht_caps;
- struct ewc_ht_info ewc_info;
-
- /* Legacy rate table. Intersection of our rates and peer rates. */
- __u8 legacy_rates[12];
-
- /* HT rate table. Intersection of our rates and peer rates. */
- __u8 ht_rates[16];
- __u8 pad[16];
-
- /* If set, interoperability mode, no proprietary extensions. */
- __u8 interop;
- __u8 pad2;
- __u8 station_id;
- __le16 amsdu_enabled;
-} __attribute__((packed));
-
-/* Inline functions to manipulate QoS field in data descriptor. */
-static inline u16 mwl8k_qos_setbit_eosp(u16 qos)
-{
- u16 val_mask = 1 << 4;
-
- /* End of Service Period Bit 4 */
- return qos | val_mask;
-}
-
-static inline u16 mwl8k_qos_setbit_ack(u16 qos, u8 ack_policy)
-{
- u16 val_mask = 0x3;
- u8 shift = 5;
- u16 qos_mask = ~(val_mask << shift);
-
- /* Ack Policy Bit 5-6 */
- return (qos & qos_mask) | ((ack_policy & val_mask) << shift);
-}
-
-static inline u16 mwl8k_qos_setbit_amsdu(u16 qos)
-{
- u16 val_mask = 1 << 7;
-
- /* AMSDU present Bit 7 */
- return qos | val_mask;
-}
-
-static inline u16 mwl8k_qos_setbit_qlen(u16 qos, u8 len)
-{
- u16 val_mask = 0xff;
- u8 shift = 8;
- u16 qos_mask = ~(val_mask << shift);
-
- /* Queue Length Bits 8-15 */
- return (qos & qos_mask) | ((len & val_mask) << shift);
-}
-
/* DMA header used by firmware and hardware. */
struct mwl8k_dma_data {
__le16 fwlen;
@@ -761,9 +692,9 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
/*
- * Packet reception for 88w8366.
+ * Packet reception for 88w8366 AP firmware.
*/
-struct mwl8k_rxd_8366 {
+struct mwl8k_rxd_8366_ap {
__le16 pkt_len;
__u8 sq2;
__u8 rate;
@@ -781,23 +712,23 @@ struct mwl8k_rxd_8366 {
__u8 rx_ctrl;
} __attribute__((packed));
-#define MWL8K_8366_RATE_INFO_MCS_FORMAT 0x80
-#define MWL8K_8366_RATE_INFO_40MHZ 0x40
-#define MWL8K_8366_RATE_INFO_RATEID(x) ((x) & 0x3f)
+#define MWL8K_8366_AP_RATE_INFO_MCS_FORMAT 0x80
+#define MWL8K_8366_AP_RATE_INFO_40MHZ 0x40
+#define MWL8K_8366_AP_RATE_INFO_RATEID(x) ((x) & 0x3f)
-#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80
+#define MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST 0x80
-static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr)
+static void mwl8k_rxd_8366_ap_init(void *_rxd, dma_addr_t next_dma_addr)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
- rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST;
+ rxd->rx_ctrl = MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST;
}
-static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
+static void mwl8k_rxd_8366_ap_refill(void *_rxd, dma_addr_t addr, int len)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -806,12 +737,12 @@ static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
- __le16 *qos)
+mwl8k_rxd_8366_ap_process(void *_rxd, struct ieee80211_rx_status *status,
+ __le16 *qos)
{
- struct mwl8k_rxd_8366 *rxd = _rxd;
+ struct mwl8k_rxd_8366_ap *rxd = _rxd;
- if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST))
+ if (!(rxd->rx_ctrl & MWL8K_8366_AP_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
@@ -820,23 +751,29 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
status->signal = -rxd->rssi;
status->noise = -rxd->noise_floor;
- if (rxd->rate & MWL8K_8366_RATE_INFO_MCS_FORMAT) {
+ if (rxd->rate & MWL8K_8366_AP_RATE_INFO_MCS_FORMAT) {
status->flag |= RX_FLAG_HT;
- if (rxd->rate & MWL8K_8366_RATE_INFO_40MHZ)
+ if (rxd->rate & MWL8K_8366_AP_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
- status->rate_idx = MWL8K_8366_RATE_INFO_RATEID(rxd->rate);
+ status->rate_idx = MWL8K_8366_AP_RATE_INFO_RATEID(rxd->rate);
} else {
int i;
- for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) {
- if (mwl8k_rates[i].hw_value == rxd->rate) {
+ for (i = 0; i < ARRAY_SIZE(mwl8k_rates_24); i++) {
+ if (mwl8k_rates_24[i].hw_value == rxd->rate) {
status->rate_idx = i;
break;
}
}
}
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -844,17 +781,17 @@ mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status,
return le16_to_cpu(rxd->pkt_len);
}
-static struct rxd_ops rxd_8366_ops = {
- .rxd_size = sizeof(struct mwl8k_rxd_8366),
- .rxd_init = mwl8k_rxd_8366_init,
- .rxd_refill = mwl8k_rxd_8366_refill,
- .rxd_process = mwl8k_rxd_8366_process,
+static struct rxd_ops rxd_8366_ap_ops = {
+ .rxd_size = sizeof(struct mwl8k_rxd_8366_ap),
+ .rxd_init = mwl8k_rxd_8366_ap_init,
+ .rxd_refill = mwl8k_rxd_8366_ap_refill,
+ .rxd_process = mwl8k_rxd_8366_ap_process,
};
/*
- * Packet reception for 88w8687.
+ * Packet reception for STA firmware.
*/
-struct mwl8k_rxd_8687 {
+struct mwl8k_rxd_sta {
__le16 pkt_len;
__u8 link_quality;
__u8 noise_level;
@@ -871,26 +808,26 @@ struct mwl8k_rxd_8687 {
__u8 pad2[2];
} __attribute__((packed));
-#define MWL8K_8687_RATE_INFO_SHORTPRE 0x8000
-#define MWL8K_8687_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
-#define MWL8K_8687_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
-#define MWL8K_8687_RATE_INFO_40MHZ 0x0004
-#define MWL8K_8687_RATE_INFO_SHORTGI 0x0002
-#define MWL8K_8687_RATE_INFO_MCS_FORMAT 0x0001
+#define MWL8K_STA_RATE_INFO_SHORTPRE 0x8000
+#define MWL8K_STA_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
+#define MWL8K_STA_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
+#define MWL8K_STA_RATE_INFO_40MHZ 0x0004
+#define MWL8K_STA_RATE_INFO_SHORTGI 0x0002
+#define MWL8K_STA_RATE_INFO_MCS_FORMAT 0x0001
-#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST 0x02
+#define MWL8K_STA_RX_CTRL_OWNED_BY_HOST 0x02
-static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr)
+static void mwl8k_rxd_sta_init(void *_rxd, dma_addr_t next_dma_addr)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
- rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST;
+ rxd->rx_ctrl = MWL8K_STA_RX_CTRL_OWNED_BY_HOST;
}
-static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
+static void mwl8k_rxd_sta_refill(void *_rxd, dma_addr_t addr, int len)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
rxd->pkt_len = cpu_to_le16(len);
rxd->pkt_phys_addr = cpu_to_le32(addr);
@@ -899,13 +836,13 @@ static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
}
static int
-mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
+mwl8k_rxd_sta_process(void *_rxd, struct ieee80211_rx_status *status,
__le16 *qos)
{
- struct mwl8k_rxd_8687 *rxd = _rxd;
+ struct mwl8k_rxd_sta *rxd = _rxd;
u16 rate_info;
- if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST))
+ if (!(rxd->rx_ctrl & MWL8K_STA_RX_CTRL_OWNED_BY_HOST))
return -1;
rmb();
@@ -915,19 +852,25 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
status->signal = -rxd->rssi;
status->noise = -rxd->noise_level;
- status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info);
- status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info);
+ status->antenna = MWL8K_STA_RATE_INFO_ANTSELECT(rate_info);
+ status->rate_idx = MWL8K_STA_RATE_INFO_RATEID(rate_info);
- if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE)
+ if (rate_info & MWL8K_STA_RATE_INFO_SHORTPRE)
status->flag |= RX_FLAG_SHORTPRE;
- if (rate_info & MWL8K_8687_RATE_INFO_40MHZ)
+ if (rate_info & MWL8K_STA_RATE_INFO_40MHZ)
status->flag |= RX_FLAG_40MHZ;
- if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI)
+ if (rate_info & MWL8K_STA_RATE_INFO_SHORTGI)
status->flag |= RX_FLAG_SHORT_GI;
- if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT)
+ if (rate_info & MWL8K_STA_RATE_INFO_MCS_FORMAT)
status->flag |= RX_FLAG_HT;
- status->band = IEEE80211_BAND_2GHZ;
+ if (rxd->channel > 14) {
+ status->band = IEEE80211_BAND_5GHZ;
+ if (!(status->flag & RX_FLAG_HT))
+ status->rate_idx -= 5;
+ } else {
+ status->band = IEEE80211_BAND_2GHZ;
+ }
status->freq = ieee80211_channel_to_frequency(rxd->channel);
*qos = rxd->qos_control;
@@ -935,11 +878,11 @@ mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status,
return le16_to_cpu(rxd->pkt_len);
}
-static struct rxd_ops rxd_8687_ops = {
- .rxd_size = sizeof(struct mwl8k_rxd_8687),
- .rxd_init = mwl8k_rxd_8687_init,
- .rxd_refill = mwl8k_rxd_8687_refill,
- .rxd_process = mwl8k_rxd_8687_process,
+static struct rxd_ops rxd_sta_ops = {
+ .rxd_size = sizeof(struct mwl8k_rxd_sta),
+ .rxd_init = mwl8k_rxd_sta_init,
+ .rxd_refill = mwl8k_rxd_sta_refill,
+ .rxd_process = mwl8k_rxd_sta_process,
};
@@ -1153,16 +1096,18 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
* Packet transmission.
*/
-/* Transmit packet ACK policy */
-#define MWL8K_TXD_ACK_POLICY_NORMAL 0
-#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3
-
#define MWL8K_TXD_STATUS_OK 0x00000001
#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
#define MWL8K_TXD_STATUS_MULTICAST_TX 0x00000008
#define MWL8K_TXD_STATUS_FW_OWNED 0x80000000
+#define MWL8K_QOS_QLEN_UNSPEC 0xff00
+#define MWL8K_QOS_ACK_POLICY_MASK 0x0060
+#define MWL8K_QOS_ACK_POLICY_NORMAL 0x0000
+#define MWL8K_QOS_ACK_POLICY_BLOCKACK 0x0060
+#define MWL8K_QOS_EOSP 0x0010
+
struct mwl8k_tx_desc {
__le32 status;
__u8 data_rate;
@@ -1187,8 +1132,7 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
int size;
int i;
- memset(&txq->stats, 0, sizeof(struct ieee80211_tx_queue_stats));
- txq->stats.limit = MWL8K_TX_DESCS;
+ txq->len = 0;
txq->head = 0;
txq->tail = 0;
@@ -1264,7 +1208,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
printk(KERN_ERR "%s: txq[%d] len=%d head=%d tail=%d "
"fw_owned=%d drv_owned=%d unused=%d\n",
wiphy_name(hw->wiphy), i,
- txq->stats.len, txq->head, txq->tail,
+ txq->len, txq->head, txq->tail,
fw_owned, drv_owned, unused);
}
}
@@ -1272,7 +1216,7 @@ static void mwl8k_dump_tx_rings(struct ieee80211_hw *hw)
/*
* Must be called with priv->fw_mutex held and tx queues stopped.
*/
-#define MWL8K_TX_WAIT_TIMEOUT_MS 1000
+#define MWL8K_TX_WAIT_TIMEOUT_MS 5000
static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
{
@@ -1316,8 +1260,8 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
}
if (priv->pending_tx_pkts < oldcount) {
- printk(KERN_NOTICE "%s: timeout waiting for tx "
- "rings to drain (%d -> %d pkts), retrying\n",
+ printk(KERN_NOTICE "%s: waiting for tx rings "
+ "to drain (%d -> %d pkts)\n",
wiphy_name(hw->wiphy), oldcount,
priv->pending_tx_pkts);
retry = 1;
@@ -1342,13 +1286,15 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
MWL8K_TXD_STATUS_OK_RETRY | \
MWL8K_TXD_STATUS_OK_MORE_RETRY))
-static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
+static int
+mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int limit, int force)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- int wake = 0;
+ int processed;
- while (txq->stats.len > 0) {
+ processed = 0;
+ while (txq->len > 0 && limit--) {
int tx;
struct mwl8k_tx_desc *tx_desc;
unsigned long addr;
@@ -1370,8 +1316,8 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
}
txq->head = (tx + 1) % MWL8K_TX_DESCS;
- BUG_ON(txq->stats.len == 0);
- txq->stats.len--;
+ BUG_ON(txq->len == 0);
+ txq->len--;
priv->pending_tx_pkts--;
addr = le32_to_cpu(tx_desc->pkt_phys_addr);
@@ -1395,11 +1341,13 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
ieee80211_tx_status_irqsafe(hw, skb);
- wake = 1;
+ processed++;
}
- if (wake && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
+ if (processed && priv->radio_on && !mutex_is_locked(&priv->fw_mutex))
ieee80211_wake_queue(hw, index);
+
+ return processed;
}
/* must be called only when the card's transmit is completely halted */
@@ -1408,7 +1356,7 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_tx_queue *txq = priv->txq + index;
- mwl8k_txq_reclaim(hw, index, 1);
+ mwl8k_txq_reclaim(hw, index, INT_MAX, 1);
kfree(txq->skb);
txq->skb = NULL;
@@ -1446,11 +1394,9 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
mwl8k_vif = MWL8K_VIF(tx_info->control.vif);
if (tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
- u16 seqno = mwl8k_vif->seqno;
-
wh->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
- wh->seq_ctrl |= cpu_to_le16(seqno << 4);
- mwl8k_vif->seqno = seqno++ % 4096;
+ wh->seq_ctrl |= cpu_to_le16(mwl8k_vif->seqno);
+ mwl8k_vif->seqno += 0x10;
}
/* Setup firmware control bit fields for each frame type. */
@@ -1459,24 +1405,17 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
if (ieee80211_is_mgmt(wh->frame_control) ||
ieee80211_is_ctl(wh->frame_control)) {
txdatarate = 0;
- qos = mwl8k_qos_setbit_eosp(qos);
- /* Set Queue size to unspecified */
- qos = mwl8k_qos_setbit_qlen(qos, 0xff);
+ qos |= MWL8K_QOS_QLEN_UNSPEC | MWL8K_QOS_EOSP;
} else if (ieee80211_is_data(wh->frame_control)) {
txdatarate = 1;
if (is_multicast_ether_addr(wh->addr1))
txstatus |= MWL8K_TXD_STATUS_MULTICAST_TX;
- /* Send pkt in an aggregate if AMPDU frame. */
+ qos &= ~MWL8K_QOS_ACK_POLICY_MASK;
if (tx_info->flags & IEEE80211_TX_CTL_AMPDU)
- qos = mwl8k_qos_setbit_ack(qos,
- MWL8K_TXD_ACK_POLICY_BLOCKACK);
+ qos |= MWL8K_QOS_ACK_POLICY_BLOCKACK;
else
- qos = mwl8k_qos_setbit_ack(qos,
- MWL8K_TXD_ACK_POLICY_NORMAL);
-
- if (qos & IEEE80211_QOS_CONTROL_A_MSDU_PRESENT)
- qos = mwl8k_qos_setbit_amsdu(qos);
+ qos |= MWL8K_QOS_ACK_POLICY_NORMAL;
}
dma = pci_map_single(priv->pdev, skb->data,
@@ -1503,12 +1442,14 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
tx->pkt_phys_addr = cpu_to_le32(dma);
tx->pkt_len = cpu_to_le16(skb->len);
tx->rate_info = 0;
- tx->peer_id = mwl8k_vif->peer_id;
+ if (!priv->ap_fw && tx_info->control.sta != NULL)
+ tx->peer_id = MWL8K_STA(tx_info->control.sta)->peer_id;
+ else
+ tx->peer_id = 0;
wmb();
tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
- txq->stats.count++;
- txq->stats.len++;
+ txq->len++;
priv->pending_tx_pkts++;
txq->tail++;
@@ -1656,6 +1597,56 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
return rc;
}
+static int mwl8k_post_pervif_cmd(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct mwl8k_cmd_pkt *cmd)
+{
+ if (vif != NULL)
+ cmd->macid = MWL8K_VIF(vif)->macid;
+ return mwl8k_post_cmd(hw, cmd);
+}
+
+/*
+ * Setup code shared between STA and AP firmware images.
+ */
+static void mwl8k_setup_2ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_24) != sizeof(mwl8k_channels_24));
+ memcpy(priv->channels_24, mwl8k_channels_24, sizeof(mwl8k_channels_24));
+
+ BUILD_BUG_ON(sizeof(priv->rates_24) != sizeof(mwl8k_rates_24));
+ memcpy(priv->rates_24, mwl8k_rates_24, sizeof(mwl8k_rates_24));
+
+ priv->band_24.band = IEEE80211_BAND_2GHZ;
+ priv->band_24.channels = priv->channels_24;
+ priv->band_24.n_channels = ARRAY_SIZE(mwl8k_channels_24);
+ priv->band_24.bitrates = priv->rates_24;
+ priv->band_24.n_bitrates = ARRAY_SIZE(mwl8k_rates_24);
+
+ hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band_24;
+}
+
+static void mwl8k_setup_5ghz_band(struct ieee80211_hw *hw)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ BUILD_BUG_ON(sizeof(priv->channels_50) != sizeof(mwl8k_channels_50));
+ memcpy(priv->channels_50, mwl8k_channels_50, sizeof(mwl8k_channels_50));
+
+ BUILD_BUG_ON(sizeof(priv->rates_50) != sizeof(mwl8k_rates_50));
+ memcpy(priv->rates_50, mwl8k_rates_50, sizeof(mwl8k_rates_50));
+
+ priv->band_50.band = IEEE80211_BAND_5GHZ;
+ priv->band_50.channels = priv->channels_50;
+ priv->band_50.n_channels = ARRAY_SIZE(mwl8k_channels_50);
+ priv->band_50.bitrates = priv->rates_50;
+ priv->band_50.n_bitrates = ARRAY_SIZE(mwl8k_rates_50);
+
+ hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &priv->band_50;
+}
+
/*
* CMD_GET_HW_SPEC (STA version).
*/
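
Commands flagged "per-vif" in the opcode list above are routed to a particular interface by stamping the firmware macid into the new macid byte of the command header; mwl8k_post_pervif_cmd() is the single place that does so, leaving the zero-initialised macid alone when no vif is given. A sketch of how a hypothetical per-vif command would use the helper (example_pervif_cmd and the 0x1234 opcode are made up for illustration):

struct example_pervif_cmd {
	struct mwl8k_cmd_pkt header;
	__le16 value;
} __attribute__((packed));

static int example_send_pervif(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif, u16 value)
{
	struct example_pervif_cmd *cmd;
	int rc;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.code = cpu_to_le16(0x1234);		/* made-up opcode */
	cmd->header.length = cpu_to_le16(sizeof(*cmd));
	cmd->value = cpu_to_le16(value);

	/* header.macid is filled in from MWL8K_VIF(vif)->macid */
	rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
	kfree(cmd);

	return rc;
}
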
@@ -1678,6 +1669,89 @@ struct mwl8k_cmd_get_hw_spec_sta {
__le32 total_rxd;
} __attribute__((packed));
+#define MWL8K_CAP_MAX_AMSDU 0x20000000
+#define MWL8K_CAP_GREENFIELD 0x08000000
+#define MWL8K_CAP_AMPDU 0x04000000
+#define MWL8K_CAP_RX_STBC 0x01000000
+#define MWL8K_CAP_TX_STBC 0x00800000
+#define MWL8K_CAP_SHORTGI_40MHZ 0x00400000
+#define MWL8K_CAP_SHORTGI_20MHZ 0x00200000
+#define MWL8K_CAP_RX_ANTENNA_MASK 0x000e0000
+#define MWL8K_CAP_TX_ANTENNA_MASK 0x0001c000
+#define MWL8K_CAP_DELAY_BA 0x00003000
+#define MWL8K_CAP_MIMO 0x00000200
+#define MWL8K_CAP_40MHZ 0x00000100
+#define MWL8K_CAP_BAND_MASK 0x00000007
+#define MWL8K_CAP_5GHZ 0x00000004
+#define MWL8K_CAP_2GHZ4 0x00000001
+
+static void
+mwl8k_set_ht_caps(struct ieee80211_hw *hw,
+ struct ieee80211_supported_band *band, u32 cap)
+{
+ int rx_streams;
+ int tx_streams;
+
+ band->ht_cap.ht_supported = 1;
+
+ if (cap & MWL8K_CAP_MAX_AMSDU)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
+ if (cap & MWL8K_CAP_GREENFIELD)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_GRN_FLD;
+ if (cap & MWL8K_CAP_AMPDU) {
+ hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
+ band->ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
+ band->ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE;
+ }
+ if (cap & MWL8K_CAP_RX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_RX_STBC;
+ if (cap & MWL8K_CAP_TX_STBC)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
+ if (cap & MWL8K_CAP_SHORTGI_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
+ if (cap & MWL8K_CAP_SHORTGI_20MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
+ if (cap & MWL8K_CAP_DELAY_BA)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_DELAY_BA;
+ if (cap & MWL8K_CAP_40MHZ)
+ band->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
+
+ rx_streams = hweight32(cap & MWL8K_CAP_RX_ANTENNA_MASK);
+ tx_streams = hweight32(cap & MWL8K_CAP_TX_ANTENNA_MASK);
+
+ band->ht_cap.mcs.rx_mask[0] = 0xff;
+ if (rx_streams >= 2)
+ band->ht_cap.mcs.rx_mask[1] = 0xff;
+ if (rx_streams >= 3)
+ band->ht_cap.mcs.rx_mask[2] = 0xff;
+ band->ht_cap.mcs.rx_mask[4] = 0x01;
+ band->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
+
+ if (rx_streams != tx_streams) {
+ band->ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
+ band->ht_cap.mcs.tx_params |= (tx_streams - 1) <<
+ IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
+ }
+}
+
+static void
+mwl8k_set_caps(struct ieee80211_hw *hw, u32 caps)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if ((caps & MWL8K_CAP_2GHZ4) || !(caps & MWL8K_CAP_BAND_MASK)) {
+ mwl8k_setup_2ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_24, caps);
+ }
+
+ if (caps & MWL8K_CAP_5GHZ) {
+ mwl8k_setup_5ghz_band(hw);
+ if (caps & MWL8K_CAP_MIMO)
+ mwl8k_set_ht_caps(hw, &priv->band_50, caps);
+ }
+}
+
static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
{
struct mwl8k_priv *priv = hw->priv;
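
mwl8k_set_ht_caps() derives spatial-stream counts by popcounting the RX and TX antenna fields of the capability word (hweight32() of a 3-bit field yields 0-3), then enables one MCS rx_mask byte per RX stream and advertises an asymmetric TX stream count via IEEE80211_HT_MCS_TX_RX_DIFF when the two differ. A short worked example under an assumed capability value:

#include <linux/bitops.h>

/*
 * Assumed example: all three RX antenna bits and two TX antenna bits set.
 *
 *   cap & MWL8K_CAP_RX_ANTENNA_MASK (0x000e0000) -> hweight32() = 3
 *   cap & MWL8K_CAP_TX_ANTENNA_MASK (0x0001c000) -> hweight32() = 2
 *
 * so rx_mask[0..2] all become 0xff (MCS 0-23 receivable) while TX is
 * limited to two streams through the tx_params fields.
 */
static void example_stream_counts(u32 cap)
{
	int rx_streams = hweight32(cap & 0x000e0000);
	int tx_streams = hweight32(cap & 0x0001c000);

	(void)rx_streams;
	(void)tx_streams;
}
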
@@ -1708,6 +1782,9 @@ static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_set_caps(hw, le32_to_cpu(cmd->caps));
+ priv->ap_macids_supported = 0x00000000;
+ priv->sta_macids_supported = 0x00000001;
}
kfree(cmd);
@@ -1761,6 +1838,9 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
priv->fw_rev = le32_to_cpu(cmd->fw_rev);
priv->hw_rev = cmd->hw_rev;
+ mwl8k_setup_2ghz_band(hw);
+ priv->ap_macids_supported = 0x000000ff;
+ priv->sta_macids_supported = 0x00000000;
off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
@@ -1806,7 +1886,9 @@ struct mwl8k_cmd_set_hw_spec {
__le32 total_rxd;
} __attribute__((packed));
-#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP 0x00000020
+#define MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON 0x00000010
static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
{
@@ -1827,7 +1909,9 @@ static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
- cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT);
+ cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_PROBERESP |
+ MWL8K_SET_HW_SPEC_FLAG_HOSTFORM_BEACON);
cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
@@ -1897,9 +1981,9 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
}
/*
- * CMD_802_11_GET_STAT.
+ * CMD_GET_STAT.
*/
-struct mwl8k_cmd_802_11_get_stat {
+struct mwl8k_cmd_get_stat {
struct mwl8k_cmd_pkt header;
__le32 stats[64];
} __attribute__((packed));
@@ -1909,10 +1993,10 @@ struct mwl8k_cmd_802_11_get_stat {
#define MWL8K_STAT_FCS_ERROR 24
#define MWL8K_STAT_RTS_SUCCESS 11
-static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
+static int mwl8k_cmd_get_stat(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
{
- struct mwl8k_cmd_802_11_get_stat *cmd;
+ struct mwl8k_cmd_get_stat *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -1939,9 +2023,9 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
}
/*
- * CMD_802_11_RADIO_CONTROL.
+ * CMD_RADIO_CONTROL.
*/
-struct mwl8k_cmd_802_11_radio_control {
+struct mwl8k_cmd_radio_control {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 control;
@@ -1949,10 +2033,10 @@ struct mwl8k_cmd_802_11_radio_control {
} __attribute__((packed));
static int
-mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
+mwl8k_cmd_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_802_11_radio_control *cmd;
+ struct mwl8k_cmd_radio_control *cmd;
int rc;
if (enable == priv->radio_on && !force)
@@ -1977,36 +2061,32 @@ mwl8k_cmd_802_11_radio_control(struct ieee80211_hw *hw, bool enable, bool force)
return rc;
}
-static int mwl8k_cmd_802_11_radio_disable(struct ieee80211_hw *hw)
+static int mwl8k_cmd_radio_disable(struct ieee80211_hw *hw)
{
- return mwl8k_cmd_802_11_radio_control(hw, 0, 0);
+ return mwl8k_cmd_radio_control(hw, 0, 0);
}
-static int mwl8k_cmd_802_11_radio_enable(struct ieee80211_hw *hw)
+static int mwl8k_cmd_radio_enable(struct ieee80211_hw *hw)
{
- return mwl8k_cmd_802_11_radio_control(hw, 1, 0);
+ return mwl8k_cmd_radio_control(hw, 1, 0);
}
static int
mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
{
- struct mwl8k_priv *priv;
-
- if (hw == NULL || hw->priv == NULL)
- return -EINVAL;
- priv = hw->priv;
+ struct mwl8k_priv *priv = hw->priv;
priv->radio_short_preamble = short_preamble;
- return mwl8k_cmd_802_11_radio_control(hw, 1, 1);
+ return mwl8k_cmd_radio_control(hw, 1, 1);
}
/*
- * CMD_802_11_RF_TX_POWER.
+ * CMD_RF_TX_POWER.
*/
#define MWL8K_TX_POWER_LEVEL_TOTAL 8
-struct mwl8k_cmd_802_11_rf_tx_power {
+struct mwl8k_cmd_rf_tx_power {
struct mwl8k_cmd_pkt header;
__le16 action;
__le16 support_level;
@@ -2015,9 +2095,9 @@ struct mwl8k_cmd_802_11_rf_tx_power {
__le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
} __attribute__((packed));
-static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm)
+static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
{
- struct mwl8k_cmd_802_11_rf_tx_power *cmd;
+ struct mwl8k_cmd_rf_tx_power *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
@@ -2069,6 +2149,36 @@ mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
}
/*
+ * CMD_SET_BEACON.
+ */
+struct mwl8k_cmd_set_beacon {
+ struct mwl8k_cmd_pkt header;
+ __le16 beacon_len;
+ __u8 beacon[0];
+};
+
+static int mwl8k_cmd_set_beacon(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *beacon, int len)
+{
+ struct mwl8k_cmd_set_beacon *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd) + len, GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_BEACON);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd) + len);
+ cmd->beacon_len = cpu_to_le16(len);
+ memcpy(cmd->beacon, beacon, len);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
* CMD_SET_PRE_SCAN.
*/
struct mwl8k_cmd_set_pre_scan {
@@ -2103,7 +2213,7 @@ struct mwl8k_cmd_set_post_scan {
} __attribute__((packed));
static int
-mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, __u8 *mac)
+mwl8k_cmd_set_post_scan(struct ieee80211_hw *hw, const __u8 *mac)
{
struct mwl8k_cmd_set_post_scan *cmd;
int rc;
@@ -2134,8 +2244,9 @@ struct mwl8k_cmd_set_rf_channel {
} __attribute__((packed));
static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
- struct ieee80211_channel *channel)
+ struct ieee80211_conf *conf)
{
+ struct ieee80211_channel *channel = conf->channel;
struct mwl8k_cmd_set_rf_channel *cmd;
int rc;
@@ -2147,10 +2258,19 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_CMD_SET);
cmd->current_channel = channel->hw_value;
+
if (channel->band == IEEE80211_BAND_2GHZ)
- cmd->channel_flags = cpu_to_le32(0x00000081);
- else
- cmd->channel_flags = cpu_to_le32(0x00000000);
+ cmd->channel_flags |= cpu_to_le32(0x00000001);
+ else if (channel->band == IEEE80211_BAND_5GHZ)
+ cmd->channel_flags |= cpu_to_le32(0x00000004);
+
+ if (conf->channel_type == NL80211_CHAN_NO_HT ||
+ conf->channel_type == NL80211_CHAN_HT20)
+ cmd->channel_flags |= cpu_to_le32(0x00000080);
+ else if (conf->channel_type == NL80211_CHAN_HT40MINUS)
+ cmd->channel_flags |= cpu_to_le32(0x000001900);
+ else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
+ cmd->channel_flags |= cpu_to_le32(0x000000900);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2159,85 +2279,75 @@ static int mwl8k_cmd_set_rf_channel(struct ieee80211_hw *hw,
}
/*
- * CMD_SET_SLOT.
+ * CMD_SET_AID.
*/
-struct mwl8k_cmd_set_slot {
- struct mwl8k_cmd_pkt header;
- __le16 action;
- __u8 short_slot;
-} __attribute__((packed));
-
-static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
-{
- struct mwl8k_cmd_set_slot *cmd;
- int rc;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
-
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
- cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(MWL8K_CMD_SET);
- cmd->short_slot = short_slot_time;
-
- rc = mwl8k_post_cmd(hw, &cmd->header);
- kfree(cmd);
+#define MWL8K_FRAME_PROT_DISABLED 0x00
+#define MWL8K_FRAME_PROT_11G 0x07
+#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
+#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
- return rc;
-}
+struct mwl8k_cmd_update_set_aid {
+ struct mwl8k_cmd_pkt header;
+ __le16 aid;
-/*
- * CMD_MIMO_CONFIG.
- */
-struct mwl8k_cmd_mimo_config {
- struct mwl8k_cmd_pkt header;
- __le32 action;
- __u8 rx_antenna_map;
- __u8 tx_antenna_map;
+ /* AP's MAC address (BSSID) */
+ __u8 bssid[ETH_ALEN];
+ __le16 protection_mode;
+ __u8 supp_rates[14];
} __attribute__((packed));
-static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
+static void legacy_rate_mask_to_array(u8 *rates, u32 mask)
{
- struct mwl8k_cmd_mimo_config *cmd;
- int rc;
-
- cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
- if (cmd == NULL)
- return -ENOMEM;
-
- cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
- cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
- cmd->rx_antenna_map = rx;
- cmd->tx_antenna_map = tx;
+ int i;
+ int j;
- rc = mwl8k_post_cmd(hw, &cmd->header);
- kfree(cmd);
+ /*
+ * Clear nonstandard rates 4 and 13.
+ */
+ mask &= 0x1fef;
- return rc;
+ for (i = 0, j = 0; i < 14; i++) {
+ if (mask & (1 << i))
+ rates[j++] = mwl8k_rates_24[i].hw_value;
+ }
}
-/*
- * CMD_ENABLE_SNIFFER.
- */
-struct mwl8k_cmd_enable_sniffer {
- struct mwl8k_cmd_pkt header;
- __le32 action;
-} __attribute__((packed));
-
-static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
+static int
+mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u32 legacy_rate_mask)
{
- struct mwl8k_cmd_enable_sniffer *cmd;
+ struct mwl8k_cmd_update_set_aid *cmd;
+ u16 prot_mode;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le32(!!enable);
+ cmd->aid = cpu_to_le16(vif->bss_conf.aid);
+ memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
+
+ if (vif->bss_conf.use_cts_prot) {
+ prot_mode = MWL8K_FRAME_PROT_11G;
+ } else {
+ switch (vif->bss_conf.ht_operation_mode &
+ IEEE80211_HT_OP_MODE_PROTECTION) {
+ case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
+ prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
+ break;
+ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
+ prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
+ break;
+ default:
+ prot_mode = MWL8K_FRAME_PROT_DISABLED;
+ break;
+ }
+ }
+ cmd->protection_mode = cpu_to_le16(prot_mode);
+
+ legacy_rate_mask_to_array(cmd->supp_rates, legacy_rate_mask);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
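
legacy_rate_mask_to_array() converts mac80211's legacy-rate bitmap (one bit per index into the 2.4 GHz rate table) into the flat hardware-rate list the firmware expects, after clearing bits 4 and 13 so the two nonstandard table entries never reach the firmware. A small illustrative check of the mapping, using only the hardware rate codes visible in this patch (2, 4 and 11 for the 1, 2 and 5.5 Mb/s CCK rates); example_check_rate_mapping() is not part of the driver:

#include <linux/bug.h>

static void example_check_rate_mapping(void)
{
	u8 rates[14] = { 0 };

	/* bits 0-2 of the mask select the first three table entries */
	legacy_rate_mask_to_array(rates, 0x00000007);
	WARN_ON(rates[0] != 2 || rates[1] != 4 || rates[2] != 11);
}
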
@@ -2246,37 +2356,32 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
}
/*
- * CMD_SET_MAC_ADDR.
+ * CMD_SET_RATE.
*/
-struct mwl8k_cmd_set_mac_addr {
- struct mwl8k_cmd_pkt header;
- union {
- struct {
- __le16 mac_type;
- __u8 mac_addr[ETH_ALEN];
- } mbss;
- __u8 mac_addr[ETH_ALEN];
- };
+struct mwl8k_cmd_set_rate {
+ struct mwl8k_cmd_pkt header;
+ __u8 legacy_rates[14];
+
+ /* Bitmap for supported MCS codes. */
+ __u8 mcs_set[16];
+ __u8 reserved[16];
} __attribute__((packed));
-static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
+static int
+mwl8k_cmd_set_rate(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ u32 legacy_rate_mask, u8 *mcs_rates)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_set_mac_addr *cmd;
+ struct mwl8k_cmd_set_rate *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- if (priv->ap_fw) {
- cmd->mbss.mac_type = 0;
- memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
- } else {
- memcpy(cmd->mac_addr, mac, ETH_ALEN);
- }
+ legacy_rate_mask_to_array(cmd->legacy_rates, legacy_rate_mask);
+ memcpy(cmd->mcs_set, mcs_rates, 16);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2284,29 +2389,40 @@ static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
return rc;
}
-
/*
- * CMD_SET_RATEADAPT_MODE.
+ * CMD_FINALIZE_JOIN.
*/
-struct mwl8k_cmd_set_rate_adapt_mode {
+#define MWL8K_FJ_BEACON_MAXLEN 128
+
+struct mwl8k_cmd_finalize_join {
struct mwl8k_cmd_pkt header;
- __le16 action;
- __le16 mode;
+ __le32 sleep_interval; /* Number of beacon periods to sleep */
+ __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
} __attribute__((packed));
-static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
+static int mwl8k_cmd_finalize_join(struct ieee80211_hw *hw, void *frame,
+ int framelen, int dtim)
{
- struct mwl8k_cmd_set_rate_adapt_mode *cmd;
+ struct mwl8k_cmd_finalize_join *cmd;
+ struct ieee80211_mgmt *payload = frame;
+ int payload_len;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(MWL8K_CMD_SET);
- cmd->mode = cpu_to_le16(mode);
+ cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
+
+ payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
+ if (payload_len < 0)
+ payload_len = 0;
+ else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
+ payload_len = MWL8K_FJ_BEACON_MAXLEN;
+
+ memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2315,59 +2431,57 @@ static int mwl8k_cmd_setrateadaptmode(struct ieee80211_hw *hw, __u16 mode)
}
/*
- * CMD_SET_WMM_MODE.
+ * CMD_SET_RTS_THRESHOLD.
*/
-struct mwl8k_cmd_set_wmm {
+struct mwl8k_cmd_set_rts_threshold {
struct mwl8k_cmd_pkt header;
__le16 action;
+ __le16 threshold;
} __attribute__((packed));
-static int mwl8k_set_wmm(struct ieee80211_hw *hw, bool enable)
+static int
+mwl8k_cmd_set_rts_threshold(struct ieee80211_hw *hw, int rts_thresh)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_cmd_set_wmm *cmd;
+ struct mwl8k_cmd_set_rts_threshold *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(!!enable);
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->threshold = cpu_to_le16(rts_thresh);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
- if (!rc)
- priv->wmm_enabled = enable;
-
return rc;
}
/*
- * CMD_SET_RTS_THRESHOLD.
+ * CMD_SET_SLOT.
*/
-struct mwl8k_cmd_rts_threshold {
+struct mwl8k_cmd_set_slot {
struct mwl8k_cmd_pkt header;
__le16 action;
- __le16 threshold;
+ __u8 short_slot;
} __attribute__((packed));
-static int mwl8k_rts_threshold(struct ieee80211_hw *hw,
- u16 action, u16 threshold)
+static int mwl8k_cmd_set_slot(struct ieee80211_hw *hw, bool short_slot_time)
{
- struct mwl8k_cmd_rts_threshold *cmd;
+ struct mwl8k_cmd_set_slot *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_RTS_THRESHOLD);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_SLOT);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->action = cpu_to_le16(action);
- cmd->threshold = cpu_to_le16(threshold);
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->short_slot = short_slot_time;
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2426,9 +2540,9 @@ struct mwl8k_cmd_set_edca_params {
MWL8K_SET_EDCA_AIFS)
static int
-mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
- __u16 cw_min, __u16 cw_max,
- __u8 aifs, __u16 txop)
+mwl8k_cmd_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
+ __u16 cw_min, __u16 cw_max,
+ __u8 aifs, __u16 txop)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_cmd_set_edca_params *cmd;
@@ -2438,12 +2552,6 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
if (cmd == NULL)
return -ENOMEM;
- /*
- * Queues 0 (BE) and 1 (BK) are swapped in hardware for
- * this call.
- */
- qnum ^= !(qnum >> 1);
-
cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
@@ -2467,170 +2575,259 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
}
/*
- * CMD_FINALIZE_JOIN.
+ * CMD_SET_WMM_MODE.
*/
-#define MWL8K_FJ_BEACON_MAXLEN 128
-
-struct mwl8k_cmd_finalize_join {
+struct mwl8k_cmd_set_wmm_mode {
struct mwl8k_cmd_pkt header;
- __le32 sleep_interval; /* Number of beacon periods to sleep */
- __u8 beacon_data[MWL8K_FJ_BEACON_MAXLEN];
+ __le16 action;
} __attribute__((packed));
-static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
- int framelen, int dtim)
+static int mwl8k_cmd_set_wmm_mode(struct ieee80211_hw *hw, bool enable)
{
- struct mwl8k_cmd_finalize_join *cmd;
- struct ieee80211_mgmt *payload = frame;
- int payload_len;
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_cmd_set_wmm_mode *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_FINALIZE_JOIN);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_WMM_MODE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->sleep_interval = cpu_to_le32(dtim ? dtim : 1);
-
- payload_len = framelen - ieee80211_hdrlen(payload->frame_control);
- if (payload_len < 0)
- payload_len = 0;
- else if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
- payload_len = MWL8K_FJ_BEACON_MAXLEN;
-
- memcpy(cmd->beacon_data, &payload->u.beacon, payload_len);
+ cmd->action = cpu_to_le16(!!enable);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
+ if (!rc)
+ priv->wmm_enabled = enable;
+
return rc;
}
/*
- * CMD_UPDATE_STADB.
+ * CMD_MIMO_CONFIG.
*/
-struct mwl8k_cmd_update_sta_db {
+struct mwl8k_cmd_mimo_config {
struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __u8 rx_antenna_map;
+ __u8 tx_antenna_map;
+} __attribute__((packed));
- /* See STADB_ACTION_TYPE */
- __le32 action;
+static int mwl8k_cmd_mimo_config(struct ieee80211_hw *hw, __u8 rx, __u8 tx)
+{
+ struct mwl8k_cmd_mimo_config *cmd;
+ int rc;
- /* Peer MAC address */
- __u8 peer_addr[ETH_ALEN];
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
- __le32 reserved;
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_MIMO_CONFIG);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32((u32)MWL8K_CMD_SET);
+ cmd->rx_antenna_map = rx;
+ cmd->tx_antenna_map = tx;
- /* Peer info - valid during add/update. */
- struct peer_capability_info peer_info;
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_USE_FIXED_RATE (STA version).
+ */
+struct mwl8k_cmd_use_fixed_rate_sta {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[8];
+ __le32 rate_type;
+ __le32 reserved1;
+ __le32 reserved2;
} __attribute__((packed));
-static int mwl8k_cmd_update_sta_db(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif, __u32 action)
+#define MWL8K_USE_AUTO_RATE 0x0002
+#define MWL8K_UCAST_RATE 0
+
+static int mwl8k_cmd_use_fixed_rate_sta(struct ieee80211_hw *hw)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
- struct ieee80211_bss_conf *info = &mv_vif->bss_info;
- struct mwl8k_cmd_update_sta_db *cmd;
- struct peer_capability_info *peer_info;
+ struct mwl8k_cmd_use_fixed_rate_sta *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->rate_type = cpu_to_le32(MWL8K_UCAST_RATE);
- cmd->action = cpu_to_le32(action);
- peer_info = &cmd->peer_info;
- memcpy(cmd->peer_addr, mv_vif->bssid, ETH_ALEN);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- switch (action) {
- case MWL8K_STA_DB_ADD_ENTRY:
- case MWL8K_STA_DB_MODIFY_ENTRY:
- /* Build peer_info block */
- peer_info->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
- peer_info->basic_caps = cpu_to_le16(info->assoc_capability);
- memcpy(peer_info->legacy_rates, mwl8k_rateids,
- sizeof(mwl8k_rateids));
- peer_info->interop = 1;
- peer_info->amsdu_enabled = 0;
-
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = peer_info->station_id;
+ return rc;
+}
- break;
+/*
+ * CMD_USE_FIXED_RATE (AP version).
+ */
+struct mwl8k_cmd_use_fixed_rate_ap {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
+ __le32 allow_rate_drop;
+ __le32 num_rates;
+ struct mwl8k_rate_entry_ap {
+ __le32 is_ht_rate;
+ __le32 enable_retry;
+ __le32 rate;
+ __le32 retry_count;
+ } rate_entry[4];
+ u8 multicast_rate;
+ u8 multicast_rate_type;
+ u8 management_rate;
+} __attribute__((packed));
- case MWL8K_STA_DB_DEL_ENTRY:
- case MWL8K_STA_DB_FLUSH:
- default:
- rc = mwl8k_post_cmd(hw, &cmd->header);
- if (rc == 0)
- mv_vif->peer_id = 0;
- break;
- }
+static int
+mwl8k_cmd_use_fixed_rate_ap(struct ieee80211_hw *hw, int mcast, int mgmt)
+{
+ struct mwl8k_cmd_use_fixed_rate_ap *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_USE_AUTO_RATE);
+ cmd->multicast_rate = mcast;
+ cmd->management_rate = mgmt;
+
+ rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
return rc;
}
/*
- * CMD_SET_AID.
+ * CMD_ENABLE_SNIFFER.
*/
-#define MWL8K_FRAME_PROT_DISABLED 0x00
-#define MWL8K_FRAME_PROT_11G 0x07
-#define MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY 0x02
-#define MWL8K_FRAME_PROT_11N_HT_ALL 0x06
-
-struct mwl8k_cmd_update_set_aid {
- struct mwl8k_cmd_pkt header;
- __le16 aid;
-
- /* AP's MAC address (BSSID) */
- __u8 bssid[ETH_ALEN];
- __le16 protection_mode;
- __u8 supp_rates[14];
+struct mwl8k_cmd_enable_sniffer {
+ struct mwl8k_cmd_pkt header;
+ __le32 action;
} __attribute__((packed));
-static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int mwl8k_cmd_enable_sniffer(struct ieee80211_hw *hw, bool enable)
{
- struct mwl8k_vif *mv_vif = MWL8K_VIF(vif);
- struct ieee80211_bss_conf *info = &mv_vif->bss_info;
- struct mwl8k_cmd_update_set_aid *cmd;
- u16 prot_mode;
+ struct mwl8k_cmd_enable_sniffer *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_AID);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_ENABLE_SNIFFER);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- cmd->aid = cpu_to_le16(info->aid);
+ cmd->action = cpu_to_le32(!!enable);
- memcpy(cmd->bssid, mv_vif->bssid, ETH_ALEN);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- if (info->use_cts_prot) {
- prot_mode = MWL8K_FRAME_PROT_11G;
+ return rc;
+}
+
+/*
+ * CMD_SET_MAC_ADDR.
+ */
+struct mwl8k_cmd_set_mac_addr {
+ struct mwl8k_cmd_pkt header;
+ union {
+ struct {
+ __le16 mac_type;
+ __u8 mac_addr[ETH_ALEN];
+ } mbss;
+ __u8 mac_addr[ETH_ALEN];
+ };
+} __attribute__((packed));
+
+#define MWL8K_MAC_TYPE_PRIMARY_CLIENT 0
+#define MWL8K_MAC_TYPE_SECONDARY_CLIENT 1
+#define MWL8K_MAC_TYPE_PRIMARY_AP 2
+#define MWL8K_MAC_TYPE_SECONDARY_AP 3
+
+static int mwl8k_cmd_set_mac_addr(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *mac)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ struct mwl8k_cmd_set_mac_addr *cmd;
+ int mac_type;
+ int rc;
+
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ if (vif != NULL && vif->type == NL80211_IFTYPE_STATION) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->sta_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_CLIENT;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_CLIENT;
+ } else if (vif != NULL && vif->type == NL80211_IFTYPE_AP) {
+ if (mwl8k_vif->macid + 1 == ffs(priv->ap_macids_supported))
+ mac_type = MWL8K_MAC_TYPE_PRIMARY_AP;
+ else
+ mac_type = MWL8K_MAC_TYPE_SECONDARY_AP;
+ }
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ if (priv->ap_fw) {
+ cmd->mbss.mac_type = cpu_to_le16(mac_type);
+ memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
} else {
- switch (info->ht_operation_mode &
- IEEE80211_HT_OP_MODE_PROTECTION) {
- case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
- prot_mode = MWL8K_FRAME_PROT_11N_HT_40MHZ_ONLY;
- break;
- case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
- prot_mode = MWL8K_FRAME_PROT_11N_HT_ALL;
- break;
- default:
- prot_mode = MWL8K_FRAME_PROT_DISABLED;
- break;
- }
+ memcpy(cmd->mac_addr, mac, ETH_ALEN);
}
- cmd->protection_mode = cpu_to_le16(prot_mode);
- memcpy(cmd->supp_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_SET_RATEADAPT_MODE.
+ */
+struct mwl8k_cmd_set_rate_adapt_mode {
+ struct mwl8k_cmd_pkt header;
+ __le16 action;
+ __le16 mode;
+} __attribute__((packed));
+
+static int mwl8k_cmd_set_rateadapt_mode(struct ieee80211_hw *hw, __u16 mode)
+{
+ struct mwl8k_cmd_set_rate_adapt_mode *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATEADAPT_MODE);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le16(MWL8K_CMD_SET);
+ cmd->mode = cpu_to_le16(mode);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2639,115 +2836,255 @@ static int mwl8k_cmd_set_aid(struct ieee80211_hw *hw,
}
/*
- * CMD_SET_RATE.
+ * CMD_BSS_START.
*/
-struct mwl8k_cmd_update_rateset {
- struct mwl8k_cmd_pkt header;
- __u8 legacy_rates[14];
-
- /* Bitmap for supported MCS codes. */
- __u8 mcs_set[16];
- __u8 reserved[16];
+struct mwl8k_cmd_bss_start {
+ struct mwl8k_cmd_pkt header;
+ __le32 enable;
} __attribute__((packed));
-static int mwl8k_update_rateset(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+static int mwl8k_cmd_bss_start(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, int enable)
{
- struct mwl8k_cmd_update_rateset *cmd;
+ struct mwl8k_cmd_bss_start *cmd;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_RATE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_BSS_START);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
- memcpy(cmd->legacy_rates, mwl8k_rateids, sizeof(mwl8k_rateids));
+ cmd->enable = cpu_to_le32(enable);
- rc = mwl8k_post_cmd(hw, &cmd->header);
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
kfree(cmd);
return rc;
}
/*
- * CMD_USE_FIXED_RATE.
+ * CMD_SET_NEW_STN.
*/
-#define MWL8K_RATE_TABLE_SIZE 8
-#define MWL8K_UCAST_RATE 0
-#define MWL8K_USE_AUTO_RATE 0x0002
+struct mwl8k_cmd_set_new_stn {
+ struct mwl8k_cmd_pkt header;
+ __le16 aid;
+ __u8 mac_addr[6];
+ __le16 stn_id;
+ __le16 action;
+ __le16 rsvd;
+ __le32 legacy_rates;
+ __u8 ht_rates[4];
+ __le16 cap_info;
+ __le16 ht_capabilities_info;
+ __u8 mac_ht_param_info;
+ __u8 rev;
+ __u8 control_channel;
+ __u8 add_channel;
+ __le16 op_mode;
+ __le16 stbc;
+ __u8 add_qos_info;
+ __u8 is_qos_sta;
+ __le32 fw_sta_ptr;
+} __attribute__((packed));
+
+#define MWL8K_STA_ACTION_ADD 0
+#define MWL8K_STA_ACTION_REMOVE 2
+
+static int mwl8k_cmd_set_new_stn_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ u32 rates;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->aid = cpu_to_le16(sta->aid);
+ memcpy(cmd->mac_addr, sta->addr, ETH_ALEN);
+ cmd->stn_id = cpu_to_le16(sta->aid);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_ADD);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ cmd->legacy_rates = cpu_to_le32(rates);
+ if (sta->ht_cap.ht_supported) {
+ cmd->ht_rates[0] = sta->ht_cap.mcs.rx_mask[0];
+ cmd->ht_rates[1] = sta->ht_cap.mcs.rx_mask[1];
+ cmd->ht_rates[2] = sta->ht_cap.mcs.rx_mask[2];
+ cmd->ht_rates[3] = sta->ht_cap.mcs.rx_mask[3];
+ cmd->ht_capabilities_info = cpu_to_le16(sta->ht_cap.cap);
+ cmd->mac_ht_param_info = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ cmd->is_qos_sta = 1;
+ }
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_add_self(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
-struct mwl8k_rate_entry {
- /* Set to 1 if HT rate, 0 if legacy. */
- __le32 is_ht_rate;
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
- /* Set to 1 to use retry_count field. */
- __le32 enable_retry;
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, vif->addr, ETH_ALEN);
- /* Specified legacy rate or MCS. */
- __le32 rate;
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
- /* Number of allowed retries. */
- __le32 retry_count;
+ return rc;
+}
+
+static int mwl8k_cmd_set_new_stn_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_set_new_stn *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_NEW_STN);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ memcpy(cmd->mac_addr, addr, ETH_ALEN);
+ cmd->action = cpu_to_le16(MWL8K_STA_ACTION_REMOVE);
+
+ rc = mwl8k_post_pervif_cmd(hw, vif, &cmd->header);
+ kfree(cmd);
+
+ return rc;
+}
+
+/*
+ * CMD_UPDATE_STADB.
+ */
+struct ewc_ht_info {
+ __le16 control1;
+ __le16 control2;
+ __le16 control3;
} __attribute__((packed));
-struct mwl8k_rate_table {
- /* 1 to allow specified rate and below */
- __le32 allow_rate_drop;
- __le32 num_rates;
- struct mwl8k_rate_entry rate_entry[MWL8K_RATE_TABLE_SIZE];
+struct peer_capability_info {
+ /* Peer type - AP vs. STA. */
+ __u8 peer_type;
+
+ /* Basic 802.11 capabilities from assoc resp. */
+ __le16 basic_caps;
+
+ /* Set if peer supports 802.11n high throughput (HT). */
+ __u8 ht_support;
+
+ /* Valid if HT is supported. */
+ __le16 ht_caps;
+ __u8 extended_ht_caps;
+ struct ewc_ht_info ewc_info;
+
+ /* Legacy rate table. Intersection of our rates and peer rates. */
+ __u8 legacy_rates[12];
+
+ /* HT rate table. Intersection of our rates and peer rates. */
+ __u8 ht_rates[16];
+ __u8 pad[16];
+
+ /* If set, interoperability mode, no proprietary extensions. */
+ __u8 interop;
+ __u8 pad2;
+ __u8 station_id;
+ __le16 amsdu_enabled;
} __attribute__((packed));
-struct mwl8k_cmd_use_fixed_rate {
- struct mwl8k_cmd_pkt header;
+struct mwl8k_cmd_update_stadb {
+ struct mwl8k_cmd_pkt header;
+
+ /* See STADB_ACTION_TYPE */
__le32 action;
- struct mwl8k_rate_table rate_table;
- /* Unicast, Broadcast or Multicast */
- __le32 rate_type;
- __le32 reserved1;
- __le32 reserved2;
+ /* Peer MAC address */
+ __u8 peer_addr[ETH_ALEN];
+
+ __le32 reserved;
+
+ /* Peer info - valid during add/update. */
+ struct peer_capability_info peer_info;
} __attribute__((packed));
-static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
- u32 action, u32 rate_type, struct mwl8k_rate_table *rate_table)
+#define MWL8K_STA_DB_MODIFY_ENTRY 1
+#define MWL8K_STA_DB_DEL_ENTRY 2
+
+/* Peer Entry flags - used to define the type of the peer node */
+#define MWL8K_PEER_TYPE_ACCESSPOINT 2
+
+static int mwl8k_cmd_update_stadb_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
{
- struct mwl8k_cmd_use_fixed_rate *cmd;
- int count;
+ struct mwl8k_cmd_update_stadb *cmd;
+ struct peer_capability_info *p;
+ u32 rates;
int rc;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (cmd == NULL)
return -ENOMEM;
- cmd->header.code = cpu_to_le16(MWL8K_CMD_USE_FIXED_RATE);
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_MODIFY_ENTRY);
+ memcpy(cmd->peer_addr, sta->addr, ETH_ALEN);
+
+ p = &cmd->peer_info;
+ p->peer_type = MWL8K_PEER_TYPE_ACCESSPOINT;
+ p->basic_caps = cpu_to_le16(vif->bss_conf.assoc_capability);
+ p->ht_support = sta->ht_cap.ht_supported;
+ p->ht_caps = sta->ht_cap.cap;
+ p->extended_ht_caps = (sta->ht_cap.ampdu_factor & 3) |
+ ((sta->ht_cap.ampdu_density & 7) << 2);
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rates = sta->supp_rates[IEEE80211_BAND_2GHZ];
+ else
+ rates = sta->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ legacy_rate_mask_to_array(p->legacy_rates, rates);
+ memcpy(p->ht_rates, sta->ht_cap.mcs.rx_mask, 16);
+ p->interop = 1;
+ p->amsdu_enabled = 0;
- cmd->action = cpu_to_le32(action);
- cmd->rate_type = cpu_to_le32(rate_type);
+ rc = mwl8k_post_cmd(hw, &cmd->header);
+ kfree(cmd);
- if (rate_table != NULL) {
- /*
- * Copy over each field manually so that endian
- * conversion can be done.
- */
- cmd->rate_table.allow_rate_drop =
- cpu_to_le32(rate_table->allow_rate_drop);
- cmd->rate_table.num_rates =
- cpu_to_le32(rate_table->num_rates);
-
- for (count = 0; count < rate_table->num_rates; count++) {
- struct mwl8k_rate_entry *dst =
- &cmd->rate_table.rate_entry[count];
- struct mwl8k_rate_entry *src =
- &rate_table->rate_entry[count];
-
- dst->is_ht_rate = cpu_to_le32(src->is_ht_rate);
- dst->enable_retry = cpu_to_le32(src->enable_retry);
- dst->rate = cpu_to_le32(src->rate);
- dst->retry_count = cpu_to_le32(src->retry_count);
- }
- }
+ return rc ? rc : p->station_id;
+}
+
+static int mwl8k_cmd_update_stadb_del(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif, u8 *addr)
+{
+ struct mwl8k_cmd_update_stadb *cmd;
+ int rc;
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ cmd->header.code = cpu_to_le16(MWL8K_CMD_UPDATE_STADB);
+ cmd->header.length = cpu_to_le16(sizeof(*cmd));
+ cmd->action = cpu_to_le32(MWL8K_STA_DB_DEL_ENTRY);
+ memcpy(cmd->peer_addr, addr, ETH_ALEN);
rc = mwl8k_post_cmd(hw, &cmd->header);
kfree(cmd);
@@ -2766,19 +3103,22 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
u32 status;
status = ioread32(priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
- iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
-
if (!status)
return IRQ_NONE;
- if (status & MWL8K_A2H_INT_TX_DONE)
- tasklet_schedule(&priv->tx_reclaim_task);
+ if (status & MWL8K_A2H_INT_TX_DONE) {
+ status &= ~MWL8K_A2H_INT_TX_DONE;
+ tasklet_schedule(&priv->poll_tx_task);
+ }
if (status & MWL8K_A2H_INT_RX_READY) {
- while (rxq_process(hw, 0, 1))
- rxq_refill(hw, 0, 1);
+ status &= ~MWL8K_A2H_INT_RX_READY;
+ tasklet_schedule(&priv->poll_rx_task);
}
+ if (status)
+ iowrite32(~status, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+
if (status & MWL8K_A2H_INT_OPC_DONE) {
if (priv->hostcmd_wait != NULL)
complete(priv->hostcmd_wait);
@@ -2793,6 +3133,53 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void mwl8k_tx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+ int i;
+
+ limit = 32;
+
+ spin_lock_bh(&priv->tx_lock);
+
+ for (i = 0; i < MWL8K_TX_QUEUES; i++)
+ limit -= mwl8k_txq_reclaim(hw, i, limit, 0);
+
+ if (!priv->pending_tx_pkts && priv->tx_wait != NULL) {
+ complete(priv->tx_wait);
+ priv->tx_wait = NULL;
+ }
+
+ spin_unlock_bh(&priv->tx_lock);
+
+ if (limit) {
+ writel(~MWL8K_A2H_INT_TX_DONE,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_tx_task);
+ }
+}
+
+static void mwl8k_rx_poll(unsigned long data)
+{
+ struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
+ struct mwl8k_priv *priv = hw->priv;
+ int limit;
+
+ limit = 32;
+ limit -= rxq_process(hw, 0, limit);
+ limit -= rxq_refill(hw, 0, limit);
+
+ if (limit) {
+ writel(~MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
+ } else {
+ tasklet_schedule(&priv->poll_rx_task);
+ }
+}
+
/*
* Core driver operations.
@@ -2803,7 +3190,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
int index = skb_get_queue_mapping(skb);
int rc;
- if (priv->current_channel == NULL) {
+ if (!priv->radio_on) {
printk(KERN_DEBUG "%s: dropped TX frame since radio "
"disabled\n", wiphy_name(hw->wiphy));
dev_kfree_skb(skb);
@@ -2828,19 +3215,20 @@ static int mwl8k_start(struct ieee80211_hw *hw)
return -EIO;
}
- /* Enable tx reclaim tasklet */
- tasklet_enable(&priv->tx_reclaim_task);
+ /* Enable TX reclaim and RX tasklets. */
+ tasklet_enable(&priv->poll_tx_task);
+ tasklet_enable(&priv->poll_rx_task);
/* Enable interrupts */
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
rc = mwl8k_fw_lock(hw);
if (!rc) {
- rc = mwl8k_cmd_802_11_radio_enable(hw);
+ rc = mwl8k_cmd_radio_enable(hw);
if (!priv->ap_fw) {
if (!rc)
- rc = mwl8k_enable_sniffer(hw, 0);
+ rc = mwl8k_cmd_enable_sniffer(hw, 0);
if (!rc)
rc = mwl8k_cmd_set_pre_scan(hw);
@@ -2851,10 +3239,10 @@ static int mwl8k_start(struct ieee80211_hw *hw)
}
if (!rc)
- rc = mwl8k_cmd_setrateadaptmode(hw, 0);
+ rc = mwl8k_cmd_set_rateadapt_mode(hw, 0);
if (!rc)
- rc = mwl8k_set_wmm(hw, 0);
+ rc = mwl8k_cmd_set_wmm_mode(hw, 0);
mwl8k_fw_unlock(hw);
}
@@ -2862,7 +3250,8 @@ static int mwl8k_start(struct ieee80211_hw *hw)
if (rc) {
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
}
return rc;
@@ -2873,7 +3262,7 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
struct mwl8k_priv *priv = hw->priv;
int i;
- mwl8k_cmd_802_11_radio_disable(hw);
+ mwl8k_cmd_radio_disable(hw);
ieee80211_stop_queues(hw);
@@ -2886,36 +3275,27 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
if (priv->beacon_skb != NULL)
dev_kfree_skb(priv->beacon_skb);
- /* Stop tx reclaim tasklet */
- tasklet_disable(&priv->tx_reclaim_task);
+ /* Stop TX reclaim and RX tasklets. */
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_disable(&priv->poll_rx_task);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
}
static int mwl8k_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
struct mwl8k_vif *mwl8k_vif;
-
- /*
- * We only support one active interface at a time.
- */
- if (priv->vif != NULL)
- return -EBUSY;
-
- /*
- * We only support managed interfaces for now.
- */
- if (conf->type != NL80211_IFTYPE_STATION)
- return -EINVAL;
+ u32 macids_supported;
+ int macid;
/*
* Reject interface creation if sniffer mode is active, as
* STA operation is mutually exclusive with hardware sniffer
- * mode.
+ * mode. (Sniffer mode is only used on STA firmware.)
*/
if (priv->sniffer_enabled) {
printk(KERN_INFO "%s: unable to create STA "
@@ -2924,37 +3304,54 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
return -EINVAL;
}
- /* Clean out driver private area */
- mwl8k_vif = MWL8K_VIF(conf->vif);
- memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
- /* Set and save the mac address */
- mwl8k_set_mac_addr(hw, conf->mac_addr);
- memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);
+ switch (vif->type) {
+ case NL80211_IFTYPE_AP:
+ macids_supported = priv->ap_macids_supported;
+ break;
+ case NL80211_IFTYPE_STATION:
+ macids_supported = priv->sta_macids_supported;
+ break;
+ default:
+ return -EINVAL;
+ }
- /* Back pointer to parent config block */
- mwl8k_vif->priv = priv;
+ macid = ffs(macids_supported & ~priv->macids_used);
+ if (!macid--)
+ return -EBUSY;
- /* Set Initial sequence number to zero */
+ /* Set up driver private area. */
+ mwl8k_vif = MWL8K_VIF(vif);
+ memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
+ mwl8k_vif->vif = vif;
+ mwl8k_vif->macid = macid;
mwl8k_vif->seqno = 0;
- priv->vif = conf->vif;
- priv->current_channel = NULL;
+ /* Set the MAC address. */
+ mwl8k_cmd_set_mac_addr(hw, vif, vif->addr);
+
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_add_self(hw, vif);
+
+ priv->macids_used |= 1 << mwl8k_vif->macid;
+ list_add_tail(&mwl8k_vif->list, &priv->vif_list);
return 0;
}
static void mwl8k_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct mwl8k_priv *priv = hw->priv;
+ struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
- if (priv->vif == NULL)
- return;
+ if (priv->ap_fw)
+ mwl8k_cmd_set_new_stn_del(hw, vif, vif->addr);
- mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ mwl8k_cmd_set_mac_addr(hw, vif, "\x00\x00\x00\x00\x00\x00");
- priv->vif = NULL;
+ priv->macids_used &= ~(1 << mwl8k_vif->macid);
+ list_del(&mwl8k_vif->list);
}
static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
@@ -2964,8 +3361,7 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
int rc;
if (conf->flags & IEEE80211_CONF_IDLE) {
- mwl8k_cmd_802_11_radio_disable(hw);
- priv->current_channel = NULL;
+ mwl8k_cmd_radio_disable(hw);
return 0;
}
@@ -2973,19 +3369,17 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
if (rc)
return rc;
- rc = mwl8k_cmd_802_11_radio_enable(hw);
+ rc = mwl8k_cmd_radio_enable(hw);
if (rc)
goto out;
- rc = mwl8k_cmd_set_rf_channel(hw, conf->channel);
+ rc = mwl8k_cmd_set_rf_channel(hw, conf);
if (rc)
goto out;
- priv->current_channel = conf->channel;
-
if (conf->power_level > 18)
conf->power_level = 18;
- rc = mwl8k_cmd_802_11_rf_tx_power(hw, conf->power_level);
+ rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
if (rc)
goto out;
@@ -3003,79 +3397,160 @@ out:
return rc;
}
-static void mwl8k_bss_info_changed(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif,
- struct ieee80211_bss_conf *info,
- u32 changed)
+static void
+mwl8k_bss_info_changed_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
{
struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_vif *mwl8k_vif = MWL8K_VIF(vif);
+ u32 ap_legacy_rates;
+ u8 ap_mcs_rates[16];
int rc;
- if ((changed & BSS_CHANGED_ASSOC) == 0)
+ if (mwl8k_fw_lock(hw))
return;
- priv->capture_beacon = false;
-
- rc = mwl8k_fw_lock(hw);
- if (rc)
- return;
+ /*
+ * No need to capture a beacon if we're no longer associated.
+ */
+ if ((changed & BSS_CHANGED_ASSOC) && !vif->bss_conf.assoc)
+ priv->capture_beacon = false;
- if (info->assoc) {
- memcpy(&mwl8k_vif->bss_info, info,
- sizeof(struct ieee80211_bss_conf));
+ /*
+ * Get the AP's legacy and MCS rates.
+ */
+ if (vif->bss_conf.assoc) {
+ struct ieee80211_sta *ap;
- memcpy(mwl8k_vif->bssid, info->bssid, ETH_ALEN);
+ rcu_read_lock();
- /* Install rates */
- rc = mwl8k_update_rateset(hw, vif);
- if (rc)
+ ap = ieee80211_find_sta(vif, vif->bss_conf.bssid);
+ if (ap == NULL) {
+ rcu_read_unlock();
goto out;
+ }
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ) {
+ ap_legacy_rates = ap->supp_rates[IEEE80211_BAND_2GHZ];
+ } else {
+ ap_legacy_rates =
+ ap->supp_rates[IEEE80211_BAND_5GHZ] << 5;
+ }
+ memcpy(ap_mcs_rates, ap->ht_cap.mcs.rx_mask, 16);
- /* Turn on rate adaptation */
- rc = mwl8k_cmd_use_fixed_rate(hw, MWL8K_USE_AUTO_RATE,
- MWL8K_UCAST_RATE, NULL);
+ rcu_read_unlock();
+ }
+
+ if ((changed & BSS_CHANGED_ASSOC) && vif->bss_conf.assoc) {
+ rc = mwl8k_cmd_set_rate(hw, vif, ap_legacy_rates, ap_mcs_rates);
if (rc)
goto out;
- /* Set radio preamble */
- rc = mwl8k_set_radio_preamble(hw, info->use_short_preamble);
+ rc = mwl8k_cmd_use_fixed_rate_sta(hw);
if (rc)
goto out;
+ }
- /* Set slot time */
- rc = mwl8k_cmd_set_slot(hw, info->use_short_slot);
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ rc = mwl8k_set_radio_preamble(hw,
+ vif->bss_conf.use_short_preamble);
if (rc)
goto out;
+ }
- /* Update peer rate info */
- rc = mwl8k_cmd_update_sta_db(hw, vif,
- MWL8K_STA_DB_MODIFY_ENTRY);
+ if (changed & BSS_CHANGED_ERP_SLOT) {
+ rc = mwl8k_cmd_set_slot(hw, vif->bss_conf.use_short_slot);
if (rc)
goto out;
+ }
- /* Set AID */
- rc = mwl8k_cmd_set_aid(hw, vif);
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT |
+ BSS_CHANGED_HT))) {
+ rc = mwl8k_cmd_set_aid(hw, vif, ap_legacy_rates);
if (rc)
goto out;
+ }
+ if (vif->bss_conf.assoc &&
+ (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INT))) {
/*
* Finalize the join. Tell rx handler to process
* next beacon from our BSSID.
*/
- memcpy(priv->capture_bssid, mwl8k_vif->bssid, ETH_ALEN);
+ memcpy(priv->capture_bssid, vif->bss_conf.bssid, ETH_ALEN);
priv->capture_beacon = true;
- } else {
- rc = mwl8k_cmd_update_sta_db(hw, vif, MWL8K_STA_DB_DEL_ENTRY);
- memset(&mwl8k_vif->bss_info, 0,
- sizeof(struct ieee80211_bss_conf));
- memset(mwl8k_vif->bssid, 0, ETH_ALEN);
}
out:
mwl8k_fw_unlock(hw);
}
+static void
+mwl8k_bss_info_changed_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ int rc;
+
+ if (mwl8k_fw_lock(hw))
+ return;
+
+ if (changed & BSS_CHANGED_ERP_PREAMBLE) {
+ rc = mwl8k_set_radio_preamble(hw,
+ vif->bss_conf.use_short_preamble);
+ if (rc)
+ goto out;
+ }
+
+ if (changed & BSS_CHANGED_BASIC_RATES) {
+ int idx;
+ int rate;
+
+ /*
+ * Use lowest supported basic rate for multicasts
+ * and management frames (such as probe responses --
+ * beacons will always go out at 1 Mb/s).
+ */
+ idx = ffs(vif->bss_conf.basic_rates);
+ if (idx)
+ idx--;
+
+ if (hw->conf.channel->band == IEEE80211_BAND_2GHZ)
+ rate = mwl8k_rates_24[idx].hw_value;
+ else
+ rate = mwl8k_rates_50[idx].hw_value;
+
+ mwl8k_cmd_use_fixed_rate_ap(hw, rate, rate);
+ }
+
+ if (changed & (BSS_CHANGED_BEACON_INT | BSS_CHANGED_BEACON)) {
+ struct sk_buff *skb;
+
+ skb = ieee80211_beacon_get(hw, vif);
+ if (skb != NULL) {
+ mwl8k_cmd_set_beacon(hw, vif, skb->data, skb->len);
+ kfree_skb(skb);
+ }
+ }
+
+ if (changed & BSS_CHANGED_BEACON_ENABLED)
+ mwl8k_cmd_bss_start(hw, vif, info->enable_beacon);
+
+out:
+ mwl8k_fw_unlock(hw);
+}
+
+static void
+mwl8k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ struct ieee80211_bss_conf *info, u32 changed)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if (!priv->ap_fw)
+ mwl8k_bss_info_changed_sta(hw, vif, info, changed);
+ else
+ mwl8k_bss_info_changed_ap(hw, vif, info, changed);
+}
+
static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
int mc_count, struct dev_addr_list *mclist)
{
@@ -3105,7 +3580,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
* operation, so refuse to enable sniffer mode if a STA
* interface is active.
*/
- if (priv->vif != NULL) {
+ if (!list_empty(&priv->vif_list)) {
if (net_ratelimit())
printk(KERN_INFO "%s: not enabling sniffer "
"mode because STA interface is active\n",
@@ -3114,7 +3589,7 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
}
if (!priv->sniffer_enabled) {
- if (mwl8k_enable_sniffer(hw, 1))
+ if (mwl8k_cmd_enable_sniffer(hw, 1))
return 0;
priv->sniffer_enabled = true;
}
@@ -3126,6 +3601,14 @@ mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
return 1;
}
+static struct mwl8k_vif *mwl8k_first_vif(struct mwl8k_priv *priv)
+{
+ if (!list_empty(&priv->vif_list))
+ return list_entry(priv->vif_list.next, struct mwl8k_vif, list);
+
+ return NULL;
+}
+
static void mwl8k_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags,
@@ -3163,7 +3646,7 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
}
if (priv->sniffer_enabled) {
- mwl8k_enable_sniffer(hw, 0);
+ mwl8k_cmd_enable_sniffer(hw, 0);
priv->sniffer_enabled = false;
}
@@ -3174,7 +3657,8 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
*/
mwl8k_cmd_set_pre_scan(hw);
} else {
- u8 *bssid;
+ struct mwl8k_vif *mwl8k_vif;
+ const u8 *bssid;
/*
* Enable the BSS filter.
@@ -3184,9 +3668,11 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
* (where the OUI part needs to be nonzero for
* the BSSID to be accepted by POST_SCAN).
*/
- bssid = "\x01\x00\x00\x00\x00\x00";
- if (priv->vif != NULL)
- bssid = MWL8K_VIF(priv->vif)->bssid;
+ mwl8k_vif = mwl8k_first_vif(priv);
+ if (mwl8k_vif != NULL)
+ bssid = mwl8k_vif->vif->bss_conf.bssid;
+ else
+ bssid = "\x01\x00\x00\x00\x00\x00";
mwl8k_cmd_set_post_scan(hw, bssid);
}
@@ -3213,7 +3699,39 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
static int mwl8k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
- return mwl8k_rts_threshold(hw, MWL8K_CMD_SET, value);
+ return mwl8k_cmd_set_rts_threshold(hw, value);
+}
+
+static int mwl8k_sta_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mwl8k_priv *priv = hw->priv;
+
+ if (priv->ap_fw)
+ return mwl8k_cmd_set_new_stn_del(hw, vif, sta->addr);
+ else
+ return mwl8k_cmd_update_stadb_del(hw, vif, sta->addr);
+}
+
+static int mwl8k_sta_add(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct mwl8k_priv *priv = hw->priv;
+ int ret;
+
+ if (!priv->ap_fw) {
+ ret = mwl8k_cmd_update_stadb_add(hw, vif, sta);
+ if (ret >= 0) {
+ MWL8K_STA(sta)->peer_id = ret;
+ return 0;
+ }
+
+ return ret;
+ }
+
+ return mwl8k_cmd_set_new_stn_add(hw, vif, sta);
}
static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
@@ -3225,14 +3743,14 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
rc = mwl8k_fw_lock(hw);
if (!rc) {
if (!priv->wmm_enabled)
- rc = mwl8k_set_wmm(hw, 1);
+ rc = mwl8k_cmd_set_wmm_mode(hw, 1);
if (!rc)
- rc = mwl8k_set_edca_params(hw, queue,
- params->cw_min,
- params->cw_max,
- params->aifs,
- params->txop);
+ rc = mwl8k_cmd_set_edca_params(hw, queue,
+ params->cw_min,
+ params->cw_max,
+ params->aifs,
+ params->txop);
mwl8k_fw_unlock(hw);
}
@@ -3240,28 +3758,26 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
return rc;
}
-static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
+static int mwl8k_get_stats(struct ieee80211_hw *hw,
+ struct ieee80211_low_level_stats *stats)
{
- struct mwl8k_priv *priv = hw->priv;
- struct mwl8k_tx_queue *txq;
- int index;
-
- spin_lock_bh(&priv->tx_lock);
- for (index = 0; index < MWL8K_TX_QUEUES; index++) {
- txq = priv->txq + index;
- memcpy(&stats[index], &txq->stats,
- sizeof(struct ieee80211_tx_queue_stats));
- }
- spin_unlock_bh(&priv->tx_lock);
-
- return 0;
+ return mwl8k_cmd_get_stat(hw, stats);
}
-static int mwl8k_get_stats(struct ieee80211_hw *hw,
- struct ieee80211_low_level_stats *stats)
+static int
+mwl8k_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
+ enum ieee80211_ampdu_mlme_action action,
+ struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
- return mwl8k_cmd_802_11_get_stat(hw, stats);
+ switch (action) {
+ case IEEE80211_AMPDU_RX_START:
+ case IEEE80211_AMPDU_RX_STOP:
+ if (!(hw->flags & IEEE80211_HW_AMPDU_AGGREGATION))
+ return -ENOTSUPP;
+ return 0;
+ default:
+ return -ENOTSUPP;
+ }
}
static const struct ieee80211_ops mwl8k_ops = {
@@ -3275,67 +3791,72 @@ static const struct ieee80211_ops mwl8k_ops = {
.prepare_multicast = mwl8k_prepare_multicast,
.configure_filter = mwl8k_configure_filter,
.set_rts_threshold = mwl8k_set_rts_threshold,
+ .sta_add = mwl8k_sta_add,
+ .sta_remove = mwl8k_sta_remove,
.conf_tx = mwl8k_conf_tx,
- .get_tx_stats = mwl8k_get_tx_stats,
.get_stats = mwl8k_get_stats,
+ .ampdu_action = mwl8k_ampdu_action,
};
-static void mwl8k_tx_reclaim_handler(unsigned long data)
-{
- int i;
- struct ieee80211_hw *hw = (struct ieee80211_hw *) data;
- struct mwl8k_priv *priv = hw->priv;
-
- spin_lock_bh(&priv->tx_lock);
- for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 0);
-
- if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
- complete(priv->tx_wait);
- priv->tx_wait = NULL;
- }
- spin_unlock_bh(&priv->tx_lock);
-}
-
static void mwl8k_finalize_join_worker(struct work_struct *work)
{
struct mwl8k_priv *priv =
container_of(work, struct mwl8k_priv, finalize_join_worker);
struct sk_buff *skb = priv->beacon_skb;
- u8 dtim = MWL8K_VIF(priv->vif)->bss_info.dtim_period;
+ struct ieee80211_mgmt *mgmt = (void *)skb->data;
+ int len = skb->len - offsetof(struct ieee80211_mgmt, u.beacon.variable);
+ const u8 *tim = cfg80211_find_ie(WLAN_EID_TIM,
+ mgmt->u.beacon.variable, len);
+ int dtim_period = 1;
- mwl8k_finalize_join(priv->hw, skb->data, skb->len, dtim);
- dev_kfree_skb(skb);
+ if (tim && tim[1] >= 2)
+ dtim_period = tim[3];
+
+ mwl8k_cmd_finalize_join(priv->hw, skb->data, skb->len, dtim_period);
+ dev_kfree_skb(skb);
priv->beacon_skb = NULL;
}
enum {
- MWL8687 = 0,
+ MWL8363 = 0,
+ MWL8687,
MWL8366,
};
static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
- {
+ [MWL8363] = {
+ .part_name = "88w8363",
+ .helper_image = "mwl8k/helper_8363.fw",
+ .fw_image = "mwl8k/fmimage_8363.fw",
+ },
+ [MWL8687] = {
.part_name = "88w8687",
.helper_image = "mwl8k/helper_8687.fw",
.fw_image = "mwl8k/fmimage_8687.fw",
- .rxd_ops = &rxd_8687_ops,
- .modes = BIT(NL80211_IFTYPE_STATION),
},
- {
+ [MWL8366] = {
.part_name = "88w8366",
.helper_image = "mwl8k/helper_8366.fw",
.fw_image = "mwl8k/fmimage_8366.fw",
- .rxd_ops = &rxd_8366_ops,
- .modes = 0,
+ .ap_rxd_ops = &rxd_8366_ap_ops,
},
};
+MODULE_FIRMWARE("mwl8k/helper_8363.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8363.fw");
+MODULE_FIRMWARE("mwl8k/helper_8687.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
+MODULE_FIRMWARE("mwl8k/helper_8366.fw");
+MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
+
static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
+ { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
+ { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
{ PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
+ { PCI_VDEVICE(MARVELL, 0x2a43), .driver_data = MWL8366, },
{ },
};
MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
@@ -3354,6 +3875,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
printed_version = 1;
}
+
rc = pci_enable_device(pdev);
if (rc) {
printk(KERN_ERR "%s: Cannot enable new PCI device\n",
@@ -3370,6 +3892,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
pci_set_master(pdev);
+
hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
if (hw == NULL) {
printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
@@ -3377,17 +3900,14 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_reg;
}
+ SET_IEEE80211_DEV(hw, &pdev->dev);
+ pci_set_drvdata(pdev, hw);
+
priv = hw->priv;
priv->hw = hw;
priv->pdev = pdev;
priv->device_info = &mwl8k_info_tbl[id->driver_data];
- priv->rxd_ops = priv->device_info->rxd_ops;
- priv->sniffer_enabled = false;
- priv->wmm_enabled = false;
- priv->pending_tx_pkts = 0;
- SET_IEEE80211_DEV(hw, &pdev->dev);
- pci_set_drvdata(pdev, hw);
priv->sram = pci_iomap(pdev, 0, 0x10000);
if (priv->sram == NULL) {
@@ -3410,16 +3930,46 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
}
}
- memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
- priv->band.band = IEEE80211_BAND_2GHZ;
- priv->band.channels = priv->channels;
- priv->band.n_channels = ARRAY_SIZE(mwl8k_channels);
- priv->band.bitrates = priv->rates;
- priv->band.n_bitrates = ARRAY_SIZE(mwl8k_rates);
- hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- BUILD_BUG_ON(sizeof(priv->rates) != sizeof(mwl8k_rates));
- memcpy(priv->rates, mwl8k_rates, sizeof(mwl8k_rates));
+ /* Reset firmware and hardware */
+ mwl8k_hw_reset(priv);
+
+ /* Ask userland hotplug daemon for the device firmware */
+ rc = mwl8k_request_firmware(priv);
+ if (rc) {
+ printk(KERN_ERR "%s: Firmware files not found\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+
+ /* Load firmware into hardware */
+ rc = mwl8k_load_firmware(hw);
+ if (rc) {
+ printk(KERN_ERR "%s: Cannot start firmware\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+
+ /* Reclaim memory once firmware is successfully loaded */
+ mwl8k_release_firmware(priv);
+
+
+ if (priv->ap_fw) {
+ priv->rxd_ops = priv->device_info->ap_rxd_ops;
+ if (priv->rxd_ops == NULL) {
+ printk(KERN_ERR "%s: Driver does not have AP "
+ "firmware image support for this hardware\n",
+ wiphy_name(hw->wiphy));
+ goto err_stop_firmware;
+ }
+ } else {
+ priv->rxd_ops = &rxd_sta_ops;
+ }
+
+ priv->sniffer_enabled = false;
+ priv->wmm_enabled = false;
+ priv->pending_tx_pkts = 0;
+
/*
* Extra headroom is the size of the required DMA header
@@ -3432,12 +3982,13 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
hw->queues = MWL8K_TX_QUEUES;
- hw->wiphy->interface_modes = priv->device_info->modes;
-
/* Set rssi and noise values to dBm */
hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
hw->vif_data_size = sizeof(struct mwl8k_vif);
- priv->vif = NULL;
+ hw->sta_data_size = sizeof(struct mwl8k_sta);
+
+ priv->macids_used = 0;
+ INIT_LIST_HEAD(&priv->vif_list);
/* Set default radio state and preamble */
priv->radio_on = 0;
@@ -3446,19 +3997,20 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
/* Finalize join worker */
INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
- /* TX reclaim tasklet */
- tasklet_init(&priv->tx_reclaim_task,
- mwl8k_tx_reclaim_handler, (unsigned long)hw);
- tasklet_disable(&priv->tx_reclaim_task);
+ /* TX reclaim and RX tasklets. */
+ tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_tx_task);
+ tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
+ tasklet_disable(&priv->poll_rx_task);
/* Power management cookie */
priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
if (priv->cookie == NULL)
- goto err_iounmap;
+ goto err_stop_firmware;
rc = mwl8k_rxq_init(hw, 0);
if (rc)
- goto err_iounmap;
+ goto err_free_cookie;
rxq_refill(hw, 0, INT_MAX);
mutex_init(&priv->fw_mutex);
@@ -3478,7 +4030,8 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS);
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
- iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
+ iowrite32(MWL8K_A2H_INT_TX_DONE | MWL8K_A2H_INT_RX_READY,
+ priv->regs + MWL8K_HIU_A2H_INTERRUPT_CLEAR_SEL);
iowrite32(0xffffffff, priv->regs + MWL8K_HIU_A2H_INTERRUPT_STATUS_MASK);
rc = request_irq(priv->pdev->irq, mwl8k_interrupt,
@@ -3489,31 +4042,9 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
goto err_free_queues;
}
- /* Reset firmware and hardware */
- mwl8k_hw_reset(priv);
-
- /* Ask userland hotplug daemon for the device firmware */
- rc = mwl8k_request_firmware(priv);
- if (rc) {
- printk(KERN_ERR "%s: Firmware files not found\n",
- wiphy_name(hw->wiphy));
- goto err_free_irq;
- }
-
- /* Load firmware into hardware */
- rc = mwl8k_load_firmware(hw);
- if (rc) {
- printk(KERN_ERR "%s: Cannot start firmware\n",
- wiphy_name(hw->wiphy));
- goto err_stop_firmware;
- }
-
- /* Reclaim memory once firmware is successfully loaded */
- mwl8k_release_firmware(priv);
-
/*
* Temporarily enable interrupts. Initial firmware host
- * commands use interrupts and avoids polling. Disable
+ * commands use interrupts and avoid polling. Disable
* interrupts when done.
*/
iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
@@ -3529,22 +4060,29 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot initialise firmware\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
+ hw->wiphy->interface_modes = 0;
+ if (priv->ap_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
+ if (priv->sta_macids_supported)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
+
+
/* Turn radio off */
- rc = mwl8k_cmd_802_11_radio_disable(hw);
+ rc = mwl8k_cmd_radio_disable(hw);
if (rc) {
printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
/* Clear MAC address */
- rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
+ rc = mwl8k_cmd_set_mac_addr(hw, NULL, "\x00\x00\x00\x00\x00\x00");
if (rc) {
printk(KERN_ERR "%s: Cannot clear MAC address\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_irq;
}
/* Disable interrupts */
@@ -3555,7 +4093,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
if (rc) {
printk(KERN_ERR "%s: Cannot register device\n",
wiphy_name(hw->wiphy));
- goto err_stop_firmware;
+ goto err_free_queues;
}
printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
@@ -3567,10 +4105,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
return 0;
-err_stop_firmware:
- mwl8k_hw_reset(priv);
- mwl8k_release_firmware(priv);
-
err_free_irq:
iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
free_irq(priv->pdev->irq, hw);
@@ -3580,11 +4114,16 @@ err_free_queues:
mwl8k_txq_deinit(hw, i);
mwl8k_rxq_deinit(hw, 0);
-err_iounmap:
+err_free_cookie:
if (priv->cookie != NULL)
pci_free_consistent(priv->pdev, 4,
priv->cookie, priv->cookie_dma);
+err_stop_firmware:
+ mwl8k_hw_reset(priv);
+ mwl8k_release_firmware(priv);
+
+err_iounmap:
if (priv->regs != NULL)
pci_iounmap(pdev, priv->regs);
@@ -3622,15 +4161,16 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
ieee80211_unregister_hw(hw);
- /* Remove tx reclaim tasklet */
- tasklet_kill(&priv->tx_reclaim_task);
+ /* Remove TX reclaim and RX tasklets. */
+ tasklet_kill(&priv->poll_tx_task);
+ tasklet_kill(&priv->poll_rx_task);
/* Stop hardware */
mwl8k_hw_reset(priv);
/* Return all skbs to mac80211 */
for (i = 0; i < MWL8K_TX_QUEUES; i++)
- mwl8k_txq_reclaim(hw, i, 1);
+ mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
for (i = 0; i < MWL8K_TX_QUEUES; i++)
mwl8k_txq_deinit(hw, i);
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 404830f47ab..e6369242e49 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -1028,7 +1028,7 @@ int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx)
}
int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
- struct dev_addr_list *mc_list,
+ struct net_device *dev,
int mc_count, int promisc)
{
hermes_t *hw = &priv->hw;
@@ -1049,24 +1049,16 @@ int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
* group address if either we want to multicast, or if we were
* multicasting and want to stop */
if (!promisc && (mc_count || priv->mc_count)) {
- struct dev_mc_list *p = mc_list;
+ struct dev_mc_list *p;
struct hermes_multicast mclist;
- int i;
+ int i = 0;
- for (i = 0; i < mc_count; i++) {
- /* paranoia: is list shorter than mc_count? */
- BUG_ON(!p);
- /* paranoia: bad address size in list? */
- BUG_ON(p->dmi_addrlen != ETH_ALEN);
-
- memcpy(mclist.addr[i], p->dmi_addr, ETH_ALEN);
- p = p->next;
+ netdev_for_each_mc_addr(p, dev) {
+ if (i == mc_count)
+ break;
+ memcpy(mclist.addr[i++], p->dmi_addr, ETH_ALEN);
}
- if (p)
- printk(KERN_WARNING "%s: Multicast list is "
- "longer than mc_count\n", priv->ndev->name);
-
err = hermes_write_ltv(hw, USER_BAP,
HERMES_RID_CNFGROUPADDRESSES,
HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN),
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index e2f7fdc4d45..9799a1d14a6 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -43,7 +43,7 @@ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx,
u8 *tsc, size_t tsc_len);
int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx);
int __orinoco_hw_set_multicast_list(struct orinoco_private *priv,
- struct dev_addr_list *mc_list,
+ struct net_device *dev,
int mc_count, int promisc);
int orinoco_hw_get_essid(struct orinoco_private *priv, int *active,
char buf[IW_ESSID_MAX_SIZE+1]);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 753a1804eee..b42634c614b 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -1668,16 +1668,15 @@ __orinoco_set_multicast_list(struct net_device *dev)
/* The Hermes doesn't seem to have an allmulti mode, so we go
* into promiscuous mode and let the upper levels deal. */
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > MAX_MULTICAST(priv))) {
+ (netdev_mc_count(dev) > MAX_MULTICAST(priv))) {
promisc = 1;
mc_count = 0;
} else {
promisc = 0;
- mc_count = dev->mc_count;
+ mc_count = netdev_mc_count(dev);
}
- err = __orinoco_hw_set_multicast_list(priv, dev->mc_list, mc_count,
- promisc);
+ err = __orinoco_hw_set_multicast_list(priv, dev, mc_count, promisc);
return err;
}
diff --git a/drivers/net/wireless/orinoco/orinoco_cs.c b/drivers/net/wireless/orinoco/orinoco_cs.c
index f27bb8367c9..1d4ada188ed 100644
--- a/drivers/net/wireless/orinoco/orinoco_cs.c
+++ b/drivers/net/wireless/orinoco/orinoco_cs.c
@@ -407,7 +407,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3),
PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5),
PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2),
- PCMCIA_DEVICE_PROD_ID123("AIRVAST", "IEEE 802.11b Wireless PCMCIA Card", "HFA3863", 0xea569531, 0x4bcb9645, 0x355cb092),
PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f),
PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842),
PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e),
@@ -417,7 +416,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18),
PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90),
PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b),
- PCMCIA_DEVICE_PROD_ID123("corega", "WL PCCL-11", "ISL37300P", 0x0a21501a, 0x59868926, 0xc9049a39),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584),
PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9),
PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae),
@@ -432,7 +430,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18),
PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77),
PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf),
- PCMCIA_DEVICE_PROD_ID123("Intersil", "PRISM Freedom PCMCIA Adapter", "ISL37100P", 0x4b801a17, 0xf222ec2d, 0x630d52b2),
PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92),
PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395),
PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a),
@@ -445,7 +442,6 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767),
PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6),
PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed),
- PCMCIA_DEVICE_PROD_ID123("PCMCIA", "11M WLAN Card v2.5", "ISL37300P", 0x281f1c5d, 0x6e440487, 0xc9049a39),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264),
PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178),
PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9),
@@ -454,8 +450,11 @@ static struct pcmcia_device_id orinoco_cs_ids[] = {
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757),
PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a),
PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e),
- PCMCIA_DEVICE_PROD_ID123("The Linksys Group, Inc.", "Instant Wireless Network PC Card", "ISL37300P", 0xa5f472c2, 0x590eb502, 0xc9049a39),
PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee),
+ PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092),
+ PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2),
+ PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b),
+ PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids);
diff --git a/drivers/net/wireless/orinoco/orinoco_nortel.c b/drivers/net/wireless/orinoco/orinoco_nortel.c
index c13a4c38341..075f446b313 100644
--- a/drivers/net/wireless/orinoco/orinoco_nortel.c
+++ b/drivers/net/wireless/orinoco/orinoco_nortel.c
@@ -274,7 +274,7 @@ static void __devexit orinoco_nortel_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_nortel_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_nortel_id_table) = {
/* Nortel emobility PCI */
{0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,},
/* Symbol LA-4123 PCI */
diff --git a/drivers/net/wireless/orinoco/orinoco_pci.c b/drivers/net/wireless/orinoco/orinoco_pci.c
index fea7781948e..bda5317cc59 100644
--- a/drivers/net/wireless/orinoco/orinoco_pci.c
+++ b/drivers/net/wireless/orinoco/orinoco_pci.c
@@ -212,7 +212,7 @@ static void __devexit orinoco_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_pci_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_pci_id_table) = {
/* Intersil Prism 3 */
{0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,},
/* Intersil Prism 2.5 */
diff --git a/drivers/net/wireless/orinoco/orinoco_plx.c b/drivers/net/wireless/orinoco/orinoco_plx.c
index 3f2942a1e4f..e0d5874ab42 100644
--- a/drivers/net/wireless/orinoco/orinoco_plx.c
+++ b/drivers/net/wireless/orinoco/orinoco_plx.c
@@ -310,7 +310,7 @@ static void __devexit orinoco_plx_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_plx_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_plx_id_table) = {
{0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */
{0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */
{0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? */
diff --git a/drivers/net/wireless/orinoco/orinoco_tmd.c b/drivers/net/wireless/orinoco/orinoco_tmd.c
index d3452548cc7..88cbc7902aa 100644
--- a/drivers/net/wireless/orinoco/orinoco_tmd.c
+++ b/drivers/net/wireless/orinoco/orinoco_tmd.c
@@ -203,7 +203,7 @@ static void __devexit orinoco_tmd_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static struct pci_device_id orinoco_tmd_id_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(orinoco_tmd_id_table) = {
{0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */
{0,},
};
diff --git a/drivers/net/wireless/p54/main.c b/drivers/net/wireless/p54/main.c
index 18012dbfb45..4f752a21495 100644
--- a/drivers/net/wireless/p54/main.c
+++ b/drivers/net/wireless/p54/main.c
@@ -33,21 +33,29 @@ MODULE_DESCRIPTION("Softmac Prism54 common code");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54common");
+static int p54_sta_add_remove(struct ieee80211_hw *hw,
+ struct ieee80211_vif *vif,
+ struct ieee80211_sta *sta)
+{
+ struct p54_common *priv = hw->priv;
+
+ /*
+ * Notify the firmware that we no longer want or need
+ * to buffer frames for this station.
+ */
+
+ p54_sta_unlock(priv, sta->addr);
+
+ return 0;
+}
+
static void p54_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
enum sta_notify_cmd notify_cmd,
struct ieee80211_sta *sta)
{
struct p54_common *priv = dev->priv;
- switch (notify_cmd) {
- case STA_NOTIFY_ADD:
- case STA_NOTIFY_REMOVE:
- /*
- * Notify the firmware that we don't want or we don't
- * need to buffer frames for this station anymore.
- */
- p54_sta_unlock(priv, sta->addr);
- break;
+ switch (notify_cmd) {
case STA_NOTIFY_AWAKE:
/* update the firmware's filter table */
p54_sta_unlock(priv, sta->addr);
@@ -216,7 +224,7 @@ static void p54_stop(struct ieee80211_hw *dev)
}
static int p54_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
@@ -226,28 +234,28 @@ static int p54_add_interface(struct ieee80211_hw *dev,
return -EOPNOTSUPP;
}
- priv->vif = conf->vif;
+ priv->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
- priv->mode = conf->type;
+ priv->mode = vif->type;
break;
default:
mutex_unlock(&priv->conf_mutex);
return -EOPNOTSUPP;
}
- memcpy(priv->mac_addr, conf->mac_addr, ETH_ALEN);
+ memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
p54_setup_mac(priv);
mutex_unlock(&priv->conf_mutex);
return 0;
}
static void p54_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct p54_common *priv = dev->priv;
@@ -358,16 +366,6 @@ static int p54_get_stats(struct ieee80211_hw *dev,
return 0;
}
-static int p54_get_tx_stats(struct ieee80211_hw *dev,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct p54_common *priv = dev->priv;
-
- memcpy(stats, &priv->tx_stats[P54_QUEUE_DATA],
- sizeof(stats[0]) * dev->queues);
- return 0;
-}
-
static void p54_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *info,
@@ -516,13 +514,14 @@ static const struct ieee80211_ops p54_ops = {
.remove_interface = p54_remove_interface,
.set_tim = p54_set_tim,
.sta_notify = p54_sta_notify,
+ .sta_add = p54_sta_add_remove,
+ .sta_remove = p54_sta_add_remove,
.set_key = p54_set_key,
.config = p54_config,
.bss_info_changed = p54_bss_info_changed,
.configure_filter = p54_configure_filter,
.conf_tx = p54_conf_tx,
.get_stats = p54_get_stats,
- .get_tx_stats = p54_get_tx_stats
};
struct ieee80211_hw *p54_init_common(size_t priv_data_len)
diff --git a/drivers/net/wireless/p54/p54.h b/drivers/net/wireless/p54/p54.h
index 1afc39410e8..43a3b2ead81 100644
--- a/drivers/net/wireless/p54/p54.h
+++ b/drivers/net/wireless/p54/p54.h
@@ -157,6 +157,12 @@ struct p54_led_dev {
#endif /* CONFIG_P54_LEDS */
+struct p54_tx_queue_stats {
+ unsigned int len;
+ unsigned int limit;
+ unsigned int count;
+};
+
struct p54_common {
struct ieee80211_hw *hw;
struct ieee80211_vif *vif;
@@ -183,7 +189,7 @@ struct p54_common {
/* (e)DCF / QOS state */
bool use_short_slot;
spinlock_t tx_stats_lock;
- struct ieee80211_tx_queue_stats tx_stats[8];
+ struct p54_tx_queue_stats tx_stats[8];
struct p54_edcf_queue_param qos_params[8];
/* Radio data */
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c
index a72f7c2577d..ed4bdffdd63 100644
--- a/drivers/net/wireless/p54/p54pci.c
+++ b/drivers/net/wireless/p54/p54pci.c
@@ -31,7 +31,7 @@ MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");
-static struct pci_device_id p54p_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(p54p_table) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{ PCI_DEVICE(0x1260, 0x3890) },
/* 3COM 3CRWE154G72 Wireless LAN adapter */
@@ -157,6 +157,14 @@ static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
skb_tail_pointer(skb),
priv->common.rx_mtu + 32,
PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ dev_kfree_skb_any(skb);
+ dev_err(&priv->pdev->dev,
+ "RX DMA Mapping error\n");
+ break;
+ }
+
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = 0; // FIXME: necessary?
desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
@@ -226,14 +234,14 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf);
}
-/* caller must hold priv->lock */
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
int ring_index, struct p54p_desc *ring, u32 ring_limit,
- void **tx_buf)
+ struct sk_buff **tx_buf)
{
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
struct p54p_desc *desc;
+ struct sk_buff *skb;
u32 idx, i;
i = (*index) % ring_limit;
@@ -242,9 +250,8 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
while (i != idx) {
desc = &ring[i];
- if (tx_buf[i])
- if (FREE_AFTER_TX((struct sk_buff *) tx_buf[i]))
- p54_free_skb(dev, tx_buf[i]);
+
+ skb = tx_buf[i];
tx_buf[i] = NULL;
pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
@@ -255,17 +262,28 @@ static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
desc->len = 0;
desc->flags = 0;
+ if (skb && FREE_AFTER_TX(skb))
+ p54_free_skb(dev, skb);
+
i++;
i %= ring_limit;
}
}
-static void p54p_rx_tasklet(unsigned long dev_id)
+static void p54p_tasklet(unsigned long dev_id)
{
struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
+ p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
+ ARRAY_SIZE(ring_control->tx_mgmt),
+ priv->tx_buf_mgmt);
+
+ p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
+ ARRAY_SIZE(ring_control->tx_data),
+ priv->tx_buf_data);
+
p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);
@@ -280,59 +298,49 @@ static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
struct ieee80211_hw *dev = dev_id;
struct p54p_priv *priv = dev->priv;
- struct p54p_ring_control *ring_control = priv->ring_control;
__le32 reg;
- spin_lock(&priv->lock);
reg = P54P_READ(int_ident);
if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) {
- spin_unlock(&priv->lock);
- return IRQ_HANDLED;
+ goto out;
}
-
P54P_WRITE(int_ack, reg);
reg &= P54P_READ(int_enable);
- if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) {
- p54p_check_tx_ring(dev, &priv->tx_idx_mgmt,
- 3, ring_control->tx_mgmt,
- ARRAY_SIZE(ring_control->tx_mgmt),
- priv->tx_buf_mgmt);
-
- p54p_check_tx_ring(dev, &priv->tx_idx_data,
- 1, ring_control->tx_data,
- ARRAY_SIZE(ring_control->tx_data),
- priv->tx_buf_data);
-
- tasklet_schedule(&priv->rx_tasklet);
-
- } else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
+ if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
+ tasklet_schedule(&priv->tasklet);
+ else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
complete(&priv->boot_comp);
- spin_unlock(&priv->lock);
-
+out:
return reg ? IRQ_HANDLED : IRQ_NONE;
}
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
+ unsigned long flags;
struct p54p_priv *priv = dev->priv;
struct p54p_ring_control *ring_control = priv->ring_control;
- unsigned long flags;
struct p54p_desc *desc;
dma_addr_t mapping;
u32 device_idx, idx, i;
spin_lock_irqsave(&priv->lock, flags);
-
device_idx = le32_to_cpu(ring_control->device_idx[1]);
idx = le32_to_cpu(ring_control->host_idx[1]);
i = idx % ARRAY_SIZE(ring_control->tx_data);
- priv->tx_buf_data[i] = skb;
mapping = pci_map_single(priv->pdev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ p54_free_skb(dev, skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return;
+ }
+ priv->tx_buf_data[i] = skb;
+
desc = &ring_control->tx_data[i];
desc->host_addr = cpu_to_le32(mapping);
desc->device_addr = ((struct p54_hdr *)skb->data)->req_id;
@@ -354,14 +362,14 @@ static void p54p_stop(struct ieee80211_hw *dev)
unsigned int i;
struct p54p_desc *desc;
- tasklet_kill(&priv->rx_tasklet);
-
P54P_WRITE(int_enable, cpu_to_le32(0));
P54P_READ(int_enable);
udelay(10);
free_irq(priv->pdev->irq, dev);
+ tasklet_kill(&priv->tasklet);
+
P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
@@ -545,7 +553,7 @@ static int __devinit p54p_probe(struct pci_dev *pdev,
priv->common.tx = p54p_tx;
spin_lock_init(&priv->lock);
- tasklet_init(&priv->rx_tasklet, p54p_rx_tasklet, (unsigned long)dev);
+ tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);
err = request_firmware(&priv->firmware, "isl3886pci",
&priv->pdev->dev);
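
Both hunks above add the same guard after pci_map_single(): the returned bus address must be checked with pci_dma_mapping_error() before it is written into a descriptor. Stripped of the ring bookkeeping, the pattern looks roughly like this (error handling simplified for illustration):

	static int map_skb_for_tx(struct pci_dev *pdev, struct sk_buff *skb,
				  dma_addr_t *mapping)
	{
		*mapping = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(pdev, *mapping)) {
			/* never hand an invalid bus address to the device */
			dev_kfree_skb_any(skb);
			dev_err(&pdev->dev, "TX DMA mapping error\n");
			return -ENOMEM;
		}
		return 0;
	}
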
diff --git a/drivers/net/wireless/p54/p54pci.h b/drivers/net/wireless/p54/p54pci.h
index fbb683953fb..2feead617a3 100644
--- a/drivers/net/wireless/p54/p54pci.h
+++ b/drivers/net/wireless/p54/p54pci.h
@@ -92,7 +92,7 @@ struct p54p_priv {
struct p54_common common;
struct pci_dev *pdev;
struct p54p_csr __iomem *map;
- struct tasklet_struct rx_tasklet;
+ struct tasklet_struct tasklet;
const struct firmware *firmware;
spinlock_t lock;
struct p54p_ring_control *ring_control;
@@ -101,8 +101,8 @@ struct p54p_priv {
u32 rx_idx_mgmt, tx_idx_mgmt;
struct sk_buff *rx_buf_data[8];
struct sk_buff *rx_buf_mgmt[4];
- void *tx_buf_data[32];
- void *tx_buf_mgmt[4];
+ struct sk_buff *tx_buf_data[32];
+ struct sk_buff *tx_buf_mgmt[4];
struct completion boot_comp;
};
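
With the renamed tasklet now reaping both TX rings as well as RX, the hard interrupt handler is reduced to acknowledging the interrupt and scheduling deferred work; ring processing no longer runs in IRQ context under priv->lock. A rough sketch of the resulting shape (illustrative only):

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		struct ieee80211_hw *dev = dev_id;
		struct p54p_priv *priv = dev->priv;

		/* ack the hardware, then defer all ring processing */
		tasklet_schedule(&priv->tasklet);
		return IRQ_HANDLED;
	}
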
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index 92af9b96bb7..b3c4fbd80d8 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -36,6 +36,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
/* Version 1 devices (pci chip + net2280) */
{USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
{USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */
+ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */
{USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */
{USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */
{USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */
@@ -60,6 +61,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
{USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
{USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
{USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
+ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */
{USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
{USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */
{USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */
diff --git a/drivers/net/wireless/p54/txrx.c b/drivers/net/wireless/p54/txrx.c
index b6dda2b27fb..66057999a93 100644
--- a/drivers/net/wireless/p54/txrx.c
+++ b/drivers/net/wireless/p54/txrx.c
@@ -183,10 +183,10 @@ static int p54_tx_qos_accounting_alloc(struct p54_common *priv,
struct sk_buff *skb,
const u16 p54_queue)
{
- struct ieee80211_tx_queue_stats *queue;
+ struct p54_tx_queue_stats *queue;
unsigned long flags;
- if (WARN_ON(p54_queue > P54_QUEUE_NUM))
+ if (WARN_ON(p54_queue >= P54_QUEUE_NUM))
return -EINVAL;
queue = &priv->tx_stats[p54_queue];
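
The WARN_ON change fixes an off-by-one: assuming P54_QUEUE_NUM is 8, matching the tx_stats[8] array added in p54.h above, the last valid index is P54_QUEUE_NUM - 1, so an index equal to P54_QUEUE_NUM must be rejected as well. In isolation:

	#define P54_QUEUE_NUM 8		/* assumed, matching tx_stats[8] above */

	static int queue_index_ok(unsigned int q)
	{
		/* q == P54_QUEUE_NUM would index one element past the end */
		return q < P54_QUEUE_NUM;
	}
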
diff --git a/drivers/net/wireless/prism54/islpci_hotplug.c b/drivers/net/wireless/prism54/islpci_hotplug.c
index e4f2bb7368f..dc14420a9ad 100644
--- a/drivers/net/wireless/prism54/islpci_hotplug.c
+++ b/drivers/net/wireless/prism54/islpci_hotplug.c
@@ -39,7 +39,7 @@ module_param(init_pcitm, int, 0);
* driver_data
* If you have an update for this please contact prism54-devel@prism54.org
* The latest list can be found at http://prism54.org/supported_cards.php */
-static const struct pci_device_id prism54_id_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(prism54_id_tbl) = {
/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
{
0x1260, 0x3890,
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 88e1e4e32b2..84c530aa52f 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1871,10 +1871,8 @@ static void ray_update_parm(struct net_device *dev, UCHAR objid, UCHAR *value,
/*===========================================================================*/
static void ray_update_multi_list(struct net_device *dev, int all)
{
- struct dev_mc_list *dmi, **dmip;
int ccsindex;
struct ccs __iomem *pccs;
- int i = 0;
ray_dev_t *local = netdev_priv(dev);
struct pcmcia_device *link = local->finder;
void __iomem *p = local->sram + HOST_TO_ECF_BASE;
@@ -1895,9 +1893,11 @@ static void ray_update_multi_list(struct net_device *dev, int all)
writeb(0xff, &pccs->var);
local->num_multi = 0xff;
} else {
+ struct dev_mc_list *dmi;
+ int i = 0;
+
/* Copy the kernel's list of MC addresses to card */
- for (dmip = &dev->mc_list; (dmi = *dmip) != NULL;
- dmip = &dmi->next) {
+ netdev_for_each_mc_addr(dmi, dev) {
memcpy_toio(p, dmi->dmi_addr, ETH_ALEN);
dev_dbg(&link->dev,
"ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n",
@@ -1950,7 +1950,7 @@ static void set_multicast_list(struct net_device *dev)
if (dev->flags & IFF_ALLMULTI)
ray_update_multi_list(dev, 1);
else {
- if (local->num_multi != dev->mc_count)
+ if (local->num_multi != netdev_mc_count(dev))
ray_update_multi_list(dev, 0);
}
} /* end set_multicast_list */
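
The open-coded walk over dev->mc_list is replaced by the netdev_for_each_mc_addr() iterator, and the count now comes from netdev_mc_count(). A standalone sketch of the same idiom (write_addr_to_card() is a hypothetical helper standing in for the card-specific copy):

	static void copy_mc_addrs(struct net_device *dev)
	{
		struct dev_mc_list *dmi;	/* list entry type of this kernel era */
		int i = 0;

		netdev_for_each_mc_addr(dmi, dev)
			write_addr_to_card(i++, dmi->dmi_addr);	/* hypothetical helper */

		pr_debug("copied %d of %d multicast addresses\n",
			 i, netdev_mc_count(dev));
	}
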
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 2ecbedb26e1..9f6d6bf06b8 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -728,9 +728,9 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
ret = rndis_command(dev, u.header, buflen);
priv->current_command_oid = 0;
if (ret < 0)
- devdbg(dev, "rndis_query_oid(%s): rndis_command() failed, %d "
- "(%08x)", oid_to_string(oid), ret,
- le32_to_cpu(u.get_c->status));
+ netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
+ __func__, oid_to_string(oid), ret,
+ le32_to_cpu(u.get_c->status));
if (ret == 0) {
memcpy(data, u.buf + le32_to_cpu(u.get_c->offset) + 8, *len);
@@ -741,9 +741,9 @@ static int rndis_query_oid(struct usbnet *dev, __le32 oid, void *data, int *len)
ret = rndis_error_status(u.get_c->status);
if (ret < 0)
- devdbg(dev, "rndis_query_oid(%s): device returned "
- "error, 0x%08x (%d)", oid_to_string(oid),
- le32_to_cpu(u.get_c->status), ret);
+ netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
+ __func__, oid_to_string(oid),
+ le32_to_cpu(u.get_c->status), ret);
}
mutex_unlock(&priv->command_lock);
@@ -791,17 +791,17 @@ static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len)
ret = rndis_command(dev, u.header, buflen);
priv->current_command_oid = 0;
if (ret < 0)
- devdbg(dev, "rndis_set_oid(%s): rndis_command() failed, %d "
- "(%08x)", oid_to_string(oid), ret,
- le32_to_cpu(u.set_c->status));
+ netdev_dbg(dev->net, "%s(%s): rndis_command() failed, %d (%08x)\n",
+ __func__, oid_to_string(oid), ret,
+ le32_to_cpu(u.set_c->status));
if (ret == 0) {
ret = rndis_error_status(u.set_c->status);
if (ret < 0)
- devdbg(dev, "rndis_set_oid(%s): device returned error, "
- "0x%08x (%d)", oid_to_string(oid),
- le32_to_cpu(u.set_c->status), ret);
+ netdev_dbg(dev->net, "%s(%s): device returned error, 0x%08x (%d)\n",
+ __func__, oid_to_string(oid),
+ le32_to_cpu(u.set_c->status), ret);
}
mutex_unlock(&priv->command_lock);
@@ -870,11 +870,11 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
#endif
if (value_type == 2)
- devdbg(dev, "setting config parameter: %s, value: %s",
- param, (u8 *)value);
+ netdev_dbg(dev->net, "setting config parameter: %s, value: %s\n",
+ param, (u8 *)value);
else
- devdbg(dev, "setting config parameter: %s, value: %d",
- param, *(u32 *)value);
+ netdev_dbg(dev->net, "setting config parameter: %s, value: %d\n",
+ param, *(u32 *)value);
infobuf->name_offs = cpu_to_le32(sizeof(*infobuf));
infobuf->name_length = cpu_to_le32(param_len);
@@ -897,20 +897,21 @@ static int rndis_set_config_parameter(struct usbnet *dev, char *param,
}
#ifdef DEBUG
- devdbg(dev, "info buffer (len: %d):", info_len);
+ netdev_dbg(dev->net, "info buffer (len: %d)\n", info_len);
for (i = 0; i < info_len; i += 12) {
u32 *tmp = (u32 *)((u8 *)infobuf + i);
- devdbg(dev, "%08X:%08X:%08X",
- cpu_to_be32(tmp[0]),
- cpu_to_be32(tmp[1]),
- cpu_to_be32(tmp[2]));
+ netdev_dbg(dev->net, "%08X:%08X:%08X\n",
+ cpu_to_be32(tmp[0]),
+ cpu_to_be32(tmp[1]),
+ cpu_to_be32(tmp[2]));
}
#endif
ret = rndis_set_oid(dev, OID_GEN_RNDIS_CONFIG_PARAMETER,
infobuf, info_len);
if (ret != 0)
- devdbg(dev, "setting rndis config parameter failed, %d.", ret);
+ netdev_dbg(dev->net, "setting rndis config parameter failed, %d\n",
+ ret);
kfree(infobuf);
return ret;
@@ -945,13 +946,13 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
ret = rndis_set_oid(usbdev, OID_802_11_SSID, ssid, sizeof(*ssid));
if (ret < 0) {
- devwarn(usbdev, "setting SSID failed (%08X)", ret);
+ netdev_warn(usbdev->net, "setting SSID failed (%08X)\n", ret);
return ret;
}
if (ret == 0) {
memcpy(&priv->essid, ssid, sizeof(priv->essid));
priv->radio_on = true;
- devdbg(usbdev, "set_essid: radio_on = true");
+ netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__);
}
return ret;
@@ -963,7 +964,8 @@ static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
ret = rndis_set_oid(usbdev, OID_802_11_BSSID, bssid, ETH_ALEN);
if (ret < 0) {
- devwarn(usbdev, "setting BSSID[%pM] failed (%08X)", bssid, ret);
+ netdev_warn(usbdev->net, "setting BSSID[%pM] failed (%08X)\n",
+ bssid, ret);
return ret;
}
@@ -1021,7 +1023,8 @@ static int disassociate(struct usbnet *usbdev, bool reset_ssid)
ret = rndis_set_oid(usbdev, OID_802_11_DISASSOCIATE, NULL, 0);
if (ret == 0) {
priv->radio_on = false;
- devdbg(usbdev, "disassociate: radio_on = false");
+ netdev_dbg(usbdev->net, "%s(): radio_on = false\n",
+ __func__);
if (reset_ssid)
msleep(100);
@@ -1054,8 +1057,8 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
__le32 tmp;
int auth_mode, ret;
- devdbg(usbdev, "set_auth_mode: wpa_version=0x%x authalg=0x%x "
- "keymgmt=0x%x", wpa_version, auth_type, keymgmt);
+ netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x authalg=0x%x keymgmt=0x%x\n",
+ __func__, wpa_version, auth_type, keymgmt);
if (wpa_version & NL80211_WPA_VERSION_2) {
if (keymgmt & RNDIS_WLAN_KEY_MGMT_802_1X)
@@ -1082,7 +1085,8 @@ static int set_auth_mode(struct usbnet *usbdev, u32 wpa_version,
ret = rndis_set_oid(usbdev, OID_802_11_AUTHENTICATION_MODE, &tmp,
sizeof(tmp));
if (ret != 0) {
- devwarn(usbdev, "setting auth mode failed (%08X)", ret);
+ netdev_warn(usbdev->net, "setting auth mode failed (%08X)\n",
+ ret);
return ret;
}
@@ -1098,7 +1102,8 @@ static int set_priv_filter(struct usbnet *usbdev)
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
__le32 tmp;
- devdbg(usbdev, "set_priv_filter: wpa_version=0x%x", priv->wpa_version);
+ netdev_dbg(usbdev->net, "%s(): wpa_version=0x%x\n",
+ __func__, priv->wpa_version);
if (priv->wpa_version & NL80211_WPA_VERSION_2 ||
priv->wpa_version & NL80211_WPA_VERSION_1)
@@ -1116,8 +1121,8 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
__le32 tmp;
int encr_mode, ret;
- devdbg(usbdev, "set_encr_mode: cipher_pair=0x%x cipher_group=0x%x",
- pairwise, groupwise);
+ netdev_dbg(usbdev->net, "%s(): cipher_pair=0x%x cipher_group=0x%x\n",
+ __func__, pairwise, groupwise);
if (pairwise & RNDIS_WLAN_ALG_CCMP)
encr_mode = NDIS_80211_ENCR_CCMP_ENABLED;
@@ -1136,7 +1141,8 @@ static int set_encr_mode(struct usbnet *usbdev, int pairwise, int groupwise)
ret = rndis_set_oid(usbdev, OID_802_11_ENCRYPTION_STATUS, &tmp,
sizeof(tmp));
if (ret != 0) {
- devwarn(usbdev, "setting encr mode failed (%08X)", ret);
+ netdev_warn(usbdev->net, "setting encr mode failed (%08X)\n",
+ ret);
return ret;
}
@@ -1151,13 +1157,15 @@ static int set_infra_mode(struct usbnet *usbdev, int mode)
__le32 tmp;
int ret;
- devdbg(usbdev, "set_infra_mode: infra_mode=0x%x", priv->infra_mode);
+ netdev_dbg(usbdev->net, "%s(): infra_mode=0x%x\n",
+ __func__, priv->infra_mode);
tmp = cpu_to_le32(mode);
ret = rndis_set_oid(usbdev, OID_802_11_INFRASTRUCTURE_MODE, &tmp,
sizeof(tmp));
if (ret != 0) {
- devwarn(usbdev, "setting infra mode failed (%08X)", ret);
+ netdev_warn(usbdev->net, "setting infra mode failed (%08X)\n",
+ ret);
return ret;
}
@@ -1174,7 +1182,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
{
__le32 tmp;
- devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
+ netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
if (rts_threshold < 0 || rts_threshold > 2347)
rts_threshold = 2347;
@@ -1188,7 +1196,7 @@ static int set_frag_threshold(struct usbnet *usbdev, u32 frag_threshold)
{
__le32 tmp;
- devdbg(usbdev, "set_frag_threshold %i", frag_threshold);
+ netdev_dbg(usbdev->net, "%s(): %i\n", __func__, frag_threshold);
if (frag_threshold < 256 || frag_threshold > 2346)
frag_threshold = 2346;
@@ -1222,7 +1230,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
unsigned int dsconfig;
int len, ret;
- devdbg(usbdev, "set_channel(%d)", channel);
+ netdev_dbg(usbdev->net, "%s(%d)\n", __func__, channel);
/* this OID is valid only when not associated */
if (is_associated(usbdev))
@@ -1233,7 +1241,8 @@ static int set_channel(struct usbnet *usbdev, int channel)
len = sizeof(config);
ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
if (ret < 0) {
- devdbg(usbdev, "set_channel: querying configuration failed");
+ netdev_dbg(usbdev->net, "%s(): querying configuration failed\n",
+ __func__);
return ret;
}
@@ -1241,7 +1250,7 @@ static int set_channel(struct usbnet *usbdev, int channel)
ret = rndis_set_oid(usbdev, OID_802_11_CONFIGURATION, &config,
sizeof(config));
- devdbg(usbdev, "set_channel: %d -> %d", channel, ret);
+ netdev_dbg(usbdev->net, "%s(): %d -> %d\n", __func__, channel, ret);
return ret;
}
@@ -1255,7 +1264,8 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
u32 cipher;
int ret;
- devdbg(usbdev, "add_wep_key(idx: %d, len: %d)", index, key_len);
+ netdev_dbg(usbdev->net, "%s(idx: %d, len: %d)\n",
+ __func__, index, key_len);
if ((key_len != 5 && key_len != 13) || index < 0 || index > 3)
return -EINVAL;
@@ -1277,15 +1287,15 @@ static int add_wep_key(struct usbnet *usbdev, const u8 *key, int key_len,
ret = set_encr_mode(usbdev, RNDIS_WLAN_ALG_WEP,
RNDIS_WLAN_ALG_NONE);
if (ret)
- devwarn(usbdev, "encryption couldn't be enabled (%08X)",
- ret);
+ netdev_warn(usbdev->net, "encryption couldn't be enabled (%08X)\n",
+ ret);
}
ret = rndis_set_oid(usbdev, OID_802_11_ADD_WEP, &ndis_key,
sizeof(ndis_key));
if (ret != 0) {
- devwarn(usbdev, "adding encryption key %d failed (%08X)",
- index+1, ret);
+ netdev_warn(usbdev->net, "adding encryption key %d failed (%08X)\n",
+ index + 1, ret);
return ret;
}
@@ -1307,22 +1317,23 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
int ret;
if (index < 0 || index >= 4) {
- devdbg(usbdev, "add_wpa_key: index out of range (%i)", index);
+ netdev_dbg(usbdev->net, "%s(): index out of range (%i)\n",
+ __func__, index);
return -EINVAL;
}
if (key_len > sizeof(ndis_key.material) || key_len < 0) {
- devdbg(usbdev, "add_wpa_key: key length out of range (%i)",
- key_len);
+ netdev_dbg(usbdev->net, "%s(): key length out of range (%i)\n",
+ __func__, key_len);
return -EINVAL;
}
if (flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ) {
if (!rx_seq || seq_len <= 0) {
- devdbg(usbdev, "add_wpa_key: recv seq flag without"
- "buffer");
+ netdev_dbg(usbdev->net, "%s(): recv seq flag without buffer\n",
+ __func__);
return -EINVAL;
}
if (rx_seq && seq_len > sizeof(ndis_key.rsc)) {
- devdbg(usbdev, "add_wpa_key: too big recv seq buffer");
+ netdev_dbg(usbdev->net, "%s(): too big recv seq buffer\n", __func__);
return -EINVAL;
}
}
@@ -1330,15 +1341,16 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
is_addr_ok = addr && !is_zero_ether_addr(addr) &&
!is_broadcast_ether_addr(addr);
if ((flags & NDIS_80211_ADDKEY_PAIRWISE_KEY) && !is_addr_ok) {
- devdbg(usbdev, "add_wpa_key: pairwise but bssid invalid (%pM)",
- addr);
+ netdev_dbg(usbdev->net, "%s(): pairwise but bssid invalid (%pM)\n",
+ __func__, addr);
return -EINVAL;
}
- devdbg(usbdev, "add_wpa_key(%i): flags:%i%i%i", index,
- !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY),
- !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY),
- !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ));
+ netdev_dbg(usbdev->net, "%s(%i): flags:%i%i%i\n",
+ __func__, index,
+ !!(flags & NDIS_80211_ADDKEY_TRANSMIT_KEY),
+ !!(flags & NDIS_80211_ADDKEY_PAIRWISE_KEY),
+ !!(flags & NDIS_80211_ADDKEY_SET_INIT_RECV_SEQ));
memset(&ndis_key, 0, sizeof(ndis_key));
@@ -1372,7 +1384,8 @@ static int add_wpa_key(struct usbnet *usbdev, const u8 *key, int key_len,
ret = rndis_set_oid(usbdev, OID_802_11_ADD_KEY, &ndis_key,
le32_to_cpu(ndis_key.size));
- devdbg(usbdev, "add_wpa_key: OID_802_11_ADD_KEY -> %08X", ret);
+ netdev_dbg(usbdev->net, "%s(): OID_802_11_ADD_KEY -> %08X\n",
+ __func__, ret);
if (ret != 0)
return ret;
@@ -1401,7 +1414,7 @@ static int restore_key(struct usbnet *usbdev, int key_idx)
key = priv->encr_keys[key_idx];
- devdbg(usbdev, "restore_key: %i:%i", key_idx, key.len);
+ netdev_dbg(usbdev->net, "%s(): %i:%i\n", __func__, key_idx, key.len);
if (key.len == 0)
return 0;
@@ -1436,8 +1449,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
is_wpa = is_wpa_key(priv, index);
- devdbg(usbdev, "remove_key: %i:%s:%i", index, is_wpa ? "wpa" : "wep",
- priv->encr_keys[index].len);
+ netdev_dbg(usbdev->net, "%s(): %i:%s:%i\n",
+ __func__, index, is_wpa ? "wpa" : "wep",
+ priv->encr_keys[index].len);
clear_key(priv, index);
@@ -1464,9 +1478,9 @@ static int remove_key(struct usbnet *usbdev, int index, const u8 *bssid)
ret = rndis_set_oid(usbdev, OID_802_11_REMOVE_WEP, &keyindex,
sizeof(keyindex));
if (ret != 0) {
- devwarn(usbdev,
- "removing encryption key %d failed (%08X)",
- index, ret);
+ netdev_warn(usbdev->net,
+ "removing encryption key %d failed (%08X)\n",
+ index, ret);
return ret;
}
}
@@ -1488,29 +1502,29 @@ static void set_multicast_list(struct usbnet *usbdev)
filter = RNDIS_PACKET_TYPE_DIRECTED | RNDIS_PACKET_TYPE_BROADCAST;
+ netif_addr_lock_bh(usbdev->net);
if (usbdev->net->flags & IFF_PROMISC) {
filter |= RNDIS_PACKET_TYPE_PROMISCUOUS |
RNDIS_PACKET_TYPE_ALL_LOCAL;
} else if (usbdev->net->flags & IFF_ALLMULTI ||
- usbdev->net->mc_count > priv->multicast_size) {
+ netdev_mc_count(usbdev->net) > priv->multicast_size) {
filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
- } else if (usbdev->net->mc_count > 0) {
- size = min(priv->multicast_size, usbdev->net->mc_count);
+ } else if (!netdev_mc_empty(usbdev->net)) {
+ size = min(priv->multicast_size, netdev_mc_count(usbdev->net));
buf = kmalloc(size * ETH_ALEN, GFP_KERNEL);
if (!buf) {
- devwarn(usbdev,
- "couldn't alloc %d bytes of memory",
- size * ETH_ALEN);
+ netdev_warn(usbdev->net,
+ "couldn't alloc %d bytes of memory\n",
+ size * ETH_ALEN);
+ netif_addr_unlock_bh(usbdev->net);
return;
}
- mclist = usbdev->net->mc_list;
- for (i = 0; i < size && mclist; mclist = mclist->next) {
- if (mclist->dmi_addrlen != ETH_ALEN)
- continue;
-
- memcpy(buf + i * ETH_ALEN, mclist->dmi_addr, ETH_ALEN);
- i++;
+ i = 0;
+ netdev_for_each_mc_addr(mclist, usbdev->net) {
+ if (i == size)
+ break;
+ memcpy(buf + i++ * ETH_ALEN, mclist->dmi_addr, ETH_ALEN);
}
ret = rndis_set_oid(usbdev, OID_802_3_MULTICAST_LIST, buf,
@@ -1520,21 +1534,22 @@ static void set_multicast_list(struct usbnet *usbdev)
else
filter |= RNDIS_PACKET_TYPE_ALL_MULTICAST;
- devdbg(usbdev, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d",
- i, priv->multicast_size, ret);
+ netdev_dbg(usbdev->net, "OID_802_3_MULTICAST_LIST(%d, max: %d) -> %d\n",
+ i, priv->multicast_size, ret);
kfree(buf);
}
+ netif_addr_unlock_bh(usbdev->net);
ret = rndis_set_oid(usbdev, OID_GEN_CURRENT_PACKET_FILTER, &filter,
sizeof(filter));
if (ret < 0) {
- devwarn(usbdev, "couldn't set packet filter: %08x",
- le32_to_cpu(filter));
+ netdev_warn(usbdev->net, "couldn't set packet filter: %08x\n",
+ le32_to_cpu(filter));
}
- devdbg(usbdev, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d",
- le32_to_cpu(filter), ret);
+ netdev_dbg(usbdev->net, "OID_GEN_CURRENT_PACKET_FILTER(%08x) -> %d\n",
+ le32_to_cpu(filter), ret);
}
/*
@@ -1592,7 +1607,8 @@ static int rndis_set_tx_power(struct wiphy *wiphy, enum tx_power_setting type,
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
- devdbg(usbdev, "rndis_set_tx_power type:0x%x dbm:%i", type, dbm);
+ netdev_dbg(usbdev->net, "%s(): type:0x%x dbm:%i\n",
+ __func__, type, dbm);
/* Device doesn't support changing txpower after initialization, only
* turn off/on radio. Support 'auto' mode and setting same dBm that is
@@ -1615,7 +1631,7 @@ static int rndis_get_tx_power(struct wiphy *wiphy, int *dbm)
*dbm = get_bcm4320_power_dbm(priv);
- devdbg(usbdev, "rndis_get_tx_power dbm:%i", *dbm);
+ netdev_dbg(usbdev->net, "%s(): dbm:%i\n", __func__, *dbm);
return 0;
}
@@ -1629,7 +1645,7 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
int ret;
__le32 tmp;
- devdbg(usbdev, "cfg80211.scan");
+ netdev_dbg(usbdev->net, "cfg80211.scan\n");
/* Get current bssid list from device before new scan, as new scan
* clears internal bssid list.
@@ -1669,8 +1685,8 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
int ie_len, bssid_len;
u8 *ie;
- devdbg(usbdev, " found bssid: '%.32s' [%pM]", bssid->ssid.essid,
- bssid->mac);
+ netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM]\n",
+ bssid->ssid.essid, bssid->mac);
/* parse bssid structure */
bssid_len = le32_to_cpu(bssid->length);
@@ -1712,7 +1728,7 @@ static int rndis_check_bssid_list(struct usbnet *usbdev)
int ret = -EINVAL, len, count, bssid_len;
bool resized = false;
- devdbg(usbdev, "check_bssid_list");
+ netdev_dbg(usbdev->net, "check_bssid_list\n");
len = CONTROL_BUFFER_SIZE;
resize_buf:
@@ -1736,8 +1752,8 @@ resize_buf:
bssid = bssid_list->bssid;
bssid_len = le32_to_cpu(bssid->length);
count = le32_to_cpu(bssid_list->num_items);
- devdbg(usbdev, "check_bssid_list: %d BSSIDs found (buflen: %d)", count,
- len);
+ netdev_dbg(usbdev->net, "check_bssid_list: %d BSSIDs found (buflen: %d)\n",
+ count, len);
while (count && ((void *)bssid + bssid_len) <= (buf + len)) {
rndis_bss_info_update(usbdev, bssid);
@@ -1759,7 +1775,7 @@ static void rndis_get_scan_results(struct work_struct *work)
struct usbnet *usbdev = priv->usbdev;
int ret;
- devdbg(usbdev, "get_scan_results");
+ netdev_dbg(usbdev->net, "get_scan_results\n");
if (!priv->scan_request)
return;
@@ -1793,7 +1809,7 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
if (sme->crypto.n_ciphers_pairwise > 0 &&
pairwise == RNDIS_WLAN_ALG_NONE) {
- deverr(usbdev, "Unsupported pairwise cipher");
+ netdev_err(usbdev->net, "Unsupported pairwise cipher\n");
return -ENOTSUPP;
}
@@ -1803,28 +1819,30 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
if (sme->crypto.n_akm_suites > 0 &&
keymgmt == RNDIS_WLAN_KEY_MGMT_NONE) {
- deverr(usbdev, "Invalid keymgmt");
+ netdev_err(usbdev->net, "Invalid keymgmt\n");
return -ENOTSUPP;
}
- devdbg(usbdev, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:"
- "0x%x]:0x%x)", sme->ssid, sme->bssid, chan,
- sme->privacy, sme->crypto.wpa_versions, sme->auth_type,
- groupwise, pairwise, keymgmt);
+ netdev_dbg(usbdev->net, "cfg80211.connect('%.32s':[%pM]:%d:[%d,0x%x:0x%x]:[0x%x:0x%x]:0x%x)\n",
+ sme->ssid, sme->bssid, chan,
+ sme->privacy, sme->crypto.wpa_versions, sme->auth_type,
+ groupwise, pairwise, keymgmt);
if (is_associated(usbdev))
disassociate(usbdev, false);
ret = set_infra_mode(usbdev, NDIS_80211_INFRA_INFRA);
if (ret < 0) {
- devdbg(usbdev, "connect: set_infra_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_infra_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
ret = set_auth_mode(usbdev, sme->crypto.wpa_versions, sme->auth_type,
keymgmt);
if (ret < 0) {
- devdbg(usbdev, "connect: set_auth_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_auth_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
@@ -1832,14 +1850,16 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
ret = set_encr_mode(usbdev, pairwise, groupwise);
if (ret < 0) {
- devdbg(usbdev, "connect: set_encr_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_encr_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
if (channel) {
ret = set_channel(usbdev, chan);
if (ret < 0) {
- devdbg(usbdev, "connect: set_channel failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_channel failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
}
@@ -1848,8 +1868,8 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
priv->encr_tx_key_index = sme->key_idx;
ret = add_wep_key(usbdev, sme->key, sme->key_len, sme->key_idx);
if (ret < 0) {
- devdbg(usbdev, "connect: add_wep_key failed, %d "
- "(%d, %d)", ret, sme->key_len, sme->key_idx);
+ netdev_dbg(usbdev->net, "connect: add_wep_key failed, %d (%d, %d)\n",
+ ret, sme->key_len, sme->key_idx);
goto err_turn_radio_on;
}
}
@@ -1858,7 +1878,8 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
!is_broadcast_ether_addr(sme->bssid)) {
ret = set_bssid(usbdev, sme->bssid);
if (ret < 0) {
- devdbg(usbdev, "connect: set_bssid failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_bssid failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
} else
@@ -1880,7 +1901,7 @@ static int rndis_connect(struct wiphy *wiphy, struct net_device *dev,
ret = set_essid(usbdev, &ssid);
if (ret < 0)
- devdbg(usbdev, "connect: set_essid failed, %d", ret);
+ netdev_dbg(usbdev->net, "connect: set_essid failed, %d\n", ret);
return ret;
err_turn_radio_on:
@@ -1895,7 +1916,7 @@ static int rndis_disconnect(struct wiphy *wiphy, struct net_device *dev,
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
- devdbg(usbdev, "cfg80211.disconnect(%d)", reason_code);
+ netdev_dbg(usbdev->net, "cfg80211.disconnect(%d)\n", reason_code);
priv->connected = false;
memset(priv->bssid, 0, ETH_ALEN);
@@ -1929,21 +1950,23 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
alg = RNDIS_WLAN_ALG_NONE;
}
- devdbg(usbdev, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)", params->ssid,
- params->bssid, chan, params->privacy);
+ netdev_dbg(usbdev->net, "cfg80211.join_ibss('%.32s':[%pM]:%d:%d)\n",
+ params->ssid, params->bssid, chan, params->privacy);
if (is_associated(usbdev))
disassociate(usbdev, false);
ret = set_infra_mode(usbdev, NDIS_80211_INFRA_ADHOC);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_infra_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_infra_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
ret = set_auth_mode(usbdev, 0, auth_type, RNDIS_WLAN_KEY_MGMT_NONE);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_auth_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_auth_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
@@ -1951,15 +1974,16 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
ret = set_encr_mode(usbdev, alg, RNDIS_WLAN_ALG_NONE);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_encr_mode failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_encr_mode failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
if (channel) {
ret = set_channel(usbdev, chan);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_channel failed, %d",
- ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_channel failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
}
@@ -1968,7 +1992,8 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
!is_broadcast_ether_addr(params->bssid)) {
ret = set_bssid(usbdev, params->bssid);
if (ret < 0) {
- devdbg(usbdev, "join_ibss: set_bssid failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_bssid failed, %d\n",
+ ret);
goto err_turn_radio_on;
}
} else
@@ -1988,7 +2013,8 @@ static int rndis_join_ibss(struct wiphy *wiphy, struct net_device *dev,
ret = set_essid(usbdev, &ssid);
if (ret < 0)
- devdbg(usbdev, "join_ibss: set_essid failed, %d", ret);
+ netdev_dbg(usbdev->net, "join_ibss: set_essid failed, %d\n",
+ ret);
return ret;
err_turn_radio_on:
@@ -2002,7 +2028,7 @@ static int rndis_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
- devdbg(usbdev, "cfg80211.leave_ibss()");
+ netdev_dbg(usbdev->net, "cfg80211.leave_ibss()\n");
priv->connected = false;
memset(priv->bssid, 0, ETH_ALEN);
@@ -2028,8 +2054,8 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
struct usbnet *usbdev = priv->usbdev;
__le32 flags;
- devdbg(usbdev, "rndis_add_key(%i, %pM, %08x)", key_index, mac_addr,
- params->cipher);
+ netdev_dbg(usbdev->net, "%s(%i, %pM, %08x)\n",
+ __func__, key_index, mac_addr, params->cipher);
switch (params->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
@@ -2050,8 +2076,8 @@ static int rndis_add_key(struct wiphy *wiphy, struct net_device *netdev,
key_index, mac_addr, params->seq,
params->seq_len, params->cipher, flags);
default:
- devdbg(usbdev, "rndis_add_key: unsupported cipher %08x",
- params->cipher);
+ netdev_dbg(usbdev->net, "%s(): unsupported cipher %08x\n",
+ __func__, params->cipher);
return -ENOTSUPP;
}
}
@@ -2062,7 +2088,7 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
struct rndis_wlan_private *priv = wiphy_priv(wiphy);
struct usbnet *usbdev = priv->usbdev;
- devdbg(usbdev, "rndis_del_key(%i, %pM)", key_index, mac_addr);
+ netdev_dbg(usbdev->net, "%s(%i, %pM)\n", __func__, key_index, mac_addr);
return remove_key(usbdev, key_index, mac_addr);
}
@@ -2074,7 +2100,7 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
struct usbnet *usbdev = priv->usbdev;
struct rndis_wlan_encr_key key;
- devdbg(usbdev, "rndis_set_default_key(%i)", key_index);
+ netdev_dbg(usbdev->net, "%s(%i)\n", __func__, key_index);
priv->encr_tx_key_index = key_index;
@@ -2188,7 +2214,8 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
if (ret < 0)
memset(bssid, 0, sizeof(bssid));
- devdbg(usbdev, "link up work: [%pM] %s", bssid, roamed ? "roamed" : "");
+ netdev_dbg(usbdev->net, "link up work: [%pM]%s\n",
+ bssid, roamed ? " roamed" : "");
/* Internal bss list in device always contains at least the currently
* connected bss and we can get it to cfg80211 with
@@ -2270,8 +2297,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
/* must have at least one array entry */
if (len < offsetof(struct ndis_80211_status_indication, u) +
sizeof(struct ndis_80211_auth_request)) {
- devinfo(usbdev, "authentication indication: "
- "too short message (%i)", len);
+ netdev_info(usbdev->net, "authentication indication: too short message (%i)\n",
+ len);
return;
}
@@ -2298,8 +2325,8 @@ static void rndis_wlan_auth_indication(struct usbnet *usbdev,
type = "group_error";
}
- devinfo(usbdev, "authentication indication: %s (0x%08x)", type,
- le32_to_cpu(auth_req->flags));
+ netdev_info(usbdev->net, "authentication indication: %s (0x%08x)\n",
+ type, le32_to_cpu(auth_req->flags));
if (pairwise_error) {
key_type = NL80211_KEYTYPE_PAIRWISE;
@@ -2335,8 +2362,8 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
if (len < offsetof(struct ndis_80211_status_indication, u) +
sizeof(struct ndis_80211_pmkid_cand_list)) {
- devinfo(usbdev, "pmkid candidate list indication: "
- "too short message (%i)", len);
+ netdev_info(usbdev->net, "pmkid candidate list indication: too short message (%i)\n",
+ len);
return;
}
@@ -2346,18 +2373,16 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
offsetof(struct ndis_80211_status_indication, u);
if (len < expected_len) {
- devinfo(usbdev, "pmkid candidate list indication: "
- "list larger than buffer (%i < %i)",
- len, expected_len);
+ netdev_info(usbdev->net, "pmkid candidate list indication: list larger than buffer (%i < %i)\n",
+ len, expected_len);
return;
}
cand_list = &indication->u.cand_list;
- devinfo(usbdev, "pmkid candidate list indication: "
- "version %i, candidates %i",
- le32_to_cpu(cand_list->version),
- le32_to_cpu(cand_list->num_candidates));
+ netdev_info(usbdev->net, "pmkid candidate list indication: version %i, candidates %i\n",
+ le32_to_cpu(cand_list->version),
+ le32_to_cpu(cand_list->num_candidates));
if (le32_to_cpu(cand_list->version) != 1)
return;
@@ -2366,8 +2391,8 @@ static void rndis_wlan_pmkid_cand_list_indication(struct usbnet *usbdev,
struct ndis_80211_pmkid_candidate *cand =
&cand_list->candidate_list[i];
- devdbg(usbdev, "cand[%i]: flags: 0x%08x, bssid: %pM",
- i, le32_to_cpu(cand->flags), cand->bssid);
+ netdev_dbg(usbdev->net, "cand[%i]: flags: 0x%08x, bssid: %pM\n",
+ i, le32_to_cpu(cand->flags), cand->bssid);
#if 0
struct iw_pmkid_cand pcand;
@@ -2398,15 +2423,14 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
len = le32_to_cpu(msg->length);
if (len < 8) {
- devinfo(usbdev, "media specific indication, "
- "ignore too short message (%i < 8)", len);
+ netdev_info(usbdev->net, "media specific indication, ignore too short message (%i < 8)\n",
+ len);
return;
}
if (offset + len > buflen) {
- devinfo(usbdev, "media specific indication, "
- "too large to fit to buffer (%i > %i)",
- offset + len, buflen);
+ netdev_info(usbdev->net, "media specific indication, too large to fit to buffer (%i > %i)\n",
+ offset + len, buflen);
return;
}
@@ -2414,13 +2438,13 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
switch (le32_to_cpu(indication->status_type)) {
case NDIS_80211_STATUSTYPE_RADIOSTATE:
- devinfo(usbdev, "radio state indication: %i",
- le32_to_cpu(indication->u.radio_status));
+ netdev_info(usbdev->net, "radio state indication: %i\n",
+ le32_to_cpu(indication->u.radio_status));
return;
case NDIS_80211_STATUSTYPE_MEDIASTREAMMODE:
- devinfo(usbdev, "media stream mode indication: %i",
- le32_to_cpu(indication->u.media_stream_mode));
+ netdev_info(usbdev->net, "media stream mode indication: %i\n",
+ le32_to_cpu(indication->u.media_stream_mode));
return;
case NDIS_80211_STATUSTYPE_AUTHENTICATION:
@@ -2432,9 +2456,8 @@ static void rndis_wlan_media_specific_indication(struct usbnet *usbdev,
return;
default:
- devinfo(usbdev, "media specific indication: "
- "unknown status type 0x%08x",
- le32_to_cpu(indication->status_type));
+ netdev_info(usbdev->net, "media specific indication: unknown status type 0x%08x\n",
+ le32_to_cpu(indication->status_type));
}
}
@@ -2451,14 +2474,13 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
* and userspace to think that device is
* roaming/reassociating when it isn't.
*/
- devdbg(usbdev, "ignored OID_802_11_ADD_KEY triggered "
- "'media connect'");
+ netdev_dbg(usbdev->net, "ignored OID_802_11_ADD_KEY triggered 'media connect'\n");
return;
}
usbnet_pause_rx(usbdev);
- devinfo(usbdev, "media connect");
+ netdev_info(usbdev->net, "media connect\n");
/* queue work to avoid recursive calls into rndis_command */
set_bit(WORK_LINK_UP, &priv->work_pending);
@@ -2466,7 +2488,7 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
break;
case RNDIS_STATUS_MEDIA_DISCONNECT:
- devinfo(usbdev, "media disconnect");
+ netdev_info(usbdev->net, "media disconnect\n");
/* queue work to avoid recursive calls into rndis_command */
set_bit(WORK_LINK_DOWN, &priv->work_pending);
@@ -2478,8 +2500,8 @@ static void rndis_wlan_indication(struct usbnet *usbdev, void *ind, int buflen)
break;
default:
- devinfo(usbdev, "indication: 0x%08x",
- le32_to_cpu(msg->status));
+ netdev_info(usbdev->net, "indication: 0x%08x\n",
+ le32_to_cpu(msg->status));
break;
}
}
@@ -2544,8 +2566,8 @@ static void rndis_device_poller(struct work_struct *work)
if (ret == 0)
priv->last_qual = level_to_qual(le32_to_cpu(rssi));
- devdbg(usbdev, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d",
- ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
+ netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
+ ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
/* Workaround transfer stalls on poor quality links.
* TODO: find right way to fix these stalls (as stalls do not happen
@@ -2594,23 +2616,9 @@ end:
/*
* driver/device initialization
*/
-static int bcm4320a_early_init(struct usbnet *usbdev)
-{
- /* bcm4320a doesn't handle configuration parameters well. Try
- * set any and you get partially zeroed mac and broken device.
- */
-
- return 0;
-}
-
-static int bcm4320b_early_init(struct usbnet *usbdev)
+static void rndis_copy_module_params(struct usbnet *usbdev)
{
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
- char buf[8];
-
- /* Early initialization settings, setting these won't have effect
- * if called after generic_rndis_bind().
- */
priv->param_country[0] = modparam_country[0];
priv->param_country[1] = modparam_country[1];
@@ -2652,6 +2660,32 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
priv->param_workaround_interval = 500;
else
priv->param_workaround_interval = modparam_workaround_interval;
+}
+
+static int bcm4320a_early_init(struct usbnet *usbdev)
+{
+ /* copy module parameters for bcm4320a so that iwconfig reports txpower
+ * correctly and the workaround interval is copied to the private structure.
+ */
+ rndis_copy_module_params(usbdev);
+
+ /* bcm4320a doesn't handle configuration parameters well. Trying to
+ * set any results in a partially zeroed MAC and a broken device.
+ */
+
+ return 0;
+}
+
+static int bcm4320b_early_init(struct usbnet *usbdev)
+{
+ struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
+ char buf[8];
+
+ rndis_copy_module_params(usbdev);
+
+ /* Early initialization settings, setting these won't have effect
+ * if called after generic_rndis_bind().
+ */
rndis_set_config_parameter_str(usbdev, "Country", priv->param_country);
rndis_set_config_parameter_str(usbdev, "FrameBursting",
@@ -2826,11 +2860,11 @@ static int rndis_wlan_reset(struct usbnet *usbdev)
struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
int retval;
- devdbg(usbdev, "rndis_wlan_reset");
+ netdev_dbg(usbdev->net, "%s()\n", __func__);
retval = rndis_reset(usbdev);
if (retval)
- devwarn(usbdev, "rndis_reset() failed: %d", retval);
+ netdev_warn(usbdev->net, "rndis_reset failed: %d\n", retval);
/* rndis_reset cleared multicast list, so restore here.
(set_multicast_list() also turns on current packet filter) */
@@ -2848,7 +2882,7 @@ static int rndis_wlan_stop(struct usbnet *usbdev)
int retval;
__le32 filter;
- devdbg(usbdev, "rndis_wlan_stop");
+ netdev_dbg(usbdev->net, "%s()\n", __func__);
retval = disassociate(usbdev, false);
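
Besides the wholesale devdbg/devwarn/devinfo to netdev_dbg/netdev_warn/netdev_info conversion (the netdev_* helpers take usbdev->net and expect an explicit trailing newline), set_multicast_list() now walks the multicast list under netif_addr_lock_bh(), since the list can change while it is being copied. Reduced to its core, the added locking around the walk looks like this sketch (buffer setup and the subsequent OID write omitted):

	netif_addr_lock_bh(usbdev->net);
	i = 0;
	netdev_for_each_mc_addr(mclist, usbdev->net) {
		if (i == size)
			break;
		memcpy(buf + i++ * ETH_ALEN, mclist->dmi_addr, ETH_ALEN);
	}
	netif_addr_unlock_bh(usbdev->net);
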
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index bf60689aaab..5239e082cd0 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -54,17 +54,17 @@ config RT61PCI
When compiled as a module, this driver will be called rt61pci.
config RT2800PCI_PCI
- tristate
+ boolean
depends on PCI
default y
config RT2800PCI_SOC
- tristate
+ boolean
depends on RALINK_RT288X || RALINK_RT305X
default y
config RT2800PCI
- tristate "Ralink rt2800 (PCI/PCMCIA) support (VERY EXPERIMENTAL)"
+ tristate "Ralink rt28xx/rt30xx/rt35xx (PCI/PCIe/PCMCIA) support (EXPERIMENTAL)"
depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL
select RT2800_LIB
select RT2X00_LIB_PCI if RT2800PCI_PCI
@@ -75,7 +75,7 @@ config RT2800PCI
select CRC_CCITT
select EEPROM_93CX6
---help---
- This adds support for rt2800 wireless chipset family.
+ This adds support for the rt2800/rt3000/rt3500 wireless chipset families.
Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052
This driver is non-functional at the moment and is intended for
@@ -83,6 +83,32 @@ config RT2800PCI
When compiled as a module, this driver will be called "rt2800pci.ko".
+if RT2800PCI
+
+config RT2800PCI_RT30XX
+ bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices"
+ default n
+ ---help---
+ This adds support for the rt30xx wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT3090, RT3091 & RT3092
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
+config RT2800PCI_RT35XX
+ bool "rt2800pci - Include support for rt35xx (PCI/PCIe/PCMCIA) devices"
+ default n
+ ---help---
+ This adds support for the rt35xx wireless chipset family to the
+ rt2800pci driver.
+ Supported chips: RT3060, RT3062, RT3562, RT3592
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
+endif
+
config RT2500USB
tristate "Ralink rt2500 (USB) support"
depends on USB
@@ -126,6 +152,43 @@ config RT2800USB
When compiled as a module, this driver will be called "rt2800usb.ko".
+if RT2800USB
+
+config RT2800USB_RT30XX
+ bool "rt2800usb - Include support for rt30xx (USB) devices"
+ default n
+ ---help---
+ This adds support for the rt30xx wireless chipset family to the
+ rt2800usb driver.
+ Supported chips: RT3070, RT3071 & RT3072
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
+config RT2800USB_RT35XX
+ bool "rt2800usb - Include support for rt35xx (USB) devices"
+ default n
+ ---help---
+ This adds support for the rt35xx wireless chipset family to the
+ rt2800usb driver.
+ Supported chips: RT3572
+
+ Support for these devices is non-functional at the moment and is
+ intended for testers and developers.
+
+config RT2800USB_UNKNOWN
+ bool "rt2800usb - Include support for unknown (USB) devices"
+ default n
+ ---help---
+ This adds support for devices that are known to have an rt2800
+ family chipset, but for which the exact chipset is unknown.
+
+ Support status for these devices is unknown, and enabling these
+ devices may or may not work.
+
+endif
+
config RT2800_LIB
tristate
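
The new bool sub-options do not build separate modules; they only define CONFIG_* symbols that the rt2800pci/rt2800usb code can test at compile time, typically along these lines (the exact guards used by the driver may differ):

	#ifdef CONFIG_RT2800PCI_RT30XX
		/* extra rt30xx device IDs / RF initialisation compiled in */
	#endif
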
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index e7f46405a41..c22b04042d5 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -451,7 +451,7 @@ static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* RF2420 chipset don't need any additional actions.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2420))
+ if (rt2x00_rf(rt2x00dev, RF2420))
return;
/*
@@ -1340,11 +1340,10 @@ static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
- rt2x00_set_chip_rf(rt2x00dev, value, reg);
- rt2x00_print_chip(rt2x00dev);
+ rt2x00_set_chip(rt2x00dev, RT2460, value,
+ rt2x00_get_field32(reg, CSR0_REVISION));
- if (!rt2x00_rf(&rt2x00dev->chip, RF2420) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2421)) {
+ if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1562,7 +1561,6 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2400pci_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2400pci_get_tsf,
.tx_last_beacon = rt2400pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
@@ -1643,7 +1641,7 @@ static const struct rt2x00_ops rt2400pci_ops = {
/*
* RT2400pci module information.
*/
-static struct pci_device_id rt2400pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2400pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) },
{ 0, }
};
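
DEFINE_PCI_DEVICE_TABLE() replaces the open-coded declaration above; at this point in kernel history the combination of "static" plus the macro expands to roughly the following, placing the table in const (__devinitconst) storage. Shown for illustration, not taken from this patch:

	static const struct pci_device_id rt2400pci_device_table[] __devinitconst = {
		{ PCI_DEVICE(0x1814, 0x0101), PCI_DEVICE_DATA(&rt2400pci_ops) },
		{ 0, }
	};
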
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index c3dea697b90..c048b18f413 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -65,6 +65,7 @@
* CSR0: ASIC revision number.
*/
#define CSR0 0x0000
+#define CSR0_REVISION FIELD32(0x0000ffff)
/*
* CSR1: System control register.
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 408fcfc120f..52bbcf1bd17 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -440,8 +440,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E and RT5222 need to flip TX I/Q
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
- rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1);
rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1);
@@ -449,7 +448,7 @@ static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E does not need RX I/Q Flip.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
+ if (rt2x00_rf(rt2x00dev, RF2525E))
rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
} else {
rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0);
@@ -475,14 +474,14 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
* Switch on tuning bits.
* For RT2523 devices we do not need to update the R1 register.
*/
- if (!rt2x00_rf(&rt2x00dev->chip, RF2523))
+ if (!rt2x00_rf(rt2x00dev, RF2523))
rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1);
rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1);
/*
* For RT2525 we should first set the channel to half band higher.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ if (rt2x00_rf(rt2x00dev, RF2525)) {
static const u32 vals[] = {
0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a,
0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a,
@@ -516,7 +515,7 @@ static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev,
* Switch off tuning bits.
* For RT2523 devices we do not need to update the R1 register.
*/
- if (!rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ if (!rt2x00_rf(rt2x00dev, RF2523)) {
rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0);
rt2500pci_rf_write(rt2x00dev, 1, rf->rf1);
}
@@ -640,7 +639,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
* up to version C the link tuning should halt after 20
* seconds while being associated.
*/
- if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D &&
+ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D &&
rt2x00dev->intf_associated && count > 20)
return;
@@ -650,7 +649,7 @@ static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev,
* should go straight to dynamic CCA tuning when they
* are not associated.
*/
- if (rt2x00_rev(&rt2x00dev->chip) < RT2560_VERSION_D ||
+ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D ||
!rt2x00dev->intf_associated)
goto dynamic_cca_tune;
@@ -1504,15 +1503,15 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00pci_register_read(rt2x00dev, CSR0, &reg);
- rt2x00_set_chip_rf(rt2x00dev, value, reg);
- rt2x00_print_chip(rt2x00dev);
-
- if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2524) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525E) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ rt2x00_set_chip(rt2x00dev, RT2560, value,
+ rt2x00_get_field32(reg, CSR0_REVISION));
+
+ if (!rt2x00_rf(rt2x00dev, RF2522) &&
+ !rt2x00_rf(rt2x00dev, RF2523) &&
+ !rt2x00_rf(rt2x00dev, RF2524) &&
+ !rt2x00_rf(rt2x00dev, RF2525) &&
+ !rt2x00_rf(rt2x00dev, RF2525E) &&
+ !rt2x00_rf(rt2x00dev, RF5222)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1744,22 +1743,22 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
+ if (rt2x00_rf(rt2x00dev, RF2522)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
spec->channels = rf_vals_bg_2522;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2523)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
spec->channels = rf_vals_bg_2523;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2524)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
spec->channels = rf_vals_bg_2524;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
spec->channels = rf_vals_bg_2525;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
spec->channels = rf_vals_bg_2525e;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5222)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5222);
spec->channels = rf_vals_5222;
@@ -1860,7 +1859,6 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2x00mac_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2500pci_get_tsf,
.tx_last_beacon = rt2500pci_tx_last_beacon,
.rfkill_poll = rt2x00mac_rfkill_poll,
@@ -1941,7 +1939,7 @@ static const struct rt2x00_ops rt2500pci_ops = {
/*
* RT2500pci module information.
*/
-static struct pci_device_id rt2500pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt2500pci_device_table) = {
{ PCI_DEVICE(0x1814, 0x0201), PCI_DEVICE_DATA(&rt2500pci_ops) },
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index c6bd1fcae7e..d708031361a 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -76,6 +76,7 @@
* CSR0: ASIC revision number.
*/
#define CSR0 0x0000
+#define CSR0_REVISION FIELD32(0x0000ffff)
/*
* CSR1: System control register.
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 83f2592c59d..ee34c137e7c 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -565,8 +565,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E and RT5222 need to flip TX I/Q
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E) ||
- rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) {
rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1);
rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1);
rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1);
@@ -574,7 +573,7 @@ static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev,
/*
* RT2525E does not need RX I/Q Flip.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E))
+ if (rt2x00_rf(rt2x00dev, RF2525E))
rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0);
} else {
rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0);
@@ -598,7 +597,7 @@ static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev,
/*
* For RT2525E we should first set the channel to half band higher.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ if (rt2x00_rf(rt2x00dev, RF2525E)) {
static const u32 vals[] = {
0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2,
0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba,
@@ -793,7 +792,7 @@ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1);
rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg);
- if (rt2x00_rev(&rt2x00dev->chip) >= RT2570_VERSION_C) {
+ if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) {
rt2500usb_register_read(rt2x00dev, PHY_CSR2, &reg);
rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0);
} else {
@@ -1409,21 +1408,18 @@ static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2500usb_register_read(rt2x00dev, MAC_CSR0, &reg);
rt2x00_set_chip(rt2x00dev, RT2570, value, reg);
- rt2x00_print_chip(rt2x00dev);
-
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0) ||
- rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
+ if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
- if (!rt2x00_rf(&rt2x00dev->chip, RF2522) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2523) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2524) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2525E) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ if (!rt2x00_rf(rt2x00dev, RF2522) &&
+ !rt2x00_rf(rt2x00dev, RF2523) &&
+ !rt2x00_rf(rt2x00dev, RF2524) &&
+ !rt2x00_rf(rt2x00dev, RF2525) &&
+ !rt2x00_rf(rt2x00dev, RF2525E) &&
+ !rt2x00_rf(rt2x00dev, RF5222)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -1667,22 +1663,22 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2522)) {
+ if (rt2x00_rf(rt2x00dev, RF2522)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522);
spec->channels = rf_vals_bg_2522;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2523)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2523)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523);
spec->channels = rf_vals_bg_2523;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2524)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2524)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524);
spec->channels = rf_vals_bg_2524;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525);
spec->channels = rf_vals_bg_2525;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2525E)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2525E)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e);
spec->channels = rf_vals_bg_2525e;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5222)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5222)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5222);
spec->channels = rf_vals_5222;
@@ -1763,7 +1759,6 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2x00mac_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
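
All of the chipset tests above drop the explicit &rt2x00dev->chip argument; the accessors in rt2x00.h are presumably reduced to something like the following (a sketch under that assumption, not the exact upstream code):

	static inline bool rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
	{
		return (rt2x00dev->chip.rt == rt);
	}

	static inline bool rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
	{
		return (rt2x00dev->chip.rf == rf);
	}
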
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index 1a7eae357fe..74c0433dba3 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -60,11 +60,11 @@
/*
* Chipset version.
*/
-#define RT2860C_VERSION 0x28600100
-#define RT2860D_VERSION 0x28600101
-#define RT2880E_VERSION 0x28720200
-#define RT2883_VERSION 0x28830300
-#define RT3070_VERSION 0x30700200
+#define RT2860C_VERSION 0x0100
+#define RT2860D_VERSION 0x0101
+#define RT2880E_VERSION 0x0200
+#define RT2883_VERSION 0x0300
+#define RT3070_VERSION 0x0200
/*
* Signal information.
@@ -408,8 +408,8 @@
* ASIC_VER: 2860 or 2870
*/
#define MAC_CSR0 0x1000
-#define MAC_CSR0_ASIC_REV FIELD32(0x0000ffff)
-#define MAC_CSR0_ASIC_VER FIELD32(0xffff0000)
+#define MAC_CSR0_REVISION FIELD32(0x0000ffff)
+#define MAC_CSR0_CHIPSET FIELD32(0xffff0000)
/*
* MAC_SYS_CTRL:
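
With the rt2800.h changes above, MAC_CSR0 is treated as two 16-bit halves (chipset id in the upper half, revision in the lower half) and the *_VERSION constants shrink to chipset-relative 16-bit values. A small stand-alone sketch of how the register would now be split and compared against the shortened RT2860C_VERSION; the register value used here is invented for illustration:

    #include <stdio.h>
    #include <stdint.h>

    /* Masks mirror the new MAC_CSR0_CHIPSET / MAC_CSR0_REVISION fields. */
    #define MAC_CSR0_REVISION_MASK  0x0000ffffu
    #define MAC_CSR0_CHIPSET_MASK   0xffff0000u
    #define RT2860C_VERSION         0x0100  /* chipset-relative revision */

    int main(void)
    {
            uint32_t mac_csr0 = 0x28600100u;        /* example register value */
            uint16_t chipset  = (mac_csr0 & MAC_CSR0_CHIPSET_MASK) >> 16;
            uint16_t revision =  mac_csr0 & MAC_CSR0_REVISION_MASK;

            /* The old code compared the whole 32-bit value against 0x28600100;
             * the RT id and the 16-bit revision are now checked separately. */
            if (chipset == 0x2860 && revision == RT2860C_VERSION)
                    printf("RT2860C: rt=0x%04x rev=0x%04x\n", chipset, revision);
            return 0;
    }
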
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 9deae41cb78..18d4d8e4ae6 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -37,9 +37,12 @@
#include <linux/module.h>
#include "rt2x00.h"
-#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
+#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
#include "rt2x00usb.h"
#endif
+#if defined(CONFIG_RT2X00_LIB_PCI) || defined(CONFIG_RT2X00_LIB_PCI_MODULE)
+#include "rt2x00pci.h"
+#endif
#include "rt2800lib.h"
#include "rt2800.h"
#include "rt2800usb.h"
@@ -89,7 +92,7 @@ static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
- if (rt2x00_intf_is_pci(rt2x00dev))
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
@@ -118,7 +121,7 @@ static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
- if (rt2x00_intf_is_pci(rt2x00dev))
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
@@ -218,10 +221,9 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
u32 reg;
/*
- * RT2880 and RT3052 don't support MCU requests.
+ * SOC devices don't support MCU requests.
*/
- if (rt2x00_rt(&rt2x00dev->chip, RT2880) ||
- rt2x00_rt(&rt2x00dev->chip, RT3052))
+ if (rt2x00_is_soc(rt2x00dev))
return;
mutex_lock(&rt2x00dev->csr_mutex);
@@ -246,6 +248,25 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
}
EXPORT_SYMBOL_GPL(rt2800_mcu_request);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
+{
+ unsigned int i;
+ u32 reg;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
+ if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
+ !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
+ return 0;
+
+ msleep(1);
+ }
+
+ ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
+ return -EACCES;
+}
+EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready);
+
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
const struct rt2x00debug rt2800_rt2x00debug = {
.owner = THIS_MODULE,
@@ -348,7 +369,7 @@ static int rt2800_blink_set(struct led_classdev *led_cdev,
return 0;
}
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
+static void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
struct rt2x00_led *led, enum led_type type)
{
led->rt2x00dev = rt2x00dev;
@@ -357,7 +378,6 @@ void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
led->led_dev.blink_set = rt2800_blink_set;
led->flags = LED_INITIALIZED;
}
-EXPORT_SYMBOL_GPL(rt2800_init_led);
#endif /* CONFIG_RT2X00_LIB_LEDS */
/*
@@ -643,7 +663,7 @@ void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
switch ((int)ant->tx) {
case 1:
rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
- if (rt2x00_intf_is_pci(rt2x00dev))
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
break;
case 2:
@@ -806,12 +826,12 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
unsigned int tx_pin;
u8 bbp;
- if ((rt2x00_rt(&rt2x00dev->chip, RT3070) ||
- rt2x00_rt(&rt2x00dev->chip, RT3090)) &&
- (rt2x00_rf(&rt2x00dev->chip, RF2020) ||
- rt2x00_rf(&rt2x00dev->chip, RF3020) ||
- rt2x00_rf(&rt2x00dev->chip, RF3021) ||
- rt2x00_rf(&rt2x00dev->chip, RF3022)))
+ if ((rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3090)) &&
+ (rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022)))
rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);
else
rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
@@ -878,7 +898,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
rt2800_bbp_write(rt2x00dev, 3, bbp);
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ if (rt2x00_rt(rt2x00dev, RT2860) &&
+ (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
if (conf_is_ht40(conf)) {
rt2800_bbp_write(rt2x00dev, 69, 0x1a);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
@@ -1040,8 +1061,9 @@ EXPORT_SYMBOL_GPL(rt2800_link_stats);
static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
- if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) == RT3070_VERSION))
return 0x1c + (2 * rt2x00dev->lna_gain);
else
return 0x2e + rt2x00dev->lna_gain;
@@ -1072,7 +1094,8 @@ EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
const u32 count)
{
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
+ if (rt2x00_rt(rt2x00dev, RT2860) &&
+ (rt2x00_rev(rt2x00dev) == RT2860C_VERSION))
return;
/*
@@ -1092,7 +1115,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
u32 reg;
unsigned int i;
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
/*
* Wait until BBP and RF are ready.
*/
@@ -1111,7 +1134,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
reg & ~0x00002000);
- } else if (rt2x00_intf_is_pci(rt2x00dev))
+ } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);
rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
@@ -1119,9 +1142,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
-#if defined(CONFIG_RT2800USB) || defined(CONFIG_RT2800USB_MODULE)
+#if defined(CONFIG_RT2X00_LIB_USB) || defined(CONFIG_RT2X00_LIB_USB_MODULE)
rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
USB_MODE_RESET, REGISTER_TIMEOUT);
#endif
@@ -1157,8 +1180,9 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
- if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
@@ -1185,8 +1209,14 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
- if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
- rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
+ if ((rt2x00_rt(rt2x00dev, RT2872) &&
+ (rt2x00_rev(rt2x00dev) >= RT2880E_VERSION)) ||
+ rt2x00_rt(rt2x00dev, RT2880) ||
+ rt2x00_rt(rt2x00dev, RT2883) ||
+ rt2x00_rt(rt2x00dev, RT2890) ||
+ rt2x00_rt(rt2x00dev, RT3052) ||
+ (rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) < RT3070_VERSION)))
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
else
rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
@@ -1276,7 +1306,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
@@ -1336,7 +1366,7 @@ int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg);
rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg);
@@ -1465,22 +1495,25 @@ int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 103, 0x00);
rt2800_bbp_write(rt2x00dev, 105, 0x05);
- if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
+ if (rt2x00_rt(rt2x00dev, RT2860) &&
+ (rt2x00_rev(rt2x00dev) == RT2860C_VERSION)) {
rt2800_bbp_write(rt2x00dev, 69, 0x16);
rt2800_bbp_write(rt2x00dev, 73, 0x12);
}
- if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION)
+ if (rt2x00_rt(rt2x00dev, RT2860) &&
+ (rt2x00_rev(rt2x00dev) > RT2860D_VERSION))
rt2800_bbp_write(rt2x00dev, 84, 0x19);
- if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) == RT3070_VERSION)) {
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
rt2800_bbp_write(rt2x00dev, 84, 0x99);
rt2800_bbp_write(rt2x00dev, 105, 0x05);
}
- if (rt2x00_rt(&rt2x00dev->chip, RT3052)) {
+ if (rt2x00_rt(rt2x00dev, RT3052)) {
rt2800_bbp_write(rt2x00dev, 31, 0x08);
rt2800_bbp_write(rt2x00dev, 78, 0x0e);
rt2800_bbp_write(rt2x00dev, 80, 0x08);
@@ -1565,14 +1598,15 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
u8 rfcsr;
u8 bbp;
- if (rt2x00_intf_is_usb(rt2x00dev) &&
- rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
+ if (rt2x00_is_usb(rt2x00dev) &&
+ rt2x00_rt(rt2x00dev, RT3070) &&
+ (rt2x00_rev(rt2x00dev) != RT3070_VERSION))
return 0;
- if (rt2x00_intf_is_pci(rt2x00dev)) {
- if (!rt2x00_rf(&rt2x00dev->chip, RF3020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3022))
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
+ if (!rt2x00_rf(rt2x00dev, RF3020) &&
+ !rt2x00_rf(rt2x00dev, RF3021) &&
+ !rt2x00_rf(rt2x00dev, RF3022))
return 0;
}
@@ -1586,7 +1620,7 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
- if (rt2x00_intf_is_usb(rt2x00dev)) {
+ if (rt2x00_is_usb(rt2x00dev)) {
rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
@@ -1607,7 +1641,7 @@ int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
- } else if (rt2x00_intf_is_pci(rt2x00dev)) {
+ } else if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev)) {
rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
@@ -1737,7 +1771,12 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
- } else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
+ } else if (rt2x00_rt(rt2x00dev, RT2860) ||
+ rt2x00_rt(rt2x00dev, RT2870) ||
+ rt2x00_rt(rt2x00dev, RT2872) ||
+ rt2x00_rt(rt2x00dev, RT2880) ||
+ (rt2x00_rt(rt2x00dev, RT2883) &&
+ (rt2x00_rev(rt2x00dev) < RT2883_VERSION))) {
/*
* There is a max of 2 RX streams for RT28x0 series
*/
@@ -1836,36 +1875,34 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
- rt2x00_set_chip_rf(rt2x00dev, value, reg);
-
- if (rt2x00_intf_is_usb(rt2x00dev)) {
- struct rt2x00_chip *chip = &rt2x00dev->chip;
-
- /*
- * The check for rt2860 is not a typo, some rt2870 hardware
- * identifies itself as rt2860 in the CSR register.
- */
- if (rt2x00_check_rev(chip, 0xfff00000, 0x28600000) ||
- rt2x00_check_rev(chip, 0xfff00000, 0x28700000) ||
- rt2x00_check_rev(chip, 0xfff00000, 0x28800000)) {
- rt2x00_set_chip_rt(rt2x00dev, RT2870);
- } else if (rt2x00_check_rev(chip, 0xffff0000, 0x30700000)) {
- rt2x00_set_chip_rt(rt2x00dev, RT3070);
- } else {
- ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
- return -ENODEV;
- }
+ rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
+ value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
+
+ if (!rt2x00_rt(rt2x00dev, RT2860) &&
+ !rt2x00_rt(rt2x00dev, RT2870) &&
+ !rt2x00_rt(rt2x00dev, RT2872) &&
+ !rt2x00_rt(rt2x00dev, RT2880) &&
+ !rt2x00_rt(rt2x00dev, RT2883) &&
+ !rt2x00_rt(rt2x00dev, RT2890) &&
+ !rt2x00_rt(rt2x00dev, RT3052) &&
+ !rt2x00_rt(rt2x00dev, RT3070) &&
+ !rt2x00_rt(rt2x00dev, RT3071) &&
+ !rt2x00_rt(rt2x00dev, RT3090) &&
+ !rt2x00_rt(rt2x00dev, RT3390) &&
+ !rt2x00_rt(rt2x00dev, RT3572)) {
+ ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
+ return -ENODEV;
}
- rt2x00_print_chip(rt2x00dev);
-
- if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2020) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
- !rt2x00_rf(&rt2x00dev->chip, RF3022)) {
+
+ if (!rt2x00_rf(rt2x00dev, RF2820) &&
+ !rt2x00_rf(rt2x00dev, RF2850) &&
+ !rt2x00_rf(rt2x00dev, RF2720) &&
+ !rt2x00_rf(rt2x00dev, RF2750) &&
+ !rt2x00_rf(rt2x00dev, RF3020) &&
+ !rt2x00_rf(rt2x00dev, RF2020) &&
+ !rt2x00_rf(rt2x00dev, RF3021) &&
+ !rt2x00_rf(rt2x00dev, RF3022) &&
+ !rt2x00_rf(rt2x00dev, RF3052)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2013,7 +2050,6 @@ static const struct rf_channel rf_vals_302x[] = {
int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
{
- struct rt2x00_chip *chip = &rt2x00dev->chip;
struct hw_mode_spec *spec = &rt2x00dev->spec;
struct channel_info *info;
char *tx_power1;
@@ -2024,7 +2060,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Disable powersaving as default on PCI devices.
*/
- if (rt2x00_intf_is_pci(rt2x00dev))
+ if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
@@ -2049,19 +2085,19 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(chip, RF2820) ||
- rt2x00_rf(chip, RF2720) ||
- (rt2x00_intf_is_pci(rt2x00dev) && rt2x00_rf(chip, RF3052))) {
+ if (rt2x00_rf(rt2x00dev, RF2820) ||
+ rt2x00_rf(rt2x00dev, RF2720) ||
+ rt2x00_rf(rt2x00dev, RF3052)) {
spec->num_channels = 14;
spec->channels = rf_vals;
- } else if (rt2x00_rf(chip, RF2850) || rt2x00_rf(chip, RF2750)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2850) || rt2x00_rf(rt2x00dev, RF2750)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals);
spec->channels = rf_vals;
- } else if (rt2x00_rf(chip, RF3020) ||
- rt2x00_rf(chip, RF2020) ||
- rt2x00_rf(chip, RF3021) ||
- rt2x00_rf(chip, RF3022)) {
+ } else if (rt2x00_rf(rt2x00dev, RF3020) ||
+ rt2x00_rf(rt2x00dev, RF2020) ||
+ rt2x00_rf(rt2x00dev, RF3021) ||
+ rt2x00_rf(rt2x00dev, RF3022)) {
spec->num_channels = ARRAY_SIZE(rf_vals_302x);
spec->channels = rf_vals_302x;
}
@@ -2069,7 +2105,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
/*
* Initialize HT information.
*/
- if (!rt2x00_rf(chip, RF2020))
+ if (!rt2x00_rf(rt2x00dev, RF2020))
spec->ht.ht_supported = true;
else
spec->ht.ht_supported = false;
@@ -2282,7 +2318,6 @@ const struct ieee80211_ops rt2800_mac80211_ops = {
.set_rts_threshold = rt2800_set_rts_threshold,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt2800_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt2800_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 535ce22f2ac..ebabeae62d1 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -114,8 +114,6 @@ void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
extern const struct rt2x00debug rt2800_rt2x00debug;
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
-void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
- struct rt2x00_led *led, enum led_type type);
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
struct rt2x00lib_crypto *crypto,
struct ieee80211_key_conf *key);
@@ -139,6 +137,7 @@ void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);
+int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev);
int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev);
void rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index dfc886fcb44..aca8c124f43 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -48,14 +48,6 @@
#include "rt2800.h"
#include "rt2800pci.h"
-#ifdef CONFIG_RT2800PCI_PCI_MODULE
-#define CONFIG_RT2800PCI_PCI
-#endif
-
-#ifdef CONFIG_RT2800PCI_WISOC_MODULE
-#define CONFIG_RT2800PCI_WISOC
-#endif
-
/*
* Allow hardware encryption to be disabled.
*/
@@ -87,7 +79,7 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
}
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */
@@ -98,7 +90,7 @@ static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
}
-#endif /* CONFIG_RT2800PCI_WISOC */
+#endif /* CONFIG_RT2800PCI_SOC */
#ifdef CONFIG_RT2800PCI_PCI
static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
@@ -461,24 +453,6 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
}
-static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -487,10 +461,10 @@ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800pci_init_queues(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
- rt2800pci_wait_wpdma_ready(rt2x00dev) ||
+ rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
return -EIO;
@@ -570,7 +544,7 @@ static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);
/* Wait for DMA, ignore error */
- rt2800pci_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
}
static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
@@ -835,7 +809,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
struct rxdone_entry_desc *rxdesc)
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
struct queue_entry_priv_pci *entry_priv = entry->priv_data;
__le32 *rxd = entry_priv->desc;
__le32 *rxwi = (__le32 *)entry->skb->data;
@@ -883,10 +856,8 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) {
+ if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -927,7 +898,6 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry,
* Remove TXWI descriptor from start of buffer.
*/
skb_pull(entry->skb, RXWI_DESC_SIZE);
- skb_trim(entry->skb, rxdesc->size);
}
/*
@@ -1071,18 +1041,12 @@ static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
/*
* Read EEPROM into buffer
*/
- switch (rt2x00dev->chip.rt) {
- case RT2880:
- case RT3052:
+ if (rt2x00_is_soc(rt2x00dev))
rt2800pci_read_eeprom_soc(rt2x00dev);
- break;
- default:
- if (rt2800pci_efuse_detect(rt2x00dev))
- rt2800pci_read_eeprom_efuse(rt2x00dev);
- else
- rt2800pci_read_eeprom_pci(rt2x00dev);
- break;
- }
+ else if (rt2800pci_efuse_detect(rt2x00dev))
+ rt2800pci_read_eeprom_efuse(rt2x00dev);
+ else
+ rt2800pci_read_eeprom_pci(rt2x00dev);
return rt2800_validate_eeprom(rt2x00dev);
}
@@ -1133,8 +1097,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
/*
* This device requires firmware.
*/
- if (!rt2x00_rt(&rt2x00dev->chip, RT2880) &&
- !rt2x00_rt(&rt2x00dev->chip, RT3052))
+ if (!rt2x00_is_soc(rt2x00dev))
__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
@@ -1221,8 +1184,11 @@ static const struct rt2x00_ops rt2800pci_ops = {
/*
* RT2800pci module information.
*/
-static struct pci_device_id rt2800pci_device_table[] = {
- { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
+static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
+ { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1230,18 +1196,19 @@ static struct pci_device_id rt2800pci_device_table[] = {
{ PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#ifdef CONFIG_RT2800PCI_RT30XX
{ PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#endif
+#ifdef CONFIG_RT2800PCI_RT35XX
+ { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
+ { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
- { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
+#endif
{ 0, }
};
@@ -1255,12 +1222,11 @@ MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
#endif /* CONFIG_RT2800PCI_PCI */
MODULE_LICENSE("GPL");
-#ifdef CONFIG_RT2800PCI_WISOC
-#if defined(CONFIG_RALINK_RT288X)
-__rt2x00soc_probe(RT2880, &rt2800pci_ops);
-#elif defined(CONFIG_RALINK_RT305X)
-__rt2x00soc_probe(RT3052, &rt2800pci_ops);
-#endif
+#ifdef CONFIG_RT2800PCI_SOC
+static int rt2800soc_probe(struct platform_device *pdev)
+{
+ return rt2x00soc_probe(pdev, rt2800pci_ops);
+}
static struct platform_driver rt2800soc_driver = {
.driver = {
@@ -1268,12 +1234,12 @@ static struct platform_driver rt2800soc_driver = {
.owner = THIS_MODULE,
.mod_name = KBUILD_MODNAME,
},
- .probe = __rt2x00soc_probe,
+ .probe = rt2800soc_probe,
.remove = __devexit_p(rt2x00soc_remove),
.suspend = rt2x00soc_suspend,
.resume = rt2x00soc_resume,
};
-#endif /* CONFIG_RT2800PCI_WISOC */
+#endif /* CONFIG_RT2800PCI_SOC */
#ifdef CONFIG_RT2800PCI_PCI
static struct pci_driver rt2800pci_driver = {
@@ -1290,7 +1256,7 @@ static int __init rt2800pci_init(void)
{
int ret = 0;
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
ret = platform_driver_register(&rt2800soc_driver);
if (ret)
return ret;
@@ -1298,7 +1264,7 @@ static int __init rt2800pci_init(void)
#ifdef CONFIG_RT2800PCI_PCI
ret = pci_register_driver(&rt2800pci_driver);
if (ret) {
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
platform_driver_unregister(&rt2800soc_driver);
#endif
return ret;
@@ -1313,7 +1279,7 @@ static void __exit rt2800pci_exit(void)
#ifdef CONFIG_RT2800PCI_PCI
pci_unregister_driver(&rt2800pci_driver);
#endif
-#ifdef CONFIG_RT2800PCI_WISOC
+#ifdef CONFIG_RT2800PCI_SOC
platform_driver_unregister(&rt2800soc_driver);
#endif
}
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index ab95346cf6a..5e4ee2023fc 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -92,7 +92,6 @@ static bool rt2800usb_check_crc(const u8 *data, const size_t len)
static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
const u8 *data, const size_t len)
{
- u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
size_t offset = 0;
/*
@@ -111,9 +110,9 @@ static int rt2800usb_check_firmware(struct rt2x00_dev *rt2x00dev,
* Check if we need the upper 4kb firmware data or not.
*/
if ((len == 4096) &&
- (chipset != 0x2860) &&
- (chipset != 0x2872) &&
- (chipset != 0x3070))
+ !rt2x00_rt(rt2x00dev, RT2860) &&
+ !rt2x00_rt(rt2x00dev, RT2872) &&
+ !rt2x00_rt(rt2x00dev, RT3070))
return FW_BAD_VERSION;
/*
@@ -138,14 +137,13 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
u32 reg;
u32 offset;
u32 length;
- u16 chipset = (rt2x00_rev(&rt2x00dev->chip) >> 16) & 0xffff;
/*
* Check which section of the firmware we need.
*/
- if ((chipset == 0x2860) ||
- (chipset == 0x2872) ||
- (chipset == 0x3070)) {
+ if (rt2x00_rt(rt2x00dev, RT2860) ||
+ rt2x00_rt(rt2x00dev, RT2872) ||
+ rt2x00_rt(rt2x00dev, RT3070)) {
offset = 0;
length = 4096;
} else {
@@ -200,9 +198,9 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
*/
rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
- if ((chipset == 0x3070) ||
- (chipset == 0x3071) ||
- (chipset == 0x3572)) {
+ if (rt2x00_rt(rt2x00dev, RT3070) ||
+ rt2x00_rt(rt2x00dev, RT3071) ||
+ rt2x00_rt(rt2x00dev, RT3572)) {
udelay(200);
rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
udelay(10);
@@ -248,24 +246,6 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
}
-static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
-{
- unsigned int i;
- u32 reg;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
- if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
- !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
- return 0;
-
- msleep(1);
- }
-
- ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
- return -EACCES;
-}
-
static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
{
u32 reg;
@@ -274,7 +254,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
/*
* Initialize all registers.
*/
- if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
+ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) ||
rt2800_init_registers(rt2x00dev) ||
rt2800_init_bbp(rt2x00dev) ||
rt2800_init_rfcsr(rt2x00dev)))
@@ -295,9 +275,7 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
- /* Don't use bulk in aggregation when working with USB 1.1 */
- rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
- (rt2x00dev->rx->usb_maxpacket == 512));
+ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0);
rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128);
/*
* Total room for RX frames in kilobytes, PBF might still exceed
@@ -346,7 +324,7 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
/* Wait for DMA, ignore error */
- rt2800usb_wait_wpdma_ready(rt2x00dev);
+ rt2800_wait_wpdma_ready(rt2x00dev);
rt2x00usb_disable_radio(rt2x00dev);
}
@@ -573,41 +551,57 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
{
struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
- __le32 *rxd = (__le32 *)entry->skb->data;
+ __le32 *rxi = (__le32 *)entry->skb->data;
__le32 *rxwi;
- u32 rxd0;
+ __le32 *rxd;
+ u32 rxi0;
u32 rxwi0;
u32 rxwi1;
u32 rxwi2;
u32 rxwi3;
+ u32 rxd0;
+ int rx_pkt_len;
+
+ /*
+ * RX frame format is :
+ * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad |
+ * |<------------ rx_pkt_len -------------->|
+ */
+ rt2x00_desc_read(rxi, 0, &rxi0);
+ rx_pkt_len = rt2x00_get_field32(rxi0, RXINFO_W0_USB_DMA_RX_PKT_LEN);
+
+ rxwi = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE);
+
+ /*
+ * FIXME : we need to check for rx_pkt_len validity
+ */
+ rxd = (__le32 *)(entry->skb->data + RXINFO_DESC_SIZE + rx_pkt_len);
/*
* Copy descriptor to the skbdesc->desc buffer, making it safe from
* moving of frame data in rt2x00usb.
*/
- memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
- rxd = (__le32 *)skbdesc->desc;
- rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
+ memcpy(skbdesc->desc, rxi, skbdesc->desc_len);
/*
* It is now safe to read the descriptor on all architectures.
*/
- rt2x00_desc_read(rxd, 0, &rxd0);
rt2x00_desc_read(rxwi, 0, &rxwi0);
rt2x00_desc_read(rxwi, 1, &rxwi1);
rt2x00_desc_read(rxwi, 2, &rxwi2);
rt2x00_desc_read(rxwi, 3, &rxwi3);
+ rt2x00_desc_read(rxd, 0, &rxd0);
- if (rt2x00_get_field32(rxd0, RXINFO_W0_CRC_ERROR))
+ if (rt2x00_get_field32(rxd0, RXD_W0_CRC_ERROR))
rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
rxdesc->cipher_status =
- rt2x00_get_field32(rxd0, RXINFO_W0_CIPHER_ERROR);
+ rt2x00_get_field32(rxd0, RXD_W0_CIPHER_ERROR);
}
- if (rt2x00_get_field32(rxd0, RXINFO_W0_DECRYPTED)) {
+ if (rt2x00_get_field32(rxd0, RXD_W0_DECRYPTED)) {
/*
* Hardware has stripped IV/EIV data from 802.11 frame during
* decryption. Unfortunately the descriptor doesn't contain
@@ -622,13 +616,11 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
rxdesc->flags |= RX_FLAG_MMIC_ERROR;
}
- if (rt2x00_get_field32(rxd0, RXINFO_W0_MY_BSS))
+ if (rt2x00_get_field32(rxd0, RXD_W0_MY_BSS))
rxdesc->dev_flags |= RXDONE_MY_BSS;
- if (rt2x00_get_field32(rxd0, RXINFO_W0_L2PAD)) {
+ if (rt2x00_get_field32(rxd0, RXD_W0_L2PAD))
rxdesc->dev_flags |= RXDONE_L2PAD;
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
rxdesc->flags |= RX_FLAG_SHORT_GI;
@@ -663,7 +655,6 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
* Remove RXWI descriptor from start of buffer.
*/
skb_pull(entry->skb, skbdesc->desc_len);
- skb_trim(entry->skb, rxdesc->size);
}
/*
@@ -814,51 +805,27 @@ static struct usb_device_id rt2800usb_device_table[] = {
/* Abocom */
{ USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* AirTies */
- { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Amigo */
- { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Amit */
{ USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Askey */
{ USB_DEVICE(0x1690, 0x0740), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
/* ASUS */
{ USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
/* AzureWave */
{ USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Belkin */
{ USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x050d, 0x815c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00e8), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Cisco */
- { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Conceptronic */
{ USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -867,157 +834,257 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
/* D-Link */
{ USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Edimax */
+ { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* EnGenius */
+ { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Gigabyte */
+ { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Hawking */
+ { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Linksys */
+ { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Logitec */
+ { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Motorola */
+ { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* MSI */
+ { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Philips */
+ { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Planex */
+ { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Samsung */
+ { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Siemens */
+ { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* SMC */
+ { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sparklan */
+ { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sweex */
+ { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* U-Media*/
+ { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* ZCOM */
+ { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zinwell */
+ { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zyxel */
+ { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
+#ifdef CONFIG_RT2800USB_RT30XX
+ /* Abocom */
+ { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* AirTies */
+ { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* AzureWave */
+ { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Conceptronic */
+ { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Corega */
+ { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* D-Link */
{ USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Edimax */
{ USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Encore */
{ USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
/* EnGenius */
- { USB_DEVICE(0X1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Gigabyte */
+ { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* I-O DATA */
+ { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* MSI */
+ { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Pegatron */
+ { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Planex */
+ { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Quanta */
+ { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* SMC */
+ { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zinwell */
+ { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
+#endif
+#ifdef CONFIG_RT2800USB_RT35XX
+ /* Askey */
+ { USB_DEVICE(0x1690, 0x0744), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Cisco */
+ { USB_DEVICE(0x167b, 0x4001), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* EnGenius */
+ { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* I-O DATA */
+ { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Ralink */
+ { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Sitecom */
+ { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Zinwell */
+ { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
+#endif
+#ifdef CONFIG_RT2800USB_UNKNOWN
+ /*
+ * Unclear what kind of devices these are (they aren't supported by the
+ * vendor driver).
+ */
+ /* Allwin */
+ { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x8516, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Amigo */
+ { USB_DEVICE(0x0e0b, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0e0b, 0x9041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Askey */
+ { USB_DEVICE(0x0930, 0x0a07), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* ASUS */
+ { USB_DEVICE(0x0b05, 0x1760), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1761), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0b05, 0x1790), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1761, 0x0b05), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* AzureWave */
+ { USB_DEVICE(0x13d3, 0x3262), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3284), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x825a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Buffalo */
+ { USB_DEVICE(0x0411, 0x012e), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0411, 0x0148), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0411, 0x0150), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0411, 0x015d), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Conceptronic */
+ { USB_DEVICE(0x14b2, 0x3c08), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x14b2, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Corega */
+ { USB_DEVICE(0x07aa, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07aa, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x18c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* D-Link */
+ { USB_DEVICE(0x07d1, 0x3c0b), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c13), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c15), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* Encore */
+ { USB_DEVICE(0x203d, 0x14a1), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
+ /* EnGenius */
{ USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1740, 0x9801), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gemtek */
{ USB_DEVICE(0x15a9, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Gigabyte */
- { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1044, 0x800c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Hawking */
- { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x0009), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0e66, 0x000b), USB_DEVICE_DATA(&rt2800usb_ops) },
/* I-O DATA */
- { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
/* LevelOne */
{ USB_DEVICE(0x1740, 0x0605), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1740, 0x0615), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Linksys */
- { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0077), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1737, 0x0078), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1737, 0x0079), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Logitec */
- { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Motorola */
- { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x100d, 0x9032), USB_DEVICE_DATA(&rt2800usb_ops) },
/* MSI */
- { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Ovislink */
{ USB_DEVICE(0x1b75, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Para */
{ USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Pegatron */
+ { USB_DEVICE(0x05a6, 0x0101), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x1d4d, 0x0002), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Philips */
- { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x1d4d, 0x0010), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Planex */
- { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x2019, 0xab24), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Qcom */
{ USB_DEVICE(0x18e8, 0x6259), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Quanta */
- { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Ralink */
- { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Samsung */
- { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Siemens */
- { USB_DEVICE(0x129b, 0x1828), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sitecom */
- { USB_DEVICE(0x0df6, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x002b), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x002c), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x002d), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x004a), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0df6, 0x004d), USB_DEVICE_DATA(&rt2800usb_ops) },
/* SMC */
- { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xa512), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x083a, 0xc522), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Sparklan */
- { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
+ { USB_DEVICE(0x083a, 0xd522), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Sweex */
{ USB_DEVICE(0x177f, 0x0153), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x177f, 0x0302), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x177f, 0x0313), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* U-Media*/
- { USB_DEVICE(0x157e, 0x300e), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* ZCOM */
- { USB_DEVICE(0x0cde, 0x0022), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x0cde, 0x0025), USB_DEVICE_DATA(&rt2800usb_ops) },
- /* Zinwell */
- { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
- { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
/* Zyxel */
- { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
{ USB_DEVICE(0x0586, 0x341a), USB_DEVICE_DATA(&rt2800usb_ops) },
+#endif
{ 0, }
};
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 1e4340a182e..d1d8ae94b4d 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -79,6 +79,8 @@
*/
#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
+#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
+#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
/*
* TX Info structure
@@ -101,6 +103,54 @@
#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
/*
+ * RX Info structure
+ */
+
+/*
+ * Word 0
+ */
+
+#define RXINFO_W0_USB_DMA_RX_PKT_LEN FIELD32(0x0000ffff)
+
+/*
+ * RX WI structure
+ */
+
+/*
+ * Word0
+ */
+#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
+#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
+#define RXWI_W0_BSSID FIELD32(0x00001c00)
+#define RXWI_W0_UDF FIELD32(0x0000e000)
+#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
+#define RXWI_W0_TID FIELD32(0xf0000000)
+
+/*
+ * Word1
+ */
+#define RXWI_W1_FRAG FIELD32(0x0000000f)
+#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
+#define RXWI_W1_MCS FIELD32(0x007f0000)
+#define RXWI_W1_BW FIELD32(0x00800000)
+#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
+#define RXWI_W1_STBC FIELD32(0x06000000)
+#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
+
+/*
+ * Word2
+ */
+#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
+#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
+#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
+
+/*
+ * Word3
+ */
+#define RXWI_W3_SNR0 FIELD32(0x000000ff)
+#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
+
+/*
* RX descriptor format for RX Ring.
*/
@@ -115,25 +165,25 @@
* AMSDU: rx with 802.3 header, not 802.11 header.
*/
-#define RXINFO_W0_BA FIELD32(0x00000001)
-#define RXINFO_W0_DATA FIELD32(0x00000002)
-#define RXINFO_W0_NULLDATA FIELD32(0x00000004)
-#define RXINFO_W0_FRAG FIELD32(0x00000008)
-#define RXINFO_W0_UNICAST_TO_ME FIELD32(0x00000010)
-#define RXINFO_W0_MULTICAST FIELD32(0x00000020)
-#define RXINFO_W0_BROADCAST FIELD32(0x00000040)
-#define RXINFO_W0_MY_BSS FIELD32(0x00000080)
-#define RXINFO_W0_CRC_ERROR FIELD32(0x00000100)
-#define RXINFO_W0_CIPHER_ERROR FIELD32(0x00000600)
-#define RXINFO_W0_AMSDU FIELD32(0x00000800)
-#define RXINFO_W0_HTC FIELD32(0x00001000)
-#define RXINFO_W0_RSSI FIELD32(0x00002000)
-#define RXINFO_W0_L2PAD FIELD32(0x00004000)
-#define RXINFO_W0_AMPDU FIELD32(0x00008000)
-#define RXINFO_W0_DECRYPTED FIELD32(0x00010000)
-#define RXINFO_W0_PLCP_RSSI FIELD32(0x00020000)
-#define RXINFO_W0_CIPHER_ALG FIELD32(0x00040000)
-#define RXINFO_W0_LAST_AMSDU FIELD32(0x00080000)
-#define RXINFO_W0_PLCP_SIGNAL FIELD32(0xfff00000)
+#define RXD_W0_BA FIELD32(0x00000001)
+#define RXD_W0_DATA FIELD32(0x00000002)
+#define RXD_W0_NULLDATA FIELD32(0x00000004)
+#define RXD_W0_FRAG FIELD32(0x00000008)
+#define RXD_W0_UNICAST_TO_ME FIELD32(0x00000010)
+#define RXD_W0_MULTICAST FIELD32(0x00000020)
+#define RXD_W0_BROADCAST FIELD32(0x00000040)
+#define RXD_W0_MY_BSS FIELD32(0x00000080)
+#define RXD_W0_CRC_ERROR FIELD32(0x00000100)
+#define RXD_W0_CIPHER_ERROR FIELD32(0x00000600)
+#define RXD_W0_AMSDU FIELD32(0x00000800)
+#define RXD_W0_HTC FIELD32(0x00001000)
+#define RXD_W0_RSSI FIELD32(0x00002000)
+#define RXD_W0_L2PAD FIELD32(0x00004000)
+#define RXD_W0_AMPDU FIELD32(0x00008000)
+#define RXD_W0_DECRYPTED FIELD32(0x00010000)
+#define RXD_W0_PLCP_RSSI FIELD32(0x00020000)
+#define RXD_W0_CIPHER_ALG FIELD32(0x00040000)
+#define RXD_W0_LAST_AMSDU FIELD32(0x00080000)
+#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000)
#endif /* RT2800USB_H */
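
The rt2800usb_fill_rxdone() change above assumes the receive layout described in its comment: an RXINFO word precedes the RXWI, the RXD word trails the payload, and RXINFO_W0_USB_DMA_RX_PKT_LEN gives the byte count between the two. A rough little-endian user-space sketch of that pointer arithmetic, using the sizes defined above and an invented buffer:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Sizes and mask follow the rt2800usb.h hunk above. */
    #define RXINFO_DESC_SIZE  (1 * sizeof(uint32_t))
    #define RXWI_DESC_SIZE    (4 * sizeof(uint32_t))
    #define RX_PKT_LEN_MASK   0x0000ffffu   /* RXINFO_W0_USB_DMA_RX_PKT_LEN */

    int main(void)
    {
            /* Invented frame: | RXINFO | RXWI | 24-byte 802.11 data | RXD | */
            uint8_t buf[64] = { 0 };
            uint32_t pkt_len = RXWI_DESC_SIZE + 24; /* RXWI through padding */
            uint32_t rxi0, rx_pkt_len;
            const uint8_t *rxwi, *rxd;

            memcpy(buf, &pkt_len, sizeof(pkt_len)); /* RXINFO word 0 */

            memcpy(&rxi0, buf, sizeof(rxi0));
            rx_pkt_len = rxi0 & RX_PKT_LEN_MASK;
            rxwi = buf + RXINFO_DESC_SIZE;
            rxd  = buf + RXINFO_DESC_SIZE + rx_pkt_len;

            printf("rx_pkt_len=%u, rxwi at offset %zu, rxd at offset %zu\n",
                   (unsigned)rx_pkt_len, (size_t)(rxwi - buf), (size_t)(rxd - buf));
            return 0;
    }
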
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index dcfc8c25d1a..d9daa9c406f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -104,6 +104,12 @@
#define GET_DURATION_RES(__size, __rate)(((__size) * 8 * 10) % (__rate))
/*
+ * Determine the number of L2 padding bytes required between the header and
+ * the payload.
+ */
+#define L2PAD_SIZE(__hdrlen) (-(__hdrlen) & 3)
+
+/*
* Determine the alignment requirement,
 * to make sure the 802.11 payload is padded to a 4-byte boundary
* we must determine the address of the payload and calculate the
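
The new L2PAD_SIZE() macro above relies on a two's complement identity: -(hdrlen) & 3 equals the number of bytes needed to round hdrlen up to the next multiple of four, i.e. (4 - hdrlen % 4) % 4. A quick stand-alone check over common 802.11 header lengths:

    #include <stdio.h>

    /* Same expression as the rt2x00.h macro above. */
    #define L2PAD_SIZE(__hdrlen)    (-(__hdrlen) & 3)

    int main(void)
    {
            /* 24: 3-address, 26: QoS, 30: 4-address, 32: 4-address QoS. */
            int hdrlens[] = { 24, 26, 30, 32 };
            unsigned i;

            for (i = 0; i < sizeof(hdrlens) / sizeof(hdrlens[0]); i++)
                    printf("hdrlen=%d -> L2 pad=%d bytes\n",
                           hdrlens[i], L2PAD_SIZE(hdrlens[i]));
            return 0;
    }
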
@@ -154,6 +160,7 @@ struct avg_val {
enum rt2x00_chip_intf {
RT2X00_CHIP_INTF_PCI,
RT2X00_CHIP_INTF_USB,
+ RT2X00_CHIP_INTF_SOC,
};
/*
@@ -163,25 +170,26 @@ enum rt2x00_chip_intf {
*/
struct rt2x00_chip {
u16 rt;
-#define RT2460 0x0101
-#define RT2560 0x0201
-#define RT2570 0x1201
-#define RT2561s 0x0301 /* Turbo */
-#define RT2561 0x0302
-#define RT2661 0x0401
-#define RT2571 0x1300
-#define RT2860 0x0601 /* 2.4GHz PCI/CB */
-#define RT2860D 0x0681 /* 2.4GHz, 5GHz PCI/CB */
-#define RT2890 0x0701 /* 2.4GHz PCIe */
-#define RT2890D 0x0781 /* 2.4GHz, 5GHz PCIe */
+#define RT2460 0x2460
+#define RT2560 0x2560
+#define RT2570 0x2570
+#define RT2661 0x2661
+#define RT2573 0x2573
+#define RT2860 0x2860 /* 2.4GHz PCI/CB */
+#define RT2870 0x2870
+#define RT2872 0x2872
#define RT2880 0x2880 /* WSOC */
+#define RT2883 0x2883 /* WSOC */
+#define RT2890 0x2890 /* 2.4GHz PCIe */
#define RT3052 0x3052 /* WSOC */
+#define RT3070 0x3070
+#define RT3071 0x3071
#define RT3090 0x3090 /* 2.4GHz PCIe */
-#define RT2870 0x1600
-#define RT3070 0x1800
+#define RT3390 0x3390
+#define RT3572 0x3572
u16 rf;
- u32 rev;
+ u16 rev;
enum rt2x00_chip_intf intf;
};
@@ -911,51 +919,30 @@ static inline void rt2x00_eeprom_write(struct rt2x00_dev *rt2x00dev,
* Chipset handlers
*/
static inline void rt2x00_set_chip(struct rt2x00_dev *rt2x00dev,
- const u16 rt, const u16 rf, const u32 rev)
+ const u16 rt, const u16 rf, const u16 rev)
{
rt2x00dev->chip.rt = rt;
rt2x00dev->chip.rf = rf;
rt2x00dev->chip.rev = rev;
-}
-
-static inline void rt2x00_set_chip_rt(struct rt2x00_dev *rt2x00dev,
- const u16 rt)
-{
- rt2x00dev->chip.rt = rt;
-}
-
-static inline void rt2x00_set_chip_rf(struct rt2x00_dev *rt2x00dev,
- const u16 rf, const u32 rev)
-{
- rt2x00_set_chip(rt2x00dev, rt2x00dev->chip.rt, rf, rev);
-}
-static inline void rt2x00_print_chip(struct rt2x00_dev *rt2x00dev)
-{
INFO(rt2x00dev,
- "Chipset detected - rt: %04x, rf: %04x, rev: %08x.\n",
+ "Chipset detected - rt: %04x, rf: %04x, rev: %04x.\n",
rt2x00dev->chip.rt, rt2x00dev->chip.rf, rt2x00dev->chip.rev);
}
-static inline char rt2x00_rt(const struct rt2x00_chip *chipset, const u16 chip)
+static inline char rt2x00_rt(struct rt2x00_dev *rt2x00dev, const u16 rt)
{
- return (chipset->rt == chip);
+ return (rt2x00dev->chip.rt == rt);
}
-static inline char rt2x00_rf(const struct rt2x00_chip *chipset, const u16 chip)
+static inline char rt2x00_rf(struct rt2x00_dev *rt2x00dev, const u16 rf)
{
- return (chipset->rf == chip);
+ return (rt2x00dev->chip.rf == rf);
}
-static inline u32 rt2x00_rev(const struct rt2x00_chip *chipset)
+static inline u16 rt2x00_rev(struct rt2x00_dev *rt2x00dev)
{
- return chipset->rev;
-}
-
-static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset,
- const u32 mask, const u32 rev)
-{
- return ((chipset->rev & mask) == rev);
+ return rt2x00dev->chip.rev;
}
static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
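
For orientation, a hypothetical probe fragment showing the reworked call pattern (it mirrors the rt73usb_init_eeprom() hunk later in this patch; MAC_CSR0_CHIPSET and MAC_CSR0_REVISION are the register fields also added below): chip identification and the INFO print now happen in a single rt2x00_set_chip() call, and every query takes the rt2x00_dev pointer instead of &rt2x00dev->chip.

	rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
			value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));

	if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0))
		return -ENODEV;
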
@@ -964,20 +951,25 @@ static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
rt2x00dev->chip.intf = intf;
}
-static inline bool rt2x00_intf(const struct rt2x00_chip *chipset,
+static inline bool rt2x00_intf(struct rt2x00_dev *rt2x00dev,
enum rt2x00_chip_intf intf)
{
- return (chipset->intf == intf);
+ return (rt2x00dev->chip.intf == intf);
+}
+
+static inline bool rt2x00_is_pci(struct rt2x00_dev *rt2x00dev)
+{
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
}
-static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev)
+static inline bool rt2x00_is_usb(struct rt2x00_dev *rt2x00dev)
{
- return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI);
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
}
-static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev)
+static inline bool rt2x00_is_soc(struct rt2x00_dev *rt2x00dev)
{
- return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB);
+ return rt2x00_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
}
/**
@@ -1019,9 +1011,9 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
int rt2x00mac_start(struct ieee80211_hw *hw);
void rt2x00mac_stop(struct ieee80211_hw *hw);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf);
+ struct ieee80211_vif *vif);
int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed);
void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
@@ -1038,8 +1030,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
#endif /* CONFIG_RT2X00_LIB_CRYPTO */
int rt2x00mac_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats);
-int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats);
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index 7d323a763b5..70c04c282ef 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -184,7 +184,7 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
dump_hdr->data_length = cpu_to_le32(skb->len);
dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt);
dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf);
- dump_hdr->chip_rev = cpu_to_le32(rt2x00dev->chip.rev);
+ dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev);
dump_hdr->type = cpu_to_le16(type);
dump_hdr->queue_index = desc->entry->queue->qid;
dump_hdr->entry_index = desc->entry->entry_idx;
@@ -573,7 +573,7 @@ static struct dentry *rt2x00debug_create_file_chipset(const char *name,
blob->data = data;
data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt);
data += sprintf(data, "rf chip:\t%04x\n", intf->rt2x00dev->chip.rf);
- data += sprintf(data, "revision:\t%08x\n", intf->rt2x00dev->chip.rev);
+ data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev);
data += sprintf(data, "\n");
data += sprintf(data, "register\tbase\twords\twordsize\n");
data += sprintf(data, "csr\t%d\t%d\t%d\n",
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 265e66dba55..b93731b7990 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -385,9 +385,6 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
memset(&rxdesc, 0, sizeof(rxdesc));
rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc);
- /* Trim buffer to correct size */
- skb_trim(entry->skb, rxdesc.size);
-
/*
* The data behind the ieee80211 header must be
* aligned on a 4 byte boundary.
@@ -404,11 +401,16 @@ void rt2x00lib_rxdone(struct rt2x00_dev *rt2x00dev,
(rxdesc.flags & RX_FLAG_IV_STRIPPED))
rt2x00crypto_rx_insert_iv(entry->skb, header_length,
&rxdesc);
- else if (rxdesc.dev_flags & RXDONE_L2PAD)
+ else if (header_length &&
+ (rxdesc.size > header_length) &&
+ (rxdesc.dev_flags & RXDONE_L2PAD))
rt2x00queue_remove_l2pad(entry->skb, header_length);
else
rt2x00queue_align_payload(entry->skb, header_length);
+ /* Trim buffer to correct size */
+ skb_trim(entry->skb, rxdesc.size);
+
/*
* Check if the frame was received using HT. In that case,
* the rate is the MCS index and should be passed to mac80211
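
The reordering above matters on devices that insert L2 padding: trimming the skb to rxdesc.size while the pad still sits between header and payload chops real payload bytes off the tail. A stand-alone sketch of the arithmetic, assuming rxdesc.size counts header plus payload without the pad (an assumption, not stated in the hunk itself):

#include <stdio.h>

int main(void)
{
	unsigned int header = 26, l2pad = 2, payload = 100;
	unsigned int buf_len = header + l2pad + payload;	/* bytes received: 128 */
	unsigned int desc_size = header + payload;		/* rxdesc.size: 126 */

	/* old order: trim to desc_size first (drops 2 payload bytes),
	 * then remove the pad */
	unsigned int old_len = desc_size - l2pad;		/* 124 */

	/* new order: remove the pad first, then trim (a no-op here) */
	unsigned int new_len = buf_len - l2pad;			/* 126 */
	if (new_len > desc_size)
		new_len = desc_size;

	printf("old %u bytes, new %u bytes\n", old_len, new_len);
	return 0;
}
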
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index de549c244ed..abbd857ec75 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -187,10 +187,10 @@ void rt2x00mac_stop(struct ieee80211_hw *hw)
EXPORT_SYMBOL_GPL(rt2x00mac_stop);
int rt2x00mac_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(conf->vif);
+ struct rt2x00_intf *intf = vif_to_intf(vif);
struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, QID_BEACON);
struct queue_entry *entry = NULL;
unsigned int i;
@@ -203,7 +203,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags))
return -ENODEV;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_AP:
/*
* We don't support mixed combinations of
@@ -263,7 +263,7 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
* increase interface count and start initialization.
*/
- if (conf->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP)
rt2x00dev->intf_ap_count++;
else
rt2x00dev->intf_sta_count++;
@@ -273,16 +273,16 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
mutex_init(&intf->beacon_skb_mutex);
intf->beacon = entry;
- if (conf->type == NL80211_IFTYPE_AP)
- memcpy(&intf->bssid, conf->mac_addr, ETH_ALEN);
- memcpy(&intf->mac, conf->mac_addr, ETH_ALEN);
+ if (vif->type == NL80211_IFTYPE_AP)
+ memcpy(&intf->bssid, vif->addr, ETH_ALEN);
+ memcpy(&intf->mac, vif->addr, ETH_ALEN);
/*
	 * The MAC address must be configured after the device
* has been initialized. Otherwise the device can reset
* the MAC registers.
*/
- rt2x00lib_config_intf(rt2x00dev, intf, conf->type, intf->mac, NULL);
+ rt2x00lib_config_intf(rt2x00dev, intf, vif->type, intf->mac, NULL);
/*
* Some filters depend on the current working mode. We can force
@@ -296,10 +296,10 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
EXPORT_SYMBOL_GPL(rt2x00mac_add_interface);
void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rt2x00_dev *rt2x00dev = hw->priv;
- struct rt2x00_intf *intf = vif_to_intf(conf->vif);
+ struct rt2x00_intf *intf = vif_to_intf(vif);
/*
	 * Don't allow interfaces to be removed while
@@ -307,11 +307,11 @@ void rt2x00mac_remove_interface(struct ieee80211_hw *hw,
* no interface is present.
*/
if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
- (conf->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
- (conf->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
+ (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) ||
+ (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count))
return;
- if (conf->type == NL80211_IFTYPE_AP)
+ if (vif->type == NL80211_IFTYPE_AP)
rt2x00dev->intf_ap_count--;
else
rt2x00dev->intf_sta_count--;
@@ -555,22 +555,6 @@ int rt2x00mac_get_stats(struct ieee80211_hw *hw,
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_stats);
-int rt2x00mac_get_tx_stats(struct ieee80211_hw *hw,
- struct ieee80211_tx_queue_stats *stats)
-{
- struct rt2x00_dev *rt2x00dev = hw->priv;
- unsigned int i;
-
- for (i = 0; i < rt2x00dev->ops->tx_queues; i++) {
- stats[i].len = rt2x00dev->tx[i].length;
- stats[i].limit = rt2x00dev->tx[i].limit;
- stats[i].count = rt2x00dev->tx[i].count;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00mac_get_tx_stats);
-
void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 0feb4d0e466..047123b766f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -41,6 +41,9 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
{
unsigned int i;
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return 0;
+
for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
rt2x00pci_register_read(rt2x00dev, offset, reg);
if (!rt2x00_get_field32(*reg, field))
@@ -269,7 +272,6 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
struct ieee80211_hw *hw;
struct rt2x00_dev *rt2x00dev;
int retval;
- u16 chip;
retval = pci_request_regions(pci_dev, pci_name(pci_dev));
if (retval) {
@@ -312,12 +314,6 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
- /*
- * Determine RT chipset by reading PCI header.
- */
- pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip);
- rt2x00_set_chip_rt(rt2x00dev, chip);
-
retval = rt2x00pci_alloc_reg(rt2x00dev);
if (retval)
goto exit_free_device;
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index d4f9449ab0a..8149ff68410 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -27,6 +27,7 @@
#define RT2X00PCI_H
#include <linux/io.h>
+#include <linux/pci.h>
/*
* This variable should be used with the
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 9915a09141e..0b4801a1460 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -177,55 +177,45 @@ void rt2x00queue_align_payload(struct sk_buff *skb, unsigned int header_length)
void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int frame_length = skb->len;
+ unsigned int payload_length = skb->len - header_length;
unsigned int header_align = ALIGN_SIZE(skb, 0);
unsigned int payload_align = ALIGN_SIZE(skb, header_length);
- unsigned int l2pad = 4 - (payload_align - header_align);
+ unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
- if (header_align == payload_align) {
- /*
- * Both header and payload must be moved the same
- * amount of bytes to align them properly. This means
- * we don't use the L2 padding but just move the entire
- * frame.
- */
- rt2x00queue_align_frame(skb);
- } else if (!payload_align) {
- /*
- * Simple L2 padding, only the header needs to be moved,
- * the payload is already properly aligned.
- */
- skb_push(skb, header_align);
- memmove(skb->data, skb->data + header_align, frame_length);
- skbdesc->flags |= SKBDESC_L2_PADDED;
- } else {
- /*
- *
- * Complicated L2 padding, both header and payload need
- * to be moved. By default we only move to the start
- * of the buffer, so our header alignment needs to be
- * increased if there is not enough room for the header
- * to be moved.
- */
- if (payload_align > header_align)
- header_align += 4;
+ /*
+ * Adjust the header alignment if the payload needs to be moved more
+ * than the header.
+ */
+ if (payload_align > header_align)
+ header_align += 4;
+
+ /* There is nothing to do if no alignment is needed */
+ if (!header_align)
+ return;
+
+ /* Reserve the amount of space needed in front of the frame */
+ skb_push(skb, header_align);
+
+ /*
+ * Move the header.
+ */
+ memmove(skb->data, skb->data + header_align, header_length);
- skb_push(skb, header_align);
- memmove(skb->data, skb->data + header_align, header_length);
+ /* Move the payload, if present and if required */
+ if (payload_length && payload_align)
memmove(skb->data + header_length + l2pad,
skb->data + header_length + l2pad + payload_align,
- frame_length - header_length);
- skbdesc->flags |= SKBDESC_L2_PADDED;
- }
+ payload_length);
+
+ /* Trim the skb to the correct size */
+ skb_trim(skb, header_length + l2pad + payload_length);
}
void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
{
- struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
- unsigned int l2pad = 4 - (header_length & 3);
+ unsigned int l2pad = L2PAD_SIZE(header_length);
- if (!l2pad || (skbdesc->flags & SKBDESC_L2_PADDED))
+ if (!l2pad)
return;
memmove(skb->data + l2pad, skb->data, header_length);
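
A stand-alone sketch (not driver code) of what the simplified rt2x00queue_remove_l2pad() does: the header is shifted forward over the pad with memmove() so header and payload become contiguous, and the leading pad bytes are then dropped (skb_pull() in the driver). The 6-byte header is only illustrative.

#include <stdio.h>
#include <string.h>

#define L2PAD_SIZE(__hdrlen)	(-(__hdrlen) & 3)

int main(void)
{
	/* "header" of 6 bytes, 2 pad bytes, then the payload */
	char buf[] = "HEADERppDATA";
	unsigned int header_length = 6;
	unsigned int l2pad = L2PAD_SIZE(header_length);	/* 2 */

	/* shift the header forward over the pad ... */
	memmove(buf + l2pad, buf, header_length);

	/* ... and drop the leading pad bytes */
	printf("%s\n", buf + l2pad);	/* prints: HEADERDATA */
	return 0;
}
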
@@ -346,7 +336,9 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
* Header and alignment information.
*/
txdesc->header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
- txdesc->l2pad = ALIGN_SIZE(entry->skb, txdesc->header_length);
+ if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags) &&
+ (entry->skb->len > txdesc->header_length))
+ txdesc->l2pad = L2PAD_SIZE(txdesc->header_length);
/*
* Check whether this frame is to be acked.
@@ -387,10 +379,13 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
/*
* Beacons and probe responses require the tsf timestamp
- * to be inserted into the frame.
+ * to be inserted into the frame, except for a frame that has been injected
+	 * through a monitor interface. The exception is needed for testing
+	 * via a monitor interface.
*/
- if (ieee80211_is_beacon(hdr->frame_control) ||
- ieee80211_is_probe_resp(hdr->frame_control))
+ if ((ieee80211_is_beacon(hdr->frame_control) ||
+ ieee80211_is_probe_resp(hdr->frame_control)) &&
+ (!(tx_info->flags & IEEE80211_TX_CTL_INJECTED)))
__set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags);
/*
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index 70775e5ba1a..c1e482bb37b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -92,8 +92,6 @@ enum data_queue_qid {
* @SKBDESC_DMA_MAPPED_TX: &skb_dma field has been mapped for TX
* @SKBDESC_IV_STRIPPED: Frame contained a IV/EIV provided by
* mac80211 but was stripped for processing by the driver.
- * @SKBDESC_L2_PADDED: Payload has been padded for 4-byte alignment,
- * the padded bytes are located between header and payload.
* @SKBDESC_NOT_MAC80211: Frame didn't originate from mac80211,
* don't try to pass it back.
*/
@@ -101,8 +99,7 @@ enum skb_frame_desc_flags {
SKBDESC_DMA_MAPPED_RX = 1 << 0,
SKBDESC_DMA_MAPPED_TX = 1 << 1,
SKBDESC_IV_STRIPPED = 1 << 2,
- SKBDESC_L2_PADDED = 1 << 3,
- SKBDESC_NOT_MAC80211 = 1 << 4,
+ SKBDESC_NOT_MAC80211 = 1 << 3,
};
/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index 19e684f8ffa..4efdc96010f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -71,9 +71,7 @@ exit:
return -ENOMEM;
}
-int rt2x00soc_probe(struct platform_device *pdev,
- const unsigned short chipset,
- const struct rt2x00_ops *ops)
+int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops)
{
struct ieee80211_hw *hw;
struct rt2x00_dev *rt2x00dev;
@@ -94,12 +92,7 @@ int rt2x00soc_probe(struct platform_device *pdev,
rt2x00dev->irq = platform_get_irq(pdev, 0);
rt2x00dev->name = pdev->dev.driver->name;
- /*
- * SoC devices mimic PCI behavior.
- */
- rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
-
- rt2x00_set_chip_rt(rt2x00dev, chipset);
+ rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC);
retval = rt2x00soc_alloc_reg(rt2x00dev);
if (retval)
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
index 8a3416624af..4739edfe2f0 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.h
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -28,18 +28,10 @@
#define KSEG1ADDR(__ptr) __ptr
-#define __rt2x00soc_probe(__chipset, __ops) \
-static int __rt2x00soc_probe(struct platform_device *pdev) \
-{ \
- return rt2x00soc_probe(pdev, (__chipset), (__ops)); \
-}
-
/*
* SoC driver handlers.
*/
-int rt2x00soc_probe(struct platform_device *pdev,
- const unsigned short chipset,
- const struct rt2x00_ops *ops);
+int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops);
int rt2x00soc_remove(struct platform_device *pdev);
#ifdef CONFIG_PM
int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 0ca589306d7..e2da928dd9f 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -637,8 +637,7 @@ static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev,
rt61pci_bbp_read(rt2x00dev, 4, &r4);
rt61pci_bbp_read(rt2x00dev, 77, &r77);
- rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
- rt2x00_rf(&rt2x00dev->chip, RF5325));
+ rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325));
/*
* Configure the RX antenna.
@@ -684,8 +683,7 @@ static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev,
rt61pci_bbp_read(rt2x00dev, 4, &r4);
rt61pci_bbp_read(rt2x00dev, 77, &r77);
- rt2x00_set_field8(&r3, BBP_R3_SMART_MODE,
- rt2x00_rf(&rt2x00dev->chip, RF2529));
+ rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529));
rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END,
!test_bit(CONFIG_FRAME_TYPE, &rt2x00dev->flags));
@@ -833,12 +831,11 @@ static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00pci_register_write(rt2x00dev, PHY_CSR0, reg);
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF5325))
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325))
rt61pci_config_antenna_5x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2527))
+ else if (rt2x00_rf(rt2x00dev, RF2527))
rt61pci_config_antenna_2x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2529)) {
+ else if (rt2x00_rf(rt2x00dev, RF2529)) {
if (test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags))
rt61pci_config_antenna_2x(rt2x00dev, ant);
else
@@ -879,8 +876,7 @@ static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527));
+ smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
rt61pci_bbp_read(rt2x00dev, 3, &r3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1135,16 +1131,18 @@ dynamic_cca_tune:
*/
static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
{
+ u16 chip;
char *fw_name;
- switch (rt2x00dev->chip.rt) {
- case RT2561:
+ pci_read_config_word(to_pci_dev(rt2x00dev->dev), PCI_DEVICE_ID, &chip);
+ switch (chip) {
+ case RT2561_PCI_ID:
fw_name = FIRMWARE_RT2561;
break;
- case RT2561s:
+ case RT2561s_PCI_ID:
fw_name = FIRMWARE_RT2561s;
break;
- case RT2661:
+ case RT2661_PCI_ID:
fw_name = FIRMWARE_RT2661;
break;
default:
@@ -2299,13 +2297,13 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00pci_register_read(rt2x00dev, MAC_CSR0, &reg);
- rt2x00_set_chip_rf(rt2x00dev, value, reg);
- rt2x00_print_chip(rt2x00dev);
+ rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
+ value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
- if (!rt2x00_rf(&rt2x00dev->chip, RF5225) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5325) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2527) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2529)) {
+ if (!rt2x00_rf(rt2x00dev, RF5225) &&
+ !rt2x00_rf(rt2x00dev, RF5325) &&
+ !rt2x00_rf(rt2x00dev, RF2527) &&
+ !rt2x00_rf(rt2x00dev, RF2529)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2360,7 +2358,7 @@ static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
* the antenna settings should be gathered from the NIC
* eeprom word.
*/
- if (rt2x00_rf(&rt2x00dev->chip, RF2529) &&
+ if (rt2x00_rf(rt2x00dev, RF2529) &&
!test_bit(CONFIG_DOUBLE_ANTENNA, &rt2x00dev->flags)) {
rt2x00dev->default_ant.rx =
ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED);
@@ -2571,8 +2569,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->channels = rf_vals_seq;
}
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF5325)) {
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_seq);
}
@@ -2735,7 +2732,6 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt61pci_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt61pci_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
@@ -2812,7 +2808,7 @@ static const struct rt2x00_ops rt61pci_ops = {
/*
* RT61pci module information.
*/
-static struct pci_device_id rt61pci_device_table[] = {
+static DEFINE_PCI_DEVICE_TABLE(rt61pci_device_table) = {
/* RT2561s */
{ PCI_DEVICE(0x1814, 0x0301), PCI_DEVICE_DATA(&rt61pci_ops) },
/* RT2561 v2 */
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index 8f13810622b..df80f1af22a 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -28,6 +28,13 @@
#define RT61PCI_H
/*
+ * RT chip PCI IDs.
+ */
+#define RT2561s_PCI_ID 0x0301
+#define RT2561_PCI_ID 0x0302
+#define RT2661_PCI_ID 0x0401
+
+/*
* RF chip defines.
*/
#define RF5225 0x0001
@@ -225,6 +232,8 @@ struct hw_pairwise_ta_entry {
* MAC_CSR0: ASIC revision number.
*/
#define MAC_CSR0 0x3000
+#define MAC_CSR0_REVISION FIELD32(0x0000000f)
+#define MAC_CSR0_CHIPSET FIELD32(0x000ffff0)
/*
* MAC_CSR1: System control register.
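
A small user-space sketch (not driver code) of how the two new fields decompose a MAC_CSR0 value. 0x00025731 is only a plausible example for an RT2573 part: the old rt73usb check required the 0x000ffff0 bits to read 0x25730 and the low revision nibble to be non-zero, which maps onto the new RT2573/revision checks further down in this patch.

#include <stdio.h>

int main(void)
{
	unsigned int reg = 0x00025731;			/* example MAC_CSR0 value */
	unsigned int chipset = (reg & 0x000ffff0) >> 4;	/* MAC_CSR0_CHIPSET */
	unsigned int revision = reg & 0x0000000f;	/* MAC_CSR0_REVISION */

	printf("rt: %04x, rev: %04x\n", chipset, revision);
	/* prints: rt: 2573, rev: 0001 */
	return 0;
}
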
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index ced3b6ab5e1..f39a8ed1784 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -136,8 +136,8 @@ static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev,
* all others contain 20 bits.
*/
rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS,
- 20 + (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527)));
+ 20 + (rt2x00_rf(rt2x00dev, RF5225) ||
+ rt2x00_rf(rt2x00dev, RF2527)));
rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0);
rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1);
@@ -741,11 +741,9 @@ static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev,
rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg);
- if (rt2x00_rf(&rt2x00dev->chip, RF5226) ||
- rt2x00_rf(&rt2x00dev->chip, RF5225))
+ if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225))
rt73usb_config_antenna_5x(rt2x00dev, ant);
- else if (rt2x00_rf(&rt2x00dev->chip, RF2528) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527))
+ else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527))
rt73usb_config_antenna_2x(rt2x00dev, ant);
}
@@ -779,8 +777,7 @@ static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev,
rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower));
rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);
- smart = !(rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527));
+ smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527));
rt73usb_bbp_read(rt2x00dev, 3, &r3);
rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart);
@@ -1210,8 +1207,7 @@ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev)
rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000);
reg = 0x000023b0;
- if (rt2x00_rf(&rt2x00dev->chip, RF5225) ||
- rt2x00_rf(&rt2x00dev->chip, RF2527))
+ if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))
rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1);
rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg);
@@ -1824,19 +1820,18 @@ static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
*/
value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
- rt2x00_set_chip(rt2x00dev, RT2571, value, reg);
- rt2x00_print_chip(rt2x00dev);
+ rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
+ value, rt2x00_get_field32(reg, MAC_CSR0_REVISION));
- if (!rt2x00_check_rev(&rt2x00dev->chip, 0x000ffff0, 0x25730) ||
- rt2x00_check_rev(&rt2x00dev->chip, 0x0000000f, 0)) {
+ if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) {
ERROR(rt2x00dev, "Invalid RT chipset detected.\n");
return -ENODEV;
}
- if (!rt2x00_rf(&rt2x00dev->chip, RF5226) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2528) &&
- !rt2x00_rf(&rt2x00dev->chip, RF5225) &&
- !rt2x00_rf(&rt2x00dev->chip, RF2527)) {
+ if (!rt2x00_rf(rt2x00dev, RF5226) &&
+ !rt2x00_rf(rt2x00dev, RF2528) &&
+ !rt2x00_rf(rt2x00dev, RF5225) &&
+ !rt2x00_rf(rt2x00dev, RF2527)) {
ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
return -ENODEV;
}
@@ -2081,17 +2076,17 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
spec->supported_bands = SUPPORT_BAND_2GHZ;
spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
- if (rt2x00_rf(&rt2x00dev->chip, RF2528)) {
+ if (rt2x00_rf(rt2x00dev, RF2528)) {
spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528);
spec->channels = rf_vals_bg_2528;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5226)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5226)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5226);
spec->channels = rf_vals_5226;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF2527)) {
+ } else if (rt2x00_rf(rt2x00dev, RF2527)) {
spec->num_channels = 14;
spec->channels = rf_vals_5225_2527;
- } else if (rt2x00_rf(&rt2x00dev->chip, RF5225)) {
+ } else if (rt2x00_rf(rt2x00dev, RF5225)) {
spec->supported_bands |= SUPPORT_BAND_5GHZ;
spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527);
spec->channels = rf_vals_5225_2527;
@@ -2249,7 +2244,6 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
.get_stats = rt2x00mac_get_stats,
.bss_info_changed = rt2x00mac_bss_info_changed,
.conf_tx = rt73usb_conf_tx,
- .get_tx_stats = rt2x00mac_get_tx_stats,
.get_tsf = rt73usb_get_tsf,
.rfkill_poll = rt2x00mac_rfkill_poll,
};
@@ -2354,6 +2348,7 @@ static struct usb_device_id rt73usb_device_table[] = {
{ USB_DEVICE(0x08dd, 0x0120), USB_DEVICE_DATA(&rt73usb_ops) },
/* Buffalo */
{ USB_DEVICE(0x0411, 0x00d8), USB_DEVICE_DATA(&rt73usb_ops) },
+ { USB_DEVICE(0x0411, 0x00d9), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x00f4), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0116), USB_DEVICE_DATA(&rt73usb_ops) },
{ USB_DEVICE(0x0411, 0x0119), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 7942f810e92..7abe7eb1455 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -142,6 +142,8 @@ struct hw_pairwise_ta_entry {
* MAC_CSR0: ASIC revision number.
*/
#define MAC_CSR0 0x3000
+#define MAC_CSR0_REVISION FIELD32(0x0000000f)
+#define MAC_CSR0_CHIPSET FIELD32(0x000ffff0)
/*
* MAC_CSR1: System control register.
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180.h
index 8721282a818..de3844fe06d 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180.h
@@ -60,7 +60,6 @@ struct rtl8180_priv {
struct rtl818x_csr __iomem *map;
const struct rtl818x_rf_ops *rf;
struct ieee80211_vif *vif;
- int mode;
/* rtl8180 driver specific */
spinlock_t lock;
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180_dev.c
index 8a40a143998..2b928ecf47b 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180_dev.c
@@ -33,7 +33,7 @@ MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
MODULE_DESCRIPTION("RTL8180 / RTL8185 PCI wireless driver");
MODULE_LICENSE("GPL");
-static struct pci_device_id rtl8180_table[] __devinitdata = {
+static DEFINE_PCI_DEVICE_TABLE(rtl8180_table) = {
/* rtl8185 */
{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8185) },
{ PCI_DEVICE(PCI_VENDOR_ID_BELKIN, 0x700f) },
@@ -82,8 +82,6 @@ static const struct ieee80211_channel rtl818x_channels[] = {
};
-
-
void rtl8180_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
{
struct rtl8180_priv *priv = dev->priv;
@@ -615,7 +613,6 @@ static int rtl8180_start(struct ieee80211_hw *dev)
reg |= RTL818X_CMD_TX_ENABLE;
rtl818x_iowrite8(priv, &priv->map->CMD, reg);
- priv->mode = NL80211_IFTYPE_MONITOR;
return 0;
err_free_rings:
@@ -633,8 +630,6 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
u8 reg;
int i;
- priv->mode = NL80211_IFTYPE_UNSPECIFIED;
-
rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
reg = rtl818x_ioread8(priv, &priv->map->CMD);
@@ -657,38 +652,39 @@ static void rtl8180_stop(struct ieee80211_hw *dev)
}
static int rtl8180_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
- if (priv->mode != NL80211_IFTYPE_MONITOR)
- return -EOPNOTSUPP;
+ /*
+ * We only support one active interface at a time.
+ */
+ if (priv->vif)
+ return -EBUSY;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
break;
default:
return -EOPNOTSUPP;
}
- priv->vif = conf->vif;
+ priv->vif = vif;
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
rtl818x_iowrite32(priv, (__le32 __iomem *)&priv->map->MAC[0],
- le32_to_cpu(*(__le32 *)conf->mac_addr));
+ le32_to_cpu(*(__le32 *)vif->addr));
rtl818x_iowrite16(priv, (__le16 __iomem *)&priv->map->MAC[4],
- le16_to_cpu(*(__le16 *)(conf->mac_addr + 4)));
+ le16_to_cpu(*(__le16 *)(vif->addr + 4)));
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
return 0;
}
static void rtl8180_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8180_priv *priv = dev->priv;
- priv->mode = NL80211_IFTYPE_MONITOR;
priv->vif = NULL;
}
@@ -765,6 +761,14 @@ static void rtl8180_configure_filter(struct ieee80211_hw *dev,
rtl818x_iowrite32(priv, &priv->map->RX_CONF, priv->rx_conf);
}
+static u64 rtl8180_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8180_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
static const struct ieee80211_ops rtl8180_ops = {
.tx = rtl8180_tx,
.start = rtl8180_start,
@@ -775,6 +779,7 @@ static const struct ieee80211_ops rtl8180_ops = {
.bss_info_changed = rtl8180_bss_info_changed,
.prepare_multicast = rtl8180_prepare_multicast,
.configure_filter = rtl8180_configure_filter,
+ .get_tsf = rtl8180_get_tsf,
};
static void rtl8180_eeprom_register_read(struct eeprom_93cx6 *eeprom)
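
The new get_tsf callbacks read the 64-bit TSF as two 32-bit halves and splice them together; a stand-alone sketch of that composition with made-up register values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t tsft_lo = 0x89abcdef;	/* TSFT[0], low 32 bits */
	uint32_t tsft_hi = 0x00000123;	/* TSFT[1], high 32 bits */

	uint64_t tsf = tsft_lo | (uint64_t)tsft_hi << 32;

	printf("tsf = 0x%016llx\n", (unsigned long long)tsf);
	/* prints: tsf = 0x0000012389abcdef */
	return 0;
}
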
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index 6af0f3f71f3..6bb32112e65 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -92,7 +92,7 @@ struct rtl8187_priv {
struct rtl818x_csr *map;
const struct rtl818x_rf_ops *rf;
struct ieee80211_vif *vif;
- int mode;
+
/* The mutex protects the TX loopback state.
* Any attempt to set channels concurrently locks the device.
*/
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 7ba3052b070..0fb850e0c65 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -1019,31 +1019,30 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
}
static int rtl8187_add_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
int i;
int ret = -EOPNOTSUPP;
mutex_lock(&priv->conf_mutex);
- if (priv->mode != NL80211_IFTYPE_MONITOR)
+ if (priv->vif)
goto exit;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
- priv->mode = conf->type;
break;
default:
goto exit;
}
ret = 0;
- priv->vif = conf->vif;
+ priv->vif = vif;
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
for (i = 0; i < ETH_ALEN; i++)
rtl818x_iowrite8(priv, &priv->map->MAC[i],
- ((u8 *)conf->mac_addr)[i]);
+ ((u8 *)vif->addr)[i]);
rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
exit:
@@ -1052,11 +1051,10 @@ exit:
}
static void rtl8187_remove_interface(struct ieee80211_hw *dev,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct rtl8187_priv *priv = dev->priv;
mutex_lock(&priv->conf_mutex);
- priv->mode = NL80211_IFTYPE_MONITOR;
priv->vif = NULL;
mutex_unlock(&priv->conf_mutex);
}
@@ -1268,6 +1266,14 @@ static int rtl8187_conf_tx(struct ieee80211_hw *dev, u16 queue,
return 0;
}
+static u64 rtl8187_get_tsf(struct ieee80211_hw *dev)
+{
+ struct rtl8187_priv *priv = dev->priv;
+
+ return rtl818x_ioread32(priv, &priv->map->TSFT[0]) |
+ (u64)(rtl818x_ioread32(priv, &priv->map->TSFT[1])) << 32;
+}
+
static const struct ieee80211_ops rtl8187_ops = {
.tx = rtl8187_tx,
.start = rtl8187_start,
@@ -1279,7 +1285,8 @@ static const struct ieee80211_ops rtl8187_ops = {
.prepare_multicast = rtl8187_prepare_multicast,
.configure_filter = rtl8187_configure_filter,
.conf_tx = rtl8187_conf_tx,
- .rfkill_poll = rtl8187_rfkill_poll
+ .rfkill_poll = rtl8187_rfkill_poll,
+ .get_tsf = rtl8187_get_tsf,
};
static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom)
@@ -1366,7 +1373,6 @@ static int __devinit rtl8187_probe(struct usb_interface *intf,
dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &priv->band;
- priv->mode = NL80211_IFTYPE_MONITOR;
dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_RX_INCLUDES_FCS;
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187_leds.c
index ded44c045eb..4637337d5ce 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.c
@@ -33,7 +33,7 @@ static void led_turn_on(struct work_struct *work)
struct rtl8187_led *led = &priv->led_tx;
/* Don't change the LED, when the device is down. */
- if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
return ;
/* Skip if the LED is not registered. */
@@ -71,7 +71,7 @@ static void led_turn_off(struct work_struct *work)
struct rtl8187_led *led = &priv->led_tx;
/* Don't change the LED, when the device is down. */
- if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
+ if (!priv->vif || priv->vif->type == NL80211_IFTYPE_UNSPECIFIED)
return ;
/* Skip if the LED is not registered. */
@@ -241,5 +241,5 @@ void rtl8187_leds_exit(struct ieee80211_hw *dev)
cancel_delayed_work_sync(&priv->led_off);
cancel_delayed_work_sync(&priv->led_on);
}
-#endif /* def CONFIG_RTL8187_LED */
+#endif /* def CONFIG_RTL8187_LEDS */
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.h b/drivers/net/wireless/rtl818x/rtl8187_leds.h
index efe8041bdda..d743c96d4a2 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.h
+++ b/drivers/net/wireless/rtl818x/rtl8187_leds.h
@@ -54,6 +54,6 @@ struct rtl8187_led {
void rtl8187_leds_init(struct ieee80211_hw *dev, u16 code);
void rtl8187_leds_exit(struct ieee80211_hw *dev);
-#endif /* def CONFIG_RTL8187_LED */
+#endif /* def CONFIG_RTL8187_LEDS */
#endif /* RTL8187_LED_H */
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 62e37ad01cc..f47ec94c16d 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -10,5 +10,7 @@ obj-$(CONFIG_WL1251_SDIO) += wl1251_sdio.o
wl1271-objs = wl1271_main.o wl1271_spi.o wl1271_cmd.o \
wl1271_event.o wl1271_tx.o wl1271_rx.o \
wl1271_ps.o wl1271_acx.o wl1271_boot.o \
- wl1271_init.o wl1271_debugfs.o
+ wl1271_init.o wl1271_debugfs.o wl1271_io.o
+
+wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o
obj-$(CONFIG_WL1271) += wl1271.o
diff --git a/drivers/net/wireless/wl12xx/wl1251.h b/drivers/net/wireless/wl12xx/wl1251.h
index 054533f7a12..37c61c19cae 100644
--- a/drivers/net/wireless/wl12xx/wl1251.h
+++ b/drivers/net/wireless/wl12xx/wl1251.h
@@ -247,6 +247,7 @@ struct wl1251_debugfs {
struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
struct dentry *tx_queue_len;
+ struct dentry *tx_queue_status;
struct dentry *retry_count;
struct dentry *excessive_retries;
@@ -340,9 +341,6 @@ struct wl1251 {
/* Are we currently scanning */
bool scanning;
- /* Our association ID */
- u16 aid;
-
/* Default key (for WEP) */
u32 default_key;
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.c b/drivers/net/wireless/wl12xx/wl1251_acx.c
index acfa086dbfc..beff084040b 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.c
@@ -976,3 +976,72 @@ out:
kfree(acx);
return ret;
}
+
+int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifs, u16 txop)
+{
+ struct wl1251_acx_ac_cfg *acx;
+ int ret = 0;
+
+	wl1251_debug(DEBUG_ACX, "acx ac cfg %d cw_min %d cw_max %d "
+ "aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->ac = ac;
+ acx->cw_min = cw_min;
+ acx->cw_max = cw_max;
+ acx->aifsn = aifs;
+ acx->txop_limit = txop;
+
+ ret = wl1251_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("acx ac cfg failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
+
+int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
+ enum wl1251_acx_channel_type type,
+ u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
+ enum wl1251_acx_ack_policy ack_policy)
+{
+ struct wl1251_acx_tid_cfg *acx;
+ int ret = 0;
+
+ wl1251_debug(DEBUG_ACX, "acx tid cfg %d type %d tsid %d "
+ "ps_scheme %d ack_policy %d", queue, type, tsid,
+ ps_scheme, ack_policy);
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->queue = queue;
+ acx->type = type;
+ acx->tsid = tsid;
+ acx->ps_scheme = ps_scheme;
+ acx->ack_policy = ack_policy;
+
+ ret = wl1251_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1251_warning("acx tid cfg failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1251_acx.h b/drivers/net/wireless/wl12xx/wl1251_acx.h
index 652371432cd..26160c45784 100644
--- a/drivers/net/wireless/wl12xx/wl1251_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_acx.h
@@ -1166,6 +1166,87 @@ struct wl1251_acx_wr_tbtt_and_dtim {
u8 padding;
} __attribute__ ((packed));
+struct wl1251_acx_ac_cfg {
+ struct acx_header header;
+
+ /*
+ * Access Category - The TX queue's access category
+ * (refer to AccessCategory_enum)
+ */
+ u8 ac;
+
+ /*
+ * The contention window minimum size (in slots) for
+ * the access class.
+ */
+ u8 cw_min;
+
+ /*
+ * The contention window maximum size (in slots) for
+ * the access class.
+ */
+ u16 cw_max;
+
+ /* The AIF value (in slots) for the access class. */
+ u8 aifsn;
+
+ u8 reserved;
+
+ /* The TX Op Limit (in microseconds) for the access class. */
+ u16 txop_limit;
+} __attribute__ ((packed));
+
+
+enum wl1251_acx_channel_type {
+ CHANNEL_TYPE_DCF = 0,
+ CHANNEL_TYPE_EDCF = 1,
+ CHANNEL_TYPE_HCCA = 2,
+};
+
+enum wl1251_acx_ps_scheme {
+ /* regular ps: simple sending of packets */
+ WL1251_ACX_PS_SCHEME_LEGACY = 0,
+
+	/* sending a packet triggers an unscheduled apsd downstream */
+ WL1251_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
+
+ /* a pspoll packet will be sent before every data packet */
+ WL1251_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
+
+ /* scheduled apsd mode */
+ WL1251_ACX_PS_SCHEME_SAPSD = 3,
+};
+
+enum wl1251_acx_ack_policy {
+ WL1251_ACX_ACK_POLICY_LEGACY = 0,
+ WL1251_ACX_ACK_POLICY_NO_ACK = 1,
+ WL1251_ACX_ACK_POLICY_BLOCK = 2,
+};
+
+struct wl1251_acx_tid_cfg {
+ struct acx_header header;
+
+ /* tx queue id number (0-7) */
+ u8 queue;
+
+ /* channel access type for the queue, enum wl1251_acx_channel_type */
+ u8 type;
+
+ /* EDCA: ac index (0-3), HCCA: traffic stream id (8-15) */
+ u8 tsid;
+
+ /* ps scheme of the specified queue, enum wl1251_acx_ps_scheme */
+ u8 ps_scheme;
+
+ /* the tx queue ack policy, enum wl1251_acx_ack_policy */
+ u8 ack_policy;
+
+ u8 padding[3];
+
+ /* not supported */
+ u32 apsdconf[2];
+} __attribute__ ((packed));
+
/*************************************************************************
Host Interrupt Register (WiLink -> Host)
@@ -1322,5 +1403,11 @@ int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime);
int wl1251_acx_rate_policies(struct wl1251 *wl);
int wl1251_acx_mem_cfg(struct wl1251 *wl);
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim);
+int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifs, u16 txop);
+int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
+ enum wl1251_acx_channel_type type,
+ u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
+ enum wl1251_acx_ack_policy ack_policy);
#endif /* __WL1251_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.c b/drivers/net/wireless/wl12xx/wl1251_cmd.c
index 770f260726b..0320b478bb3 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.c
@@ -410,3 +410,86 @@ out:
kfree(cmd);
return ret;
}
+
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes)
+{
+ struct wl1251_cmd_scan *cmd;
+ int i, ret = 0;
+
+ wl1251_debug(DEBUG_CMD, "cmd scan");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
+ cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
+ CFG_RX_MGMT_EN |
+ CFG_RX_BCN_EN);
+ cmd->params.scan_options = 0;
+ cmd->params.num_channels = n_channels;
+ cmd->params.num_probe_requests = n_probes;
+ cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
+ cmd->params.tid_trigger = 0;
+
+ for (i = 0; i < n_channels; i++) {
+ cmd->channels[i].min_duration =
+ cpu_to_le32(WL1251_SCAN_MIN_DURATION);
+ cmd->channels[i].max_duration =
+ cpu_to_le32(WL1251_SCAN_MAX_DURATION);
+ memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
+ memset(&cmd->channels[i].bssid_msb, 0xff, 2);
+ cmd->channels[i].early_termination = 0;
+ cmd->channels[i].tx_power_att = 0;
+ cmd->channels[i].channel = channels[i]->hw_value;
+ }
+
+ cmd->params.ssid_len = ssid_len;
+ if (ssid)
+ memcpy(cmd->params.ssid, ssid, ssid_len);
+
+ ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd scan failed: %d", ret);
+ goto out;
+ }
+
+ wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
+
+ if (cmd->header.status != CMD_STATUS_SUCCESS) {
+ wl1251_error("cmd scan status wasn't success: %d",
+ cmd->header.status);
+ ret = -EIO;
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
+
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
+{
+ struct wl1251_cmd_trigger_scan_to *cmd;
+ int ret;
+
+ wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ cmd->timeout = timeout;
+
+ ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
+ if (ret < 0) {
+ wl1251_error("cmd trigger scan to failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(cmd);
+ return ret;
+}
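
A hypothetical caller fragment (not part of this hunk) showing how the new helper pairs with mac80211's hw_scan request; req is the struct cfg80211_scan_request handed to the callback, and WL1251_SCAN_NUM_PROBES is the constant added to wl1251_cmd.h below.

	u8 *ssid = NULL;
	size_t ssid_len = 0;
	int ret;

	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		ssid_len = req->ssids[0].ssid_len;
	}

	ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
			      req->n_channels, WL1251_SCAN_NUM_PROBES);
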
diff --git a/drivers/net/wireless/wl12xx/wl1251_cmd.h b/drivers/net/wireless/wl12xx/wl1251_cmd.h
index dff798ad0ef..4ad67cae94d 100644
--- a/drivers/net/wireless/wl12xx/wl1251_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1251_cmd.h
@@ -27,6 +27,8 @@
#include "wl1251.h"
+#include <net/cfg80211.h>
+
struct acx_header;
int wl1251_cmd_send(struct wl1251 *wl, u16 type, void *buf, size_t buf_len);
@@ -43,6 +45,10 @@ int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
size_t len);
int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
void *buf, size_t buf_len);
+int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
+ struct ieee80211_channel *channels[],
+ unsigned int n_channels, unsigned int n_probes);
+int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout);
/* unit ms */
#define WL1251_COMMAND_TIMEOUT 2000
@@ -163,8 +169,12 @@ struct cmd_read_write_memory {
#define CMDMBOX_HEADER_LEN 4
#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
+#define WL1251_SCAN_MIN_DURATION 30000
+#define WL1251_SCAN_MAX_DURATION 60000
+
+#define WL1251_SCAN_NUM_PROBES 3
-struct basic_scan_parameters {
+struct wl1251_scan_parameters {
u32 rx_config_options;
u32 rx_filter_options;
@@ -189,11 +199,11 @@ struct basic_scan_parameters {
u8 tid_trigger;
u8 ssid_len;
- u32 ssid[8];
+ u8 ssid[32];
} __attribute__ ((packed));
-struct basic_scan_channel_parameters {
+struct wl1251_scan_ch_parameters {
u32 min_duration; /* in TU */
u32 max_duration; /* in TU */
u32 bssid_lsb;
@@ -213,11 +223,11 @@ struct basic_scan_channel_parameters {
/* SCAN parameters */
#define SCAN_MAX_NUM_OF_CHANNELS 16
-struct cmd_scan {
+struct wl1251_cmd_scan {
struct wl1251_cmd_header header;
- struct basic_scan_parameters params;
- struct basic_scan_channel_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
+ struct wl1251_scan_parameters params;
+ struct wl1251_scan_ch_parameters channels[SCAN_MAX_NUM_OF_CHANNELS];
} __attribute__ ((packed));
enum {
diff --git a/drivers/net/wireless/wl12xx/wl1251_debugfs.c b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
index a00723059f8..0ccba57fb9f 100644
--- a/drivers/net/wireless/wl12xx/wl1251_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1251_debugfs.c
@@ -237,6 +237,27 @@ static const struct file_operations tx_queue_len_ops = {
.open = wl1251_open_file_generic,
};
+static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1251 *wl = file->private_data;
+ char buf[3], status;
+ int len;
+
+ if (wl->tx_queue_stopped)
+ status = 's';
+ else
+ status = 'r';
+
+ len = scnprintf(buf, sizeof(buf), "%c\n", status);
+ return simple_read_from_buffer(userbuf, count, ppos, buf, len);
+}
+
+static const struct file_operations tx_queue_status_ops = {
+ .read = tx_queue_status_read,
+ .open = wl1251_open_file_generic,
+};
+
static void wl1251_debugfs_delete_files(struct wl1251 *wl)
{
DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -331,6 +352,7 @@ static void wl1251_debugfs_delete_files(struct wl1251 *wl)
DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_DEL(tx_queue_len);
+ DEBUGFS_DEL(tx_queue_status);
DEBUGFS_DEL(retry_count);
DEBUGFS_DEL(excessive_retries);
}
@@ -431,6 +453,7 @@ static int wl1251_debugfs_add_files(struct wl1251 *wl)
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
+ DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.c b/drivers/net/wireless/wl12xx/wl1251_init.c
index 5cb573383ee..5aad56ea715 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.c
+++ b/drivers/net/wireless/wl12xx/wl1251_init.c
@@ -294,6 +294,11 @@ static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl)
goto out;
}
+ wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE);
+ wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK);
+ wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI);
+ wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO);
+
out:
kfree(config);
return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1251_init.h b/drivers/net/wireless/wl12xx/wl1251_init.h
index b3b25ec885e..269cefb3e7d 100644
--- a/drivers/net/wireless/wl12xx/wl1251_init.h
+++ b/drivers/net/wireless/wl12xx/wl1251_init.h
@@ -26,6 +26,53 @@
#include "wl1251.h"
+enum {
+ /* best effort/legacy */
+ AC_BE = 0,
+
+ /* background */
+ AC_BK = 1,
+
+ /* video */
+ AC_VI = 2,
+
+ /* voice */
+ AC_VO = 3,
+
+ /* broadcast dummy access category */
+ AC_BCAST = 4,
+
+ NUM_ACCESS_CATEGORIES = 4
+};
+
+/* following are default values for the IE fields */
+#define CWMIN_BK 15
+#define CWMIN_BE 15
+#define CWMIN_VI 7
+#define CWMIN_VO 3
+#define CWMAX_BK 1023
+#define CWMAX_BE 63
+#define CWMAX_VI 15
+#define CWMAX_VO 7
+
+/* slot number setting to start transmission at PIFS interval */
+#define AIFS_PIFS 1
+
+/*
+ * slot number setting to start transmission at DIFS interval - normal DCF
+ * access
+ */
+#define AIFS_DIFS 2
+
+#define AIFSN_BK 7
+#define AIFSN_BE 3
+#define AIFSN_VI AIFS_PIFS
+#define AIFSN_VO AIFS_PIFS
+#define TXOP_BK 0
+#define TXOP_BE 0
+#define TXOP_VI 3008
+#define TXOP_VO 1504
+
int wl1251_hw_init_hwenc_config(struct wl1251 *wl);
int wl1251_hw_init_templates_config(struct wl1251 *wl);
int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter);
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 2f50a256efa..24ae6a360ac 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -395,6 +395,7 @@ static int wl1251_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* the queue here, otherwise the queue will get too long.
*/
if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_MAX_LENGTH) {
+ wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
ieee80211_stop_queues(wl->hw);
/*
@@ -510,13 +511,13 @@ static void wl1251_op_stop(struct ieee80211_hw *hw)
}
static int wl1251_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
int ret = 0;
wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- conf->type, conf->mac_addr);
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
@@ -524,9 +525,9 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- wl->vif = conf->vif;
+ wl->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
break;
@@ -538,8 +539,8 @@ static int wl1251_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- if (memcmp(wl->mac_addr, conf->mac_addr, ETH_ALEN)) {
- memcpy(wl->mac_addr, conf->mac_addr, ETH_ALEN);
+ if (memcmp(wl->mac_addr, vif->addr, ETH_ALEN)) {
+ memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = wl1251_acx_station_id(wl);
if (ret < 0)
@@ -552,7 +553,7 @@ out:
}
static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
@@ -562,43 +563,25 @@ static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
mutex_unlock(&wl->mutex);
}
-static int wl1251_build_null_data(struct wl1251 *wl)
+static int wl1251_build_qos_null_data(struct wl1251 *wl)
{
- struct wl12xx_null_data_template template;
+ struct ieee80211_qos_hdr template;
- if (!is_zero_ether_addr(wl->bssid)) {
- memcpy(template.header.da, wl->bssid, ETH_ALEN);
- memcpy(template.header.bssid, wl->bssid, ETH_ALEN);
- } else {
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- }
-
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
- IEEE80211_STYPE_NULLFUNC |
- IEEE80211_FCTL_TODS);
-
- return wl1251_cmd_template_set(wl, CMD_NULL_DATA, &template,
- sizeof(template));
-
-}
-
-static int wl1251_build_ps_poll(struct wl1251 *wl, u16 aid)
-{
- struct wl12xx_ps_poll_template template;
+ memset(&template, 0, sizeof(template));
- memcpy(template.bssid, wl->bssid, ETH_ALEN);
- memcpy(template.ta, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr1, wl->bssid, ETH_ALEN);
+ memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
+ memcpy(template.addr3, wl->bssid, ETH_ALEN);
- /* aid in PS-Poll has its two MSBs each set to 1 */
- template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
+ template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_FCTL_TODS);
- template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
+ /* FIXME: not sure what priority to use here */
+ template.qos_ctrl = cpu_to_le16(0);
- return wl1251_cmd_template_set(wl, CMD_PS_POLL, &template,
+ return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
sizeof(template));
-
}
static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
@@ -634,26 +617,34 @@ static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
wl->psm_requested = true;
+ wl->dtim_period = conf->ps_dtim_period;
+
+ ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
+ wl->dtim_period);
+
/*
- * We enter PSM only if we're already associated.
- * If we're not, we'll enter it when joining an SSID,
- * through the bss_info_changed() hook.
+ * mac80211 enables PSM only if we're already associated.
*/
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm disabled");
wl->psm_requested = false;
- if (wl->psm)
+ if (wl->psm) {
ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ if (ret < 0)
+ goto out_sleep;
+ }
}
if (conf->power_level != wl->power_level) {
ret = wl1251_acx_tx_power(wl, conf->power_level);
if (ret < 0)
- goto out;
+ goto out_sleep;
wl->power_level = conf->power_level;
}
@@ -864,199 +855,61 @@ out:
return ret;
}
-static int wl1251_build_basic_rates(char *rates)
-{
- u8 index = 0;
-
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
- rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
-
- return index;
-}
-
-static int wl1251_build_extended_rates(char *rates)
+static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
+ struct cfg80211_scan_request *req)
{
- u8 index = 0;
-
- rates[index++] = IEEE80211_OFDM_RATE_6MB;
- rates[index++] = IEEE80211_OFDM_RATE_9MB;
- rates[index++] = IEEE80211_OFDM_RATE_12MB;
- rates[index++] = IEEE80211_OFDM_RATE_18MB;
- rates[index++] = IEEE80211_OFDM_RATE_24MB;
- rates[index++] = IEEE80211_OFDM_RATE_36MB;
- rates[index++] = IEEE80211_OFDM_RATE_48MB;
- rates[index++] = IEEE80211_OFDM_RATE_54MB;
-
- return index;
-}
-
+ struct wl1251 *wl = hw->priv;
+ struct sk_buff *skb;
+ size_t ssid_len = 0;
+ u8 *ssid = NULL;
+ int ret;
-static int wl1251_build_probe_req(struct wl1251 *wl, u8 *ssid, size_t ssid_len)
-{
- struct wl12xx_probe_req_template template;
- struct wl12xx_ie_rates *rates;
- char *ptr;
- u16 size;
-
- ptr = (char *)&template;
- size = sizeof(struct ieee80211_header);
-
- memset(template.header.da, 0xff, ETH_ALEN);
- memset(template.header.bssid, 0xff, ETH_ALEN);
- memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
- template.header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
-
- /* IEs */
- /* SSID */
- template.ssid.header.id = WLAN_EID_SSID;
- template.ssid.header.len = ssid_len;
- if (ssid_len && ssid)
- memcpy(template.ssid.ssid, ssid, ssid_len);
- size += sizeof(struct wl12xx_ie_header) + ssid_len;
- ptr += size;
-
- /* Basic Rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_SUPP_RATES;
- rates->header.len = wl1251_build_basic_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
- ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- /* Extended rates */
- rates = (struct wl12xx_ie_rates *)ptr;
- rates->header.id = WLAN_EID_EXT_SUPP_RATES;
- rates->header.len = wl1251_build_extended_rates(rates->rates);
- size += sizeof(struct wl12xx_ie_header) + rates->header.len;
-
- wl1251_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
-
- return wl1251_cmd_template_set(wl, CMD_PROBE_REQ, &template,
- size);
-}
+ wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-static int wl1251_hw_scan(struct wl1251 *wl, u8 *ssid, size_t len,
- u8 active_scan, u8 high_prio, u8 num_channels,
- u8 probe_requests)
-{
- struct wl1251_cmd_trigger_scan_to *trigger = NULL;
- struct cmd_scan *params = NULL;
- int i, ret;
- u16 scan_options = 0;
-
- if (wl->scanning)
- return -EINVAL;
-
- params = kzalloc(sizeof(*params), GFP_KERNEL);
- if (!params)
- return -ENOMEM;
-
- params->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
- params->params.rx_filter_options =
- cpu_to_le32(CFG_RX_PRSP_EN | CFG_RX_MGMT_EN | CFG_RX_BCN_EN);
-
- /* High priority scan */
- if (!active_scan)
- scan_options |= SCAN_PASSIVE;
- if (high_prio)
- scan_options |= SCAN_PRIORITY_HIGH;
- params->params.scan_options = scan_options;
-
- params->params.num_channels = num_channels;
- params->params.num_probe_requests = probe_requests;
- params->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
- params->params.tid_trigger = 0;
-
- for (i = 0; i < num_channels; i++) {
- params->channels[i].min_duration = cpu_to_le32(30000);
- params->channels[i].max_duration = cpu_to_le32(60000);
- memset(&params->channels[i].bssid_lsb, 0xff, 4);
- memset(&params->channels[i].bssid_msb, 0xff, 2);
- params->channels[i].early_termination = 0;
- params->channels[i].tx_power_att = 0;
- params->channels[i].channel = i + 1;
- memset(params->channels[i].pad, 0, 3);
+ if (req->n_ssids) {
+ ssid = req->ssids[0].ssid;
+ ssid_len = req->ssids[0].ssid_len;
}
- for (i = num_channels; i < SCAN_MAX_NUM_OF_CHANNELS; i++)
- memset(&params->channels[i], 0,
- sizeof(struct basic_scan_channel_parameters));
-
- if (len && ssid) {
- params->params.ssid_len = len;
- memcpy(params->params.ssid, ssid, len);
- } else {
- params->params.ssid_len = 0;
- memset(params->params.ssid, 0, 32);
- }
+ mutex_lock(&wl->mutex);
- ret = wl1251_build_probe_req(wl, ssid, len);
- if (ret < 0) {
- wl1251_error("PROBE request template failed");
+ if (wl->scanning) {
+ wl1251_debug(DEBUG_SCAN, "scan already in progress");
+ ret = -EINVAL;
goto out;
}
- trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
- if (!trigger)
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
goto out;
- trigger->timeout = 0;
-
- ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
- sizeof(*trigger));
- if (ret < 0) {
- wl1251_error("trigger scan to failed for hw scan");
+ skb = ieee80211_probereq_get(wl->hw, wl->vif, ssid, ssid_len,
+ req->ie, req->ie_len);
+ if (!skb) {
+ ret = -ENOMEM;
goto out;
}
- wl1251_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
-
- wl->scanning = true;
+ ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
- ret = wl1251_cmd_send(wl, CMD_SCAN, params, sizeof(*params));
+ ret = wl1251_cmd_trigger_scan_to(wl, 0);
if (ret < 0)
- wl1251_error("SCAN failed");
+ goto out_sleep;
- wl1251_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
+ wl->scanning = true;
- if (params->header.status != CMD_STATUS_SUCCESS) {
- wl1251_error("TEST command answer error: %d",
- params->header.status);
+ ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
+ req->n_channels, WL1251_SCAN_NUM_PROBES);
+ if (ret < 0) {
wl->scanning = false;
- ret = -EIO;
- goto out;
- }
-
-out:
- kfree(params);
- return ret;
-
-}
-
-static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
- struct cfg80211_scan_request *req)
-{
- struct wl1251 *wl = hw->priv;
- int ret;
- u8 *ssid = NULL;
- size_t ssid_len = 0;
-
- wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
-
- if (req->n_ssids) {
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
+ goto out_sleep;
}
- mutex_lock(&wl->mutex);
-
- ret = wl1251_ps_elp_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1251_hw_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3);
-
+out_sleep:
wl1251_ps_elp_sleep(wl);
out:
@@ -1093,9 +946,8 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
- enum wl1251_cmd_ps_mode mode;
struct wl1251 *wl = hw->priv;
- struct sk_buff *beacon;
+ struct sk_buff *beacon, *skb;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1109,7 +961,17 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BSSID) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
- ret = wl1251_build_null_data(wl);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
+
+ ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA,
+ skb->data, skb->len);
+ dev_kfree_skb(skb);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1251_build_qos_null_data(wl);
if (ret < 0)
goto out;
@@ -1124,27 +986,21 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
wl->beacon_int = bss_conf->beacon_int;
- wl->dtim_period = bss_conf->dtim_period;
- ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
- wl->dtim_period);
- wl->aid = bss_conf->aid;
+ skb = ieee80211_pspoll_get(wl->hw, wl->vif);
+ if (!skb)
+ goto out_sleep;
- ret = wl1251_build_ps_poll(wl, wl->aid);
+ ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
+ skb->data,
+ skb->len);
+ dev_kfree_skb(skb);
if (ret < 0)
goto out_sleep;
- ret = wl1251_acx_aid(wl, wl->aid);
+ ret = wl1251_acx_aid(wl, bss_conf->aid);
if (ret < 0)
goto out_sleep;
-
- /* If we want to go in PSM but we're not there yet */
- if (wl->psm_requested && !wl->psm) {
- mode = STATION_POWER_SAVE_MODE;
- ret = wl1251_ps_set_mode(wl, mode);
- if (ret < 0)
- goto out_sleep;
- }
} else {
/* use defaults when not associated */
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
@@ -1176,7 +1032,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
if (ret < 0) {
wl1251_warning("Set ctsprotect failed %d", ret);
- goto out;
+ goto out_sleep;
}
}
@@ -1187,7 +1043,7 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0) {
dev_kfree_skb(beacon);
- goto out;
+ goto out_sleep;
}
ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
@@ -1196,13 +1052,13 @@ static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
dev_kfree_skb(beacon);
if (ret < 0)
- goto out;
+ goto out_sleep;
ret = wl1251_join(wl, wl->bss_type, wl->beacon_int,
wl->channel, wl->dtim_period);
if (ret < 0)
- goto out;
+ goto out_sleep;
}
out_sleep:
@@ -1273,6 +1129,49 @@ static struct ieee80211_channel wl1251_channels[] = {
{ .hw_value = 13, .center_freq = 2472},
};
+static int wl1251_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ enum wl1251_acx_ps_scheme ps_scheme;
+ struct wl1251 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
+
+ ret = wl1251_ps_elp_wakeup(wl);
+ if (ret < 0)
+ goto out;
+
+ /* mac80211 uses units of 32 usec */
+ ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
+ params->cw_min, params->cw_max,
+ params->aifs, params->txop * 32);
+ if (ret < 0)
+ goto out_sleep;
+
+ if (params->uapsd)
+ ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER;
+ else
+ ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY;
+
+ ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
+ CHANNEL_TYPE_EDCF,
+ wl1251_tx_get_queue(queue), ps_scheme,
+ WL1251_ACX_ACK_POLICY_LEGACY);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1251_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1251_band_2ghz = {
.channels = wl1251_channels,
@@ -1293,6 +1192,7 @@ static const struct ieee80211_ops wl1251_ops = {
.hw_scan = wl1251_op_hw_scan,
.bss_info_changed = wl1251_op_bss_info_changed,
.set_rts_threshold = wl1251_op_set_rts_threshold,
+ .conf_tx = wl1251_op_conf_tx,
};
static int wl1251_register_hw(struct wl1251 *wl)
@@ -1332,12 +1232,15 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
IEEE80211_HW_NOISE_DBM |
IEEE80211_HW_SUPPORTS_PS |
- IEEE80211_HW_BEACON_FILTER;
+ IEEE80211_HW_BEACON_FILTER |
+ IEEE80211_HW_SUPPORTS_UAPSD;
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1251_band_2ghz;
+ wl->hw->queues = 4;
+
ret = wl1251_register_hw(wl);
if (ret)
goto out;
diff --git a/drivers/net/wireless/wl12xx/wl1251_ps.c b/drivers/net/wireless/wl12xx/wl1251_ps.c
index 9931b197ff7..851dfb65e47 100644
--- a/drivers/net/wireless/wl12xx/wl1251_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1251_ps.c
@@ -26,7 +26,8 @@
#include "wl1251_cmd.h"
#include "wl1251_io.h"
-#define WL1251_WAKEUP_TIMEOUT 2000
+/* in ms */
+#define WL1251_WAKEUP_TIMEOUT 100
void wl1251_elp_work(struct work_struct *work)
{
@@ -67,7 +68,7 @@ void wl1251_ps_elp_sleep(struct wl1251 *wl)
int wl1251_ps_elp_wakeup(struct wl1251 *wl)
{
- unsigned long timeout;
+ unsigned long timeout, start;
u32 elp_reg;
if (!wl->elp)
@@ -75,6 +76,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
wl1251_debug(DEBUG_PSM, "waking up chip from elp");
+ start = jiffies;
timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
wl1251_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
@@ -95,8 +97,7 @@ int wl1251_ps_elp_wakeup(struct wl1251 *wl)
}
wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
- jiffies_to_msecs(jiffies) -
- (jiffies_to_msecs(timeout) - WL1251_WAKEUP_TIMEOUT));
+ jiffies_to_msecs(jiffies - start));
wl->elp = false;
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index f84cc89cbff..b56732226cc 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -126,7 +126,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
if (wl->rx_current_buffer)
rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
- skb = dev_alloc_skb(length);
+ skb = __dev_alloc_skb(length, GFP_KERNEL);
if (!skb) {
wl1251_error("Couldn't allocate RX frame");
return;
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.c b/drivers/net/wireless/wl12xx/wl1251_tx.c
index f8597061584..c8223185efd 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.c
@@ -167,8 +167,7 @@ static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
tx_hdr->expiry_time = cpu_to_le32(1 << 16);
tx_hdr->id = id;
- /* FIXME: how to get the correct queue id? */
- tx_hdr->xmit_queue = 0;
+ tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
wl1251_tx_control(tx_hdr, control, fc);
wl1251_tx_frag_block_num(tx_hdr);
@@ -220,6 +219,7 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
/* align the buffer on a 4-byte boundary */
skb_reserve(skb, offset);
memmove(skb->data, src, skb->len);
+ tx_hdr = (struct tx_double_buffer_desc *) skb->data;
} else {
wl1251_info("No handler, fixme!");
return -EINVAL;
@@ -237,8 +237,9 @@ static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
wl1251_mem_write(wl, addr, skb->data, len);
- wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x",
- tx_hdr->id, skb, tx_hdr->length, tx_hdr->rate);
+ wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
+ "queue %d", tx_hdr->id, skb, tx_hdr->length,
+ tx_hdr->rate, tx_hdr->xmit_queue);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1251_tx.h b/drivers/net/wireless/wl12xx/wl1251_tx.h
index 7c1c1665c81..55856c6bb97 100644
--- a/drivers/net/wireless/wl12xx/wl1251_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1251_tx.h
@@ -26,6 +26,7 @@
#define __WL1251_TX_H__
#include <linux/bitops.h>
+#include "wl1251_acx.h"
/*
*
@@ -209,6 +210,22 @@ struct tx_result {
u8 done_2;
} __attribute__ ((packed));
+static inline int wl1251_tx_get_queue(int queue)
+{
+ switch (queue) {
+ case 0:
+ return QOS_AC_VO;
+ case 1:
+ return QOS_AC_VI;
+ case 2:
+ return QOS_AC_BE;
+ case 3:
+ return QOS_AC_BK;
+ default:
+ return QOS_AC_BE;
+ }
+}
+
void wl1251_tx_work(struct work_struct *work);
void wl1251_tx_complete(struct wl1251 *wl);
void wl1251_tx_flush(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 94359b1a861..97ea5096bc8 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -43,7 +43,7 @@ enum {
DEBUG_SPI = BIT(1),
DEBUG_BOOT = BIT(2),
DEBUG_MAILBOX = BIT(3),
- DEBUG_NETLINK = BIT(4),
+ DEBUG_TESTMODE = BIT(4),
DEBUG_EVENT = BIT(5),
DEBUG_TX = BIT(6),
DEBUG_RX = BIT(7),
@@ -107,11 +107,36 @@ enum {
CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
-#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
-
#define WL1271_FW_NAME "wl1271-fw.bin"
#define WL1271_NVS_NAME "wl1271-nvs.bin"
+/* NVS data structure */
+#define WL1271_NVS_SECTION_SIZE 468
+
+#define WL1271_NVS_GENERAL_PARAMS_SIZE 57
+#define WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED \
+ (WL1271_NVS_GENERAL_PARAMS_SIZE + 1)
+#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE 17
+#define WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED \
+ (WL1271_NVS_STAT_RADIO_PARAMS_SIZE + 1)
+#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE 65
+#define WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED \
+ (WL1271_NVS_DYN_RADIO_PARAMS_SIZE + 1)
+#define WL1271_NVS_FEM_COUNT 2
+#define WL1271_NVS_INI_SPARE_SIZE 124
+
+struct wl1271_nvs_file {
+ /* NVS section */
+ u8 nvs[WL1271_NVS_SECTION_SIZE];
+
+ /* INI section */
+ u8 general_params[WL1271_NVS_GENERAL_PARAMS_SIZE_PADDED];
+ u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE_PADDED];
+ u8 dyn_radio_params[WL1271_NVS_FEM_COUNT]
+ [WL1271_NVS_DYN_RADIO_PARAMS_SIZE_PADDED];
+ u8 ini_spare[WL1271_NVS_INI_SPARE_SIZE];
+} __attribute__ ((packed));
+
/*
* Enable/disable 802.11a support for WL1273
*/
@@ -276,6 +301,7 @@ struct wl1271_debugfs {
struct dentry *retry_count;
struct dentry *excessive_retries;
+ struct dentry *gpio_power;
};
#define NUM_TX_QUEUES 4
@@ -322,6 +348,17 @@ struct wl1271 {
enum wl1271_state state;
struct mutex mutex;
+#define WL1271_FLAG_STA_RATES_CHANGED (0)
+#define WL1271_FLAG_STA_ASSOCIATED (1)
+#define WL1271_FLAG_JOINED (2)
+#define WL1271_FLAG_GPIO_POWER (3)
+#define WL1271_FLAG_TX_QUEUE_STOPPED (4)
+#define WL1271_FLAG_SCANNING (5)
+#define WL1271_FLAG_IN_ELP (6)
+#define WL1271_FLAG_PSM (7)
+#define WL1271_FLAG_PSM_REQUESTED (8)
+ unsigned long flags;
+
struct wl1271_partition_set part;
struct wl1271_chip chip;
@@ -331,8 +368,7 @@ struct wl1271 {
u8 *fw;
size_t fw_len;
- u8 *nvs;
- size_t nvs_len;
+ struct wl1271_nvs_file *nvs;
u8 bssid[ETH_ALEN];
u8 mac_addr[ETH_ALEN];
@@ -359,7 +395,6 @@ struct wl1271 {
/* Frames scheduled for transmission, not handled yet */
struct sk_buff_head tx_queue;
- bool tx_queue_stopped;
struct work_struct tx_work;
@@ -387,14 +422,15 @@ struct wl1271 {
u32 mbox_ptr[2];
/* Are we currently scanning */
- bool scanning;
struct wl1271_scan scan;
/* Our association ID */
u16 aid;
/* currently configured rate set */
+ u32 sta_rate_set;
u32 basic_rate_set;
+ u32 rate_set;
/* The current band */
enum ieee80211_band band;
@@ -405,18 +441,9 @@ struct wl1271 {
unsigned int rx_config;
unsigned int rx_filter;
- /* is firmware in elp mode */
- bool elp;
-
struct completion *elp_compl;
struct delayed_work elp_work;
- /* we can be in psm, but not in elp, we have to differentiate */
- bool psm;
-
- /* PSM mode requested */
- bool psm_requested;
-
/* retry counter for PSM entries */
u8 psm_entry_retry;
@@ -435,9 +462,6 @@ struct wl1271 {
struct ieee80211_vif *vif;
- /* Used for a workaround to send disconnect before rejoining */
- bool joined;
-
/* Current chipset configuration */
struct conf_drv_settings conf;
@@ -455,11 +479,14 @@ int wl1271_plt_stop(struct wl1271 *wl);
#define WL1271_TX_QUEUE_MAX_LENGTH 20
-/* WL1271 needs a 200ms sleep after power on */
+/* WL1271 needs a 200ms sleep after power on, and a 20ms sleep before power
+ on in case it has been shut down shortly before */
+#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
static inline bool wl1271_11a_enabled(void)
{
+ /* FIXME: this could be determined based on the NVS-INI file */
#ifdef WL1271_80211A_ENABLED
return true;
#else
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index 5cc89bbdac7..60f10dce480 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -390,6 +390,35 @@ out:
return ret;
}
+int wl1271_acx_dco_itrim_params(struct wl1271 *wl)
+{
+ struct acx_dco_itrim_params *dco;
+ struct conf_itrim_settings *c = &wl->conf.itrim;
+ int ret;
+
+ wl1271_debug(DEBUG_ACX, "acx dco itrim parameters");
+
+ dco = kzalloc(sizeof(*dco), GFP_KERNEL);
+ if (!dco) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ dco->enable = c->enable;
+ dco->timeout = cpu_to_le32(c->timeout);
+
+ ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS,
+ dco, sizeof(*dco));
+ if (ret < 0) {
+ wl1271_warning("failed to set dco itrim parameters: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(dco);
+ return ret;
+}
+
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter = NULL;
@@ -758,10 +787,11 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
return 0;
}
-int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
+int wl1271_acx_rate_policies(struct wl1271 *wl)
{
struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
+ int idx = 0;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -773,12 +803,21 @@ int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
goto out;
}
- /* configure one default (one-size-fits-all) rate class */
- acx->rate_class_cnt = cpu_to_le32(1);
- acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates);
- acx->rate_class[0].short_retry_limit = c->short_retry_limit;
- acx->rate_class[0].long_retry_limit = c->long_retry_limit;
- acx->rate_class[0].aflags = c->aflags;
+ /* configure one basic rate class */
+ idx = ACX_TX_BASIC_RATE;
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->basic_rate_set);
+ acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
+ acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
+ acx->rate_class[idx].aflags = c->aflags;
+
+ /* configure one AP supported rate class */
+ idx = ACX_TX_AP_FULL_RATE;
+ acx->rate_class[idx].enabled_rates = cpu_to_le32(wl->rate_set);
+ acx->rate_class[idx].short_retry_limit = c->short_retry_limit;
+ acx->rate_class[idx].long_retry_limit = c->long_retry_limit;
+ acx->rate_class[idx].aflags = c->aflags;
+
+ acx->rate_class_cnt = cpu_to_le32(ACX_TX_RATE_POLICY_CNT);
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
@@ -791,12 +830,14 @@ out:
return ret;
}
-int wl1271_acx_ac_cfg(struct wl1271 *wl)
+int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifsn, u16 txop)
{
struct acx_ac_cfg *acx;
- int i, ret = 0;
+ int ret = 0;
- wl1271_debug(DEBUG_ACX, "acx access category config");
+ wl1271_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d "
+ "aifs %d txop %d", ac, cw_min, cw_max, aifsn, txop);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
@@ -805,21 +846,16 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl)
goto out;
}
- for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
- struct conf_tx_ac_category *c = &(wl->conf.tx.ac_conf[i]);
- acx->ac = c->ac;
- acx->cw_min = c->cw_min;
- acx->cw_max = cpu_to_le16(c->cw_max);
- acx->aifsn = c->aifsn;
- acx->reserved = 0;
- acx->tx_op_limit = cpu_to_le16(c->tx_op_limit);
+ acx->ac = ac;
+ acx->cw_min = cw_min;
+ acx->cw_max = cpu_to_le16(cw_max);
+ acx->aifsn = aifsn;
+ acx->tx_op_limit = cpu_to_le16(txop);
- ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
- if (ret < 0) {
- wl1271_warning("Setting of access category "
- "config: %d", ret);
- goto out;
- }
+ ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx ac cfg failed: %d", ret);
+ goto out;
}
out:
@@ -827,10 +863,12 @@ out:
return ret;
}
-int wl1271_acx_tid_cfg(struct wl1271 *wl)
+int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+ u8 tsid, u8 ps_scheme, u8 ack_policy,
+ u32 apsd_conf0, u32 apsd_conf1)
{
struct acx_tid_config *acx;
- int i, ret = 0;
+ int ret = 0;
wl1271_debug(DEBUG_ACX, "acx tid config");
@@ -841,21 +879,18 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl)
goto out;
}
- for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
- struct conf_tx_tid *c = &(wl->conf.tx.tid_conf[i]);
- acx->queue_id = c->queue_id;
- acx->channel_type = c->channel_type;
- acx->tsid = c->tsid;
- acx->ps_scheme = c->ps_scheme;
- acx->ack_policy = c->ack_policy;
- acx->apsd_conf[0] = cpu_to_le32(c->apsd_conf[0]);
- acx->apsd_conf[1] = cpu_to_le32(c->apsd_conf[1]);
+ acx->queue_id = queue_id;
+ acx->channel_type = channel_type;
+ acx->tsid = tsid;
+ acx->ps_scheme = ps_scheme;
+ acx->ack_policy = ack_policy;
+ acx->apsd_conf[0] = cpu_to_le32(apsd_conf0);
+ acx->apsd_conf[1] = cpu_to_le32(apsd_conf1);
- ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
- if (ret < 0) {
- wl1271_warning("Setting of tid config failed: %d", ret);
- goto out;
- }
+ ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("Setting of tid config failed: %d", ret);
+ goto out;
}
out:
@@ -1012,59 +1047,6 @@ out:
return ret;
}
-int wl1271_acx_smart_reflex(struct wl1271 *wl)
-{
- struct acx_smart_reflex_state *sr_state = NULL;
- struct acx_smart_reflex_config_params *sr_param = NULL;
- int i, ret;
-
- wl1271_debug(DEBUG_ACX, "acx smart reflex");
-
- sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
- if (!sr_param) {
- ret = -ENOMEM;
- goto out;
- }
-
- for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
- struct conf_mart_reflex_err_table *e =
- &(wl->conf.init.sr_err_tbl[i]);
-
- sr_param->error_table[i].len = e->len;
- sr_param->error_table[i].upper_limit = e->upper_limit;
- memcpy(sr_param->error_table[i].values, e->values, e->len);
- }
-
- ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
- sr_param, sizeof(*sr_param));
- if (ret < 0) {
- wl1271_warning("failed to set smart reflex params: %d", ret);
- goto out;
- }
-
- sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
- if (!sr_state) {
- ret = -ENOMEM;
- goto out;
- }
-
- /* enable smart reflex */
- sr_state->enable = wl->conf.init.sr_enable;
-
- ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
- sr_state, sizeof(*sr_state));
- if (ret < 0) {
- wl1271_warning("failed to set smart reflex params: %d", ret);
- goto out;
- }
-
-out:
- kfree(sr_state);
- kfree(sr_param);
- return ret;
-
-}
-
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
{
struct wl1271_acx_bet_enable *acx = NULL;
@@ -1132,3 +1114,31 @@ out:
kfree(acx);
return ret;
}
+
+int wl1271_acx_pm_config(struct wl1271 *wl)
+{
+ struct wl1271_acx_pm_config *acx = NULL;
+ struct conf_pm_config_settings *c = &wl->conf.pm_config;
+ int ret = 0;
+
+ wl1271_debug(DEBUG_ACX, "acx pm config");
+
+ acx = kzalloc(sizeof(*acx), GFP_KERNEL);
+ if (!acx) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time);
+ acx->host_fast_wakeup_support = c->host_fast_wakeup_support;
+
+ ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx));
+ if (ret < 0) {
+ wl1271_warning("acx pm config failed: %d", ret);
+ goto out;
+ }
+
+out:
+ kfree(acx);
+ return ret;
+}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 2ce0a812854..aeccc98581e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -2,7 +2,7 @@
* This file is part of wl1271
*
* Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
- * Copyright (C) 2008-2009 Nokia Corporation
+ * Copyright (C) 2008-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -348,7 +348,7 @@ struct acx_beacon_filter_option {
* ACXBeaconFilterEntry (not 221)
* Byte Offset Size (Bytes) Definition
* =========== ============ ==========
- * 0 1 IE identifier
+ * 0 1 IE identifier
* 1 1 Treatment bit mask
*
* ACXBeaconFilterEntry (221)
@@ -381,8 +381,8 @@ struct acx_beacon_filter_ie_table {
struct acx_header header;
u8 num_ie;
- u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
u8 pad[3];
+ u8 table[BEACON_FILTER_TABLE_MAX_SIZE];
} __attribute__ ((packed));
struct acx_conn_monit_params {
@@ -415,23 +415,12 @@ struct acx_bt_wlan_coex {
u8 pad[3];
} __attribute__ ((packed));
-struct acx_smart_reflex_state {
+struct acx_dco_itrim_params {
struct acx_header header;
u8 enable;
u8 padding[3];
-} __attribute__ ((packed));
-
-struct smart_reflex_err_table {
- u8 len;
- s8 upper_limit;
- s8 values[14];
-} __attribute__ ((packed));
-
-struct acx_smart_reflex_config_params {
- struct acx_header header;
-
- struct smart_reflex_err_table error_table[3];
+ __le32 timeout;
} __attribute__ ((packed));
#define PTA_ANTENNA_TYPE_DEF (0)
@@ -837,6 +826,9 @@ struct acx_rate_class {
u8 reserved;
};
+#define ACX_TX_BASIC_RATE 0
+#define ACX_TX_AP_FULL_RATE 1
+#define ACX_TX_RATE_POLICY_CNT 2
struct acx_rate_policy {
struct acx_header header;
@@ -877,8 +869,8 @@ struct acx_tx_config_options {
__le16 tx_compl_threshold; /* number of packets */
} __attribute__ ((packed));
-#define ACX_RX_MEM_BLOCKS 64
-#define ACX_TX_MIN_MEM_BLOCKS 64
+#define ACX_RX_MEM_BLOCKS 70
+#define ACX_TX_MIN_MEM_BLOCKS 40
#define ACX_TX_DESCRIPTORS 32
#define ACX_NUM_SSID_PROFILES 1
@@ -969,6 +961,13 @@ struct wl1271_acx_arp_filter {
used. */
} __attribute__((packed));
+struct wl1271_acx_pm_config {
+ struct acx_header header;
+
+ __le32 host_clk_settling_time;
+ u8 host_fast_wakeup_support;
+ u8 padding[3];
+} __attribute__ ((packed));
enum {
ACX_WAKE_UP_CONDITIONS = 0x0002,
@@ -1027,13 +1026,13 @@ enum {
ACX_HT_BSS_OPERATION = 0x0058,
ACX_COEX_ACTIVITY = 0x0059,
ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
- ACX_SET_SMART_REFLEX_STATE = 0x005B,
- ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
+ ACX_SET_DCO_ITRIM_PARAMS = 0x0061,
DOT11_RX_MSDU_LIFE_TIME = 0x1004,
DOT11_CUR_TX_PWR = 0x100D,
DOT11_RX_DOT11_MODE = 0x1012,
DOT11_RTS_THRESHOLD = 0x1013,
DOT11_GROUP_ADDRESS_TBL = 0x1014,
+ ACX_PM_CONFIG = 0x1016,
MAX_DOT11_IE = DOT11_GROUP_ADDRESS_TBL,
@@ -1056,6 +1055,7 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
void *mc_list, u32 mc_list_len);
int wl1271_acx_service_period_timeout(struct wl1271 *wl);
int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
+int wl1271_acx_dco_itrim_params(struct wl1271 *wl);
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
int wl1271_acx_conn_monit_params(struct wl1271 *wl);
@@ -1069,9 +1069,12 @@ int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
int wl1271_acx_cts_protect(struct wl1271 *wl,
enum acx_ctsprotect_type ctsprotect);
int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
-int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates);
-int wl1271_acx_ac_cfg(struct wl1271 *wl);
-int wl1271_acx_tid_cfg(struct wl1271 *wl);
+int wl1271_acx_rate_policies(struct wl1271 *wl);
+int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
+ u8 aifsn, u16 txop);
+int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
+ u8 tsid, u8 ps_scheme, u8 ack_policy,
+ u32 apsd_conf0, u32 apsd_conf1);
int wl1271_acx_frag_threshold(struct wl1271 *wl);
int wl1271_acx_tx_config_options(struct wl1271 *wl);
int wl1271_acx_mem_cfg(struct wl1271 *wl);
@@ -1081,5 +1084,6 @@ int wl1271_acx_smart_reflex(struct wl1271 *wl);
int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
u8 version);
+int wl1271_acx_pm_config(struct wl1271 *wl);
#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index b7c96454cca..2be76ee42bb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -27,6 +27,7 @@
#include "wl1271_reg.h"
#include "wl1271_boot.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_event.h"
static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
@@ -93,19 +94,19 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
u32 cpu_ctrl;
/* 10.5.0 run the firmware (I) */
- cpu_ctrl = wl1271_spi_read32(wl, ACX_REG_ECPU_CONTROL);
+ cpu_ctrl = wl1271_read32(wl, ACX_REG_ECPU_CONTROL);
/* 10.5.1 run the firmware (II) */
cpu_ctrl |= flag;
- wl1271_spi_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
+ wl1271_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
}
static void wl1271_boot_fw_version(struct wl1271 *wl)
{
struct wl1271_static_data static_data;
- wl1271_spi_read(wl, wl->cmd_box_addr,
- &static_data, sizeof(static_data), false);
+ wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
+ false);
strncpy(wl->chip.fw_ver, static_data.fw_version,
sizeof(wl->chip.fw_ver));
@@ -164,7 +165,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(chunk, p, CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
p, addr);
- wl1271_spi_write(wl, addr, chunk, CHUNK_SIZE, false);
+ wl1271_write(wl, addr, chunk, CHUNK_SIZE, false);
chunk_num++;
}
@@ -175,7 +176,7 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
fw_data_len % CHUNK_SIZE, p, addr);
- wl1271_spi_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
+ wl1271_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
kfree(chunk);
return 0;
@@ -219,23 +220,14 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
size_t nvs_len, burst_len;
int i;
u32 dest_addr, val;
- u8 *nvs_ptr, *nvs, *nvs_aligned;
+ u8 *nvs_ptr, *nvs_aligned;
- nvs = wl->nvs;
- if (nvs == NULL)
+ if (wl->nvs == NULL)
return -ENODEV;
- nvs_ptr = nvs;
-
- nvs_len = wl->nvs_len;
-
- /* Update the device MAC address into the nvs */
- nvs[11] = wl->mac_addr[0];
- nvs[10] = wl->mac_addr[1];
- nvs[6] = wl->mac_addr[2];
- nvs[5] = wl->mac_addr[3];
- nvs[4] = wl->mac_addr[4];
- nvs[3] = wl->mac_addr[5];
+ /* only the first part of the NVS needs to be uploaded */
+ nvs_len = sizeof(wl->nvs->nvs);
+ nvs_ptr = (u8 *)wl->nvs->nvs;
/*
* Layout before the actual NVS tables:
@@ -265,7 +257,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
wl1271_debug(DEBUG_BOOT,
"nvs burst write 0x%x: 0x%x",
dest_addr, val);
- wl1271_spi_write32(wl, dest_addr, val);
+ wl1271_write32(wl, dest_addr, val);
nvs_ptr += 4;
dest_addr += 4;
@@ -277,7 +269,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
* is 7 bytes further.
*/
nvs_ptr += 7;
- nvs_len -= nvs_ptr - nvs;
+ nvs_len -= nvs_ptr - (u8 *)wl->nvs->nvs;
nvs_len = ALIGN(nvs_len, 4);
/* FIXME: The driver sets the partition here, but this is not needed,
@@ -286,15 +278,20 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
wl1271_set_partition(wl, &part_table[PART_WORK]);
/* Copy the NVS tables to a new block to ensure alignment */
- nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
- if (!nvs_aligned)
- return -ENOMEM;
+ /* FIXME: We jump 3 more bytes before uploading the NVS. It seems
+ that our NVS files have three extra zeros here. I'm not sure whether
+ the problem is in our NVS generation or whether we should really jump these
+ 3 bytes here */
+ nvs_ptr += 3;
+
+ nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
+ if (!nvs_aligned)
+ return -ENOMEM;
/* And finally we upload the NVS tables */
/* FIXME: In wl1271, we upload everything at once.
No endianness handling needed here?! The ref driver doesn't do
anything about it at this point */
- wl1271_spi_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
+ wl1271_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
kfree(nvs_aligned);
return 0;
@@ -303,9 +300,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
{
enable_irq(wl->irq);
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
- wl1271_spi_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
+ WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+ wl1271_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
}
static int wl1271_boot_soft_reset(struct wl1271 *wl)
@@ -314,13 +311,12 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
u32 boot_data;
/* perform soft reset */
- wl1271_spi_write32(wl, ACX_REG_SLV_SOFT_RESET,
- ACX_SLV_SOFT_RESET_BIT);
+ wl1271_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
/* SOFT_RESET is self clearing */
timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
while (1) {
- boot_data = wl1271_spi_read32(wl, ACX_REG_SLV_SOFT_RESET);
+ boot_data = wl1271_read32(wl, ACX_REG_SLV_SOFT_RESET);
wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
break;
@@ -336,10 +332,10 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
}
/* disable Rx/Tx */
- wl1271_spi_write32(wl, ENABLE, 0x0);
+ wl1271_write32(wl, ENABLE, 0x0);
/* disable auto calibration on start*/
- wl1271_spi_write32(wl, SPARE_A2, 0xffff);
+ wl1271_write32(wl, SPARE_A2, 0xffff);
return 0;
}
@@ -351,7 +347,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
- chip_id = wl1271_spi_read32(wl, CHIP_ID_B);
+ chip_id = wl1271_read32(wl, CHIP_ID_B);
wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
@@ -364,8 +360,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
loop = 0;
while (loop++ < INIT_LOOP) {
udelay(INIT_LOOP_DELAY);
- interrupt = wl1271_spi_read32(wl,
- ACX_REG_INTERRUPT_NO_CLEAR);
+ interrupt = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
if (interrupt == 0xffffffff) {
wl1271_error("error reading hardware complete "
@@ -374,8 +369,8 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
}
/* check that ACX_INTR_INIT_COMPLETE is enabled */
else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
- WL1271_ACX_INTR_INIT_COMPLETE);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
+ WL1271_ACX_INTR_INIT_COMPLETE);
break;
}
}
@@ -387,10 +382,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
}
/* get hardware config command mail box */
- wl->cmd_box_addr = wl1271_spi_read32(wl, REG_COMMAND_MAILBOX_PTR);
+ wl->cmd_box_addr = wl1271_read32(wl, REG_COMMAND_MAILBOX_PTR);
/* get hardware config event mail box */
- wl->event_box_addr = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
+ wl->event_box_addr = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
/* set the working partition to its "running" mode offset */
wl1271_set_partition(wl, &part_table[PART_WORK]);
@@ -463,9 +458,9 @@ int wl1271_boot(struct wl1271 *wl)
wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
}
- wl1271_spi_write32(wl, PLL_PARAMETERS, clk);
+ wl1271_write32(wl, PLL_PARAMETERS, clk);
- pause = wl1271_spi_read32(wl, PLL_PARAMETERS);
+ pause = wl1271_read32(wl, PLL_PARAMETERS);
wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
@@ -474,10 +469,10 @@ int wl1271_boot(struct wl1271 *wl)
* 0x3ff (magic number ). How does
* this work?! */
pause |= WU_COUNTER_PAUSE_VAL;
- wl1271_spi_write32(wl, WU_COUNTER_PAUSE, pause);
+ wl1271_write32(wl, WU_COUNTER_PAUSE, pause);
/* Continue the ELP wake up sequence */
- wl1271_spi_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
+ wl1271_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
udelay(500);
wl1271_set_partition(wl, &part_table[PART_DRPW]);
@@ -487,18 +482,18 @@ int wl1271_boot(struct wl1271 *wl)
before taking DRPw out of reset */
wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
- clk = wl1271_spi_read32(wl, DRPW_SCRATCH_START);
+ clk = wl1271_read32(wl, DRPW_SCRATCH_START);
wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
/* 2 */
clk |= (REF_CLOCK << 1) << 4;
- wl1271_spi_write32(wl, DRPW_SCRATCH_START, clk);
+ wl1271_write32(wl, DRPW_SCRATCH_START, clk);
wl1271_set_partition(wl, &part_table[PART_WORK]);
/* Disable interrupts */
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
ret = wl1271_boot_soft_reset(wl);
if (ret < 0)
@@ -513,23 +508,22 @@ int wl1271_boot(struct wl1271 *wl)
* ACX_EEPROMLESS_IND_REG */
wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
- wl1271_spi_write32(wl, ACX_EEPROMLESS_IND_REG,
- ACX_EEPROMLESS_IND_REG);
+ wl1271_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG);
- tmp = wl1271_spi_read32(wl, CHIP_ID_B);
+ tmp = wl1271_read32(wl, CHIP_ID_B);
wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
/* 6. read the EEPROM parameters */
- tmp = wl1271_spi_read32(wl, SCR_PAD2);
+ tmp = wl1271_read32(wl, SCR_PAD2);
ret = wl1271_boot_write_irq_polarity(wl);
if (ret < 0)
goto out;
/* FIXME: Need to check whether this is really what we want */
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_ALL_EVENTS_VECTOR);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
+ WL1271_ACX_ALL_EVENTS_VECTOR);
/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
* to upload_fw) */
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index c3385b3d246..36a64e06f29 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -30,6 +30,7 @@
#include "wl1271.h"
#include "wl1271_reg.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_acx.h"
#include "wl12xx_80211.h"
#include "wl1271_cmd.h"
@@ -57,13 +58,13 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
WARN_ON(len % 4 != 0);
- wl1271_spi_write(wl, wl->cmd_box_addr, buf, len, false);
+ wl1271_write(wl, wl->cmd_box_addr, buf, len, false);
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
- intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+ intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
if (time_after(jiffies, timeout)) {
wl1271_error("command complete timeout");
@@ -73,13 +74,13 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
msleep(1);
- intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
+ intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
}
/* read back the status code of the command */
if (res_len == 0)
res_len = sizeof(struct wl1271_cmd_header);
- wl1271_spi_read(wl, wl->cmd_box_addr, cmd, res_len, false);
+ wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false);
status = le16_to_cpu(cmd->status);
if (status != CMD_STATUS_SUCCESS) {
@@ -87,8 +88,8 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
ret = -EIO;
}
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
- WL1271_ACX_INTR_CMD_COMPLETE);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
+ WL1271_ACX_INTR_CMD_COMPLETE);
out:
return ret;
@@ -191,23 +192,19 @@ static int wl1271_cmd_cal(struct wl1271 *wl)
int wl1271_cmd_general_parms(struct wl1271 *wl)
{
struct wl1271_general_parms_cmd *gen_parms;
- struct conf_general_parms *g = &wl->conf.init.genparam;
int ret;
+ if (!wl->nvs)
+ return -ENODEV;
+
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
- gen_parms->ref_clk = g->ref_clk;
- gen_parms->settling_time = g->settling_time;
- gen_parms->clk_valid_on_wakeup = g->clk_valid_on_wakeup;
- gen_parms->dc2dcmode = g->dc2dcmode;
- gen_parms->single_dual_band = g->single_dual_band;
- gen_parms->tx_bip_fem_autodetect = g->tx_bip_fem_autodetect;
- gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
- gen_parms->settings = g->settings;
+ memcpy(gen_parms->params, wl->nvs->general_params,
+ WL1271_NVS_GENERAL_PARAMS_SIZE);
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
if (ret < 0)
@@ -220,8 +217,11 @@ int wl1271_cmd_general_parms(struct wl1271 *wl)
int wl1271_cmd_radio_parms(struct wl1271 *wl)
{
struct wl1271_radio_parms_cmd *radio_parms;
- struct conf_radio_parms *r = &wl->conf.init.radioparam;
- int i, ret;
+ struct conf_radio_parms *rparam = &wl->conf.init.radioparam;
+ int ret;
+
+ if (!wl->nvs)
+ return -ENODEV;
radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
if (!radio_parms)
@@ -229,60 +229,13 @@ int wl1271_cmd_radio_parms(struct wl1271 *wl)
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
- /* Static radio parameters */
- radio_parms->rx_trace_loss = r->rx_trace_loss;
- radio_parms->tx_trace_loss = r->tx_trace_loss;
- memcpy(radio_parms->rx_rssi_and_proc_compens,
- r->rx_rssi_and_proc_compens,
- CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
-
- memcpy(radio_parms->rx_trace_loss_5, r->rx_trace_loss_5,
- CONF_NUMBER_OF_SUB_BANDS_5);
- memcpy(radio_parms->tx_trace_loss_5, r->tx_trace_loss_5,
- CONF_NUMBER_OF_SUB_BANDS_5);
- memcpy(radio_parms->rx_rssi_and_proc_compens_5,
- r->rx_rssi_and_proc_compens_5,
- CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
-
- /* Dynamic radio parameters */
- radio_parms->tx_ref_pd_voltage = cpu_to_le16(r->tx_ref_pd_voltage);
- radio_parms->tx_ref_power = r->tx_ref_power;
- radio_parms->tx_offset_db = r->tx_offset_db;
-
- memcpy(radio_parms->tx_rate_limits_normal, r->tx_rate_limits_normal,
- CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
- CONF_NUMBER_OF_RATE_GROUPS);
-
- memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
- CONF_NUMBER_OF_CHANNELS_2_4);
- memcpy(radio_parms->tx_channel_limits_ofdm, r->tx_channel_limits_ofdm,
- CONF_NUMBER_OF_CHANNELS_2_4);
- memcpy(radio_parms->tx_pdv_rate_offsets, r->tx_pdv_rate_offsets,
- CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
-
- radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
-
- for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
- radio_parms->tx_ref_pd_voltage_5[i] =
- cpu_to_le16(r->tx_ref_pd_voltage_5[i]);
- memcpy(radio_parms->tx_ref_power_5, r->tx_ref_power_5,
- CONF_NUMBER_OF_SUB_BANDS_5);
- memcpy(radio_parms->tx_offset_db_5, r->tx_offset_db_5,
- CONF_NUMBER_OF_SUB_BANDS_5);
- memcpy(radio_parms->tx_rate_limits_normal_5,
- r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_rate_limits_degraded_5,
- r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_channel_limits_ofdm_5,
- r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
- memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
- CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->tx_ibias_5, r->tx_ibias_5,
- CONF_NUMBER_OF_RATE_GROUPS);
- memcpy(radio_parms->rx_fem_insertion_loss_5,
- r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
+ memcpy(radio_parms->stat_radio_params, wl->nvs->stat_radio_params,
+ WL1271_NVS_STAT_RADIO_PARAMS_SIZE);
+ memcpy(radio_parms->dyn_radio_params,
+ wl->nvs->dyn_radio_params[rparam->fem],
+ WL1271_NVS_DYN_RADIO_PARAMS_SIZE);
+
+ /* FIXME: current NVS is missing 5GHz parameters */
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
radio_parms, sizeof(*radio_parms));
@@ -311,19 +264,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
do_cal = false;
}
- /* FIXME: This is a workaround, because with the current stack, we
- * cannot know when we have disassociated. So, if we have already
- * joined, we disconnect before joining again. */
- if (wl->joined) {
- ret = wl1271_cmd_disconnect(wl);
- if (ret < 0) {
- wl1271_error("failed to disconnect before rejoining");
- goto out;
- }
-
- wl->joined = false;
- }
-
join = kzalloc(sizeof(*join), GFP_KERNEL);
if (!join) {
ret = -ENOMEM;
@@ -388,8 +328,6 @@ int wl1271_cmd_join(struct wl1271 *wl)
goto out_free;
}
- wl->joined = true;
-
/*
* ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
* simplify locking we just sleep instead, for now
@@ -487,7 +425,7 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
return 0;
}
-int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
+int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
@@ -501,7 +439,8 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
goto out;
}
- cmd->channel = channel;
+ /* the channel here is only used for calibration, so hardcoded to 1 */
+ cmd->channel = 1;
if (enable) {
cmd_rx = CMD_ENABLE_RX;
@@ -514,29 +453,29 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("rx %s cmd for channel %d failed",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
goto out;
}
wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("tx %s cmd for channel %d failed",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
return ret;
}
wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
- enable ? "start" : "stop", channel);
+ enable ? "start" : "stop", cmd->channel);
out:
kfree(cmd);
return ret;
}
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
@@ -557,7 +496,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
}
ps_params->ps_mode = ps_mode;
- ps_params->send_null_data = 1;
+ ps_params->send_null_data = send;
ps_params->retries = 5;
ps_params->hang_over_period = 128;
ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
@@ -636,7 +575,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
channels = wl->hw->wiphy->bands[ieee_band]->channels;
n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
- if (wl->scanning)
+ if (test_bit(WL1271_FLAG_SCANNING, &wl->flags))
return -EINVAL;
params = kzalloc(sizeof(*params), GFP_KERNEL);
@@ -711,7 +650,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
- wl->scanning = true;
+ set_bit(WL1271_FLAG_SCANNING, &wl->flags);
if (wl1271_11a_enabled()) {
wl->scan.state = band;
if (band == WL1271_SCAN_BAND_DUAL) {
@@ -729,7 +668,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
if (ret < 0) {
wl1271_error("SCAN failed");
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
goto out;
}
@@ -1003,7 +942,7 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_warning("could not set keys");
- goto out;
+ goto out;
}
out:
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index b4fa4acb922..2dc06c73532 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -37,8 +37,8 @@ int wl1271_cmd_join(struct wl1271 *wl);
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
-int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable);
-int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
+int wl1271_cmd_data_path(struct wl1271 *wl, bool enable);
+int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode, bool send);
int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
size_t len);
int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
@@ -428,67 +428,24 @@ struct wl1271_general_parms_cmd {
struct wl1271_cmd_test_header test;
- u8 ref_clk;
- u8 settling_time;
- u8 clk_valid_on_wakeup;
- u8 dc2dcmode;
- u8 single_dual_band;
-
- u8 tx_bip_fem_autodetect;
- u8 tx_bip_fem_manufacturer;
- u8 settings;
+ u8 params[WL1271_NVS_GENERAL_PARAMS_SIZE];
+ s8 reserved[23];
} __attribute__ ((packed));
+#define WL1271_STAT_RADIO_PARAMS_5_SIZE 29
+#define WL1271_DYN_RADIO_PARAMS_5_SIZE 104
+
struct wl1271_radio_parms_cmd {
struct wl1271_cmd_header header;
struct wl1271_cmd_test_header test;
- /* Static radio parameters */
- /* 2.4GHz */
- u8 rx_trace_loss;
- u8 tx_trace_loss;
- s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
- /* 5GHz */
- u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
- /* Dynamic radio parameters */
- /* 2.4GHz */
- __le16 tx_ref_pd_voltage;
- s8 tx_ref_power;
- s8 tx_offset_db;
-
- s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
- s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
-
- s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
- s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
- s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
-
- u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
- u8 rx_fem_insertion_loss;
+ u8 stat_radio_params[WL1271_NVS_STAT_RADIO_PARAMS_SIZE];
+ u8 stat_radio_params_5[WL1271_STAT_RADIO_PARAMS_5_SIZE];
- u8 padding2;
-
- /* 5GHz */
- __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
-
- s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
- s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
-
- s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
- s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
-
- /* FIXME: this is inconsistent with the types for 2.4GHz */
- s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
- s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
-
- u8 padding3[2];
+ u8 dyn_radio_params[WL1271_NVS_DYN_RADIO_PARAMS_SIZE];
+ u8 reserved;
+ u8 dyn_radio_params_5[WL1271_DYN_RADIO_PARAMS_5_SIZE];
} __attribute__ ((packed));
struct wl1271_cmd_cal_channel_tune {
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
index 565373ede26..6f9e75cc564 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -258,7 +258,8 @@ struct conf_rx_settings {
#define CONF_TX_MAX_RATE_CLASSES 8
#define CONF_TX_RATE_MASK_UNSPECIFIED 0
-#define CONF_TX_RATE_MASK_ALL 0x1eff
+#define CONF_TX_RATE_MASK_BASIC (CONF_HW_BIT_RATE_1MBPS | \
+ CONF_HW_BIT_RATE_2MBPS)
#define CONF_TX_RATE_RETRY_LIMIT 10
struct conf_tx_rate_class {
@@ -722,31 +723,6 @@ struct conf_conn_settings {
u8 psm_entry_retries;
};
-#define CONF_SR_ERR_TBL_MAX_VALUES 14
-
-struct conf_mart_reflex_err_table {
- /*
- * Length of the error table values table.
- *
- * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
- */
- u8 len;
-
- /*
- * Smart Reflex error table upper limit.
- *
- * Range: s8
- */
- s8 upper_limit;
-
- /*
- * Smart Reflex error table values.
- *
- * Range: s8
- */
- s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
-};
-
enum {
CONF_REF_CLK_19_2_E,
CONF_REF_CLK_26_E,
@@ -759,64 +735,6 @@ enum single_dual_band_enum {
CONF_DUAL_BAND
};
-struct conf_general_parms {
- /*
- * RF Reference Clock type / speed
- *
- * Range: CONF_REF_CLK_*
- */
- u8 ref_clk;
-
- /*
- * Settling time of the reference clock after boot.
- *
- * Range: u8
- */
- u8 settling_time;
-
- /*
- * Flag defining whether clock is valid on wakeup.
- *
- * Range: 0 - not valid on wakeup, 1 - valid on wakeup
- */
- u8 clk_valid_on_wakeup;
-
- /*
- * DC-to-DC mode.
- *
- * Range: Unknown
- */
- u8 dc2dcmode;
-
- /*
- * Flag defining whether used as single or dual-band.
- *
- * Range: CONF_SINGLE_BAND, CONF_DUAL_BAND
- */
- u8 single_dual_band;
-
- /*
- * TX bip fem autodetect flag.
- *
- * Range: Unknown
- */
- u8 tx_bip_fem_autodetect;
-
- /*
- * TX bip gem manufacturer.
- *
- * Range: Unknown
- */
- u8 tx_bip_fem_manufacturer;
-
- /*
- * Settings flags.
- *
- * Range: Unknown
- */
- u8 settings;
-};
-
#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
#define CONF_NUMBER_OF_SUB_BANDS_5 7
#define CONF_NUMBER_OF_RATE_GROUPS 6
@@ -825,87 +743,43 @@ struct conf_general_parms {
struct conf_radio_parms {
/*
- * Static radio parameters for 2.4GHz
- *
- * Range: unknown
- */
- u8 rx_trace_loss;
- u8 tx_trace_loss;
- s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
- /*
- * Static radio parameters for 5GHz
- *
- * Range: unknown
- */
- u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
-
- /*
- * Dynamic radio parameters for 2.4GHz
+ * FEM parameter set to use
*
- * Range: unknown
+ * Range: 0 or 1
*/
- s16 tx_ref_pd_voltage;
- s8 tx_ref_power;
- s8 tx_offset_db;
-
- s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
- s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
-
- s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
- s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
- s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
-
- u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
- u8 rx_fem_insertion_loss;
+ u8 fem;
+};
+struct conf_init_settings {
/*
- * Dynamic radio parameters for 5GHz
- *
- * Range: unknown
+ * Configure radio parameters.
*/
- s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
- s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
-
- s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
- s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
-
- s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
- s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
+ struct conf_radio_parms radioparam;
- /* FIXME: this is inconsistent with the types for 2.4GHz */
- s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
- s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
};
-#define CONF_SR_ERR_TBL_COUNT 3
+struct conf_itrim_settings {
+ /* enable dco itrim */
+ u8 enable;
-struct conf_init_settings {
- /*
- * Configure Smart Reflex error table values.
- */
- struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];
+ /* moderation timeout in microsecs from the last TX */
+ u32 timeout;
+};
+struct conf_pm_config_settings {
/*
- * Smart Reflex enable flag.
+ * Host clock settling time
*
- * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
- */
- u8 sr_enable;
-
- /*
- * Configure general parameters.
+ * Range: 0 - 30000 us
*/
- struct conf_general_parms genparam;
+ u32 host_clk_settling_time;
/*
- * Configure radio parameters.
+ * Host fast wakeup support
+ *
+ * Range: true, false
*/
- struct conf_radio_parms radioparam;
-
+ bool host_fast_wakeup_support;
};
struct conf_drv_settings {
@@ -914,6 +788,8 @@ struct conf_drv_settings {
struct conf_tx_settings tx;
struct conf_conn_settings conn;
struct conf_init_settings init;
+ struct conf_itrim_settings itrim;
+ struct conf_pm_config_settings pm_config;
};
#endif
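
For reference, a minimal standalone sketch of how the new conf_itrim_settings and conf_pm_config_settings blocks end up populated; the values mirror the defaults set later in wl1271_main.c (DCO itrim disabled, 50000 us moderation timeout, 5000 us host clock settling time, no fast wakeup). The userspace typedefs are stand-ins only, for building the sketch outside the kernel tree.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* userspace stand-ins for the kernel types used in wl1271_conf.h */
typedef uint8_t  u8;
typedef uint32_t u32;

struct conf_itrim_settings {
	u8 enable;      /* enable dco itrim */
	u32 timeout;    /* moderation timeout in microsecs from the last TX */
};

struct conf_pm_config_settings {
	u32 host_clk_settling_time;    /* 0 - 30000 us */
	bool host_fast_wakeup_support;
};

int main(void)
{
	/* defaults as set in default_conf in wl1271_main.c */
	struct conf_itrim_settings itrim = { .enable = 0, .timeout = 50000 };
	struct conf_pm_config_settings pm = {
		.host_clk_settling_time = 5000,
		.host_fast_wakeup_support = false,
	};

	printf("itrim: enable=%u timeout=%u us\n",
	       (unsigned)itrim.enable, (unsigned)itrim.timeout);
	printf("pm: settling=%u us fast_wakeup=%d\n",
	       (unsigned)pm.host_clk_settling_time, pm.host_fast_wakeup_support);
	return 0;
}
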
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
index c1805e5f896..8d7588ca68f 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
@@ -237,6 +237,64 @@ static const struct file_operations tx_queue_len_ops = {
.open = wl1271_open_file_generic,
};
+static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+
+ int res;
+ char buf[10];
+
+ res = scnprintf(buf, sizeof(buf), "%d\n", state);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, res);
+}
+
+static ssize_t gpio_power_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct wl1271 *wl = file->private_data;
+ char buf[10];
+ size_t len;
+ unsigned long value;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ len = min(count, sizeof(buf) - 1);
+ if (copy_from_user(buf, user_buf, len)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ buf[len] = '\0';
+
+ ret = strict_strtoul(buf, 0, &value);
+ if (ret < 0) {
+ wl1271_warning("illegal value in gpio_power");
+ goto out;
+ }
+
+ if (value) {
+ wl->set_power(true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ } else {
+ wl->set_power(false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
+ }
+
+out:
+ mutex_unlock(&wl->mutex);
+ return count;
+}
+
+static const struct file_operations gpio_power_ops = {
+ .read = gpio_power_read,
+ .write = gpio_power_write,
+ .open = wl1271_open_file_generic
+};
+
static void wl1271_debugfs_delete_files(struct wl1271 *wl)
{
DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
@@ -333,6 +391,8 @@ static void wl1271_debugfs_delete_files(struct wl1271 *wl)
DEBUGFS_DEL(tx_queue_len);
DEBUGFS_DEL(retry_count);
DEBUGFS_DEL(excessive_retries);
+
+ DEBUGFS_DEL(gpio_power);
}
static int wl1271_debugfs_add_files(struct wl1271 *wl)
@@ -434,6 +494,8 @@ static int wl1271_debugfs_add_files(struct wl1271 *wl)
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
+ DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir);
+
out:
if (ret < 0)
wl1271_debugfs_delete_files(wl);
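
From userspace the new entry is driven with plain reads and writes, e.g. echo 1 > <debugfs>/.../wl1271/gpio_power. Below is a hedged userspace analogue of the parsing done by gpio_power_write() above (bounded copy, NUL-termination, numeric parse); the real handler uses copy_from_user(), strict_strtoul() and wl->set_power() under wl->mutex, which are only mimicked here.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* userspace analogue of gpio_power_write(): parse a small numeric
 * string and turn it into an on/off decision. */
static int parse_gpio_power(const char *user_buf, size_t count, int *on)
{
	char buf[10];
	size_t len = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;
	char *end;
	unsigned long value;

	memcpy(buf, user_buf, len);	/* kernel: copy_from_user() */
	buf[len] = '\0';

	value = strtoul(buf, &end, 0);	/* kernel: strict_strtoul() */
	if (end == buf)
		return -1;		/* "illegal value in gpio_power" */

	*on = value ? 1 : 0;		/* kernel: wl->set_power() + flag bit */
	return 0;
}

int main(void)
{
	int on;

	if (parse_gpio_power("1\n", 2, &on) == 0)
		printf("gpio power -> %s\n", on ? "on" : "off");
	return 0;
}
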
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index d13fdd99c85..7468ef10194 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -24,6 +24,7 @@
#include "wl1271.h"
#include "wl1271_reg.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_ps.h"
#include "wl12xx_80211.h"
@@ -35,7 +36,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
- if (wl->scanning) {
+ if (test_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
NULL, size);
@@ -43,7 +44,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
* to the wl1271_cmd_scan function that we are not
* scanning as it checks that.
*/
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
wl->scan.active,
wl->scan.high_prio,
@@ -62,7 +63,7 @@ static int wl1271_event_scan_complete(struct wl1271 *wl,
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, false);
mutex_lock(&wl->mutex);
- wl->scanning = false;
+ clear_bit(WL1271_FLAG_SCANNING, &wl->flags);
}
}
return 0;
@@ -78,25 +79,61 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
switch (mbox->ps_status) {
case EVENT_ENTER_POWER_SAVE_FAIL:
- if (!wl->psm) {
+ wl1271_debug(DEBUG_PSM, "PSM entry failed");
+
+ if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ /* remain in active mode */
wl->psm_entry_retry = 0;
break;
}
if (wl->psm_entry_retry < wl->conf.conn.psm_entry_retries) {
wl->psm_entry_retry++;
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
+ true);
} else {
wl1271_error("PSM entry failed, giving up.\n");
+ /* FIXME: this may need to be reconsidered. For now it
+ is not possible to indicate to mac80211
+ afterwards that PSM entry failed. To maximize
+ functionality (receiving data and remaining
+ associated) make sure that we are in sync with
+ the AP's view of our PSM mode. */
+ ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
+ false);
wl->psm_entry_retry = 0;
- *beacon_loss = true;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
wl->psm_entry_retry = 0;
+
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, true);
+ if (ret < 0)
+ break;
+
+ /* enable beacon early termination */
+ ret = wl1271_acx_bet_enable(wl, true);
+ if (ret < 0)
+ break;
+
+ /* go to extremely low power mode */
+ wl1271_ps_elp_sleep(wl);
+ if (ret < 0)
+ break;
break;
case EVENT_EXIT_POWER_SAVE_FAIL:
- wl1271_info("PSM exit failed");
+ wl1271_debug(DEBUG_PSM, "PSM exit failed");
+
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
+ wl->psm_entry_retry = 0;
+ break;
+ }
+
+ /* make sure the firmware goes to active mode - the frame to
+ be sent next will indicate to the AP that we are active. */
+ ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
+ false);
break;
case EVENT_EXIT_POWER_SAVE_SUCCESS:
default:
@@ -136,7 +173,8 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
* filtering) is enabled. Without PSM, the stack will receive all
* beacons and can detect beacon loss by itself.
*/
- if (vector & BSS_LOSE_EVENT_ID && wl->psm) {
+ if (vector & BSS_LOSE_EVENT_ID &&
+ test_bit(WL1271_FLAG_PSM, &wl->flags)) {
wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
/* indicate to the stack, that beacons have been lost */
@@ -150,7 +188,7 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
return ret;
}
- if (beacon_loss) {
+ if (wl->vif && beacon_loss) {
/* Obviously, it's dangerous to release the mutex while
we are holding many of the variables in the wl struct.
That's why it's done last in the function, and care must
@@ -177,14 +215,14 @@ int wl1271_event_unmask(struct wl1271 *wl)
void wl1271_event_mbox_config(struct wl1271 *wl)
{
- wl->mbox_ptr[0] = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
+ wl->mbox_ptr[0] = wl1271_read32(wl, REG_EVENT_MAILBOX_PTR);
wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
wl->mbox_ptr[0], wl->mbox_ptr[1]);
}
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
struct event_mailbox mbox;
int ret;
@@ -195,8 +233,8 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
return -EINVAL;
/* first we read the mbox descriptor */
- wl1271_spi_read(wl, wl->mbox_ptr[mbox_num], &mbox,
- sizeof(struct event_mailbox), false);
+ wl1271_read(wl, wl->mbox_ptr[mbox_num], &mbox,
+ sizeof(struct event_mailbox), false);
/* process the descriptor */
ret = wl1271_event_process(wl, &mbox);
@@ -204,9 +242,7 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
return ret;
/* then we let the firmware know it can go on...*/
- if (do_ack)
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
- INTR_TRIG_EVENT_ACK);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
return 0;
}
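
The recurring change in this file (and throughout the patch) is that per-driver booleans such as wl->scanning and wl->psm become bits in wl->flags, manipulated with the kernel's test_bit()/set_bit()/clear_bit(). A rough userspace sketch of the same pattern follows; the bit numbers are illustrative only, the real WL1271_FLAG_* values live in wl1271.h, and the kernel helpers are atomic whereas these are not.

#include <stdio.h>

/* illustrative bit numbers only; the real ones are defined in wl1271.h */
enum {
	FLAG_SCANNING = 0,
	FLAG_PSM      = 1,
	FLAG_JOINED   = 2,
};

/* non-atomic userspace stand-ins for set_bit()/clear_bit()/test_bit() */
static void flag_set(unsigned long *flags, int bit)   { *flags |=  (1UL << bit); }
static void flag_clear(unsigned long *flags, int bit) { *flags &= ~(1UL << bit); }
static int  flag_test(unsigned long *flags, int bit)  { return !!(*flags & (1UL << bit)); }

int main(void)
{
	unsigned long flags = 0;

	flag_set(&flags, FLAG_SCANNING);	/* was: wl->scanning = true; */
	if (flag_test(&flags, FLAG_SCANNING))
		printf("scan in progress\n");
	flag_clear(&flags, FLAG_SCANNING);	/* was: wl->scanning = false; */

	printf("psm active: %d\n", flag_test(&flags, FLAG_PSM));
	return 0;
}
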
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 4e3f55ebb1a..278f9206aa5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -112,6 +112,6 @@ struct event_mailbox {
int wl1271_event_unmask(struct wl1271 *wl);
void wl1271_event_mbox_config(struct wl1271 *wl);
-int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack);
+int wl1271_event_handle(struct wl1271 *wl, u8 mbox);
#endif
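
The reworked EVENT_ENTER_POWER_SAVE_FAIL handling in wl1271_event.c retries PSM entry a bounded number of times (wl->conf.conn.psm_entry_retries) and then deliberately falls back to active mode so the driver stays in sync with the AP. A compact userspace sketch of that decision logic, assuming a retry limit of 3 as in default_conf:

#include <stdio.h>

enum ps_mode { STATION_ACTIVE_MODE, STATION_POWER_SAVE_MODE };

/* stand-in for wl1271_ps_set_mode(); always "succeeds" here */
static int ps_set_mode(enum ps_mode mode)
{
	printf("requesting %s\n",
	       mode == STATION_POWER_SAVE_MODE ? "power-save" : "active");
	return 0;
}

/* mirrors the EVENT_ENTER_POWER_SAVE_FAIL branch: retry, then give up */
static void handle_psm_entry_fail(int *entry_retry, int retry_limit)
{
	if (*entry_retry < retry_limit) {
		(*entry_retry)++;
		ps_set_mode(STATION_POWER_SAVE_MODE);	/* try again */
	} else {
		/* give up and stay active, in sync with the AP */
		ps_set_mode(STATION_ACTIVE_MODE);
		*entry_retry = 0;
	}
}

int main(void)
{
	int retry = 0;

	/* simulate four consecutive firmware failures with a limit of 3 */
	for (int i = 0; i < 4; i++)
		handle_psm_entry_fail(&retry, 3);
	return 0;
}
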
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 11249b436cf..86c30a86a45 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -49,7 +49,7 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_templates_config(struct wl1271 *wl)
+int wl1271_init_templates_config(struct wl1271 *wl)
{
int ret;
@@ -113,7 +113,7 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
return 0;
}
-static int wl1271_init_phy_config(struct wl1271 *wl)
+int wl1271_init_phy_config(struct wl1271 *wl)
{
int ret;
@@ -156,7 +156,7 @@ static int wl1271_init_beacon_filter(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_pta(struct wl1271 *wl)
+int wl1271_init_pta(struct wl1271 *wl)
{
int ret;
@@ -171,7 +171,7 @@ static int wl1271_init_pta(struct wl1271 *wl)
return 0;
}
-static int wl1271_init_energy_detection(struct wl1271 *wl)
+int wl1271_init_energy_detection(struct wl1271 *wl)
{
int ret;
@@ -195,7 +195,9 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
int wl1271_hw_init(struct wl1271 *wl)
{
- int ret;
+ struct conf_tx_ac_category *conf_ac;
+ struct conf_tx_tid *conf_tid;
+ int ret, i;
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
@@ -229,6 +231,10 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
+ ret = wl1271_acx_dco_itrim_params(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
/* Initialize connection monitoring thresholds */
ret = wl1271_acx_conn_monit_params(wl);
if (ret < 0)
@@ -270,22 +276,36 @@ int wl1271_hw_init(struct wl1271 *wl)
goto out_free_memmap;
/* Default TID configuration */
- ret = wl1271_acx_tid_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
+ for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+ conf_tid = &wl->conf.tx.tid_conf[i];
+ ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
+ conf_tid->channel_type,
+ conf_tid->tsid,
+ conf_tid->ps_scheme,
+ conf_tid->ack_policy,
+ conf_tid->apsd_conf[0],
+ conf_tid->apsd_conf[1]);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
/* Default AC configuration */
- ret = wl1271_acx_ac_cfg(wl);
- if (ret < 0)
- goto out_free_memmap;
+ for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+ conf_ac = &wl->conf.tx.ac_conf[i];
+ ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
+ conf_ac->cw_max, conf_ac->aifsn,
+ conf_ac->tx_op_limit);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
/* Configure TX rate classes */
- ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL);
+ ret = wl1271_acx_rate_policies(wl);
if (ret < 0)
goto out_free_memmap;
/* Enable data path */
- ret = wl1271_cmd_data_path(wl, wl->channel, 1);
+ ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
@@ -299,8 +319,8 @@ int wl1271_hw_init(struct wl1271 *wl)
if (ret < 0)
goto out_free_memmap;
- /* Configure smart reflex */
- ret = wl1271_acx_smart_reflex(wl);
+ /* configure PM */
+ ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/wl1271_init.h
index 930677fbe85..bc26f8c53b9 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/wl1271_init.h
@@ -27,6 +27,10 @@
#include "wl1271.h"
int wl1271_hw_init_power_auth(struct wl1271 *wl);
+int wl1271_init_templates_config(struct wl1271 *wl);
+int wl1271_init_phy_config(struct wl1271 *wl);
+int wl1271_init_pta(struct wl1271 *wl);
+int wl1271_init_energy_detection(struct wl1271 *wl);
int wl1271_hw_init(struct wl1271 *wl);
#endif
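
wl1271_hw_init() (and now wl1271_plt_init() as well) no longer issues a single blanket ACX call but walks the per-TID and per-AC tables from the driver configuration, one firmware command per entry. A stand-alone sketch of that loop shape, with a hypothetical config_tid() standing in for wl1271_acx_tid_cfg() and made-up table contents:

#include <stdio.h>

/* trimmed-down view of struct conf_tx_tid from wl1271_conf.h */
struct conf_tx_tid {
	unsigned queue_id;
	unsigned channel_type;
	unsigned tsid;
	unsigned ps_scheme;
	unsigned ack_policy;
};

/* hypothetical stand-in for wl1271_acx_tid_cfg(); just reports the entry */
static int config_tid(const struct conf_tx_tid *t)
{
	printf("TID queue %u: tsid=%u ps_scheme=%u ack=%u\n",
	       t->queue_id, t->tsid, t->ps_scheme, t->ack_policy);
	return 0;
}

int main(void)
{
	/* made-up table; the real one is wl->conf.tx.tid_conf[] */
	struct conf_tx_tid tid_conf[] = {
		{ .queue_id = 0, .tsid = 0, .ps_scheme = 0, .ack_policy = 0 },
		{ .queue_id = 1, .tsid = 1, .ps_scheme = 0, .ack_policy = 0 },
	};
	int count = sizeof(tid_conf) / sizeof(tid_conf[0]);

	/* same shape as the "Default TID configuration" loop in wl1271_init.c:
	 * stop and unwind on the first entry the firmware rejects */
	for (int i = 0; i < count; i++)
		if (config_tid(&tid_conf[i]) < 0)
			return 1;
	return 0;
}
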
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/wl1271_io.c
new file mode 100644
index 00000000000..5cd94d5666c
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_io.c
@@ -0,0 +1,213 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/crc7.h>
+#include <linux/spi/spi.h>
+
+#include "wl1271.h"
+#include "wl12xx_80211.h"
+#include "wl1271_spi.h"
+#include "wl1271_io.h"
+
+static int wl1271_translate_addr(struct wl1271 *wl, int addr)
+{
+ /*
+ * To translate, first check to which window of addresses the
+ * particular address belongs. Then subtract the starting address
+ * of that window from the address. Then, add offset of the
+ * translated region.
+ *
+ * The translated regions occur next to each other in physical device
+ * memory, so just add the sizes of the preceding address regions to
+ * get the offset to the new region.
+ *
+ * Currently, only the two first regions are addressed, and the
+ * assumption is that all addresses will fall into either of those
+ * two.
+ */
+ if ((addr >= wl->part.reg.start) &&
+ (addr < wl->part.reg.start + wl->part.reg.size))
+ return addr - wl->part.reg.start + wl->part.mem.size;
+ else
+ return addr - wl->part.mem.start;
+}
+
+/* Set the SPI partitions to access the chip addresses
+ *
+ * To simplify driver code, a fixed (virtual) memory map is defined for
+ * register and memory addresses. Because in the chipset, in different stages
+ * of operation, those addresses will move around, an address translation
+ * mechanism is required.
+ *
+ * There are four partitions (three memory and one register partition),
+ * which are mapped to two different areas of the hardware memory.
+ *
+ * Virtual address
+ * space
+ *
+ * | |
+ * ...+----+--> mem.start
+ * Physical address ... | |
+ * space ... | | [PART_0]
+ * ... | |
+ * 00000000 <--+----+... ...+----+--> mem.start + mem.size
+ * | | ... | |
+ * |MEM | ... | |
+ * | | ... | |
+ * mem.size <--+----+... | | (unused area)
+ * | | ... | |
+ * |REG | ... | |
+ * mem.size | | ... | |
+ * + <--+----+... ...+----+--> reg.start
+ * reg.size | | ... | |
+ * |MEM2| ... | | [PART_1]
+ * | | ... | |
+ * ...+----+--> reg.start + reg.size
+ * | |
+ *
+ */
+int wl1271_set_partition(struct wl1271 *wl,
+ struct wl1271_partition_set *p)
+{
+ /* copy partition info */
+ memcpy(&wl->part, p, sizeof(*p));
+
+ wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
+ p->mem.start, p->mem.size);
+ wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
+ p->reg.start, p->reg.size);
+ wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
+ p->mem2.start, p->mem2.size);
+ wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
+ p->mem3.start, p->mem3.size);
+
+ /* write partition info to the chipset */
+ wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
+ wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
+ wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
+ wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
+ wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
+ wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
+ wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
+
+ return 0;
+}
+
+void wl1271_io_reset(struct wl1271 *wl)
+{
+ wl1271_spi_reset(wl);
+}
+
+void wl1271_io_init(struct wl1271 *wl)
+{
+ wl1271_spi_init(wl);
+}
+
+void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl1271_spi_raw_write(wl, addr, buf, len, fixed);
+}
+
+void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed)
+{
+ wl1271_spi_raw_read(wl, addr, buf, len, fixed);
+}
+
+void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_spi_raw_read(wl, physical, buf, len, fixed);
+}
+
+void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed)
+{
+ int physical;
+
+ physical = wl1271_translate_addr(wl, addr);
+
+ wl1271_spi_raw_write(wl, physical, buf, len, fixed);
+}
+
+u32 wl1271_read32(struct wl1271 *wl, int addr)
+{
+ return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
+}
+
+void wl1271_write32(struct wl1271 *wl, int addr, u32 val)
+{
+ wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
+}
+
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
+{
+ /* write address >> 1 + 0x30000 to OCP_POR_CTR */
+ addr = (addr >> 1) + 0x30000;
+ wl1271_write32(wl, OCP_POR_CTR, addr);
+
+ /* write value to OCP_POR_WDATA */
+ wl1271_write32(wl, OCP_DATA_WRITE, val);
+
+ /* write 1 to OCP_CMD */
+ wl1271_write32(wl, OCP_CMD, OCP_CMD_WRITE);
+}
+
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
+{
+ u32 val;
+ int timeout = OCP_CMD_LOOP;
+
+ /* write address >> 1 + 0x30000 to OCP_POR_CTR */
+ addr = (addr >> 1) + 0x30000;
+ wl1271_write32(wl, OCP_POR_CTR, addr);
+
+ /* write 2 to OCP_CMD */
+ wl1271_write32(wl, OCP_CMD, OCP_CMD_READ);
+
+ /* poll for data ready */
+ do {
+ val = wl1271_read32(wl, OCP_DATA_READ);
+ } while (!(val & OCP_READY_MASK) && --timeout);
+
+ if (!timeout) {
+ wl1271_warning("Top register access timed out.");
+ return 0xffff;
+ }
+
+ /* check data status and return if OK */
+ if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
+ return val & 0xffff;
+ else {
+ wl1271_warning("Top register access returned error.");
+ return 0xffff;
+ }
+}
+
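
A stand-alone illustration of the arithmetic in wl1271_translate_addr() above, using made-up partition values: addresses inside the register window are rebased to sit directly after the memory window in the translated space, everything else is taken relative to the start of the memory window.

#include <stdio.h>

struct part {
	unsigned start;
	unsigned size;
};

/* same logic as wl1271_translate_addr(), outside the driver */
static unsigned translate(struct part mem, struct part reg, unsigned addr)
{
	if (addr >= reg.start && addr < reg.start + reg.size)
		return addr - reg.start + mem.size;   /* register window */
	return addr - mem.start;                      /* memory window */
}

int main(void)
{
	/* illustrative partition only, not real wl1271 values */
	struct part mem = { .start = 0x00040000, .size = 0x00014000 };
	struct part reg = { .start = 0x00300000, .size = 0x0000a000 };

	printf("mem addr 0x%08x -> 0x%08x\n",
	       0x00040004, translate(mem, reg, 0x00040004));
	printf("reg addr 0x%08x -> 0x%08x\n",
	       0x00300008, translate(mem, reg, 0x00300008));
	return 0;
}
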
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/wl1271_io.h
new file mode 100644
index 00000000000..fa9a0b35788
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_io.h
@@ -0,0 +1,68 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 1998-2009 Texas Instruments. All rights reserved.
+ * Copyright (C) 2008-2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL1271_IO_H__
+#define __WL1271_IO_H__
+
+struct wl1271;
+
+void wl1271_io_reset(struct wl1271 *wl);
+void wl1271_io_init(struct wl1271 *wl);
+
+/* Raw target IO, address is not translated */
+void wl1271_raw_write(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed);
+void wl1271_raw_read(struct wl1271 *wl, int addr, void *buf,
+ size_t len, bool fixed);
+
+/* Translated target IO */
+void wl1271_read(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+void wl1271_write(struct wl1271 *wl, int addr, void *buf, size_t len,
+ bool fixed);
+u32 wl1271_read32(struct wl1271 *wl, int addr);
+void wl1271_write32(struct wl1271 *wl, int addr, u32 val);
+
+/* Top Register IO */
+void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
+u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
+
+int wl1271_set_partition(struct wl1271 *wl,
+ struct wl1271_partition_set *p);
+
+static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
+{
+ wl1271_raw_read(wl, addr, &wl->buffer_32,
+ sizeof(wl->buffer_32), false);
+
+ return wl->buffer_32;
+}
+
+static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
+{
+ wl->buffer_32 = val;
+ wl1271_raw_write(wl, addr, &wl->buffer_32,
+ sizeof(wl->buffer_32), false);
+}
+#endif
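
wl1271_top_reg_read() in the new wl1271_io.c polls OCP_DATA_READ until the ready bit appears or a loop counter runs out, then checks the status field. A hedged userspace sketch of that bounded-poll pattern follows; the mask values and the poll budget here are placeholders, not the driver's OCP_* constants, and the fake read function simply becomes ready after a few calls.

#include <stdio.h>

/* placeholder masks, standing in for OCP_READY_MASK / OCP_STATUS_MASK */
#define READY_MASK   0x00040000u
#define STATUS_MASK  0x00030000u
#define STATUS_OK    0x00000000u
#define POLL_BUDGET  32		/* stands in for OCP_CMD_LOOP */

/* fake hardware: not ready for the first few reads, then ready with data */
static unsigned fake_data_read(void)
{
	static int calls;
	return ++calls < 4 ? 0 : (READY_MASK | STATUS_OK | 0xbeef);
}

static int top_reg_read(unsigned *out)
{
	int timeout = POLL_BUDGET;
	unsigned val;

	do {
		val = fake_data_read();
	} while (!(val & READY_MASK) && --timeout);

	if (!timeout)
		return -1;			/* "access timed out" */
	if ((val & STATUS_MASK) != STATUS_OK)
		return -1;			/* "returned error" */

	*out = val & 0xffff;
	return 0;
}

int main(void)
{
	unsigned v;

	if (top_reg_read(&v) == 0)
		printf("top register value: 0x%04x\n", v);
	return 0;
}
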
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index b62c00ff42f..2a864b24291 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -1,7 +1,7 @@
/*
* This file is part of wl1271
*
- * Copyright (C) 2008-2009 Nokia Corporation
+ * Copyright (C) 2008-2010 Nokia Corporation
*
* Contact: Luciano Coelho <luciano.coelho@nokia.com>
*
@@ -38,6 +38,7 @@
#include "wl12xx_80211.h"
#include "wl1271_reg.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_event.h"
#include "wl1271_tx.h"
#include "wl1271_rx.h"
@@ -46,6 +47,9 @@
#include "wl1271_debugfs.h"
#include "wl1271_cmd.h"
#include "wl1271_boot.h"
+#include "wl1271_testmode.h"
+
+#define WL1271_BOOT_RETRIES 3
static struct conf_drv_settings default_conf = {
.sg = {
@@ -67,16 +71,17 @@ static struct conf_drv_settings default_conf = {
.ps_poll_timeout = 15,
.upsd_timeout = 15,
.rts_threshold = 2347,
- .rx_cca_threshold = 0xFFEF,
- .irq_blk_threshold = 0,
- .irq_pkt_threshold = USHORT_MAX,
- .irq_timeout = 5,
+ .rx_cca_threshold = 0,
+ .irq_blk_threshold = 0xFFFF,
+ .irq_pkt_threshold = 0,
+ .irq_timeout = 600,
.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
},
.tx = {
.tx_energy_detection = 0,
.rc_conf = {
- .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED,
+ .enabled_rates = CONF_HW_BIT_RATE_1MBPS |
+ CONF_HW_BIT_RATE_2MBPS,
.short_retry_limit = 10,
.long_retry_limit = 10,
.aflags = 0
@@ -172,8 +177,8 @@ static struct conf_drv_settings default_conf = {
}
},
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
- .tx_compl_timeout = 5,
- .tx_compl_threshold = 5
+ .tx_compl_timeout = 700,
+ .tx_compl_threshold = 4
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
@@ -186,12 +191,12 @@ static struct conf_drv_settings default_conf = {
.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
}
},
- .synch_fail_thold = 5,
+ .synch_fail_thold = 10,
.bss_lose_timeout = 100,
.beacon_rx_timeout = 10000,
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
- .ps_poll_threshold = 4,
+ .ps_poll_threshold = 20,
.sig_trigger_count = 2,
.sig_trigger = {
[0] = {
@@ -226,97 +231,17 @@ static struct conf_drv_settings default_conf = {
.psm_entry_retries = 3
},
.init = {
- .sr_err_tbl = {
- [0] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
- 0x00 }
- },
- [1] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
- 0x00 }
- },
- [2] = {
- .len = 7,
- .upper_limit = 0x03,
- .values = {
- 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
- 0x00 }
- }
- },
- .sr_enable = 1,
- .genparam = {
- .ref_clk = CONF_REF_CLK_38_4_E,
- .settling_time = 5,
- .clk_valid_on_wakeup = 0,
- .dc2dcmode = 0,
- .single_dual_band = CONF_SINGLE_BAND,
- .tx_bip_fem_autodetect = 0,
- .tx_bip_fem_manufacturer = 1,
- .settings = 1,
- },
.radioparam = {
- .rx_trace_loss = 10,
- .tx_trace_loss = 10,
- .rx_rssi_and_proc_compens = {
- 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
- 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
- 0x00, 0x0a, 0x14 },
- .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
- .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
- .rx_rssi_and_proc_compens_5 = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00 },
- .tx_ref_pd_voltage = 0x24e,
- .tx_ref_power = 0x78,
- .tx_offset_db = 0x0,
- .tx_rate_limits_normal = {
- 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
- .tx_rate_limits_degraded = {
- 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
- .tx_channel_limits_11b = {
- 0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
- 0x22, 0x50 },
- .tx_channel_limits_ofdm = {
- 0x20, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
- 0x20, 0x50 },
- .tx_pdv_rate_offsets = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .tx_ibias = {
- 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
- .rx_fem_insertion_loss = 0x14,
- .tx_ref_pd_voltage_5 = {
- 0x0190, 0x01a4, 0x01c3, 0x01d8,
- 0x020a, 0x021c },
- .tx_ref_power_5 = {
- 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 },
- .tx_offset_db_5 = {
- 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
- .tx_rate_limits_normal_5 = {
- 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
- .tx_rate_limits_degraded_5 = {
- 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
- .tx_channel_limits_ofdm_5 = {
- 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
- 0x50, 0x50, 0x50 },
- .tx_pdv_rate_offsets_5 = {
- 0x01, 0x02, 0x02, 0x02, 0x02, 0x00 },
- .tx_ibias_5 = {
- 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
- .rx_fem_insertion_loss_5 = {
- 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
+ .fem = 1,
}
+ },
+ .itrim = {
+ .enable = false,
+ .timeout = 50000,
+ },
+ .pm_config = {
+ .host_clk_settling_time = 5000,
+ .host_fast_wakeup_support = false
}
};
@@ -337,15 +262,14 @@ static void wl1271_conf_init(struct wl1271 *wl)
/* apply driver default configuration */
memcpy(&wl->conf, &default_conf, sizeof(default_conf));
-
- if (wl1271_11a_enabled())
- wl->conf.init.genparam.single_dual_band = CONF_DUAL_BAND;
}
static int wl1271_plt_init(struct wl1271 *wl)
{
- int ret;
+ struct conf_tx_ac_category *conf_ac;
+ struct conf_tx_tid *conf_tid;
+ int ret, i;
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
@@ -355,15 +279,89 @@ static int wl1271_plt_init(struct wl1271 *wl)
if (ret < 0)
return ret;
- ret = wl1271_acx_init_mem_config(wl);
+ ret = wl1271_init_templates_config(wl);
if (ret < 0)
return ret;
- ret = wl1271_cmd_data_path(wl, wl->channel, 1);
+ ret = wl1271_acx_init_mem_config(wl);
if (ret < 0)
return ret;
+ /* PHY layer config */
+ ret = wl1271_init_phy_config(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ ret = wl1271_acx_dco_itrim_params(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Initialize connection monitoring thresholds */
+ ret = wl1271_acx_conn_monit_params(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Bluetooth WLAN coexistence */
+ ret = wl1271_init_pta(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Energy detection */
+ ret = wl1271_init_energy_detection(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Default fragmentation threshold */
+ ret = wl1271_acx_frag_threshold(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Default TID configuration */
+ for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
+ conf_tid = &wl->conf.tx.tid_conf[i];
+ ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id,
+ conf_tid->channel_type,
+ conf_tid->tsid,
+ conf_tid->ps_scheme,
+ conf_tid->ack_policy,
+ conf_tid->apsd_conf[0],
+ conf_tid->apsd_conf[1]);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
+
+ /* Default AC configuration */
+ for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
+ conf_ac = &wl->conf.tx.ac_conf[i];
+ ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min,
+ conf_ac->cw_max, conf_ac->aifsn,
+ conf_ac->tx_op_limit);
+ if (ret < 0)
+ goto out_free_memmap;
+ }
+
+ /* Enable data path */
+ ret = wl1271_cmd_data_path(wl, 1);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* Configure for CAM power saving (ie. always active) */
+ ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
+ if (ret < 0)
+ goto out_free_memmap;
+
+ /* configure PM */
+ ret = wl1271_acx_pm_config(wl);
+ if (ret < 0)
+ goto out_free_memmap;
+
return 0;
+
+ out_free_memmap:
+ kfree(wl->target_mem_map);
+ wl->target_mem_map = NULL;
+
+ return ret;
}
static void wl1271_disable_interrupts(struct wl1271 *wl)
@@ -374,11 +372,13 @@ static void wl1271_disable_interrupts(struct wl1271 *wl)
static void wl1271_power_off(struct wl1271 *wl)
{
wl->set_power(false);
+ clear_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static void wl1271_power_on(struct wl1271 *wl)
{
wl->set_power(true);
+ set_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
}
static void wl1271_fw_status(struct wl1271 *wl,
@@ -387,8 +387,7 @@ static void wl1271_fw_status(struct wl1271 *wl,
u32 total = 0;
int i;
- wl1271_spi_read(wl, FW_STATUS_ADDR, status,
- sizeof(*status), false);
+ wl1271_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
@@ -435,7 +434,7 @@ static void wl1271_irq_work(struct work_struct *work)
if (ret < 0)
goto out;
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
wl1271_fw_status(wl, wl->fw_status);
intr = le32_to_cpu(wl->fw_status->intr);
@@ -447,14 +446,13 @@ static void wl1271_irq_work(struct work_struct *work)
intr &= WL1271_INTR_MASK;
if (intr & WL1271_ACX_INTR_EVENT_A) {
- bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
- wl1271_event_handle(wl, 0, do_ack);
+ wl1271_event_handle(wl, 0);
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
- wl1271_event_handle(wl, 1, true);
+ wl1271_event_handle(wl, 1);
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
@@ -478,8 +476,8 @@ static void wl1271_irq_work(struct work_struct *work)
}
out_sleep:
- wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
- WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
+ wl1271_write32(wl, ACX_REG_INTERRUPT_MASK,
+ WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
wl1271_ps_elp_sleep(wl);
out:
@@ -546,6 +544,40 @@ out:
return ret;
}
+static int wl1271_update_mac_addr(struct wl1271 *wl)
+{
+ int ret = 0;
+ u8 *nvs_ptr = (u8 *)wl->nvs->nvs;
+
+ /* get mac address from the NVS */
+ wl->mac_addr[0] = nvs_ptr[11];
+ wl->mac_addr[1] = nvs_ptr[10];
+ wl->mac_addr[2] = nvs_ptr[6];
+ wl->mac_addr[3] = nvs_ptr[5];
+ wl->mac_addr[4] = nvs_ptr[4];
+ wl->mac_addr[5] = nvs_ptr[3];
+
+ /* FIXME: if it is a zero address, we should bail out. For now,
+ we generate a random address instead */
+ if (is_zero_ether_addr(wl->mac_addr)) {
+ static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
+ memcpy(wl->mac_addr, nokia_oui, 3);
+ get_random_bytes(wl->mac_addr + 3, 3);
+
+ /* update this address to the NVS */
+ nvs_ptr[11] = wl->mac_addr[0];
+ nvs_ptr[10] = wl->mac_addr[1];
+ nvs_ptr[6] = wl->mac_addr[2];
+ nvs_ptr[5] = wl->mac_addr[3];
+ nvs_ptr[4] = wl->mac_addr[4];
+ nvs_ptr[3] = wl->mac_addr[5];
+ }
+
+ SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
+
+ return ret;
+}
+
static int wl1271_fetch_nvs(struct wl1271 *wl)
{
const struct firmware *fw;
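
wl1271_update_mac_addr() above pulls the permanent MAC address out of the NVS image byte by byte (positions 11, 10 and 6..3), and falls back to a random address under the Nokia OUI 00:1f:df when the NVS holds all zeroes. A small stand-alone sketch of that byte shuffling, with a made-up NVS buffer:

#include <stdio.h>
#include <string.h>

/* mirror of the index mapping used in wl1271_update_mac_addr() */
static void nvs_to_mac(const unsigned char *nvs, unsigned char mac[6])
{
	mac[0] = nvs[11];
	mac[1] = nvs[10];
	mac[2] = nvs[6];
	mac[3] = nvs[5];
	mac[4] = nvs[4];
	mac[5] = nvs[3];
}

int main(void)
{
	/* made-up NVS header bytes, only the MAC positions matter here */
	unsigned char nvs[16] = { 0 };
	unsigned char mac[6];

	nvs[11] = 0x00; nvs[10] = 0x1f;
	nvs[6]  = 0xdf; nvs[5]  = 0x12; nvs[4] = 0x34; nvs[3] = 0x56;

	nvs_to_mac(nvs, mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
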
@@ -558,15 +590,14 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
return ret;
}
- if (fw->size % 4) {
- wl1271_error("nvs size is not multiple of 32 bits: %zu",
- fw->size);
+ if (fw->size != sizeof(struct wl1271_nvs_file)) {
+ wl1271_error("nvs size is not as expected: %zu != %zu",
+ fw->size, sizeof(struct wl1271_nvs_file));
ret = -EILSEQ;
goto out;
}
- wl->nvs_len = fw->size;
- wl->nvs = kmalloc(wl->nvs_len, GFP_KERNEL);
+ wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("could not allocate memory for the nvs file");
@@ -574,9 +605,9 @@ static int wl1271_fetch_nvs(struct wl1271 *wl)
goto out;
}
- memcpy(wl->nvs, fw->data, wl->nvs_len);
+ memcpy(wl->nvs, fw->data, sizeof(struct wl1271_nvs_file));
- ret = 0;
+ ret = wl1271_update_mac_addr(wl);
out:
release_firmware(fw);
@@ -614,10 +645,11 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
struct wl1271_partition_set partition;
int ret = 0;
+ msleep(WL1271_PRE_POWER_ON_SLEEP);
wl1271_power_on(wl);
msleep(WL1271_POWER_ON_SLEEP);
- wl1271_spi_reset(wl);
- wl1271_spi_init(wl);
+ wl1271_io_reset(wl);
+ wl1271_io_init(wl);
/* We don't need a real memory partition here, because we only want
* to use the registers at this point. */
@@ -632,7 +664,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
/* whal_FwCtrl_BootSm() */
/* 0. read chip id from CHIP_ID */
- wl->chip.id = wl1271_spi_read32(wl, CHIP_ID_B);
+ wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
/* 1. check if chip id is valid */
@@ -643,7 +675,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
break;
case CHIP_ID_1271_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -651,38 +683,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
ret = wl1271_setup(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
break;
default:
- wl1271_error("unsupported chip id: 0x%x", wl->chip.id);
+ wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
ret = -ENODEV;
- goto out_power_off;
+ goto out;
}
if (wl->fw == NULL) {
ret = wl1271_fetch_firmware(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
}
/* No NVS from netlink, try to get it from the filesystem */
if (wl->nvs == NULL) {
ret = wl1271_fetch_nvs(wl);
if (ret < 0)
- goto out_power_off;
+ goto out;
}
- goto out;
-
-out_power_off:
- wl1271_power_off(wl);
-
out:
return ret;
}
int wl1271_plt_start(struct wl1271 *wl)
{
+ int retries = WL1271_BOOT_RETRIES;
int ret;
mutex_lock(&wl->mutex);
@@ -696,35 +724,43 @@ int wl1271_plt_start(struct wl1271 *wl)
goto out;
}
- wl->state = WL1271_STATE_PLT;
-
- ret = wl1271_chip_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1271_boot(wl);
- if (ret < 0)
- goto out_power_off;
-
- wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
-
- ret = wl1271_plt_init(wl);
- if (ret < 0)
- goto out_irq_disable;
+ while (retries) {
+ retries--;
+ ret = wl1271_chip_wakeup(wl);
+ if (ret < 0)
+ goto power_off;
- /* Make sure power saving is disabled */
- ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
- if (ret < 0)
- goto out_irq_disable;
+ ret = wl1271_boot(wl);
+ if (ret < 0)
+ goto power_off;
- goto out;
+ ret = wl1271_plt_init(wl);
+ if (ret < 0)
+ goto irq_disable;
-out_irq_disable:
- wl1271_disable_interrupts(wl);
+ wl->state = WL1271_STATE_PLT;
+ wl1271_notice("firmware booted in PLT mode (%s)",
+ wl->chip.fw_ver);
+ goto out;
-out_power_off:
- wl1271_power_off(wl);
+irq_disable:
+ wl1271_disable_interrupts(wl);
+ mutex_unlock(&wl->mutex);
+ /* Unlocking the mutex in the middle of handling is
+ inherently unsafe. In this case we deem it safe to do,
+ because we need to let any possibly pending IRQ out of
+ the system (and while we are WL1271_STATE_OFF the IRQ
+ work function will not do anything.) Also, any other
+ possible concurrent operations will fail due to the
+ current state, hence the wl1271 struct should be safe. */
+ cancel_work_sync(&wl->irq_work);
+ mutex_lock(&wl->mutex);
+power_off:
+ wl1271_power_off(wl);
+ }
+ wl1271_error("firmware boot in PLT mode failed despite %d retries",
+ WL1271_BOOT_RETRIES);
out:
mutex_unlock(&wl->mutex);
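
Both wl1271_plt_start() and wl1271_op_start() now share the same retry shape: up to WL1271_BOOT_RETRIES attempts, powering the chip off between failures and only declaring success once init has completed. A condensed sketch of that control flow, with a trivial stand-in for the wakeup/boot/init steps:

#include <stdio.h>

#define BOOT_RETRIES 3		/* mirrors WL1271_BOOT_RETRIES */

/* fake boot step that fails twice before succeeding */
static int try_boot(void)
{
	static int attempts;
	return ++attempts < 3 ? -1 : 0;
}

int main(void)
{
	int retries = BOOT_RETRIES;
	int booted = 0;

	while (retries--) {
		if (try_boot() < 0) {
			/* corresponds to the power_off: label in the driver */
			printf("boot attempt failed, powering off and retrying\n");
			continue;
		}
		booted = 1;
		break;
	}

	if (!booted)
		printf("firmware boot failed despite %d retries\n", BOOT_RETRIES);
	else
		printf("firmware booted\n");
	return 0;
}
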
@@ -762,7 +798,20 @@ out:
static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+ struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
+ struct ieee80211_sta *sta = txinfo->control.sta;
+ unsigned long flags;
+
+ /* peek into the rates configured in the STA entry */
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) {
+ wl->sta_rate_set = sta->supp_rates[conf->channel->band];
+ set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
+ }
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ /* queue the packet */
skb_queue_tail(&wl->tx_queue, skb);
/*
@@ -784,7 +833,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
* protected. Maybe fix this by removing the stupid
* variable altogether and checking the real queue state?
*/
- wl->tx_queue_stopped = true;
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
}
return NETDEV_TX_OK;
@@ -880,6 +929,7 @@ static struct notifier_block wl1271_dev_notifier = {
static int wl1271_op_start(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
+ int retries = WL1271_BOOT_RETRIES;
int ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 start");
@@ -893,30 +943,42 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
goto out;
}
- ret = wl1271_chip_wakeup(wl);
- if (ret < 0)
- goto out;
-
- ret = wl1271_boot(wl);
- if (ret < 0)
- goto out_power_off;
-
- ret = wl1271_hw_init(wl);
- if (ret < 0)
- goto out_irq_disable;
-
- wl->state = WL1271_STATE_ON;
+ while (retries) {
+ retries--;
+ ret = wl1271_chip_wakeup(wl);
+ if (ret < 0)
+ goto power_off;
- wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ ret = wl1271_boot(wl);
+ if (ret < 0)
+ goto power_off;
- goto out;
+ ret = wl1271_hw_init(wl);
+ if (ret < 0)
+ goto irq_disable;
-out_irq_disable:
- wl1271_disable_interrupts(wl);
+ wl->state = WL1271_STATE_ON;
+ wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
+ goto out;
-out_power_off:
- wl1271_power_off(wl);
+irq_disable:
+ wl1271_disable_interrupts(wl);
+ mutex_unlock(&wl->mutex);
+ /* Unlocking the mutex in the middle of handling is
+ inherently unsafe. In this case we deem it safe to do,
+ because we need to let any possibly pending IRQ out of
+ the system (and while we are WL1271_STATE_OFF the IRQ
+ work function will not do anything.) Also, any other
+ possible concurrent operations will fail due to the
+ current state, hence the wl1271 struct should be safe. */
+ cancel_work_sync(&wl->irq_work);
+ mutex_lock(&wl->mutex);
+power_off:
+ wl1271_power_off(wl);
+ }
+ wl1271_error("firmware boot failed despite %d retries",
+ WL1271_BOOT_RETRIES);
out:
mutex_unlock(&wl->mutex);
@@ -944,11 +1006,10 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
WARN_ON(wl->state != WL1271_STATE_ON);
- if (wl->scanning) {
+ if (test_and_clear_bit(WL1271_FLAG_SCANNING, &wl->flags)) {
mutex_unlock(&wl->mutex);
ieee80211_scan_completed(wl->hw, true);
mutex_lock(&wl->mutex);
- wl->scanning = false;
}
wl->state = WL1271_STATE_OFF;
@@ -973,10 +1034,7 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->band = IEEE80211_BAND_2GHZ;
wl->rx_counter = 0;
- wl->elp = false;
- wl->psm = 0;
wl->psm_entry_retry = 0;
- wl->tx_queue_stopped = false;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->tx_blocks_available = 0;
wl->tx_results_count = 0;
@@ -986,7 +1044,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
wl->tx_security_seq_32 = 0;
wl->time_offset = 0;
wl->session_counter = 0;
- wl->joined = false;
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
+ wl->flags = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_blocks_freed[i] = 0;
@@ -996,13 +1056,13 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
}
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
int ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
- conf->type, conf->mac_addr);
+ vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
@@ -1010,9 +1070,9 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
goto out;
}
- wl->vif = conf->vif;
+ wl->vif = vif;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
break;
@@ -1032,7 +1092,7 @@ out:
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
@@ -1109,6 +1169,51 @@ out:
}
#endif
+static int wl1271_join_channel(struct wl1271 *wl, int channel)
+{
+ int ret = 0;
+ /* we need to use a dummy BSSID for now */
+ static const u8 dummy_bssid[ETH_ALEN] = { 0x0b, 0xad, 0xde,
+ 0xad, 0xbe, 0xef };
+
+ /* the dummy join is not required for ad-hoc */
+ if (wl->bss_type == BSS_TYPE_IBSS)
+ goto out;
+
+ /* disable mac filter, so we hear everything */
+ wl->rx_config &= ~CFG_BSSID_FILTER_EN;
+
+ wl->channel = channel;
+ memcpy(wl->bssid, dummy_bssid, ETH_ALEN);
+
+ ret = wl1271_cmd_join(wl);
+ if (ret < 0)
+ goto out;
+
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
+
+out:
+ return ret;
+}
+
+static int wl1271_unjoin_channel(struct wl1271 *wl)
+{
+ int ret;
+
+ /* to stop listening to a channel, we disconnect */
+ ret = wl1271_cmd_disconnect(wl);
+ if (ret < 0)
+ goto out;
+
+ clear_bit(WL1271_FLAG_JOINED, &wl->flags);
+ wl->channel = 0;
+ memset(wl->bssid, 0, ETH_ALEN);
+ wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
+
+out:
+ return ret;
+}
+
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1271 *wl = hw->priv;
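
The new IEEE80211_CONF_IDLE handling in wl1271_op_config() pairs with the helpers above: leaving idle triggers a dummy join (BSSID 0b:ad:de:ad:be:ef, BSSID filter off) so the device can receive on the operating channel, and entering idle triggers a disconnect. A compact sketch of that decision, with plain booleans standing in for the conf flags and the WL1271_FLAG_JOINED bit:

#include <stdio.h>
#include <stdbool.h>

/* stand-ins for wl1271_join_channel() / wl1271_unjoin_channel() */
static void join_channel(int channel, bool *joined)
{
	printf("dummy join on channel %d (BSSID filter off)\n", channel);
	*joined = true;
}

static void unjoin_channel(bool *joined)
{
	printf("disconnect, stop listening\n");
	*joined = false;
}

/* mirrors the IEEE80211_CONF_CHANGE_IDLE branch in wl1271_op_config() */
static void handle_idle_change(bool idle, int channel, bool *joined)
{
	if (idle && *joined)
		unjoin_channel(joined);
	else if (!idle)
		join_channel(channel, joined);
}

int main(void)
{
	bool joined = false;

	handle_idle_change(false, 6, &joined);	/* leaving idle: join */
	handle_idle_change(true, 6, &joined);	/* going idle: unjoin */
	return 0;
}
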
@@ -1117,10 +1222,11 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
channel = ieee80211_frequency_to_channel(conf->channel->center_freq);
- wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d",
+ wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s",
channel,
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
- conf->power_level);
+ conf->power_level,
+ conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use");
mutex_lock(&wl->mutex);
@@ -1130,35 +1236,55 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
if (ret < 0)
goto out;
- if (channel != wl->channel) {
- /*
- * We assume that the stack will configure the right channel
- * before associating, so we don't need to send a join
- * command here. We will join the right channel when the
- * BSSID changes
- */
- wl->channel = channel;
+ if (changed & IEEE80211_CONF_CHANGE_IDLE) {
+ if (conf->flags & IEEE80211_CONF_IDLE &&
+ test_bit(WL1271_FLAG_JOINED, &wl->flags))
+ wl1271_unjoin_channel(wl);
+ else if (!(conf->flags & IEEE80211_CONF_IDLE))
+ wl1271_join_channel(wl, channel);
+
+ if (conf->flags & IEEE80211_CONF_IDLE) {
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
+ wl1271_acx_rate_policies(wl);
+ }
}
- if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
- wl1271_info("psm enabled");
+ /* if the channel changes while joined, join again */
+ if (channel != wl->channel &&
+ test_bit(WL1271_FLAG_JOINED, &wl->flags)) {
+ wl->channel = channel;
+ /* FIXME: maybe use CMD_CHANNEL_SWITCH for this? */
+ ret = wl1271_cmd_join(wl);
+ if (ret < 0)
+ wl1271_warning("cmd join to update channel failed %d",
+ ret);
+ } else
+ wl->channel = channel;
- wl->psm_requested = true;
+ if (conf->flags & IEEE80211_CONF_PS &&
+ !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
+ set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
/*
* We enter PSM only if we're already associated.
* If we're not, we'll enter it when joining an SSID,
* through the bss_info_changed() hook.
*/
- ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
+ if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
+ wl1271_info("psm enabled");
+ ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
+ true);
+ }
} else if (!(conf->flags & IEEE80211_CONF_PS) &&
- wl->psm_requested) {
+ test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
wl1271_info("psm disabled");
- wl->psm_requested = false;
+ clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);
- if (wl->psm)
- ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE);
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags))
+ ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
+ true);
}
if (conf->power_level != wl->power_level) {
@@ -1350,9 +1476,24 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
wl1271_error("Could not add or replace key");
goto out_sleep;
}
+
+ /* the default WEP key needs to be configured at least once */
+ if (key_type == KEY_WEP) {
+ ret = wl1271_cmd_set_default_wep_key(wl,
+ wl->default_key);
+ if (ret < 0)
+ goto out_sleep;
+ }
break;
case DISABLE_KEY:
+ /* The wl1271 does not allow removal of unicast keys - they
+ will be cleared automatically on the next CMD_JOIN. Ignore the
+ request silently, as we don't want mac80211 to emit
+ an error message. */
+ if (!is_broadcast_ether_addr(addr))
+ break;
+
ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
@@ -1440,20 +1581,21 @@ out:
return ret;
}
-static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
+static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *beacon)
{
- struct ieee80211_supported_band *band;
- u32 enabled_rates = 0;
- int bit;
-
- band = wl->hw->wiphy->bands[wl->band];
- for (bit = 0; bit < band->n_bitrates; bit++) {
- if (basic_rate_set & 0x1)
- enabled_rates |= band->bitrates[bit].hw_value;
- basic_rate_set >>= 1;
+ u8 *ptr = beacon->data +
+ offsetof(struct ieee80211_mgmt, u.beacon.variable);
+
+ /* find the location of the ssid in the beacon */
+ while (ptr < beacon->data + beacon->len) {
+ if (ptr[0] == WLAN_EID_SSID) {
+ wl->ssid_len = ptr[1];
+ memcpy(wl->ssid, ptr+2, wl->ssid_len);
+ return;
+ }
+ ptr += ptr[1] + 2;
}
-
- return enabled_rates;
+ wl1271_error("ad-hoc beacon template has no SSID!\n");
}
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
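
wl1271_ssid_set() walks the beacon's tagged information elements looking for the SSID element (ID 0). Each element is a 2-byte header (ID, length) followed by the payload, so the cursor has to advance by length + 2 per element. A stand-alone sketch of that walk over a hand-built element buffer:

#include <stdio.h>
#include <string.h>

#define EID_SSID 0	/* WLAN_EID_SSID */

/* find the SSID element in a buffer of 802.11 IEs: [id][len][payload]... */
static int find_ssid(const unsigned char *ies, size_t len,
		     char *ssid, size_t ssid_size)
{
	const unsigned char *ptr = ies;

	while (ptr + 2 <= ies + len) {
		unsigned char id = ptr[0];
		unsigned char elen = ptr[1];

		if (ptr + 2 + elen > ies + len)
			break;		/* truncated element */
		if (id == EID_SSID && elen < ssid_size) {
			memcpy(ssid, ptr + 2, elen);
			ssid[elen] = '\0';
			return 0;
		}
		ptr += elen + 2;	/* skip the 2-byte header too */
	}
	return -1;
}

int main(void)
{
	/* DS Parameter Set (id 3) followed by SSID "wl12xx" (id 0) */
	unsigned char ies[] = { 3, 1, 6,  0, 6, 'w', 'l', '1', '2', 'x', 'x' };
	char ssid[33];

	if (find_ssid(ies, sizeof(ies), ssid, sizeof(ssid)) == 0)
		printf("SSID: %s\n", ssid);
	return 0;
}
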
@@ -1463,6 +1605,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
{
enum wl1271_cmd_ps_mode mode;
struct wl1271 *wl = hw->priv;
+ bool do_join = false;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info changed");
@@ -1473,9 +1616,67 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
if (ret < 0)
goto out;
+ if (wl->bss_type == BSS_TYPE_IBSS) {
+ /* FIXME: This implements rudimentary ad-hoc support -
+ proper templates are on the wish list and notification
+ on when they change. This patch will update the templates
+ on every call to this function. */
+ struct sk_buff *beacon = ieee80211_beacon_get(hw, vif);
+
+ if (beacon) {
+ struct ieee80211_hdr *hdr;
+
+ wl1271_ssid_set(wl, beacon);
+ ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
+ beacon->data,
+ beacon->len);
+
+ if (ret < 0) {
+ dev_kfree_skb(beacon);
+ goto out_sleep;
+ }
+
+ hdr = (struct ieee80211_hdr *) beacon->data;
+ hdr->frame_control = cpu_to_le16(
+ IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_PROBE_RESP);
+
+ ret = wl1271_cmd_template_set(wl,
+ CMD_TEMPL_PROBE_RESPONSE,
+ beacon->data,
+ beacon->len);
+ dev_kfree_skb(beacon);
+ if (ret < 0)
+ goto out_sleep;
+
+ /* Need to update the SSID (for filtering etc) */
+ do_join = true;
+ }
+ }
+
+ /*
+ * Now we know the correct bssid, so we send a new join command
+ * and enable the BSSID filter
+ */
+ if ((changed & BSS_CHANGED_BSSID) &&
+ memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
+ wl->rx_config |= CFG_BSSID_FILTER_EN;
+ memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
+ ret = wl1271_cmd_build_null_data(wl);
+ if (ret < 0) {
+ wl1271_warning("cmd buld null data failed %d",
+ ret);
+ goto out_sleep;
+ }
+
+ /* Need to update the BSSID (for filtering etc) */
+ do_join = true;
+ }
+
if (changed & BSS_CHANGED_ASSOC) {
if (bss_conf->assoc) {
wl->aid = bss_conf->aid;
+ set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
/*
* with wl1271, we don't need to update the
@@ -1492,15 +1693,16 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
goto out_sleep;
/* If we want to go in PSM but we're not there yet */
- if (wl->psm_requested && !wl->psm) {
+ if (test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags) &&
+ !test_bit(WL1271_FLAG_PSM, &wl->flags)) {
mode = STATION_POWER_SAVE_MODE;
- ret = wl1271_ps_set_mode(wl, mode);
+ ret = wl1271_ps_set_mode(wl, mode, true);
if (ret < 0)
goto out_sleep;
}
} else {
/* use defaults when not associated */
- wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+ clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
wl->aid = 0;
}
@@ -1535,15 +1737,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
}
}
- if (changed & BSS_CHANGED_BASIC_RATES) {
- wl->basic_rate_set = wl1271_enabled_rates_get(
- wl, bss_conf->basic_rates);
-
- ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
+ if (do_join) {
+ ret = wl1271_cmd_join(wl);
if (ret < 0) {
- wl1271_warning("Set rate policies failed %d", ret);
+ wl1271_warning("cmd join failed %d", ret);
goto out_sleep;
}
+ set_bit(WL1271_FLAG_JOINED, &wl->flags);
}
out_sleep:
@@ -1553,6 +1753,43 @@ out:
mutex_unlock(&wl->mutex);
}
+static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
+ const struct ieee80211_tx_queue_params *params)
+{
+ struct wl1271 *wl = hw->priv;
+ int ret;
+
+ mutex_lock(&wl->mutex);
+
+ wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
+
+ ret = wl1271_ps_elp_wakeup(wl, false);
+ if (ret < 0)
+ goto out;
+
+ ret = wl1271_acx_ac_cfg(wl, wl1271_tx_get_queue(queue),
+ params->cw_min, params->cw_max,
+ params->aifs, params->txop);
+ if (ret < 0)
+ goto out_sleep;
+
+ ret = wl1271_acx_tid_cfg(wl, wl1271_tx_get_queue(queue),
+ CONF_CHANNEL_TYPE_EDCF,
+ wl1271_tx_get_queue(queue),
+ CONF_PS_SCHEME_LEGACY_PSPOLL,
+ CONF_ACK_POLICY_LEGACY, 0, 0);
+ if (ret < 0)
+ goto out_sleep;
+
+out_sleep:
+ wl1271_ps_elp_sleep(wl);
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
@@ -1599,19 +1836,19 @@ static struct ieee80211_rate wl1271_rates[] = {
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
- { .hw_value = 1, .center_freq = 2412},
- { .hw_value = 2, .center_freq = 2417},
- { .hw_value = 3, .center_freq = 2422},
- { .hw_value = 4, .center_freq = 2427},
- { .hw_value = 5, .center_freq = 2432},
- { .hw_value = 6, .center_freq = 2437},
- { .hw_value = 7, .center_freq = 2442},
- { .hw_value = 8, .center_freq = 2447},
- { .hw_value = 9, .center_freq = 2452},
- { .hw_value = 10, .center_freq = 2457},
- { .hw_value = 11, .center_freq = 2462},
- { .hw_value = 12, .center_freq = 2467},
- { .hw_value = 13, .center_freq = 2472},
+ { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
+ { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
+ { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
+ { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
+ { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
+ { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
+ { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
+ { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
+ { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
+ { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
+ { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
+ { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
+ { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
};
/* can't be const, mac80211 writes to this */
@@ -1718,6 +1955,8 @@ static const struct ieee80211_ops wl1271_ops = {
.hw_scan = wl1271_op_hw_scan,
.bss_info_changed = wl1271_op_bss_info_changed,
.set_rts_threshold = wl1271_op_set_rts_threshold,
+ .conf_tx = wl1271_op_conf_tx,
+ CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
static int wl1271_register_hw(struct wl1271 *wl)
@@ -1757,7 +1996,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_BEACON_FILTER |
IEEE80211_HW_SUPPORTS_PS;
- wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
+ wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
+ BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
@@ -1785,24 +2025,17 @@ static struct platform_device wl1271_device = {
};
#define WL1271_DEFAULT_CHANNEL 0
-static int __devinit wl1271_probe(struct spi_device *spi)
+
+static struct ieee80211_hw *wl1271_alloc_hw(void)
{
- struct wl12xx_platform_data *pdata;
struct ieee80211_hw *hw;
struct wl1271 *wl;
- int ret, i;
- static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
-
- pdata = spi->dev.platform_data;
- if (!pdata) {
- wl1271_error("no platform data");
- return -ENODEV;
- }
+ int i;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
wl = hw->priv;
@@ -1811,44 +2044,80 @@ static int __devinit wl1271_probe(struct spi_device *spi)
INIT_LIST_HEAD(&wl->list);
wl->hw = hw;
- dev_set_drvdata(&spi->dev, wl);
- wl->spi = spi;
skb_queue_head_init(&wl->tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
wl->channel = WL1271_DEFAULT_CHANNEL;
- wl->scanning = false;
wl->default_key = 0;
wl->rx_counter = 0;
wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
- wl->elp = false;
- wl->psm = 0;
- wl->psm_requested = false;
wl->psm_entry_retry = 0;
- wl->tx_queue_stopped = false;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
- wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
+ wl->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->rate_set = CONF_TX_RATE_MASK_BASIC;
+ wl->sta_rate_set = 0;
wl->band = IEEE80211_BAND_2GHZ;
wl->vif = NULL;
- wl->joined = false;
+ wl->flags = 0;
for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
wl->tx_frames[i] = NULL;
spin_lock_init(&wl->wl_lock);
- /*
- * In case our MAC address is not correctly set,
- * we use a random but Nokia MAC.
- */
- memcpy(wl->mac_addr, nokia_oui, 3);
- get_random_bytes(wl->mac_addr + 3, 3);
-
wl->state = WL1271_STATE_OFF;
mutex_init(&wl->mutex);
+ /* Apply default driver configuration. */
+ wl1271_conf_init(wl);
+
+ return hw;
+}
+
+int wl1271_free_hw(struct wl1271 *wl)
+{
+ ieee80211_unregister_hw(wl->hw);
+
+ wl1271_debugfs_exit(wl);
+
+ kfree(wl->target_mem_map);
+ vfree(wl->fw);
+ wl->fw = NULL;
+ kfree(wl->nvs);
+ wl->nvs = NULL;
+
+ kfree(wl->fw_status);
+ kfree(wl->tx_res_if);
+
+ ieee80211_free_hw(wl->hw);
+
+ return 0;
+}
+
+static int __devinit wl1271_probe(struct spi_device *spi)
+{
+ struct wl12xx_platform_data *pdata;
+ struct ieee80211_hw *hw;
+ struct wl1271 *wl;
+ int ret;
+
+ pdata = spi->dev.platform_data;
+ if (!pdata) {
+ wl1271_error("no platform data");
+ return -ENODEV;
+ }
+
+ hw = wl1271_alloc_hw();
+ if (IS_ERR(hw))
+ return PTR_ERR(hw);
+
+ wl = hw->priv;
+
+ dev_set_drvdata(&spi->dev, wl);
+ wl->spi = spi;
+
/* This is the only SPI value that we need to set here, the rest
* comes from the board-peripherals file */
spi->bits_per_word = 32;
@@ -1890,9 +2159,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
}
dev_set_drvdata(&wl1271_device.dev, wl);
- /* Apply default driver configuration. */
- wl1271_conf_init(wl);
-
ret = wl1271_init_ieee80211(wl);
if (ret)
goto out_platform;
@@ -1923,21 +2189,10 @@ static int __devexit wl1271_remove(struct spi_device *spi)
{
struct wl1271 *wl = dev_get_drvdata(&spi->dev);
- ieee80211_unregister_hw(wl->hw);
-
- wl1271_debugfs_exit(wl);
platform_device_unregister(&wl1271_device);
free_irq(wl->irq, wl);
- kfree(wl->target_mem_map);
- vfree(wl->fw);
- wl->fw = NULL;
- kfree(wl->nvs);
- wl->nvs = NULL;
- kfree(wl->fw_status);
- kfree(wl->tx_res_if);
-
- ieee80211_free_hw(wl->hw);
+ wl1271_free_hw(wl);
return 0;
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 507cd91d7ee..e2b1ebf096e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -24,6 +24,7 @@
#include "wl1271_reg.h"
#include "wl1271_ps.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#define WL1271_WAKEUP_TIMEOUT 500
@@ -39,12 +40,13 @@ void wl1271_elp_work(struct work_struct *work)
mutex_lock(&wl->mutex);
- if (wl->elp || !wl->psm)
+ if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags) ||
+ !test_bit(WL1271_FLAG_PSM, &wl->flags))
goto out;
wl1271_debug(DEBUG_PSM, "chip to elp");
wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
- wl->elp = true;
+ set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
out:
mutex_unlock(&wl->mutex);
@@ -55,7 +57,7 @@ out:
/* Routines to toggle sleep mode while in ELP */
void wl1271_ps_elp_sleep(struct wl1271 *wl)
{
- if (wl->psm) {
+ if (test_bit(WL1271_FLAG_PSM, &wl->flags)) {
cancel_delayed_work(&wl->elp_work);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
msecs_to_jiffies(ELP_ENTRY_DELAY));
@@ -70,7 +72,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
u32 start_time = jiffies;
bool pending = false;
- if (!wl->elp)
+ if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
return 0;
wl1271_debug(DEBUG_PSM, "waking up chip from elp");
@@ -101,7 +103,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
}
}
- wl->elp = false;
+ clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
jiffies_to_msecs(jiffies - start_time));
@@ -117,7 +119,8 @@ out:
return 0;
}
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
+int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
+ bool send)
{
int ret;
@@ -125,25 +128,11 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
case STATION_POWER_SAVE_MODE:
wl1271_debug(DEBUG_PSM, "entering psm");
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, true);
+ ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE, send);
if (ret < 0)
return ret;
- /* enable beacon early termination */
- ret = wl1271_acx_bet_enable(wl, true);
- if (ret < 0)
- return ret;
-
- ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
- if (ret < 0)
- return ret;
-
- wl1271_ps_elp_sleep(wl);
- if (ret < 0)
- return ret;
-
- wl->psm = 1;
+ set_bit(WL1271_FLAG_PSM, &wl->flags);
break;
case STATION_ACTIVE_MODE:
default:
@@ -162,11 +151,11 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
if (ret < 0)
return ret;
- ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
+ ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE, send);
if (ret < 0)
return ret;
- wl->psm = 0;
+ clear_bit(WL1271_FLAG_PSM, &wl->flags);
break;
}
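As an aside (an editorial sketch, not part of this patch): the power-save hunks above, like many others in this series, replace the driver's individual status booleans (wl->elp, wl->psm, wl->tx_queue_stopped) with bit indices in a single wl->flags word that is tested and updated through the kernel's atomic bit operations. A minimal sketch of that pattern, using hypothetical flag names rather than the driver's own:

#include <linux/bitops.h>

/* Hypothetical flag indices sharing one unsigned long word. */
enum { EXAMPLE_FLAG_IN_ELP, EXAMPLE_FLAG_PSM };

static void example_update_flags(unsigned long *flags)
{
	set_bit(EXAMPLE_FLAG_PSM, flags);              /* atomic set */
	if (test_bit(EXAMPLE_FLAG_IN_ELP, flags))      /* atomic test */
		clear_bit(EXAMPLE_FLAG_IN_ELP, flags); /* atomic clear */
}

Because each operation is atomic on the whole word, single-bit updates do not need a lock taken purely to protect the flag word.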
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h
index 779653d0ae8..940276f517a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.h
@@ -27,7 +27,8 @@
#include "wl1271.h"
#include "wl1271_acx.h"
-int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode);
+int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
+ bool send);
void wl1271_ps_elp_sleep(struct wl1271 *wl);
int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
void wl1271_elp_work(struct work_struct *work);
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index 1f237389d1c..99096077152 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -62,73 +62,10 @@
#define WL1271_SLV_REG_DATA (REGISTERS_BASE + 0x0008)
#define WL1271_SLV_REG_ADATA (REGISTERS_BASE + 0x000c)
#define WL1271_SLV_MEM_DATA (REGISTERS_BASE + 0x0018)
-/*
- * Interrupt registers.
- * 64 bit interrupt sources registers ws ced.
- * sme interupts were removed and new ones were added.
- * Order was changed.
- */
-#define FIQ_MASK (REGISTERS_BASE + 0x0400)
-#define FIQ_MASK_L (REGISTERS_BASE + 0x0400)
-#define FIQ_MASK_H (REGISTERS_BASE + 0x0404)
-#define FIQ_MASK_SET (REGISTERS_BASE + 0x0408)
-#define FIQ_MASK_SET_L (REGISTERS_BASE + 0x0408)
-#define FIQ_MASK_SET_H (REGISTERS_BASE + 0x040C)
-#define FIQ_MASK_CLR (REGISTERS_BASE + 0x0410)
-#define FIQ_MASK_CLR_L (REGISTERS_BASE + 0x0410)
-#define FIQ_MASK_CLR_H (REGISTERS_BASE + 0x0414)
-#define IRQ_MASK (REGISTERS_BASE + 0x0418)
-#define IRQ_MASK_L (REGISTERS_BASE + 0x0418)
-#define IRQ_MASK_H (REGISTERS_BASE + 0x041C)
-#define IRQ_MASK_SET (REGISTERS_BASE + 0x0420)
-#define IRQ_MASK_SET_L (REGISTERS_BASE + 0x0420)
-#define IRQ_MASK_SET_H (REGISTERS_BASE + 0x0424)
-#define IRQ_MASK_CLR (REGISTERS_BASE + 0x0428)
-#define IRQ_MASK_CLR_L (REGISTERS_BASE + 0x0428)
-#define IRQ_MASK_CLR_H (REGISTERS_BASE + 0x042C)
-#define ECPU_MASK (REGISTERS_BASE + 0x0448)
-#define FIQ_STS_L (REGISTERS_BASE + 0x044C)
-#define FIQ_STS_H (REGISTERS_BASE + 0x0450)
-#define IRQ_STS_L (REGISTERS_BASE + 0x0454)
-#define IRQ_STS_H (REGISTERS_BASE + 0x0458)
-#define INT_STS_ND (REGISTERS_BASE + 0x0464)
-#define INT_STS_RAW_L (REGISTERS_BASE + 0x0464)
-#define INT_STS_RAW_H (REGISTERS_BASE + 0x0468)
-#define INT_STS_CLR (REGISTERS_BASE + 0x04B4)
-#define INT_STS_CLR_L (REGISTERS_BASE + 0x04B4)
-#define INT_STS_CLR_H (REGISTERS_BASE + 0x04B8)
-#define INT_ACK (REGISTERS_BASE + 0x046C)
-#define INT_ACK_L (REGISTERS_BASE + 0x046C)
-#define INT_ACK_H (REGISTERS_BASE + 0x0470)
-#define INT_TRIG (REGISTERS_BASE + 0x0474)
-#define INT_TRIG_L (REGISTERS_BASE + 0x0474)
-#define INT_TRIG_H (REGISTERS_BASE + 0x0478)
-#define HOST_STS_L (REGISTERS_BASE + 0x045C)
-#define HOST_STS_H (REGISTERS_BASE + 0x0460)
-#define HOST_MASK (REGISTERS_BASE + 0x0430)
-#define HOST_MASK_L (REGISTERS_BASE + 0x0430)
-#define HOST_MASK_H (REGISTERS_BASE + 0x0434)
-#define HOST_MASK_SET (REGISTERS_BASE + 0x0438)
-#define HOST_MASK_SET_L (REGISTERS_BASE + 0x0438)
-#define HOST_MASK_SET_H (REGISTERS_BASE + 0x043C)
-#define HOST_MASK_CLR (REGISTERS_BASE + 0x0440)
-#define HOST_MASK_CLR_L (REGISTERS_BASE + 0x0440)
-#define HOST_MASK_CLR_H (REGISTERS_BASE + 0x0444)
#define ACX_REG_INTERRUPT_TRIG (REGISTERS_BASE + 0x0474)
#define ACX_REG_INTERRUPT_TRIG_H (REGISTERS_BASE + 0x0478)
-/* Host Interrupts*/
-#define HINT_MASK (REGISTERS_BASE + 0x0494)
-#define HINT_MASK_SET (REGISTERS_BASE + 0x0498)
-#define HINT_MASK_CLR (REGISTERS_BASE + 0x049C)
-#define HINT_STS_ND_MASKED (REGISTERS_BASE + 0x04A0)
-/*1150 spec calls this HINT_STS_RAW*/
-#define HINT_STS_ND (REGISTERS_BASE + 0x04B0)
-#define HINT_STS_CLR (REGISTERS_BASE + 0x04A4)
-#define HINT_ACK (REGISTERS_BASE + 0x04A8)
-#define HINT_TRIG (REGISTERS_BASE + 0x04AC)
-
/*=============================================
Host Interrupt Mask Register - 32bit (RW)
------------------------------------------
@@ -433,16 +370,6 @@
/*===============================================
- Phy regs
- ===============================================*/
-#define ACX_PHY_ADDR_REG SBB_ADDR
-#define ACX_PHY_DATA_REG SBB_DATA
-#define ACX_PHY_CTRL_REG SBB_CTL
-#define ACX_PHY_REG_WR_MASK 0x00000001ul
-#define ACX_PHY_REG_RD_MASK 0x00000002ul
-
-
-/*===============================================
EEPROM Read/Write Request 32bit RW
------------------------------------------
1 EE_READ - EEPROM Read Request 1 - Setting this bit
@@ -511,28 +438,6 @@
#define ACX_CONT_WIND_MIN_MASK 0x0000007f
#define ACX_CONT_WIND_MAX 0x03ff0000
-/*
- * Indirect slave register/memory registers
- * ----------------------------------------
- */
-#define HW_SLAVE_REG_ADDR_REG 0x00000004
-#define HW_SLAVE_REG_DATA_REG 0x00000008
-#define HW_SLAVE_REG_CTRL_REG 0x0000000c
-
-#define SLAVE_AUTO_INC 0x00010000
-#define SLAVE_NO_AUTO_INC 0x00000000
-#define SLAVE_HOST_LITTLE_ENDIAN 0x00000000
-
-#define HW_SLAVE_MEM_ADDR_REG SLV_MEM_ADDR
-#define HW_SLAVE_MEM_DATA_REG SLV_MEM_DATA
-#define HW_SLAVE_MEM_CTRL_REG SLV_MEM_CTL
-#define HW_SLAVE_MEM_ENDIAN_REG SLV_END_CTL
-
-#define HW_FUNC_EVENT_INT_EN 0x8000
-#define HW_FUNC_EVENT_MASK_REG 0x00000034
-
-#define ACX_MAC_TIMESTAMP_REG (MAC_TIMESTAMP)
-
/*===============================================
HI_CFG Interface Configuration Register Values
------------------------------------------
@@ -647,10 +552,6 @@ b12-b0 - Supported Rate indicator bits as defined below.
******************************************************************************/
-#define TNETW1251_CHIP_ID_PG1_0 0x07010101
-#define TNETW1251_CHIP_ID_PG1_1 0x07020101
-#define TNETW1251_CHIP_ID_PG1_2 0x07030101
-
/*************************************************************************
Interrupt Trigger Register (Host -> WiLink)
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index ca645f38109..6730f5b96e7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -26,6 +26,7 @@
#include "wl1271_reg.h"
#include "wl1271_rx.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
u32 drv_rx_counter)
@@ -166,7 +167,7 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
}
buf = skb_put(skb, length);
- wl1271_spi_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
+ wl1271_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
/* the data read starts with the descriptor */
desc = (struct wl1271_rx_descriptor *) buf;
@@ -210,15 +211,13 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
wl->rx_mem_pool_addr.addr + 4;
/* Choose the block we want to read */
- wl1271_spi_write(wl, WL1271_SLV_REG_DATA,
- &wl->rx_mem_pool_addr,
- sizeof(wl->rx_mem_pool_addr), false);
+ wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr,
+ sizeof(wl->rx_mem_pool_addr), false);
wl1271_rx_handle_data(wl, buf_size);
wl->rx_counter++;
drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
+ wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
-
- wl1271_spi_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 02978a16e73..67a82934f36 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -30,28 +30,6 @@
#include "wl12xx_80211.h"
#include "wl1271_spi.h"
-static int wl1271_translate_addr(struct wl1271 *wl, int addr)
-{
- /*
- * To translate, first check to which window of addresses the
- * particular address belongs. Then subtract the starting address
- * of that window from the address. Then, add offset of the
- * translated region.
- *
- * The translated regions occur next to each other in physical device
- * memory, so just add the sizes of the preceeding address regions to
- * get the offset to the new region.
- *
- * Currently, only the two first regions are addressed, and the
- * assumption is that all addresses will fall into either of those
- * two.
- */
- if ((addr >= wl->part.reg.start) &&
- (addr < wl->part.reg.start + wl->part.reg.size))
- return addr - wl->part.reg.start + wl->part.mem.size;
- else
- return addr - wl->part.mem.start;
-}
void wl1271_spi_reset(struct wl1271 *wl)
{
@@ -133,67 +111,6 @@ void wl1271_spi_init(struct wl1271 *wl)
wl1271_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
}
-/* Set the SPI partitions to access the chip addresses
- *
- * To simplify driver code, a fixed (virtual) memory map is defined for
- * register and memory addresses. Because in the chipset, in different stages
- * of operation, those addresses will move around, an address translation
- * mechanism is required.
- *
- * There are four partitions (three memory and one register partition),
- * which are mapped to two different areas of the hardware memory.
- *
- * Virtual address
- * space
- *
- * | |
- * ...+----+--> mem.start
- * Physical address ... | |
- * space ... | | [PART_0]
- * ... | |
- * 00000000 <--+----+... ...+----+--> mem.start + mem.size
- * | | ... | |
- * |MEM | ... | |
- * | | ... | |
- * mem.size <--+----+... | | {unused area)
- * | | ... | |
- * |REG | ... | |
- * mem.size | | ... | |
- * + <--+----+... ...+----+--> reg.start
- * reg.size | | ... | |
- * |MEM2| ... | | [PART_1]
- * | | ... | |
- * ...+----+--> reg.start + reg.size
- * | |
- *
- */
-int wl1271_set_partition(struct wl1271 *wl,
- struct wl1271_partition_set *p)
-{
- /* copy partition info */
- memcpy(&wl->part, p, sizeof(*p));
-
- wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
- p->mem.start, p->mem.size);
- wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
- p->reg.start, p->reg.size);
- wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
- p->mem2.start, p->mem2.size);
- wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
- p->mem3.start, p->mem3.size);
-
- /* write partition info to the chipset */
- wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
- wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
- wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
- wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
- wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
- wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
- wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
-
- return 0;
-}
-
#define WL1271_BUSY_WORD_TIMEOUT 1000
/* FIXME: Check busy words, removed due to SPI bug */
@@ -338,78 +255,3 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
wl1271_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
}
-
-void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_read(wl, physical, buf, len, fixed);
-}
-
-void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed)
-{
- int physical;
-
- physical = wl1271_translate_addr(wl, addr);
-
- wl1271_spi_raw_write(wl, physical, buf, len, fixed);
-}
-
-u32 wl1271_spi_read32(struct wl1271 *wl, int addr)
-{
- return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
-}
-
-void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val)
-{
- wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
-}
-
-void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
-{
- /* write address >> 1 + 0x30000 to OCP_POR_CTR */
- addr = (addr >> 1) + 0x30000;
- wl1271_spi_write32(wl, OCP_POR_CTR, addr);
-
- /* write value to OCP_POR_WDATA */
- wl1271_spi_write32(wl, OCP_DATA_WRITE, val);
-
- /* write 1 to OCP_CMD */
- wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_WRITE);
-}
-
-u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
-{
- u32 val;
- int timeout = OCP_CMD_LOOP;
-
- /* write address >> 1 + 0x30000 to OCP_POR_CTR */
- addr = (addr >> 1) + 0x30000;
- wl1271_spi_write32(wl, OCP_POR_CTR, addr);
-
- /* write 2 to OCP_CMD */
- wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_READ);
-
- /* poll for data ready */
- do {
- val = wl1271_spi_read32(wl, OCP_DATA_READ);
- timeout--;
- } while (!(val & OCP_READY_MASK) && timeout);
-
- if (!timeout) {
- wl1271_warning("Top register access timed out.");
- return 0xffff;
- }
-
- /* check data status and return if OK */
- if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
- return val & 0xffff;
- else {
- wl1271_warning("Top register access returned error.");
- return 0xffff;
- }
-}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
index cb7df1c5631..a803596dad4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.h
@@ -90,37 +90,7 @@ void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
size_t len, bool fixed);
-/* Translated target IO */
-void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
- bool fixed);
-u32 wl1271_spi_read32(struct wl1271 *wl, int addr);
-void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val);
-
-/* Top Register IO */
-void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
-u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
-
/* INIT and RESET words */
void wl1271_spi_reset(struct wl1271 *wl);
void wl1271_spi_init(struct wl1271 *wl);
-int wl1271_set_partition(struct wl1271 *wl,
- struct wl1271_partition_set *p);
-
-static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
-{
- wl1271_spi_raw_read(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
-
- return wl->buffer_32;
-}
-
-static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
-{
- wl->buffer_32 = val;
- wl1271_spi_raw_write(wl, addr, &wl->buffer_32,
- sizeof(wl->buffer_32), false);
-}
-
#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/wl1271_testmode.c
new file mode 100644
index 00000000000..3919102e942
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.c
@@ -0,0 +1,283 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+#include "wl1271_testmode.h"
+
+#include <net/genetlink.h>
+
+#include "wl1271.h"
+#include "wl1271_spi.h"
+#include "wl1271_acx.h"
+
+#define WL1271_TM_MAX_DATA_LENGTH 1024
+
+enum wl1271_tm_commands {
+ WL1271_TM_CMD_UNSPEC,
+ WL1271_TM_CMD_TEST,
+ WL1271_TM_CMD_INTERROGATE,
+ WL1271_TM_CMD_CONFIGURE,
+ WL1271_TM_CMD_NVS_PUSH,
+ WL1271_TM_CMD_SET_PLT_MODE,
+
+ __WL1271_TM_CMD_AFTER_LAST
+};
+#define WL1271_TM_CMD_MAX (__WL1271_TM_CMD_AFTER_LAST - 1)
+
+enum wl1271_tm_attrs {
+ WL1271_TM_ATTR_UNSPEC,
+ WL1271_TM_ATTR_CMD_ID,
+ WL1271_TM_ATTR_ANSWER,
+ WL1271_TM_ATTR_DATA,
+ WL1271_TM_ATTR_IE_ID,
+ WL1271_TM_ATTR_PLT_MODE,
+
+ __WL1271_TM_ATTR_AFTER_LAST
+};
+#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1)
+
+static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
+ [WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 },
+ [WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 },
+ [WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY,
+ .len = WL1271_TM_MAX_DATA_LENGTH },
+ [WL1271_TM_ATTR_IE_ID] = { .type = NLA_U32 },
+ [WL1271_TM_ATTR_PLT_MODE] = { .type = NLA_U32 },
+};
+
+
+static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
+{
+ int buf_len, ret, len;
+ struct sk_buff *skb;
+ void *buf;
+ u8 answer = 0;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd test");
+
+ if (!tb[WL1271_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
+
+ if (tb[WL1271_TM_ATTR_ANSWER])
+ answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]);
+
+ if (buf_len > sizeof(struct wl1271_command))
+ return -EMSGSIZE;
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_cmd_test(wl, buf, buf_len, answer);
+ mutex_unlock(&wl->mutex);
+
+ if (ret < 0) {
+ wl1271_warning("testmode cmd test failed: %d", ret);
+ return ret;
+ }
+
+ if (answer) {
+ len = nla_total_size(buf_len);
+ skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
+ if (!skb)
+ return -ENOMEM;
+
+ NLA_PUT(skb, WL1271_TM_ATTR_DATA, buf_len, buf);
+ ret = cfg80211_testmode_reply(skb);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
+{
+ int ret;
+ struct wl1271_command *cmd;
+ struct sk_buff *skb;
+ u8 ie_id;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd interrogate");
+
+ if (!tb[WL1271_TM_ATTR_IE_ID])
+ return -EINVAL;
+
+ ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
+
+ cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_cmd_interrogate(wl, ie_id, cmd, sizeof(*cmd));
+ mutex_unlock(&wl->mutex);
+
+ if (ret < 0) {
+ wl1271_warning("testmode cmd interrogate failed: %d", ret);
+ return ret;
+ }
+
+ skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
+ if (!skb)
+ return -ENOMEM;
+
+ NLA_PUT(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd);
+
+ return 0;
+
+nla_put_failure:
+ kfree_skb(skb);
+ return -EMSGSIZE;
+}
+
+static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
+{
+ int buf_len, ret;
+ void *buf;
+ u8 ie_id;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd configure");
+
+ if (!tb[WL1271_TM_ATTR_DATA])
+ return -EINVAL;
+ if (!tb[WL1271_TM_ATTR_IE_ID])
+ return -EINVAL;
+
+ ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
+ buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
+ buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
+
+ if (buf_len > sizeof(struct wl1271_command))
+ return -EMSGSIZE;
+
+ mutex_lock(&wl->mutex);
+ ret = wl1271_cmd_configure(wl, ie_id, buf, buf_len);
+ mutex_unlock(&wl->mutex);
+
+ if (ret < 0) {
+ wl1271_warning("testmode cmd configure failed: %d", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int wl1271_tm_cmd_nvs_push(struct wl1271 *wl, struct nlattr *tb[])
+{
+ int ret = 0;
+ size_t len;
+ void *buf;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd nvs push");
+
+ if (!tb[WL1271_TM_ATTR_DATA])
+ return -EINVAL;
+
+ buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
+ len = nla_len(tb[WL1271_TM_ATTR_DATA]);
+
+ if (len != sizeof(struct wl1271_nvs_file)) {
+ wl1271_error("nvs size is not as expected: %zu != %zu",
+ len, sizeof(struct wl1271_nvs_file));
+ return -EMSGSIZE;
+ }
+
+ mutex_lock(&wl->mutex);
+
+ kfree(wl->nvs);
+
+ wl->nvs = kmalloc(sizeof(struct wl1271_nvs_file), GFP_KERNEL);
+ if (!wl->nvs) {
+ wl1271_error("could not allocate memory for the nvs file");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(wl->nvs, buf, len);
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode pushed nvs");
+
+out:
+ mutex_unlock(&wl->mutex);
+
+ return ret;
+}
+
+static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
+{
+ u32 val;
+ int ret;
+
+ wl1271_debug(DEBUG_TESTMODE, "testmode cmd set plt mode");
+
+ if (!tb[WL1271_TM_ATTR_PLT_MODE])
+ return -EINVAL;
+
+ val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]);
+
+ switch (val) {
+ case 0:
+ ret = wl1271_plt_stop(wl);
+ break;
+ case 1:
+ ret = wl1271_plt_start(wl);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
+{
+ struct wl1271 *wl = hw->priv;
+ struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
+ int err;
+
+ err = nla_parse(tb, WL1271_TM_ATTR_MAX, data, len, wl1271_tm_policy);
+ if (err)
+ return err;
+
+ if (!tb[WL1271_TM_ATTR_CMD_ID])
+ return -EINVAL;
+
+ switch (nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID])) {
+ case WL1271_TM_CMD_TEST:
+ return wl1271_tm_cmd_test(wl, tb);
+ case WL1271_TM_CMD_INTERROGATE:
+ return wl1271_tm_cmd_interrogate(wl, tb);
+ case WL1271_TM_CMD_CONFIGURE:
+ return wl1271_tm_cmd_configure(wl, tb);
+ case WL1271_TM_CMD_NVS_PUSH:
+ return wl1271_tm_cmd_nvs_push(wl, tb);
+ case WL1271_TM_CMD_SET_PLT_MODE:
+ return wl1271_tm_cmd_set_plt_mode(wl, tb);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
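The dispatcher above is reached through nl80211's testmode command, with the WL1271_TM_* attributes nested inside NL80211_ATTR_TESTDATA. As a rough user-space sketch (libnl-3 based, not part of this patch; error handling omitted, and the interface name "wlan0" is only a placeholder), switching the device into PLT mode could look like:

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>
#include <net/if.h>

/* Numeric IDs mirroring the driver enums above. */
#define WL1271_TM_CMD_SET_PLT_MODE 5
#define WL1271_TM_ATTR_CMD_ID      1
#define WL1271_TM_ATTR_PLT_MODE    5

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *td;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "nl80211");

	genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_TESTMODE, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex("wlan0"));

	/* Driver-specific attributes are nested inside NL80211_ATTR_TESTDATA. */
	td = nla_nest_start(msg, NL80211_ATTR_TESTDATA);
	nla_put_u32(msg, WL1271_TM_ATTR_CMD_ID, WL1271_TM_CMD_SET_PLT_MODE);
	nla_put_u32(msg, WL1271_TM_ATTR_PLT_MODE, 1);	/* 1 = start PLT */
	nla_nest_end(msg, td);

	nl_send_auto(sk, msg);
	return 0;
}

Built against libnl-3 (e.g. pkg-config --cflags --libs libnl-genl-3.0), this nests the driver-specific attributes in the form wl1271_tm_cmd() parses with nla_parse().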
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.h b/drivers/net/wireless/wl12xx/wl1271_testmode.h
new file mode 100644
index 00000000000..c196d28f9d9
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_testmode.h
@@ -0,0 +1,31 @@
+/*
+ * This file is part of wl1271
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contact: Luciano Coelho <luciano.coelho@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ *
+ */
+
+#ifndef __WL1271_TESTMODE_H__
+#define __WL1271_TESTMODE_H__
+
+#include <net/mac80211.h>
+
+int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len);
+
+#endif /* __WL1271_TESTMODE_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index 00af065c77c..811e739d05b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -26,6 +26,7 @@
#include "wl1271.h"
#include "wl1271_spi.h"
+#include "wl1271_io.h"
#include "wl1271_reg.h"
#include "wl1271_ps.h"
#include "wl1271_tx.h"
@@ -87,7 +88,7 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
u32 extra, struct ieee80211_tx_info *control)
{
struct wl1271_tx_hw_descr *desc;
- int pad;
+ int pad, ac;
u16 tx_attr;
desc = (struct wl1271_tx_hw_descr *) skb->data;
@@ -107,9 +108,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
/* configure the tx attributes */
tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
- /* FIXME: do we know the packet priority? can we identify mgmt
- packets, and use max prio for them at least? */
- desc->tid = 0;
+
+ /* queue */
+ ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
+ desc->tid = wl1271_tx_ac_to_tid(ac);
+
desc->aid = TX_HW_DEFAULT_AID;
desc->reserved = 0;
@@ -121,6 +124,11 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
pad = pad - skb->len;
tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
+ /* if the packets are destined for AP (have a STA entry) send them
+ with AP rate policies, otherwise use default basic rates */
+ if (control->control.sta)
+ tx_attr |= ACX_TX_AP_FULL_RATE << TX_HW_ATTR_OFST_RATE_POLICY;
+
desc->tx_attr = cpu_to_le16(tx_attr);
wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
@@ -158,11 +166,11 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
len = WL1271_TX_ALIGN(skb->len);
/* perform a fixed address block write with the packet */
- wl1271_spi_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
+ wl1271_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
/* write packet new counter into the write access register */
wl->tx_packets_count++;
- wl1271_spi_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
+ wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
desc = (struct wl1271_tx_hw_descr *) skb->data;
wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -196,6 +204,7 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
ret = wl1271_cmd_set_default_wep_key(wl, idx);
if (ret < 0)
return ret;
+ wl->default_key = idx;
}
}
@@ -214,18 +223,50 @@ static int wl1271_tx_frame(struct wl1271 *wl, struct sk_buff *skb)
return ret;
}
+static u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
+{
+ struct ieee80211_supported_band *band;
+ u32 enabled_rates = 0;
+ int bit;
+
+ band = wl->hw->wiphy->bands[wl->band];
+ for (bit = 0; bit < band->n_bitrates; bit++) {
+ if (rate_set & 0x1)
+ enabled_rates |= band->bitrates[bit].hw_value;
+ rate_set >>= 1;
+ }
+
+ return enabled_rates;
+}
+
void wl1271_tx_work(struct work_struct *work)
{
struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
struct sk_buff *skb;
bool woken_up = false;
+ u32 sta_rates = 0;
int ret;
+ /* check if the rates supported by the AP have changed */
+ if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
+ &wl->flags))) {
+ unsigned long flags;
+ spin_lock_irqsave(&wl->wl_lock, flags);
+ sta_rates = wl->sta_rate_set;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+ }
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1271_STATE_OFF))
goto out;
+ /* if rates have changed, re-configure the rate policy */
+ if (unlikely(sta_rates)) {
+ wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
+ wl1271_acx_rate_policies(wl);
+ }
+
while ((skb = skb_dequeue(&wl->tx_queue))) {
if (!woken_up) {
ret = wl1271_ps_elp_wakeup(wl, false);
@@ -240,18 +281,18 @@ void wl1271_tx_work(struct work_struct *work)
wl1271_debug(DEBUG_TX, "tx_work: fw buffer full, "
"stop queues");
ieee80211_stop_queues(wl->hw);
- wl->tx_queue_stopped = true;
+ set_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
skb_queue_head(&wl->tx_queue, skb);
goto out;
} else if (ret < 0) {
dev_kfree_skb(skb);
goto out;
- } else if (wl->tx_queue_stopped) {
+ } else if (test_and_clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED,
+ &wl->flags)) {
/* firmware buffer has space, restart queues */
wl1271_debug(DEBUG_TX,
"complete_packet: waking queues");
ieee80211_wake_queues(wl->hw);
- wl->tx_queue_stopped = false;
}
}
@@ -335,8 +376,8 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
/* read the tx results from the chipset */
- wl1271_spi_read(wl, le32_to_cpu(memmap->tx_result),
- wl->tx_res_if, sizeof(*wl->tx_res_if), false);
+ wl1271_read(wl, le32_to_cpu(memmap->tx_result),
+ wl->tx_res_if, sizeof(*wl->tx_res_if), false);
/* verify that the result buffer is not getting overrun */
if (count > TX_HW_RESULT_QUEUE_LEN) {
@@ -357,10 +398,10 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
}
/* write host counter to chipset (to ack) */
- wl1271_spi_write32(wl, le32_to_cpu(memmap->tx_result) +
- offsetof(struct wl1271_tx_hw_res_if,
- tx_result_host_counter),
- le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
+ wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
+ offsetof(struct wl1271_tx_hw_res_if,
+ tx_result_host_counter),
+ le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
}
/* caller must hold wl->mutex */
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 416396caf0a..17e405a09ca 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -123,6 +123,42 @@ struct wl1271_tx_hw_res_if {
struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
} __attribute__ ((packed));
+static inline int wl1271_tx_get_queue(int queue)
+{
+ /* FIXME: use best effort until WMM is enabled */
+ return CONF_TX_AC_BE;
+
+ switch (queue) {
+ case 0:
+ return CONF_TX_AC_VO;
+ case 1:
+ return CONF_TX_AC_VI;
+ case 2:
+ return CONF_TX_AC_BE;
+ case 3:
+ return CONF_TX_AC_BK;
+ default:
+ return CONF_TX_AC_BE;
+ }
+}
+
+/* wl1271 tx descriptor needs the tid and we need to convert it from ac */
+static inline int wl1271_tx_ac_to_tid(int ac)
+{
+ switch (ac) {
+ case 0:
+ return 0;
+ case 1:
+ return 2;
+ case 2:
+ return 4;
+ case 3:
+ return 6;
+ default:
+ return 0;
+ }
+}
+
void wl1271_tx_work(struct work_struct *work);
void wl1271_tx_complete(struct wl1271 *wl, u32 count);
void wl1271_tx_flush(struct wl1271 *wl);
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 33c8be7ec8e..6917286edca 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -875,20 +875,18 @@ static struct iw_statistics *zd1201_get_wireless_stats(struct net_device *dev)
static void zd1201_set_multicast(struct net_device *dev)
{
struct zd1201 *zd = netdev_priv(dev);
- struct dev_mc_list *mc = dev->mc_list;
+ struct dev_mc_list *mc;
unsigned char reqbuf[ETH_ALEN*ZD1201_MAXMULTI];
int i;
- if (dev->mc_count > ZD1201_MAXMULTI)
+ if (netdev_mc_count(dev) > ZD1201_MAXMULTI)
return;
- for (i=0; i<dev->mc_count; i++) {
- memcpy(reqbuf+i*ETH_ALEN, mc->dmi_addr, ETH_ALEN);
- mc = mc->next;
- }
+ i = 0;
+ netdev_for_each_mc_addr(mc, dev)
+ memcpy(reqbuf + i++ * ETH_ALEN, mc->dmi_addr, ETH_ALEN);
zd1201_setconfig(zd, ZD1201_RID_CNFGROUPADDRESS, reqbuf,
- dev->mc_count*ETH_ALEN, 0);
-
+ netdev_mc_count(dev) * ETH_ALEN, 0);
}
static int zd1201_config_commit(struct net_device *dev,
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index f14deb0c851..2d555cc3050 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -869,7 +869,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
}
static int zd_op_add_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
@@ -877,22 +877,22 @@ static int zd_op_add_interface(struct ieee80211_hw *hw,
if (mac->type != NL80211_IFTYPE_UNSPECIFIED)
return -EOPNOTSUPP;
- switch (conf->type) {
+ switch (vif->type) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_MESH_POINT:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
- mac->type = conf->type;
+ mac->type = vif->type;
break;
default:
return -EOPNOTSUPP;
}
- return zd_write_mac_addr(&mac->chip, conf->mac_addr);
+ return zd_write_mac_addr(&mac->chip, vif->addr);
}
static void zd_op_remove_interface(struct ieee80211_hw *hw,
- struct ieee80211_if_init_conf *conf)
+ struct ieee80211_vif *vif)
{
struct zd_mac *mac = zd_hw_mac(hw);
mac->type = NL80211_IFTYPE_UNSPECIFIED;
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 72d3e437e19..442fc111732 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -1079,11 +1079,15 @@ static int eject_installer(struct usb_interface *intf)
int r;
/* Find bulk out endpoint */
- endpoint = &iface_desc->endpoint[1].desc;
- if (usb_endpoint_dir_out(endpoint) &&
- usb_endpoint_xfer_bulk(endpoint)) {
- bulk_out_ep = endpoint->bEndpointAddress;
- } else {
+ for (r = 1; r >= 0; r--) {
+ endpoint = &iface_desc->endpoint[r].desc;
+ if (usb_endpoint_dir_out(endpoint) &&
+ usb_endpoint_xfer_bulk(endpoint)) {
+ bulk_out_ep = endpoint->bEndpointAddress;
+ break;
+ }
+ }
+ if (r == -1) {
dev_err(&udev->dev,
"zd1211rw: Could not find bulk out endpoint\n");
return -ENODEV;
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 8c777ba4e2b..1a74594224b 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -22,11 +22,17 @@
#include <linux/of_device.h>
#include <linux/of_platform.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
#define DRIVER_NAME "xilinx_emaclite"
/* Register offsets for the EmacLite Core */
#define XEL_TXBUFF_OFFSET 0x0 /* Transmit Buffer */
+#define XEL_MDIOADDR_OFFSET 0x07E4 /* MDIO Address Register */
+#define XEL_MDIOWR_OFFSET 0x07E8 /* MDIO Write Data Register */
+#define XEL_MDIORD_OFFSET 0x07EC /* MDIO Read Data Register */
+#define XEL_MDIOCTRL_OFFSET 0x07F0 /* MDIO Control Register */
#define XEL_GIER_OFFSET 0x07F8 /* GIE Register */
#define XEL_TSR_OFFSET 0x07FC /* Tx status */
#define XEL_TPLR_OFFSET 0x07F4 /* Tx packet length */
@@ -37,6 +43,22 @@
#define XEL_BUFFER_OFFSET 0x0800 /* Next Tx/Rx buffer's offset */
+/* MDIO Address Register Bit Masks */
+#define XEL_MDIOADDR_REGADR_MASK 0x0000001F /* Register Address */
+#define XEL_MDIOADDR_PHYADR_MASK 0x000003E0 /* PHY Address */
+#define XEL_MDIOADDR_PHYADR_SHIFT 5
+#define XEL_MDIOADDR_OP_MASK 0x00000400 /* RD/WR Operation */
+
+/* MDIO Write Data Register Bit Masks */
+#define XEL_MDIOWR_WRDATA_MASK 0x0000FFFF /* Data to be Written */
+
+/* MDIO Read Data Register Bit Masks */
+#define XEL_MDIORD_RDDATA_MASK 0x0000FFFF /* Data to be Read */
+
+/* MDIO Control Register Bit Masks */
+#define XEL_MDIOCTRL_MDIOSTS_MASK 0x00000001 /* MDIO Status Mask */
+#define XEL_MDIOCTRL_MDIOEN_MASK 0x00000008 /* MDIO Enable */
+
/* Global Interrupt Enable Register (GIER) Bit Masks */
#define XEL_GIER_GIE_MASK 0x80000000 /* Global Enable */
@@ -87,6 +109,12 @@
* @reset_lock: lock used for synchronization
* @deferred_skb: holds an skb (for transmission at a later time) when the
* Tx buffer is not free
+ * @phy_dev: pointer to the PHY device
+ * @phy_node: pointer to the PHY device node
+ * @mii_bus: pointer to the MII bus
+ * @mdio_irqs: IRQs table for MDIO bus
+ * @last_link: last link status
+ * @has_mdio: indicates whether MDIO is included in the HW
*/
struct net_local {
@@ -100,6 +128,15 @@ struct net_local {
spinlock_t reset_lock;
struct sk_buff *deferred_skb;
+
+ struct phy_device *phy_dev;
+ struct device_node *phy_node;
+
+ struct mii_bus *mii_bus;
+ int mdio_irqs[PHY_MAX_ADDR];
+
+ int last_link;
+ bool has_mdio;
};
@@ -431,7 +468,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
}
/**
- * xemaclite_set_mac_address - Set the MAC address for this device
+ * xemaclite_update_address - Update the MAC address in the device
* @drvdata: Pointer to the Emaclite device private data
* @address_ptr:Pointer to the MAC address (MAC address is a 48-bit value)
*
@@ -441,8 +478,8 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
* The MAC address can be programmed using any of the two transmit
* buffers (if configured).
*/
-static void xemaclite_set_mac_address(struct net_local *drvdata,
- u8 *address_ptr)
+static void xemaclite_update_address(struct net_local *drvdata,
+ u8 *address_ptr)
{
void __iomem *addr;
u32 reg_data;
@@ -465,6 +502,30 @@ static void xemaclite_set_mac_address(struct net_local *drvdata,
}
/**
+ * xemaclite_set_mac_address - Set the MAC address for this device
+ * @dev: Pointer to the network device instance
+ * @address: Void pointer to the sockaddr structure
+ *
+ * This function copies the HW address from the sockaddr structure to the
+ * net_device structure and updates the address in HW.
+ *
+ * Return: Error if the net device is busy or 0 if the addr is set
+ * successfully
+ */
+static int xemaclite_set_mac_address(struct net_device *dev, void *address)
+{
+ struct net_local *lp = (struct net_local *) netdev_priv(dev);
+ struct sockaddr *addr = address;
+
+ if (netif_running(dev))
+ return -EBUSY;
+
+ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+ xemaclite_update_address(lp, dev->dev_addr);
+ return 0;
+}
+
+/**
* xemaclite_tx_timeout - Callback for Tx Timeout
* @dev: Pointer to the network device
*
@@ -641,12 +702,219 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+/**********************/
+/* MDIO Bus functions */
+/**********************/
+
+/**
+ * xemaclite_mdio_wait - Wait for the MDIO to be ready to use
+ * @lp: Pointer to the Emaclite device private data
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request.
+ *
+ * Return: 0 for success or -ETIMEDOUT for a timeout
+ */
+
+static int xemaclite_mdio_wait(struct net_local *lp)
+{
+ long end = jiffies + 2;
+
+ /* Wait until the MDIO interface is no longer busy, or time out. */
+ while (in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
+ XEL_MDIOCTRL_MDIOSTS_MASK) {
+ if (end - jiffies <= 0) {
+ WARN_ON(1);
+ return -ETIMEDOUT;
+ }
+ msleep(1);
+ }
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_read - Read from a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to read from
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request and then writes the phy address to the MDIO Address register
+ * and reads the data from the MDIO Read Data register when it becomes available.
+ *
+ * Return: Value read from the MII management register
+ */
+static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+ u32 rc;
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and set the OP bit in the
+ * MDIO Address register. Set the Status bit in the MDIO Control
+ * register to start a MDIO read transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ XEL_MDIOADDR_OP_MASK |
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ rc = in_be32(lp->base_addr + XEL_MDIORD_OFFSET);
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
+ phy_id, reg, rc);
+
+ return rc;
+}
+
+/**
+ * xemaclite_mdio_write - Write to a given MII management register
+ * @bus: the mii_bus struct
+ * @phy_id: the phy address
+ * @reg: register number to write to
+ * @val: value to write to the register number specified by reg
+ *
+ * This function waits till the device is ready to accept a new MDIO
+ * request and then writes the val to the MDIO Write Data register.
+ */
+static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
+ u16 val)
+{
+ struct net_local *lp = bus->priv;
+ u32 ctrl_reg;
+
+ dev_dbg(&lp->ndev->dev,
+ "xemaclite_mdio_write(phy_id=%i, reg=%x, val=%x)\n",
+ phy_id, reg, val);
+
+ if (xemaclite_mdio_wait(lp))
+ return -ETIMEDOUT;
+
+ /* Write the PHY address, register number and clear the OP bit in the
+ * MDIO Address register and then write the value into the MDIO Write
+ * Data register. Finally, set the Status bit in the MDIO Control
+ * register to start a MDIO write transaction.
+ */
+ ctrl_reg = in_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET);
+ out_be32(lp->base_addr + XEL_MDIOADDR_OFFSET,
+ ~XEL_MDIOADDR_OP_MASK &
+ ((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg));
+ out_be32(lp->base_addr + XEL_MDIOWR_OFFSET, val);
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK);
+
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_reset - Reset the mdio bus.
+ * @bus: Pointer to the MII bus
+ *
+ * This function is required(?) as per Documentation/networking/phy.txt.
+ * There is no reset in this device; this function always returns 0.
+ */
+static int xemaclite_mdio_reset(struct mii_bus *bus)
+{
+ return 0;
+}
+
+/**
+ * xemaclite_mdio_setup - Register mii_bus for the Emaclite device
+ * @lp: Pointer to the Emaclite device private data
+ * @dev: Pointer to the parent device for the MDIO bus
+ *
+ * This function enables the MDIO bus in the Emaclite device and registers a
+ * mii_bus.
+ *
+ * Return: 0 upon success or a negative error upon failure
+ */
+static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
+{
+ struct mii_bus *bus;
+ int rc;
+ struct resource res;
+ struct device_node *np = of_get_parent(lp->phy_node);
+
+ /* Don't register the MDIO bus if the phy_node or its parent node
+ * can't be found.
+ */
+ if (!np)
+ return -ENODEV;
+
+ /* Enable the MDIO bus by asserting the enable bit in MDIO Control
+ * register.
+ */
+ out_be32(lp->base_addr + XEL_MDIOCTRL_OFFSET,
+ XEL_MDIOCTRL_MDIOEN_MASK);
+
+ bus = mdiobus_alloc();
+ if (!bus)
+ return -ENOMEM;
+
+ of_address_to_resource(np, 0, &res);
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%.8llx",
+ (unsigned long long)res.start);
+ bus->priv = lp;
+ bus->name = "Xilinx Emaclite MDIO";
+ bus->read = xemaclite_mdio_read;
+ bus->write = xemaclite_mdio_write;
+ bus->reset = xemaclite_mdio_reset;
+ bus->parent = dev;
+ bus->irq = lp->mdio_irqs; /* preallocated IRQ table */
+
+ lp->mii_bus = bus;
+
+ rc = of_mdiobus_register(bus, np);
+ if (rc)
+ goto err_register;
+
+ return 0;
+
+err_register:
+ mdiobus_free(bus);
+ return rc;
+}
+
+/**
+ * xemaclite_adjust_link - Link state callback for the Emaclite device
+ * @ndev: pointer to net_device struct
+ *
+ * There's nothing in the Emaclite device to be configured when the link
+ * state changes. We just print the status.
+ */
+void xemaclite_adjust_link(struct net_device *ndev)
+{
+ struct net_local *lp = netdev_priv(ndev);
+ struct phy_device *phy = lp->phy_dev;
+ int link_state;
+
+ /* hash together the state values to decide if something has changed */
+ link_state = phy->speed | (phy->duplex << 1) | phy->link;
+
+ if (lp->last_link != link_state) {
+ lp->last_link = link_state;
+ phy_print_status(phy);
+ }
+}
+
/**
* xemaclite_open - Open the network device
* @dev: Pointer to the network device
*
* This function sets the MAC address, requests an IRQ and enables interrupts
* for the Emaclite device and starts the Tx queue.
+ * It also connects to the phy device, if MDIO is included in the Emaclite device.
*/
static int xemaclite_open(struct net_device *dev)
{
@@ -656,14 +924,47 @@ static int xemaclite_open(struct net_device *dev)
/* Just to be safe, stop the device first */
xemaclite_disable_interrupts(lp);
+ if (lp->phy_node) {
+ u32 bmcr;
+
+ lp->phy_dev = of_phy_connect(lp->ndev, lp->phy_node,
+ xemaclite_adjust_link, 0,
+ PHY_INTERFACE_MODE_MII);
+ if (!lp->phy_dev) {
+ dev_err(&lp->ndev->dev, "of_phy_connect() failed\n");
+ return -ENODEV;
+ }
+
+ /* EmacLite doesn't support giga-bit speeds */
+ lp->phy_dev->supported &= (PHY_BASIC_FEATURES);
+ lp->phy_dev->advertising = lp->phy_dev->supported;
+
+ /* Don't advertise 1000BASE-T Full/Half duplex speeds */
+ phy_write(lp->phy_dev, MII_CTRL1000, 0);
+
+ /* Advertise only 10 and 100mbps full/half duplex speeds */
+ phy_write(lp->phy_dev, MII_ADVERTISE, ADVERTISE_ALL);
+
+ /* Restart auto negotiation */
+ bmcr = phy_read(lp->phy_dev, MII_BMCR);
+ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
+ phy_write(lp->phy_dev, MII_BMCR, bmcr);
+
+ phy_start(lp->phy_dev);
+ }
+
/* Set the MAC address each time opened */
- xemaclite_set_mac_address(lp, dev->dev_addr);
+ xemaclite_update_address(lp, dev->dev_addr);
/* Grab the IRQ */
retval = request_irq(dev->irq, xemaclite_interrupt, 0, dev->name, dev);
if (retval) {
dev_err(&lp->ndev->dev, "Could not allocate interrupt %d\n",
dev->irq);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
return retval;
}
@@ -682,6 +983,7 @@ static int xemaclite_open(struct net_device *dev)
*
* This function stops the Tx queue, disables interrupts and frees the IRQ for
* the Emaclite device.
+ * It also disconnects the phy device associated with the Emaclite device.
*/
static int xemaclite_close(struct net_device *dev)
{
@@ -691,6 +993,10 @@ static int xemaclite_close(struct net_device *dev)
xemaclite_disable_interrupts(lp);
free_irq(dev->irq, dev);
+ if (lp->phy_dev)
+ phy_disconnect(lp->phy_dev);
+ lp->phy_dev = NULL;
+
return 0;
}
@@ -754,42 +1060,6 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
}
/**
- * xemaclite_ioctl - Perform IO Control operations on the network device
- * @dev: Pointer to the network device
- * @rq: Pointer to the interface request structure
- * @cmd: IOCTL command
- *
- * The only IOCTL operation supported by this function is setting the MAC
- * address. An error is reported if any other operations are requested.
- *
- * Return: 0 to indicate success, or a negative error for failure.
- */
-static int xemaclite_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
-{
- struct net_local *lp = (struct net_local *) netdev_priv(dev);
- struct hw_addr_data *hw_addr = (struct hw_addr_data *) &rq->ifr_hwaddr;
-
- switch (cmd) {
- case SIOCETHTOOL:
- return -EIO;
-
- case SIOCSIFHWADDR:
- dev_err(&lp->ndev->dev, "SIOCSIFHWADDR\n");
-
- /* Copy MAC address in from user space */
- copy_from_user((void __force *) dev->dev_addr,
- (void __user __force *) hw_addr,
- IFHWADDRLEN);
- xemaclite_set_mac_address(lp, dev->dev_addr);
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
-/**
* xemaclite_remove_ndev - Free the network device
* @ndev: Pointer to the network device to be freed
*
@@ -840,6 +1110,8 @@ static struct net_device_ops xemaclite_netdev_ops;
* This function probes for the Emaclite device in the device tree.
* It initializes the driver data structure and the hardware, sets the MAC
* address and registers the network device.
+ * It also registers a mii_bus for the Emaclite device, if MDIO is included
+ * in the device.
*
* Return: 0, if the driver is bound to the Emaclite device, or
* a negative error if there is failure.
@@ -880,6 +1152,7 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
}
dev_set_drvdata(dev, ndev);
+ SET_NETDEV_DEV(ndev, &ofdev->dev);
ndev->irq = r_irq.start;
ndev->mem_start = r_mem.start;
@@ -923,13 +1196,14 @@ static int __devinit xemaclite_of_probe(struct of_device *ofdev,
out_be32(lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET, 0);
/* Set the MAC address in the EmacLite device */
- xemaclite_set_mac_address(lp, ndev->dev_addr);
+ xemaclite_update_address(lp, ndev->dev_addr);
- dev_info(dev,
- "MAC address is now %2x:%2x:%2x:%2x:%2x:%2x\n",
- ndev->dev_addr[0], ndev->dev_addr[1],
- ndev->dev_addr[2], ndev->dev_addr[3],
- ndev->dev_addr[4], ndev->dev_addr[5]);
+ lp->phy_node = of_parse_phandle(ofdev->node, "phy-handle", 0);
+ rc = xemaclite_mdio_setup(lp, &ofdev->dev);
+ if (rc)
+ dev_warn(&ofdev->dev, "error registering MDIO bus\n");
+
+ dev_info(dev, "MAC address is now %pM\n", ndev->dev_addr);
ndev->netdev_ops = &xemaclite_netdev_ops;
ndev->flags &= ~IFF_MULTICAST;
@@ -972,12 +1246,25 @@ static int __devexit xemaclite_of_remove(struct of_device *of_dev)
struct device *dev = &of_dev->dev;
struct net_device *ndev = dev_get_drvdata(dev);
+ struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+
+ /* Un-register the mii_bus, if configured */
+ if (lp->has_mdio) {
+ mdiobus_unregister(lp->mii_bus);
+ kfree(lp->mii_bus->irq);
+ mdiobus_free(lp->mii_bus);
+ lp->mii_bus = NULL;
+ }
+
unregister_netdev(ndev);
+ if (lp->phy_node)
+ of_node_put(lp->phy_node);
+ lp->phy_node = NULL;
+
release_mem_region(ndev->mem_start, ndev->mem_end-ndev->mem_start + 1);
xemaclite_remove_ndev(ndev);
-
dev_set_drvdata(dev, NULL);
return 0;
@@ -987,7 +1274,7 @@ static struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
.ndo_start_xmit = xemaclite_send,
- .ndo_do_ioctl = xemaclite_ioctl,
+ .ndo_set_mac_address = xemaclite_set_mac_address,
.ndo_tx_timeout = xemaclite_tx_timeout,
.ndo_get_stats = xemaclite_get_stats,
};
@@ -999,6 +1286,7 @@ static struct of_device_id xemaclite_of_match[] __devinitdata = {
{ .compatible = "xlnx,xps-ethernetlite-1.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.00.a", },
{ .compatible = "xlnx,xps-ethernetlite-2.01.a", },
+ { .compatible = "xlnx,xps-ethernetlite-3.00.a", },
{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, xemaclite_of_match);
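With the private ioctl removed above, MAC-address changes on the emaclite go through the generic .ndo_set_mac_address path, so standard tooling (ip link set dev ... address ...) or a plain SIOCSIFHWADDR ioctl now reaches xemaclite_set_mac_address(). A minimal illustrative sketch of the ioctl path, not taken from the patch; interface name and address are placeholders:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <net/if_arp.h>
#include <linux/sockios.h>

int main(void)
{
	/* Placeholder interface name and MAC address. */
	static const unsigned char mac[6] = { 0x00, 0x0a, 0x35, 0x01, 0x02, 0x03 };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_hwaddr.sa_family = ARPHRD_ETHER;
	memcpy(ifr.ifr_hwaddr.sa_data, mac, sizeof(mac));

	/* Ends up in the driver's new .ndo_set_mac_address handler. */
	ioctl(fd, SIOCSIFHWADDR, &ifr);
	close(fd);
	return 0;
}

Note the driver rejects the change with -EBUSY while the interface is running (the netif_running() check above), so the link has to be brought down first.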
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 0f773a9a3ff..7d4107f5eeb 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -23,12 +23,12 @@
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#define DRV_NAME "yellowfin"
#define DRV_VERSION "2.1"
#define DRV_RELDATE "Sep 11, 2006"
-#define PFX DRV_NAME ": "
-
/* The user-configurable values.
These may be modified when a driver module is loaded.*/
@@ -237,7 +237,7 @@ static const struct pci_id_info pci_id_tbl[] = {
{ }
};
-static const struct pci_device_id yellowfin_pci_tbl[] = {
+static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = {
{ 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
{ 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
{ }
@@ -399,7 +399,7 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
dev = alloc_etherdev(sizeof(*np));
if (!dev) {
- printk (KERN_ERR PFX "cannot allocate ethernet device\n");
+ pr_err("cannot allocate ethernet device\n");
return -ENOMEM;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -487,10 +487,10 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
if (i)
goto err_out_unmap_status;
- printk(KERN_INFO "%s: %s type %8x at %p, %pM, IRQ %d.\n",
- dev->name, pci_id_tbl[chip_idx].name,
- ioread32(ioaddr + ChipRev), ioaddr,
- dev->dev_addr, irq);
+ netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n",
+ pci_id_tbl[chip_idx].name,
+ ioread32(ioaddr + ChipRev), ioaddr,
+ dev->dev_addr, irq);
if (np->drv_flags & HasMII) {
int phy, phy_idx = 0;
@@ -499,9 +499,8 @@ static int __devinit yellowfin_init_one(struct pci_dev *pdev,
if (mii_status != 0xffff && mii_status != 0x0000) {
np->phys[phy_idx++] = phy;
np->advertising = mdio_read(ioaddr, phy, 4);
- printk(KERN_INFO "%s: MII PHY found at address %d, status "
- "0x%4.4x advertising %4.4x.\n",
- dev->name, phy, mii_status, np->advertising);
+ netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n",
+ phy, mii_status, np->advertising);
}
}
np->mii_cnt = phy_idx;
@@ -584,8 +583,8 @@ static int yellowfin_open(struct net_device *dev)
return ret;
if (yellowfin_debug > 1)
- printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
- dev->name, dev->irq);
+ netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n",
+ __func__, dev->irq);
ret = yellowfin_init_ring(dev);
if (ret) {
@@ -642,8 +641,7 @@ static int yellowfin_open(struct net_device *dev)
iowrite32(0x80008000, ioaddr + TxCtrl);
if (yellowfin_debug > 2) {
- printk(KERN_DEBUG "%s: Done yellowfin_open().\n",
- dev->name);
+ netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__);
}
/* Set the timer to check for link beat. */
@@ -664,8 +662,8 @@ static void yellowfin_timer(unsigned long data)
int next_tick = 60*HZ;
if (yellowfin_debug > 3) {
- printk(KERN_DEBUG "%s: Yellowfin timer tick, status %8.8x.\n",
- dev->name, ioread16(ioaddr + IntrStatus));
+ netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n",
+ ioread16(ioaddr + IntrStatus));
}
if (yp->mii_cnt) {
@@ -673,9 +671,8 @@ static void yellowfin_timer(unsigned long data)
int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA);
int negotiated = lpa & yp->advertising;
if (yellowfin_debug > 1)
- printk(KERN_DEBUG "%s: MII #%d status register is %4.4x, "
- "link partner capability %4.4x.\n",
- dev->name, yp->phys[0], bmsr, lpa);
+ netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n",
+ yp->phys[0], bmsr, lpa);
yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated);
@@ -696,25 +693,24 @@ static void yellowfin_tx_timeout(struct net_device *dev)
struct yellowfin_private *yp = netdev_priv(dev);
void __iomem *ioaddr = yp->base;
- printk(KERN_WARNING "%s: Yellowfin transmit timed out at %d/%d Tx "
- "status %4.4x, Rx status %4.4x, resetting...\n",
- dev->name, yp->cur_tx, yp->dirty_tx,
- ioread32(ioaddr + TxStatus), ioread32(ioaddr + RxStatus));
+ netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n",
+ yp->cur_tx, yp->dirty_tx,
+ ioread32(ioaddr + TxStatus),
+ ioread32(ioaddr + RxStatus));
/* Note: these should be KERN_DEBUG. */
if (yellowfin_debug) {
int i;
- printk(KERN_WARNING " Rx ring %p: ", yp->rx_ring);
+ pr_warning(" Rx ring %p: ", yp->rx_ring);
for (i = 0; i < RX_RING_SIZE; i++)
- printk(KERN_CONT " %8.8x",
- yp->rx_ring[i].result_status);
- printk(KERN_CONT "\n");
- printk(KERN_WARNING" Tx ring %p: ", yp->tx_ring);
+ pr_cont(" %08x", yp->rx_ring[i].result_status);
+ pr_cont("\n");
+ pr_warning(" Tx ring %p: ", yp->tx_ring);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_CONT " %4.4x /%8.8x",
+ pr_cont(" %04x /%08x",
yp->tx_status[i].tx_errs,
yp->tx_ring[i].result_status);
- printk(KERN_CONT "\n");
+ pr_cont("\n");
}
/* If the hardware is found to hang regularly, we will update the code
@@ -891,8 +887,8 @@ static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
yp->tx_full = 1;
if (yellowfin_debug > 4) {
- printk(KERN_DEBUG "%s: Yellowfin transmit frame #%d queued in slot %d.\n",
- dev->name, yp->cur_tx, entry);
+ netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n",
+ yp->cur_tx, entry);
}
return NETDEV_TX_OK;
}
@@ -916,8 +912,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
u16 intr_status = ioread16(ioaddr + IntrClear);
if (yellowfin_debug > 4)
- printk(KERN_DEBUG "%s: Yellowfin interrupt, status %4.4x.\n",
- dev->name, intr_status);
+ netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n",
+ intr_status);
if (intr_status == 0)
break;
@@ -963,13 +959,12 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
#ifndef final_version
if (yellowfin_debug > 5)
- printk(KERN_DEBUG "%s: Tx queue %d check, Tx status "
- "%4.4x %4.4x %4.4x %4.4x.\n",
- dev->name, entry,
- yp->tx_status[entry].tx_cnt,
- yp->tx_status[entry].tx_errs,
- yp->tx_status[entry].total_tx_cnt,
- yp->tx_status[entry].paused);
+ netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n",
+ entry,
+ yp->tx_status[entry].tx_cnt,
+ yp->tx_status[entry].tx_errs,
+ yp->tx_status[entry].total_tx_cnt,
+ yp->tx_status[entry].paused);
#endif
if (tx_errs == 0)
break; /* It still hasn't been Txed */
@@ -978,8 +973,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
/* There was an major error, log it. */
#ifndef final_version
if (yellowfin_debug > 1)
- printk(KERN_DEBUG "%s: Transmit error, Tx status %4.4x.\n",
- dev->name, tx_errs);
+ netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n",
+ tx_errs);
#endif
dev->stats.tx_errors++;
if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++;
@@ -989,8 +984,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
} else {
#ifndef final_version
if (yellowfin_debug > 4)
- printk(KERN_DEBUG "%s: Normal transmit, Tx status %4.4x.\n",
- dev->name, tx_errs);
+ netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n",
+ tx_errs);
#endif
dev->stats.tx_bytes += skb->len;
dev->stats.collisions += tx_errs & 15;
@@ -1008,8 +1003,8 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
#ifndef final_version
if (yp->cur_tx - dirty_tx > TX_RING_SIZE) {
- printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
- dev->name, dirty_tx, yp->cur_tx, yp->tx_full);
+ netdev_err(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d\n",
+ dirty_tx, yp->cur_tx, yp->tx_full);
dirty_tx += TX_RING_SIZE;
}
#endif
@@ -1031,16 +1026,15 @@ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance)
yellowfin_error(dev, intr_status);
if (--boguscnt < 0) {
- printk(KERN_WARNING "%s: Too much work at interrupt, "
- "status=0x%4.4x.\n",
- dev->name, intr_status);
+ netdev_warn(dev, "Too much work at interrupt, status=%#04x\n",
+ intr_status);
break;
}
} while (1);
if (yellowfin_debug > 3)
- printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
- dev->name, ioread16(ioaddr + IntrStatus));
+ netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n",
+ ioread16(ioaddr + IntrStatus));
spin_unlock (&yp->lock);
return IRQ_RETVAL(handled);
@@ -1055,9 +1049,9 @@ static int yellowfin_rx(struct net_device *dev)
int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx;
if (yellowfin_debug > 4) {
- printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %8.8x.\n",
+ printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n",
entry, yp->rx_ring[entry].result_status);
- printk(KERN_DEBUG " #%d desc. %8.8x %8.8x %8.8x.\n",
+ printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n",
entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr,
yp->rx_ring[entry].result_status);
}
@@ -1081,20 +1075,20 @@ static int yellowfin_rx(struct net_device *dev)
le32_to_cpu(desc->result_status)) & 0xffff;
frame_status = get_unaligned_le16(&(buf_addr[data_size - 2]));
if (yellowfin_debug > 4)
- printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n",
- frame_status);
+ printk(KERN_DEBUG " %s() status was %04x\n",
+ __func__, frame_status);
if (--boguscnt < 0)
break;
if ( ! (desc_status & RX_EOP)) {
if (data_size != 0)
- printk(KERN_WARNING "%s: Oversized Ethernet frame spanned multiple buffers,"
- " status %4.4x, data_size %d!\n", dev->name, desc_status, data_size);
+ netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n",
+ desc_status, data_size);
dev->stats.rx_length_errors++;
} else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) {
/* There was a error. */
if (yellowfin_debug > 3)
- printk(KERN_DEBUG " yellowfin_rx() Rx error was %4.4x.\n",
- frame_status);
+ printk(KERN_DEBUG " %s() Rx error was %04x\n",
+ __func__, frame_status);
dev->stats.rx_errors++;
if (frame_status & 0x0060) dev->stats.rx_length_errors++;
if (frame_status & 0x0008) dev->stats.rx_frame_errors++;
@@ -1118,8 +1112,8 @@ static int yellowfin_rx(struct net_device *dev)
entry*sizeof(struct yellowfin_desc)),
"\377\377\377\377\377\377", 6) != 0) {
if (bogus_rx++ == 0)
- printk(KERN_WARNING "%s: Bad frame to %pM\n",
- dev->name, buf_addr);
+ netdev_warn(dev, "Bad frame to %pM\n",
+ buf_addr);
#endif
} else {
struct sk_buff *skb;
@@ -1129,9 +1123,8 @@ static int yellowfin_rx(struct net_device *dev)
#ifndef final_version
if (yellowfin_debug > 4)
- printk(KERN_DEBUG " yellowfin_rx() normal Rx pkt length %d"
- " of %d, bogus_cnt %d.\n",
- pkt_len, data_size, boguscnt);
+ printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n",
+ __func__, pkt_len, data_size, boguscnt);
#endif
/* Check if the packet is long enough to just pass up the skbuff
without copying to a properly sized skbuff. */
@@ -1191,8 +1184,7 @@ static int yellowfin_rx(struct net_device *dev)
static void yellowfin_error(struct net_device *dev, int intr_status)
{
- printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
- dev->name, intr_status);
+ netdev_err(dev, "Something Wicked happened! %04x\n", intr_status);
/* Hmmmmm, it's not clear what to do here. */
if (intr_status & (IntrTxPCIErr | IntrTxPCIFault))
dev->stats.tx_errors++;
@@ -1209,13 +1201,13 @@ static int yellowfin_close(struct net_device *dev)
netif_stop_queue (dev);
if (yellowfin_debug > 1) {
- printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %4.4x "
- "Rx %4.4x Int %2.2x.\n",
- dev->name, ioread16(ioaddr + TxStatus),
- ioread16(ioaddr + RxStatus),
- ioread16(ioaddr + IntrStatus));
- printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
- dev->name, yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx);
+ netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n",
+ ioread16(ioaddr + TxStatus),
+ ioread16(ioaddr + RxStatus),
+ ioread16(ioaddr + IntrStatus));
+ netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n",
+ yp->cur_tx, yp->dirty_tx,
+ yp->cur_rx, yp->dirty_rx);
}
/* Disable interrupts by clearing the interrupt mask. */
@@ -1229,33 +1221,35 @@ static int yellowfin_close(struct net_device *dev)
#if defined(__i386__)
if (yellowfin_debug > 2) {
- printk(KERN_DEBUG" Tx ring at %8.8llx:\n",
+ printk(KERN_DEBUG " Tx ring at %08llx:\n",
(unsigned long long)yp->tx_ring_dma);
for (i = 0; i < TX_RING_SIZE*2; i++)
- printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x %8.8x.\n",
+ printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n",
ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? '>' : ' ',
i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr,
yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status);
printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status);
for (i = 0; i < TX_RING_SIZE; i++)
- printk(KERN_DEBUG " #%d status %4.4x %4.4x %4.4x %4.4x.\n",
+ printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n",
i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs,
yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused);
- printk(KERN_DEBUG " Rx ring %8.8llx:\n",
+ printk(KERN_DEBUG " Rx ring %08llx:\n",
(unsigned long long)yp->rx_ring_dma);
for (i = 0; i < RX_RING_SIZE; i++) {
- printk(KERN_DEBUG " %c #%d desc. %8.8x %8.8x %8.8x\n",
+ printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n",
ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ',
i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr,
yp->rx_ring[i].result_status);
if (yellowfin_debug > 6) {
if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) {
int j;
+
+ printk(KERN_DEBUG);
for (j = 0; j < 0x50; j++)
- printk(" %4.4x",
- get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
- printk("\n");
+ pr_cont(" %04x",
+ get_unaligned(((u16*)yp->rx_ring[i].addr) + j));
+ pr_cont("\n");
}
}
}
@@ -1281,8 +1275,8 @@ static int yellowfin_close(struct net_device *dev)
#ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */
if (yellowfin_debug > 0) {
- printk(KERN_DEBUG "%s: Received %d frames that we should not have.\n",
- dev->name, bogus_rx);
+ netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n",
+ bogus_rx);
}
#endif
@@ -1301,16 +1295,17 @@ static void set_rx_mode(struct net_device *dev)
iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg);
if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
iowrite16(0x000F, ioaddr + AddrMode);
- } else if ((dev->mc_count > 64) || (dev->flags & IFF_ALLMULTI)) {
+ } else if ((netdev_mc_count(dev) > 64) ||
+ (dev->flags & IFF_ALLMULTI)) {
/* Too many to filter well, or accept all multicasts. */
iowrite16(0x000B, ioaddr + AddrMode);
- } else if (dev->mc_count > 0) { /* Must use the multicast hash table. */
+ } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */
struct dev_mc_list *mclist;
u16 hash_table[4];
int i;
+
memset(hash_table, 0, sizeof(hash_table));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
unsigned int bit;
/* Due to a bug in the early chip versions, multiple filter
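
The yellowfin.c hunks above follow the netdev_* logging helpers and the
netdev_mc_count()/netdev_for_each_mc_addr() accessors. A minimal sketch of the
same conversion pattern, assuming a hypothetical driver (the function name,
the debug message, and the 64-address threshold below are placeholders, not
part of this patch):

	#include <linux/netdevice.h>

	static void example_set_rx_mode(struct net_device *dev)
	{
		struct dev_mc_list *mclist;

		if (netdev_mc_count(dev) > 64 || (dev->flags & IFF_ALLMULTI)) {
			/* Too many addresses to filter well: log it, accept all multicast */
			netdev_warn(dev, "too many multicast addresses, accepting all\n");
			return;
		}

		/* Walk the multicast list with the iterator instead of dev->mc_list */
		netdev_for_each_mc_addr(mclist, dev)
			netdev_printk(KERN_DEBUG, dev, "multicast address %pM\n",
				      mclist->dmi_addr);
	}
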
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index bc5ae0f6e93..def49d2ec69 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -313,7 +313,8 @@ static void znet_set_multicast_list (struct net_device *dev)
/* Byte D */
cfblk->dummy_1 = 1; /* set to 1 */
cfblk->tx_ifs_retrig = 3; /* Hmm... Disabled */
- cfblk->mc_all = (dev->mc_list || (dev->flags&IFF_ALLMULTI));/* multicast all mode */
+ cfblk->mc_all = (!netdev_mc_empty(dev) ||
+ (dev->flags & IFF_ALLMULTI)); /* multicast all mode */
cfblk->rcv_mon = 0; /* Monitor mode disabled */
cfblk->frag_acpt = 0; /* Do not accept fragments */
cfblk->tstrttrs = 0; /* No start transmission threshold */
diff --git a/drivers/parisc/eisa_enumerator.c b/drivers/parisc/eisa_enumerator.c
index 0be1d50645a..caa15313375 100644
--- a/drivers/parisc/eisa_enumerator.c
+++ b/drivers/parisc/eisa_enumerator.c
@@ -460,7 +460,7 @@ static int init_slot(int slot, struct eeprom_eisa_slot_info *es)
slot, id_string);
print_eisa_id(id_string, es->eisa_slot_id);
- printk(" expected %s \n", id_string);
+ printk(" expected %s\n", id_string);
return -1;
diff --git a/drivers/parisc/superio.c b/drivers/parisc/superio.c
index a35c9c5b89e..f7806d81f1e 100644
--- a/drivers/parisc/superio.c
+++ b/drivers/parisc/superio.c
@@ -169,7 +169,7 @@ superio_init(struct pci_dev *pcidev)
/* ...then properly fixup the USB to point at suckyio PIC */
sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev);
- printk(KERN_INFO PFX "Found NS87560 Legacy I/O device at %s (IRQ %i) \n",
+ printk(KERN_INFO PFX "Found NS87560 Legacy I/O device at %s (IRQ %i)\n",
pci_name(pdev), pdev->irq);
pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base);
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index ad113b0f62d..0950fa40684 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -2908,6 +2908,7 @@ enum parport_pc_pci_cards {
netmos_9805,
netmos_9815,
netmos_9901,
+ netmos_9865,
quatech_sppxp100,
};
@@ -2989,6 +2990,7 @@ static struct parport_pc_pci {
/* netmos_9805 */ { 1, { { 0, -1 }, } },
/* netmos_9815 */ { 2, { { 0, -1 }, { 2, -1 }, } },
/* netmos_9901 */ { 1, { { 0, -1 }, } },
+ /* netmos_9865 */ { 1, { { 0, -1 }, } },
/* quatech_sppxp100 */ { 1, { { 0, 1 }, } },
};
@@ -3092,6 +3094,10 @@ static const struct pci_device_id parport_pc_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, netmos_9815 },
{ PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9901,
0xA000, 0x2000, 0, 0, netmos_9901 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x1000, 0, 0, netmos_9865 },
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x2000, 0, 0, netmos_9865 },
/* Quatech SPPXP-100 Parallel port PCI ExpressCard */
{ PCI_VENDOR_ID_QUATECH, PCI_DEVICE_ID_QUATECH_SPPXP_100,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, quatech_sppxp100 },
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 8674c1ebe97..3d102dd87c9 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -4,7 +4,7 @@
obj-y += access.o bus.o probe.o remove.o pci.o \
pci-driver.o search.o pci-sysfs.o rom.o setup-res.o \
- irq.o
+ irq.o vpd.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index ec73294d1fa..e2dc289f767 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -40,7 +40,7 @@ static void update_resources (struct bus_node *bus_cur, int type, int rangeno);
static int once_over (void);
static int remove_ranges (struct bus_node *, struct bus_node *);
static int update_bridge_ranges (struct bus_node **);
-static int add_range (int type, struct range_node *, struct bus_node *);
+static int add_bus_range (int type, struct range_node *, struct bus_node *);
static void fix_resources (struct bus_node *);
static struct bus_node *find_bus_wprev (u8, struct bus_node **, u8);
@@ -133,7 +133,7 @@ static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node
newrange->rangeno = 1;
else {
/* need to insert our range */
- add_range (flag, newrange, newbus);
+ add_bus_range (flag, newrange, newbus);
debug ("%d resource Primary Bus inserted on bus %x [%x - %x]\n", flag, newbus->busno, newrange->start, newrange->end);
}
@@ -384,7 +384,7 @@ int __init ibmphp_rsrc_init (void)
* Input: type of the resource, range to add, current bus
* Output: 0 or -1, bus and range ptrs
********************************************************************************/
-static int add_range (int type, struct range_node *range, struct bus_node *bus_cur)
+static int add_bus_range (int type, struct range_node *range, struct bus_node *bus_cur)
{
struct range_node *range_cur = NULL;
struct range_node *range_prev;
@@ -455,7 +455,7 @@ static int add_range (int type, struct range_node *range, struct bus_node *bus_c
/*******************************************************************************
* This routine goes through the list of resources of type 'type' and updates
- * the range numbers that they correspond to. It was called from add_range fnc
+ * the range numbers that they correspond to. It is called from the add_bus_range function
*
* Input: bus, type of the resource, the rangeno starting from which to update
******************************************************************************/
@@ -1999,7 +1999,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
if (bus_sec->noIORanges > 0) {
if (!range_exists_already (range, bus_sec, IO)) {
- add_range (IO, range, bus_sec);
+ add_bus_range (IO, range, bus_sec);
++bus_sec->noIORanges;
} else {
kfree (range);
@@ -2048,7 +2048,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
if (bus_sec->noMemRanges > 0) {
if (!range_exists_already (range, bus_sec, MEM)) {
- add_range (MEM, range, bus_sec);
+ add_bus_range (MEM, range, bus_sec);
++bus_sec->noMemRanges;
} else {
kfree (range);
@@ -2102,7 +2102,7 @@ static int __init update_bridge_ranges (struct bus_node **bus)
if (bus_sec->noPFMemRanges > 0) {
if (!range_exists_already (range, bus_sec, PFMEM)) {
- add_range (PFMEM, range, bus_sec);
+ add_bus_range (PFMEM, range, bus_sec);
++bus_sec->noPFMemRanges;
} else {
kfree (range);
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index b2a448e19fe..3e5ab2bf6a5 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -706,6 +706,21 @@ irqreturn_t pci_sriov_migration(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_sriov_migration);
+/**
+ * pci_num_vf - return number of VFs associated with a PF
+ * @dev: the PCI device
+ *
+ * Returns number of VFs, or 0 if SR-IOV is not enabled.
+ */
+int pci_num_vf(struct pci_dev *dev)
+{
+ if (!dev || !dev->is_physfn)
+ return 0;
+ else
+ return dev->sriov->nr_virtfn;
+}
+EXPORT_SYMBOL_GPL(pci_num_vf);
+
static int ats_alloc_one(struct pci_dev *dev, int ps)
{
int pos;
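
The pci_num_vf() helper added above lets a PF driver ask how many VFs are
currently enabled. A minimal usage sketch, assuming a hypothetical PF driver
(the function name here is a placeholder, not part of this patch):

	#include <linux/pci.h>

	static void example_sriov_teardown(struct pci_dev *pdev)
	{
		/* pci_num_vf() returns 0 when SR-IOV is not enabled on this PF */
		if (pci_num_vf(pdev) > 0)
			pci_disable_sriov(pdev);
	}
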
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 790eb69a4aa..81d19d5683a 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2529,9 +2529,95 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1518, quirk_i82576_sriov);
#endif /* CONFIG_PCI_IOV */
+/*
+ * This is a quirk for the Ricoh MMC controller found as a part of
+ * some multifunction chips.
+ *
+ * This is very similar to, and based on, the ricoh_mmc driver written by
+ * Philip Langdale. Thank you for these magic sequences.
+ *
+ * These chips implement the four main memory card controllers (SD, MMC,
+ * MS, xD) and one or both of cardbus or firewire.
+ *
+ * It happens that they implement SD and MMC support as separate
+ * controllers (and PCI functions). The Linux SDHCI driver supports MMC
+ * cards, but the chip detects MMC cards in hardware and directs them to
+ * the MMC controller - so the SDHCI driver never sees them.
+ *
+ * To get around this, we must disable the useless MMC controller.
+ * At that point, the SDHCI controller will start seeing them.
+ * The relevant PCI registers to deactivate the MMC controller appear to
+ * live on PCI function 0, which may be either the cardbus or the firewire
+ * controller, depending on the particular chip in question.
+ *
+ * This has to be done early, because as soon as we disable the MMC
+ * controller, other PCI functions shift up one level, e.g. function #2
+ * becomes function #1, and this will confuse the PCI core.
+ */
+
+#ifdef CONFIG_MMC_RICOH_MMC
+static void ricoh_mmc_fixup_rl5c476(struct pci_dev *dev)
+{
+ /* disable via cardbus interface */
+ u8 write_enable;
+ u8 write_target;
+ u8 disable;
+
+ /* disable must be done via function #0 */
+ if (PCI_FUNC(dev->devfn))
+ return;
+
+ pci_read_config_byte(dev, 0xB7, &disable);
+ if (disable & 0x02)
+ return;
+
+ pci_read_config_byte(dev, 0x8E, &write_enable);
+ pci_write_config_byte(dev, 0x8E, 0xAA);
+ pci_read_config_byte(dev, 0x8D, &write_target);
+ pci_write_config_byte(dev, 0x8D, 0xB7);
+ pci_write_config_byte(dev, 0xB7, disable | 0x02);
+ pci_write_config_byte(dev, 0x8E, write_enable);
+ pci_write_config_byte(dev, 0x8D, write_target);
+
+ dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via cardbus function)\n");
+ dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, ricoh_mmc_fixup_rl5c476);
+
+static void ricoh_mmc_fixup_r5c832(struct pci_dev *dev)
+{
+ /* disable via firewire interface */
+ u8 write_enable;
+ u8 disable;
+
+ /* disable must be done via function #0 */
+ if (PCI_FUNC(dev->devfn))
+ return;
+
+ pci_read_config_byte(dev, 0xCB, &disable);
+
+ if (disable & 0x02)
+ return;
+
+ pci_read_config_byte(dev, 0xCA, &write_enable);
+ pci_write_config_byte(dev, 0xCA, 0x57);
+ pci_write_config_byte(dev, 0xCB, disable | 0x02);
+ pci_write_config_byte(dev, 0xCA, write_enable);
+
+ dev_notice(&dev->dev, "proprietary Ricoh MMC controller disabled (via firewire function)\n");
+ dev_notice(&dev->dev, "MMC cards are now supported by standard SDHCI controller\n");
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_R5C832, ricoh_mmc_fixup_r5c832);
+#endif /*CONFIG_MMC_RICOH_MMC*/
+
+
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
struct pci_fixup *end)
{
diff --git a/drivers/pci/vpd.c b/drivers/pci/vpd.c
new file mode 100644
index 00000000000..a5a5ca17cfe
--- /dev/null
+++ b/drivers/pci/vpd.c
@@ -0,0 +1,61 @@
+/*
+ * File: vpd.c
+ * Purpose: Provide PCI VPD support
+ *
+ * Copyright (C) 2010 Broadcom Corporation.
+ */
+
+#include <linux/pci.h>
+
+int pci_vpd_find_tag(const u8 *buf, unsigned int off, unsigned int len, u8 rdt)
+{
+ int i;
+
+ for (i = off; i < len; ) {
+ u8 val = buf[i];
+
+ if (val & PCI_VPD_LRDT) {
+ /* Don't return success if the tag isn't complete */
+ if (i + PCI_VPD_LRDT_TAG_SIZE > len)
+ break;
+
+ if (val == rdt)
+ return i;
+
+ i += PCI_VPD_LRDT_TAG_SIZE +
+ pci_vpd_lrdt_size(&buf[i]);
+ } else {
+ u8 tag = val & ~PCI_VPD_SRDT_LEN_MASK;
+
+ if (tag == rdt)
+ return i;
+
+ if (tag == PCI_VPD_SRDT_END)
+ break;
+
+ i += PCI_VPD_SRDT_TAG_SIZE +
+ pci_vpd_srdt_size(&buf[i]);
+ }
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_tag);
+
+int pci_vpd_find_info_keyword(const u8 *buf, unsigned int off,
+ unsigned int len, const char *kw)
+{
+ int i;
+
+ for (i = off; i + PCI_VPD_INFO_FLD_HDR_SIZE <= off + len;) {
+ if (buf[i + 0] == kw[0] &&
+ buf[i + 1] == kw[1])
+ return i;
+
+ i += PCI_VPD_INFO_FLD_HDR_SIZE +
+ pci_vpd_info_field_size(&buf[i]);
+ }
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(pci_vpd_find_info_keyword);
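
The two helpers in the new vpd.c are meant to be combined with pci_read_vpd()
by drivers that need a specific VPD field. A simplified sketch, assuming the
PCI_VPD_* helper macros from <linux/pci.h> and a hypothetical caller (the
function name, 256-byte buffer, and error handling are illustrative only):

	#include <linux/pci.h>
	#include <linux/string.h>

	static int example_read_partno(struct pci_dev *pdev, char *out, size_t outlen)
	{
		u8 vpd[256];
		ssize_t len;
		int ro_start, kw;
		unsigned int field_len;

		/* Read the start of the device's VPD image */
		len = pci_read_vpd(pdev, 0, sizeof(vpd), vpd);
		if (len <= 0)
			return -ENODEV;

		/* Locate the read-only large resource data type */
		ro_start = pci_vpd_find_tag(vpd, 0, len, PCI_VPD_LRDT_RO_DATA);
		if (ro_start < 0)
			return ro_start;

		/* Find the "PN" (part number) keyword inside the RO section */
		kw = pci_vpd_find_info_keyword(vpd, ro_start + PCI_VPD_LRDT_TAG_SIZE,
					       pci_vpd_lrdt_size(&vpd[ro_start]),
					       PCI_VPD_RO_KEYWORD_PARTNO);
		if (kw < 0)
			return kw;

		field_len = pci_vpd_info_field_size(&vpd[kw]);
		if (field_len >= outlen)
			field_len = outlen - 1;
		memcpy(out, &vpd[kw + PCI_VPD_INFO_FLD_HDR_SIZE], field_len);
		out[field_len] = '\0';
		return 0;
	}
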
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index 0a6601c7680..d189e4743e6 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -51,17 +51,23 @@ config PCMCIA_LOAD_CIS
config PCMCIA_IOCTL
bool "PCMCIA control ioctl (obsolete)"
- depends on PCMCIA
+ depends on PCMCIA && ARM && !SMP && !PREEMPT
default y
help
If you say Y here, the deprecated ioctl interface to the PCMCIA
- subsystem will be built. It is needed by cardmgr and cardctl
- (pcmcia-cs) to function properly.
+ subsystem will be built. It is needed by the deprecated pcmcia-cs
+ tools (cardmgr, cardctl) to function properly.
You should use the new pcmciautils package instead (see
<file:Documentation/Changes> for location and details).
- If unsure, say Y.
+ This config option will most likely be removed from kernel 2.6.35,
+ and the associated code from kernel 2.6.36.
+
+ As the PCMCIA ioctl is not locking safe, it depends on !SMP and
+ !PREEMPT.
+
+ If unsure, say N.
config CARDBUS
bool "32-bit CardBus support"
diff --git a/drivers/pcmcia/cardbus.c b/drivers/pcmcia/cardbus.c
index ac0686efbf7..e6ab2a47d8c 100644
--- a/drivers/pcmcia/cardbus.c
+++ b/drivers/pcmcia/cardbus.c
@@ -71,7 +71,7 @@ int __ref cb_alloc(struct pcmcia_socket *s)
unsigned int max, pass;
s->functions = pci_scan_slot(bus, PCI_DEVFN(0, 0));
- pci_fixup_cardbus(bus);
+ pci_fixup_cardbus(bus);
max = bus->secondary;
for (pass = 0; pass < 2; pass++)
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c
index 2f3622dd4b6..f230f6543bf 100644
--- a/drivers/pcmcia/cistpl.c
+++ b/drivers/pcmcia/cistpl.c
@@ -54,46 +54,44 @@ static const u_int exponent[] = {
/* Upper limit on reasonable # of tuples */
#define MAX_TUPLES 200
-/*====================================================================*/
-
-/* Parameters that can be set with 'insmod' */
-
/* 16-bit CIS? */
static int cis_width;
module_param(cis_width, int, 0444);
void release_cis_mem(struct pcmcia_socket *s)
{
- mutex_lock(&s->ops_mutex);
- if (s->cis_mem.flags & MAP_ACTIVE) {
- s->cis_mem.flags &= ~MAP_ACTIVE;
- s->ops->set_mem_map(s, &s->cis_mem);
- if (s->cis_mem.res) {
- release_resource(s->cis_mem.res);
- kfree(s->cis_mem.res);
- s->cis_mem.res = NULL;
+ mutex_lock(&s->ops_mutex);
+ if (s->cis_mem.flags & MAP_ACTIVE) {
+ s->cis_mem.flags &= ~MAP_ACTIVE;
+ s->ops->set_mem_map(s, &s->cis_mem);
+ if (s->cis_mem.res) {
+ release_resource(s->cis_mem.res);
+ kfree(s->cis_mem.res);
+ s->cis_mem.res = NULL;
+ }
+ iounmap(s->cis_virt);
+ s->cis_virt = NULL;
}
- iounmap(s->cis_virt);
- s->cis_virt = NULL;
- }
- mutex_unlock(&s->ops_mutex);
+ mutex_unlock(&s->ops_mutex);
}
-/*
- * Map the card memory at "card_offset" into virtual space.
+/**
+ * set_cis_map() - map the card memory at "card_offset" into virtual space.
+ *
* If flags & MAP_ATTRIB, map the attribute space, otherwise
* map the memory space.
*
* Must be called with ops_mutex held.
*/
-static void __iomem *
-set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flags)
+static void __iomem *set_cis_map(struct pcmcia_socket *s,
+ unsigned int card_offset, unsigned int flags)
{
pccard_mem_map *mem = &s->cis_mem;
int ret;
if (!(s->features & SS_CAP_STATIC_MAP) && (mem->res == NULL)) {
- mem->res = pcmcia_find_mem_region(0, s->map_size, s->map_size, 0, s);
+ mem->res = pcmcia_find_mem_region(0, s->map_size,
+ s->map_size, 0, s);
if (mem->res == NULL) {
dev_printk(KERN_NOTICE, &s->dev,
"cs: unable to map card memory!\n");
@@ -124,165 +122,170 @@ set_cis_map(struct pcmcia_socket *s, unsigned int card_offset, unsigned int flag
return s->cis_virt;
}
-/*======================================================================
-
- Low-level functions to read and write CIS memory. I think the
- write routine is only useful for writing one-byte registers.
-
-======================================================================*/
/* Bits in attr field */
#define IS_ATTR 1
#define IS_INDIRECT 8
+/**
+ * pcmcia_read_cis_mem() - low-level function to read CIS memory
+ */
int pcmcia_read_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
u_int len, void *ptr)
{
- void __iomem *sys, *end;
- unsigned char *buf = ptr;
-
- dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
-
- mutex_lock(&s->ops_mutex);
- if (attr & IS_INDIRECT) {
- /* Indirect accesses use a bunch of special registers at fixed
- locations in common memory */
- u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
- if (attr & IS_ATTR) {
- addr *= 2;
- flags = ICTRL0_AUTOINC;
- }
+ void __iomem *sys, *end;
+ unsigned char *buf = ptr;
- sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0));
- if (!sys) {
- dev_dbg(&s->dev, "could not map memory\n");
- memset(ptr, 0xff, len);
- mutex_unlock(&s->ops_mutex);
- return -1;
- }
+ dev_dbg(&s->dev, "pcmcia_read_cis_mem(%d, %#x, %u)\n", attr, addr, len);
- writeb(flags, sys+CISREG_ICTRL0);
- writeb(addr & 0xff, sys+CISREG_IADDR0);
- writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
- writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
- writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
- for ( ; len > 0; len--, buf++)
- *buf = readb(sys+CISREG_IDATA0);
- } else {
- u_int inc = 1, card_offset, flags;
-
- if (addr > CISTPL_MAX_CIS_SIZE)
- dev_dbg(&s->dev, "attempt to read CIS mem at addr %#x", addr);
-
- flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
- if (attr) {
- flags |= MAP_ATTRIB;
- inc++;
- addr *= 2;
- }
+ mutex_lock(&s->ops_mutex);
+ if (attr & IS_INDIRECT) {
+ /* Indirect accesses use a bunch of special registers at fixed
+ locations in common memory */
+ u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
+ if (attr & IS_ATTR) {
+ addr *= 2;
+ flags = ICTRL0_AUTOINC;
+ }
- card_offset = addr & ~(s->map_size-1);
- while (len) {
- sys = set_cis_map(s, card_offset, flags);
- if (!sys) {
- dev_dbg(&s->dev, "could not map memory\n");
- memset(ptr, 0xff, len);
- mutex_unlock(&s->ops_mutex);
- return -1;
- }
- end = sys + s->map_size;
- sys = sys + (addr & (s->map_size-1));
- for ( ; len > 0; len--, buf++, sys += inc) {
- if (sys == end)
- break;
- *buf = readb(sys);
- }
- card_offset += s->map_size;
- addr = 0;
+ sys = set_cis_map(s, 0, MAP_ACTIVE |
+ ((cis_width) ? MAP_16BIT : 0));
+ if (!sys) {
+ dev_dbg(&s->dev, "could not map memory\n");
+ memset(ptr, 0xff, len);
+ mutex_unlock(&s->ops_mutex);
+ return -1;
+ }
+
+ writeb(flags, sys+CISREG_ICTRL0);
+ writeb(addr & 0xff, sys+CISREG_IADDR0);
+ writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
+ writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
+ writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
+ for ( ; len > 0; len--, buf++)
+ *buf = readb(sys+CISREG_IDATA0);
+ } else {
+ u_int inc = 1, card_offset, flags;
+
+ if (addr > CISTPL_MAX_CIS_SIZE)
+ dev_dbg(&s->dev,
+ "attempt to read CIS mem at addr %#x", addr);
+
+ flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
+ if (attr) {
+ flags |= MAP_ATTRIB;
+ inc++;
+ addr *= 2;
+ }
+
+ card_offset = addr & ~(s->map_size-1);
+ while (len) {
+ sys = set_cis_map(s, card_offset, flags);
+ if (!sys) {
+ dev_dbg(&s->dev, "could not map memory\n");
+ memset(ptr, 0xff, len);
+ mutex_unlock(&s->ops_mutex);
+ return -1;
+ }
+ end = sys + s->map_size;
+ sys = sys + (addr & (s->map_size-1));
+ for ( ; len > 0; len--, buf++, sys += inc) {
+ if (sys == end)
+ break;
+ *buf = readb(sys);
+ }
+ card_offset += s->map_size;
+ addr = 0;
+ }
}
- }
- mutex_unlock(&s->ops_mutex);
- dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
- *(u_char *)(ptr+0), *(u_char *)(ptr+1),
- *(u_char *)(ptr+2), *(u_char *)(ptr+3));
- return 0;
+ mutex_unlock(&s->ops_mutex);
+ dev_dbg(&s->dev, " %#2.2x %#2.2x %#2.2x %#2.2x ...\n",
+ *(u_char *)(ptr+0), *(u_char *)(ptr+1),
+ *(u_char *)(ptr+2), *(u_char *)(ptr+3));
+ return 0;
}
+/**
+ * pcmcia_write_cis_mem() - low-level function to write CIS memory
+ *
+ * Probably only useful for writing one-byte registers.
+ */
void pcmcia_write_cis_mem(struct pcmcia_socket *s, int attr, u_int addr,
u_int len, void *ptr)
{
- void __iomem *sys, *end;
- unsigned char *buf = ptr;
-
- dev_dbg(&s->dev, "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
-
- mutex_lock(&s->ops_mutex);
- if (attr & IS_INDIRECT) {
- /* Indirect accesses use a bunch of special registers at fixed
- locations in common memory */
- u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
- if (attr & IS_ATTR) {
- addr *= 2;
- flags = ICTRL0_AUTOINC;
- }
+ void __iomem *sys, *end;
+ unsigned char *buf = ptr;
- sys = set_cis_map(s, 0, MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0));
- if (!sys) {
- dev_dbg(&s->dev, "could not map memory\n");
- mutex_unlock(&s->ops_mutex);
- return; /* FIXME: Error */
- }
+ dev_dbg(&s->dev,
+ "pcmcia_write_cis_mem(%d, %#x, %u)\n", attr, addr, len);
- writeb(flags, sys+CISREG_ICTRL0);
- writeb(addr & 0xff, sys+CISREG_IADDR0);
- writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
- writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
- writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
- for ( ; len > 0; len--, buf++)
- writeb(*buf, sys+CISREG_IDATA0);
- } else {
- u_int inc = 1, card_offset, flags;
-
- flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
- if (attr & IS_ATTR) {
- flags |= MAP_ATTRIB;
- inc++;
- addr *= 2;
- }
+ mutex_lock(&s->ops_mutex);
+ if (attr & IS_INDIRECT) {
+ /* Indirect accesses use a bunch of special registers at fixed
+ locations in common memory */
+ u_char flags = ICTRL0_COMMON|ICTRL0_AUTOINC|ICTRL0_BYTEGRAN;
+ if (attr & IS_ATTR) {
+ addr *= 2;
+ flags = ICTRL0_AUTOINC;
+ }
- card_offset = addr & ~(s->map_size-1);
- while (len) {
- sys = set_cis_map(s, card_offset, flags);
- if (!sys) {
- dev_dbg(&s->dev, "could not map memory\n");
- mutex_unlock(&s->ops_mutex);
- return; /* FIXME: error */
- }
-
- end = sys + s->map_size;
- sys = sys + (addr & (s->map_size-1));
- for ( ; len > 0; len--, buf++, sys += inc) {
- if (sys == end)
- break;
- writeb(*buf, sys);
- }
- card_offset += s->map_size;
- addr = 0;
- }
- }
- mutex_unlock(&s->ops_mutex);
-}
+ sys = set_cis_map(s, 0, MAP_ACTIVE |
+ ((cis_width) ? MAP_16BIT : 0));
+ if (!sys) {
+ dev_dbg(&s->dev, "could not map memory\n");
+ mutex_unlock(&s->ops_mutex);
+ return; /* FIXME: Error */
+ }
+
+ writeb(flags, sys+CISREG_ICTRL0);
+ writeb(addr & 0xff, sys+CISREG_IADDR0);
+ writeb((addr>>8) & 0xff, sys+CISREG_IADDR1);
+ writeb((addr>>16) & 0xff, sys+CISREG_IADDR2);
+ writeb((addr>>24) & 0xff, sys+CISREG_IADDR3);
+ for ( ; len > 0; len--, buf++)
+ writeb(*buf, sys+CISREG_IDATA0);
+ } else {
+ u_int inc = 1, card_offset, flags;
+ flags = MAP_ACTIVE | ((cis_width) ? MAP_16BIT : 0);
+ if (attr & IS_ATTR) {
+ flags |= MAP_ATTRIB;
+ inc++;
+ addr *= 2;
+ }
-/*======================================================================
+ card_offset = addr & ~(s->map_size-1);
+ while (len) {
+ sys = set_cis_map(s, card_offset, flags);
+ if (!sys) {
+ dev_dbg(&s->dev, "could not map memory\n");
+ mutex_unlock(&s->ops_mutex);
+ return; /* FIXME: error */
+ }
- This is a wrapper around read_cis_mem, with the same interface,
- but which caches information, for cards whose CIS may not be
- readable all the time.
+ end = sys + s->map_size;
+ sys = sys + (addr & (s->map_size-1));
+ for ( ; len > 0; len--, buf++, sys += inc) {
+ if (sys == end)
+ break;
+ writeb(*buf, sys);
+ }
+ card_offset += s->map_size;
+ addr = 0;
+ }
+ }
+ mutex_unlock(&s->ops_mutex);
+}
-======================================================================*/
+/**
+ * read_cis_cache() - read CIS memory or its associated cache
+ *
+ * This is a wrapper around read_cis_mem, with the same interface,
+ * but which caches information, for cards whose CIS may not be
+ * readable all the time.
+ */
static int read_cis_cache(struct pcmcia_socket *s, int attr, u_int addr,
size_t len, void *ptr)
{
@@ -353,7 +356,6 @@ remove_cis_cache(struct pcmcia_socket *s, int attr, u_int addr, u_int len)
* This destroys the CIS cache but keeps any fake CIS alive. Must be
* called with ops_mutex held.
*/
-
void destroy_cis_cache(struct pcmcia_socket *s)
{
struct list_head *l, *n;
@@ -366,13 +368,9 @@ void destroy_cis_cache(struct pcmcia_socket *s)
}
}
-/*======================================================================
-
- This verifies if the CIS of a card matches what is in the CIS
- cache.
-
-======================================================================*/
-
+/**
+ * verify_cis_cache() - does the CIS match what is in the CIS cache?
+ */
int verify_cis_cache(struct pcmcia_socket *s)
{
struct cis_cache_entry *cis;
@@ -404,13 +402,12 @@ int verify_cis_cache(struct pcmcia_socket *s)
return 0;
}
-/*======================================================================
-
- For really bad cards, we provide a facility for uploading a
- replacement CIS.
-
-======================================================================*/
-
+/**
+ * pcmcia_replace_cis() - use a replacement CIS instead of the card's CIS
+ *
+ * For really bad cards, we provide a facility for uploading a
+ * replacement CIS.
+ */
int pcmcia_replace_cis(struct pcmcia_socket *s,
const u8 *data, const size_t len)
{
@@ -433,17 +430,13 @@ int pcmcia_replace_cis(struct pcmcia_socket *s,
return 0;
}
-/*======================================================================
-
- The high-level CIS tuple services
-
-======================================================================*/
+/* The high-level CIS tuple services */
typedef struct tuple_flags {
- u_int link_space:4;
- u_int has_link:1;
- u_int mfc_fn:3;
- u_int space:4;
+ u_int link_space:4;
+ u_int has_link:1;
+ u_int mfc_fn:3;
+ u_int space:4;
} tuple_flags;
#define LINK_SPACE(f) (((tuple_flags *)(&(f)))->link_space)
@@ -451,982 +444,961 @@ typedef struct tuple_flags {
#define MFC_FN(f) (((tuple_flags *)(&(f)))->mfc_fn)
#define SPACE(f) (((tuple_flags *)(&(f)))->space)
-int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple)
+int pccard_get_first_tuple(struct pcmcia_socket *s, unsigned int function,
+ tuple_t *tuple)
{
- if (!s)
- return -EINVAL;
-
- if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
- return -ENODEV;
- tuple->TupleLink = tuple->Flags = 0;
-
- /* Assume presence of a LONGLINK_C to address 0 */
- tuple->CISOffset = tuple->LinkOffset = 0;
- SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
-
- if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
- cisdata_t req = tuple->DesiredTuple;
- tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
- if (pccard_get_next_tuple(s, function, tuple) == 0) {
- tuple->DesiredTuple = CISTPL_LINKTARGET;
- if (pccard_get_next_tuple(s, function, tuple) != 0)
- return -ENOSPC;
- } else
- tuple->CISOffset = tuple->TupleLink = 0;
- tuple->DesiredTuple = req;
- }
- return pccard_get_next_tuple(s, function, tuple);
+ if (!s)
+ return -EINVAL;
+
+ if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
+ return -ENODEV;
+ tuple->TupleLink = tuple->Flags = 0;
+
+ /* Assume presence of a LONGLINK_C to address 0 */
+ tuple->CISOffset = tuple->LinkOffset = 0;
+ SPACE(tuple->Flags) = HAS_LINK(tuple->Flags) = 1;
+
+ if ((s->functions > 1) && !(tuple->Attributes & TUPLE_RETURN_COMMON)) {
+ cisdata_t req = tuple->DesiredTuple;
+ tuple->DesiredTuple = CISTPL_LONGLINK_MFC;
+ if (pccard_get_next_tuple(s, function, tuple) == 0) {
+ tuple->DesiredTuple = CISTPL_LINKTARGET;
+ if (pccard_get_next_tuple(s, function, tuple) != 0)
+ return -ENOSPC;
+ } else
+ tuple->CISOffset = tuple->TupleLink = 0;
+ tuple->DesiredTuple = req;
+ }
+ return pccard_get_next_tuple(s, function, tuple);
}
static int follow_link(struct pcmcia_socket *s, tuple_t *tuple)
{
- u_char link[5];
- u_int ofs;
- int ret;
-
- if (MFC_FN(tuple->Flags)) {
- /* Get indirect link from the MFC tuple */
- ret = read_cis_cache(s, LINK_SPACE(tuple->Flags),
- tuple->LinkOffset, 5, link);
- if (ret)
+ u_char link[5];
+ u_int ofs;
+ int ret;
+
+ if (MFC_FN(tuple->Flags)) {
+ /* Get indirect link from the MFC tuple */
+ ret = read_cis_cache(s, LINK_SPACE(tuple->Flags),
+ tuple->LinkOffset, 5, link);
+ if (ret)
+ return -1;
+ ofs = get_unaligned_le32(link + 1);
+ SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
+ /* Move to the next indirect link */
+ tuple->LinkOffset += 5;
+ MFC_FN(tuple->Flags)--;
+ } else if (HAS_LINK(tuple->Flags)) {
+ ofs = tuple->LinkOffset;
+ SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags);
+ HAS_LINK(tuple->Flags) = 0;
+ } else
return -1;
- ofs = get_unaligned_le32(link + 1);
- SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR);
- /* Move to the next indirect link */
- tuple->LinkOffset += 5;
- MFC_FN(tuple->Flags)--;
- } else if (HAS_LINK(tuple->Flags)) {
- ofs = tuple->LinkOffset;
- SPACE(tuple->Flags) = LINK_SPACE(tuple->Flags);
- HAS_LINK(tuple->Flags) = 0;
- } else {
- return -1;
- }
- if (SPACE(tuple->Flags)) {
- /* This is ugly, but a common CIS error is to code the long
- link offset incorrectly, so we check the right spot... */
+
+ if (SPACE(tuple->Flags)) {
+ /* This is ugly, but a common CIS error is to code the long
+ link offset incorrectly, so we check the right spot... */
+ ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
+ if (ret)
+ return -1;
+ if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
+ (strncmp(link+2, "CIS", 3) == 0))
+ return ofs;
+ remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
+ /* Then, we try the wrong spot... */
+ ofs = ofs >> 1;
+ }
ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
if (ret)
return -1;
if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
- (strncmp(link+2, "CIS", 3) == 0))
- return ofs;
+ (strncmp(link+2, "CIS", 3) == 0))
+ return ofs;
remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
- /* Then, we try the wrong spot... */
- ofs = ofs >> 1;
- }
- ret = read_cis_cache(s, SPACE(tuple->Flags), ofs, 5, link);
- if (ret)
- return -1;
- if ((link[0] == CISTPL_LINKTARGET) && (link[1] >= 3) &&
- (strncmp(link+2, "CIS", 3) == 0))
- return ofs;
- remove_cis_cache(s, SPACE(tuple->Flags), ofs, 5);
- return -1;
+ return -1;
}
-int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function, tuple_t *tuple)
+int pccard_get_next_tuple(struct pcmcia_socket *s, unsigned int function,
+ tuple_t *tuple)
{
- u_char link[2], tmp;
- int ofs, i, attr;
- int ret;
-
- if (!s)
- return -EINVAL;
- if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
- return -ENODEV;
-
- link[1] = tuple->TupleLink;
- ofs = tuple->CISOffset + tuple->TupleLink;
- attr = SPACE(tuple->Flags);
-
- for (i = 0; i < MAX_TUPLES; i++) {
- if (link[1] == 0xff) {
- link[0] = CISTPL_END;
- } else {
- ret = read_cis_cache(s, attr, ofs, 2, link);
- if (ret)
- return -1;
- if (link[0] == CISTPL_NULL) {
- ofs++; continue;
- }
- }
+ u_char link[2], tmp;
+ int ofs, i, attr;
+ int ret;
- /* End of chain? Follow long link if possible */
- if (link[0] == CISTPL_END) {
- ofs = follow_link(s, tuple);
- if (ofs < 0)
- return -ENOSPC;
- attr = SPACE(tuple->Flags);
- ret = read_cis_cache(s, attr, ofs, 2, link);
- if (ret)
- return -1;
- }
+ if (!s)
+ return -EINVAL;
+ if (!(s->state & SOCKET_PRESENT) || (s->state & SOCKET_CARDBUS))
+ return -ENODEV;
- /* Is this a link tuple? Make a note of it */
- if ((link[0] == CISTPL_LONGLINK_A) ||
- (link[0] == CISTPL_LONGLINK_C) ||
- (link[0] == CISTPL_LONGLINK_MFC) ||
- (link[0] == CISTPL_LINKTARGET) ||
- (link[0] == CISTPL_INDIRECT) ||
- (link[0] == CISTPL_NO_LINK)) {
- switch (link[0]) {
- case CISTPL_LONGLINK_A:
- HAS_LINK(tuple->Flags) = 1;
- LINK_SPACE(tuple->Flags) = attr | IS_ATTR;
- ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
- if (ret)
- return -1;
- break;
- case CISTPL_LONGLINK_C:
- HAS_LINK(tuple->Flags) = 1;
- LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
- ret = read_cis_cache(s, attr, ofs+2, 4, &tuple->LinkOffset);
- if (ret)
- return -1;
- break;
- case CISTPL_INDIRECT:
- HAS_LINK(tuple->Flags) = 1;
- LINK_SPACE(tuple->Flags) = IS_ATTR | IS_INDIRECT;
- tuple->LinkOffset = 0;
- break;
- case CISTPL_LONGLINK_MFC:
- tuple->LinkOffset = ofs + 3;
- LINK_SPACE(tuple->Flags) = attr;
- if (function == BIND_FN_ALL) {
- /* Follow all the MFC links */
- ret = read_cis_cache(s, attr, ofs+2, 1, &tmp);
- if (ret)
- return -1;
- MFC_FN(tuple->Flags) = tmp;
- } else {
- /* Follow exactly one of the links */
- MFC_FN(tuple->Flags) = 1;
- tuple->LinkOffset += function * 5;
+ link[1] = tuple->TupleLink;
+ ofs = tuple->CISOffset + tuple->TupleLink;
+ attr = SPACE(tuple->Flags);
+
+ for (i = 0; i < MAX_TUPLES; i++) {
+ if (link[1] == 0xff)
+ link[0] = CISTPL_END;
+ else {
+ ret = read_cis_cache(s, attr, ofs, 2, link);
+ if (ret)
+ return -1;
+ if (link[0] == CISTPL_NULL) {
+ ofs++;
+ continue;
+ }
}
- break;
- case CISTPL_NO_LINK:
- HAS_LINK(tuple->Flags) = 0;
- break;
- }
- if ((tuple->Attributes & TUPLE_RETURN_LINK) &&
- (tuple->DesiredTuple == RETURN_FIRST_TUPLE))
- break;
- } else
- if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
- break;
- if (link[0] == tuple->DesiredTuple)
- break;
- ofs += link[1] + 2;
- }
- if (i == MAX_TUPLES) {
- dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
- return -ENOSPC;
- }
-
- tuple->TupleCode = link[0];
- tuple->TupleLink = link[1];
- tuple->CISOffset = ofs + 2;
- return 0;
-}
+ /* End of chain? Follow long link if possible */
+ if (link[0] == CISTPL_END) {
+ ofs = follow_link(s, tuple);
+ if (ofs < 0)
+ return -ENOSPC;
+ attr = SPACE(tuple->Flags);
+ ret = read_cis_cache(s, attr, ofs, 2, link);
+ if (ret)
+ return -1;
+ }
-/*====================================================================*/
+ /* Is this a link tuple? Make a note of it */
+ if ((link[0] == CISTPL_LONGLINK_A) ||
+ (link[0] == CISTPL_LONGLINK_C) ||
+ (link[0] == CISTPL_LONGLINK_MFC) ||
+ (link[0] == CISTPL_LINKTARGET) ||
+ (link[0] == CISTPL_INDIRECT) ||
+ (link[0] == CISTPL_NO_LINK)) {
+ switch (link[0]) {
+ case CISTPL_LONGLINK_A:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = attr | IS_ATTR;
+ ret = read_cis_cache(s, attr, ofs+2, 4,
+ &tuple->LinkOffset);
+ if (ret)
+ return -1;
+ break;
+ case CISTPL_LONGLINK_C:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = attr & ~IS_ATTR;
+ ret = read_cis_cache(s, attr, ofs+2, 4,
+ &tuple->LinkOffset);
+ if (ret)
+ return -1;
+ break;
+ case CISTPL_INDIRECT:
+ HAS_LINK(tuple->Flags) = 1;
+ LINK_SPACE(tuple->Flags) = IS_ATTR |
+ IS_INDIRECT;
+ tuple->LinkOffset = 0;
+ break;
+ case CISTPL_LONGLINK_MFC:
+ tuple->LinkOffset = ofs + 3;
+ LINK_SPACE(tuple->Flags) = attr;
+ if (function == BIND_FN_ALL) {
+ /* Follow all the MFC links */
+ ret = read_cis_cache(s, attr, ofs+2,
+ 1, &tmp);
+ if (ret)
+ return -1;
+ MFC_FN(tuple->Flags) = tmp;
+ } else {
+ /* Follow exactly one of the links */
+ MFC_FN(tuple->Flags) = 1;
+ tuple->LinkOffset += function * 5;
+ }
+ break;
+ case CISTPL_NO_LINK:
+ HAS_LINK(tuple->Flags) = 0;
+ break;
+ }
+ if ((tuple->Attributes & TUPLE_RETURN_LINK) &&
+ (tuple->DesiredTuple == RETURN_FIRST_TUPLE))
+ break;
+ } else
+ if (tuple->DesiredTuple == RETURN_FIRST_TUPLE)
+ break;
+
+ if (link[0] == tuple->DesiredTuple)
+ break;
+ ofs += link[1] + 2;
+ }
+ if (i == MAX_TUPLES) {
+ dev_dbg(&s->dev, "cs: overrun in pcmcia_get_next_tuple\n");
+ return -ENOSPC;
+ }
-#define _MIN(a, b) (((a) < (b)) ? (a) : (b))
+ tuple->TupleCode = link[0];
+ tuple->TupleLink = link[1];
+ tuple->CISOffset = ofs + 2;
+ return 0;
+}
int pccard_get_tuple_data(struct pcmcia_socket *s, tuple_t *tuple)
{
- u_int len;
- int ret;
+ u_int len;
+ int ret;
- if (!s)
- return -EINVAL;
+ if (!s)
+ return -EINVAL;
- if (tuple->TupleLink < tuple->TupleOffset)
- return -ENOSPC;
- len = tuple->TupleLink - tuple->TupleOffset;
- tuple->TupleDataLen = tuple->TupleLink;
- if (len == 0)
+ if (tuple->TupleLink < tuple->TupleOffset)
+ return -ENOSPC;
+ len = tuple->TupleLink - tuple->TupleOffset;
+ tuple->TupleDataLen = tuple->TupleLink;
+ if (len == 0)
+ return 0;
+ ret = read_cis_cache(s, SPACE(tuple->Flags),
+ tuple->CISOffset + tuple->TupleOffset,
+ min(len, (u_int) tuple->TupleDataMax),
+ tuple->TupleData);
+ if (ret)
+ return -1;
return 0;
- ret = read_cis_cache(s, SPACE(tuple->Flags),
- tuple->CISOffset + tuple->TupleOffset,
- _MIN(len, tuple->TupleDataMax), tuple->TupleData);
- if (ret)
- return -1;
- return 0;
}
-/*======================================================================
-
- Parsing routines for individual tuples
-
-======================================================================*/
+/* Parsing routines for individual tuples */
static int parse_device(tuple_t *tuple, cistpl_device_t *device)
{
- int i;
- u_char scale;
- u_char *p, *q;
+ int i;
+ u_char scale;
+ u_char *p, *q;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- device->ndev = 0;
- for (i = 0; i < CISTPL_MAX_DEVICES; i++) {
+ device->ndev = 0;
+ for (i = 0; i < CISTPL_MAX_DEVICES; i++) {
- if (*p == 0xff)
- break;
- device->dev[i].type = (*p >> 4);
- device->dev[i].wp = (*p & 0x08) ? 1 : 0;
- switch (*p & 0x07) {
- case 0:
- device->dev[i].speed = 0;
- break;
- case 1:
- device->dev[i].speed = 250;
- break;
- case 2:
- device->dev[i].speed = 200;
- break;
- case 3:
- device->dev[i].speed = 150;
- break;
- case 4:
- device->dev[i].speed = 100;
- break;
- case 7:
- if (++p == q)
- return -EINVAL;
- device->dev[i].speed = SPEED_CVT(*p);
- while (*p & 0x80)
+ if (*p == 0xff)
+ break;
+ device->dev[i].type = (*p >> 4);
+ device->dev[i].wp = (*p & 0x08) ? 1 : 0;
+ switch (*p & 0x07) {
+ case 0:
+ device->dev[i].speed = 0;
+ break;
+ case 1:
+ device->dev[i].speed = 250;
+ break;
+ case 2:
+ device->dev[i].speed = 200;
+ break;
+ case 3:
+ device->dev[i].speed = 150;
+ break;
+ case 4:
+ device->dev[i].speed = 100;
+ break;
+ case 7:
if (++p == q)
return -EINVAL;
- break;
- default:
- return -EINVAL;
- }
+ device->dev[i].speed = SPEED_CVT(*p);
+ while (*p & 0x80)
+ if (++p == q)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
- if (++p == q)
- return -EINVAL;
- if (*p == 0xff)
- break;
- scale = *p & 7;
- if (scale == 7)
- return -EINVAL;
- device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2));
- device->ndev++;
- if (++p == q)
- break;
- }
+ if (++p == q)
+ return -EINVAL;
+ if (*p == 0xff)
+ break;
+ scale = *p & 7;
+ if (scale == 7)
+ return -EINVAL;
+ device->dev[i].size = ((*p >> 3) + 1) * (512 << (scale*2));
+ device->ndev++;
+ if (++p == q)
+ break;
+ }
- return 0;
+ return 0;
}
-/*====================================================================*/
static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum)
{
- u_char *p;
- if (tuple->TupleDataLen < 5)
- return -EINVAL;
- p = (u_char *) tuple->TupleData;
- csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
- csum->len = get_unaligned_le16(p + 2);
- csum->sum = *(p + 4);
- return 0;
+ u_char *p;
+ if (tuple->TupleDataLen < 5)
+ return -EINVAL;
+ p = (u_char *) tuple->TupleData;
+ csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2;
+ csum->len = get_unaligned_le16(p + 2);
+ csum->sum = *(p + 4);
+ return 0;
}
-/*====================================================================*/
static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link)
{
- if (tuple->TupleDataLen < 4)
- return -EINVAL;
- link->addr = get_unaligned_le32(tuple->TupleData);
- return 0;
+ if (tuple->TupleDataLen < 4)
+ return -EINVAL;
+ link->addr = get_unaligned_le32(tuple->TupleData);
+ return 0;
}
-/*====================================================================*/
-static int parse_longlink_mfc(tuple_t *tuple,
- cistpl_longlink_mfc_t *link)
+static int parse_longlink_mfc(tuple_t *tuple, cistpl_longlink_mfc_t *link)
{
- u_char *p;
- int i;
-
- p = (u_char *)tuple->TupleData;
-
- link->nfn = *p; p++;
- if (tuple->TupleDataLen <= link->nfn*5)
- return -EINVAL;
- for (i = 0; i < link->nfn; i++) {
- link->fn[i].space = *p; p++;
- link->fn[i].addr = get_unaligned_le32(p);
- p += 4;
- }
- return 0;
+ u_char *p;
+ int i;
+
+ p = (u_char *)tuple->TupleData;
+
+ link->nfn = *p; p++;
+ if (tuple->TupleDataLen <= link->nfn*5)
+ return -EINVAL;
+ for (i = 0; i < link->nfn; i++) {
+ link->fn[i].space = *p; p++;
+ link->fn[i].addr = get_unaligned_le32(p);
+ p += 4;
+ }
+ return 0;
}
-/*====================================================================*/
static int parse_strings(u_char *p, u_char *q, int max,
char *s, u_char *ofs, u_char *found)
{
- int i, j, ns;
+ int i, j, ns;
- if (p == q)
- return -EINVAL;
- ns = 0; j = 0;
- for (i = 0; i < max; i++) {
- if (*p == 0xff)
- break;
- ofs[i] = j;
- ns++;
- for (;;) {
- s[j++] = (*p == 0xff) ? '\0' : *p;
- if ((*p == '\0') || (*p == 0xff))
- break;
- if (++p == q)
- return -EINVAL;
+ if (p == q)
+ return -EINVAL;
+ ns = 0; j = 0;
+ for (i = 0; i < max; i++) {
+ if (*p == 0xff)
+ break;
+ ofs[i] = j;
+ ns++;
+ for (;;) {
+ s[j++] = (*p == 0xff) ? '\0' : *p;
+ if ((*p == '\0') || (*p == 0xff))
+ break;
+ if (++p == q)
+ return -EINVAL;
+ }
+ if ((*p == 0xff) || (++p == q))
+ break;
}
- if ((*p == 0xff) || (++p == q))
- break;
- }
- if (found) {
- *found = ns;
- return 0;
- } else {
+ if (found) {
+ *found = ns;
+ return 0;
+ }
+
return (ns == max) ? 0 : -EINVAL;
- }
}
-/*====================================================================*/
static int parse_vers_1(tuple_t *tuple, cistpl_vers_1_t *vers_1)
{
- u_char *p, *q;
+ u_char *p, *q;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- vers_1->major = *p; p++;
- vers_1->minor = *p; p++;
- if (p >= q)
- return -EINVAL;
+ vers_1->major = *p; p++;
+ vers_1->minor = *p; p++;
+ if (p >= q)
+ return -EINVAL;
- return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
- vers_1->str, vers_1->ofs, &vers_1->ns);
+ return parse_strings(p, q, CISTPL_VERS_1_MAX_PROD_STRINGS,
+ vers_1->str, vers_1->ofs, &vers_1->ns);
}
-/*====================================================================*/
static int parse_altstr(tuple_t *tuple, cistpl_altstr_t *altstr)
{
- u_char *p, *q;
+ u_char *p, *q;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
- altstr->str, altstr->ofs, &altstr->ns);
+ return parse_strings(p, q, CISTPL_MAX_ALTSTR_STRINGS,
+ altstr->str, altstr->ofs, &altstr->ns);
}
-/*====================================================================*/
static int parse_jedec(tuple_t *tuple, cistpl_jedec_t *jedec)
{
- u_char *p, *q;
- int nid;
+ u_char *p, *q;
+ int nid;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
- if (p > q-2)
- break;
- jedec->id[nid].mfr = p[0];
- jedec->id[nid].info = p[1];
- p += 2;
- }
- jedec->nid = nid;
- return 0;
+ for (nid = 0; nid < CISTPL_MAX_DEVICES; nid++) {
+ if (p > q-2)
+ break;
+ jedec->id[nid].mfr = p[0];
+ jedec->id[nid].info = p[1];
+ p += 2;
+ }
+ jedec->nid = nid;
+ return 0;
}
-/*====================================================================*/
static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m)
{
- if (tuple->TupleDataLen < 4)
- return -EINVAL;
- m->manf = get_unaligned_le16(tuple->TupleData);
- m->card = get_unaligned_le16(tuple->TupleData + 2);
- return 0;
+ if (tuple->TupleDataLen < 4)
+ return -EINVAL;
+ m->manf = get_unaligned_le16(tuple->TupleData);
+ m->card = get_unaligned_le16(tuple->TupleData + 2);
+ return 0;
}
-/*====================================================================*/
static int parse_funcid(tuple_t *tuple, cistpl_funcid_t *f)
{
- u_char *p;
- if (tuple->TupleDataLen < 2)
- return -EINVAL;
- p = (u_char *)tuple->TupleData;
- f->func = p[0];
- f->sysinit = p[1];
- return 0;
+ u_char *p;
+ if (tuple->TupleDataLen < 2)
+ return -EINVAL;
+ p = (u_char *)tuple->TupleData;
+ f->func = p[0];
+ f->sysinit = p[1];
+ return 0;
}
-/*====================================================================*/
static int parse_funce(tuple_t *tuple, cistpl_funce_t *f)
{
- u_char *p;
- int i;
- if (tuple->TupleDataLen < 1)
- return -EINVAL;
- p = (u_char *)tuple->TupleData;
- f->type = p[0];
- for (i = 1; i < tuple->TupleDataLen; i++)
- f->data[i-1] = p[i];
- return 0;
+ u_char *p;
+ int i;
+ if (tuple->TupleDataLen < 1)
+ return -EINVAL;
+ p = (u_char *)tuple->TupleData;
+ f->type = p[0];
+ for (i = 1; i < tuple->TupleDataLen; i++)
+ f->data[i-1] = p[i];
+ return 0;
}
-/*====================================================================*/
static int parse_config(tuple_t *tuple, cistpl_config_t *config)
{
- int rasz, rmsz, i;
- u_char *p;
-
- p = (u_char *)tuple->TupleData;
- rasz = *p & 0x03;
- rmsz = (*p & 0x3c) >> 2;
- if (tuple->TupleDataLen < rasz+rmsz+4)
- return -EINVAL;
- config->last_idx = *(++p);
- p++;
- config->base = 0;
- for (i = 0; i <= rasz; i++)
- config->base += p[i] << (8*i);
- p += rasz+1;
- for (i = 0; i < 4; i++)
- config->rmask[i] = 0;
- for (i = 0; i <= rmsz; i++)
- config->rmask[i>>2] += p[i] << (8*(i%4));
- config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4);
- return 0;
+ int rasz, rmsz, i;
+ u_char *p;
+
+ p = (u_char *)tuple->TupleData;
+ rasz = *p & 0x03;
+ rmsz = (*p & 0x3c) >> 2;
+ if (tuple->TupleDataLen < rasz+rmsz+4)
+ return -EINVAL;
+ config->last_idx = *(++p);
+ p++;
+ config->base = 0;
+ for (i = 0; i <= rasz; i++)
+ config->base += p[i] << (8*i);
+ p += rasz+1;
+ for (i = 0; i < 4; i++)
+ config->rmask[i] = 0;
+ for (i = 0; i <= rmsz; i++)
+ config->rmask[i>>2] += p[i] << (8*(i%4));
+ config->subtuples = tuple->TupleDataLen - (rasz+rmsz+4);
+ return 0;
}
-/*======================================================================
+/* The following routines are all used to parse the nightmarish
+ * config table entries.
+ */
+
+static u_char *parse_power(u_char *p, u_char *q, cistpl_power_t *pwr)
+{
+ int i;
+ u_int scale;
- The following routines are all used to parse the nightmarish
- config table entries.
+ if (p == q)
+ return NULL;
+ pwr->present = *p;
+ pwr->flags = 0;
+ p++;
+ for (i = 0; i < 7; i++)
+ if (pwr->present & (1<<i)) {
+ if (p == q)
+ return NULL;
+ pwr->param[i] = POWER_CVT(*p);
+ scale = POWER_SCALE(*p);
+ while (*p & 0x80) {
+ if (++p == q)
+ return NULL;
+ if ((*p & 0x7f) < 100)
+ pwr->param[i] +=
+ (*p & 0x7f) * scale / 100;
+ else if (*p == 0x7d)
+ pwr->flags |= CISTPL_POWER_HIGHZ_OK;
+ else if (*p == 0x7e)
+ pwr->param[i] = 0;
+ else if (*p == 0x7f)
+ pwr->flags |= CISTPL_POWER_HIGHZ_REQ;
+ else
+ return NULL;
+ }
+ p++;
+ }
+ return p;
+}
-======================================================================*/
-static u_char *parse_power(u_char *p, u_char *q,
- cistpl_power_t *pwr)
+static u_char *parse_timing(u_char *p, u_char *q, cistpl_timing_t *timing)
{
- int i;
- u_int scale;
-
- if (p == q)
- return NULL;
- pwr->present = *p;
- pwr->flags = 0;
- p++;
- for (i = 0; i < 7; i++)
- if (pwr->present & (1<<i)) {
- if (p == q)
- return NULL;
- pwr->param[i] = POWER_CVT(*p);
- scale = POWER_SCALE(*p);
- while (*p & 0x80) {
+ u_char scale;
+
+ if (p == q)
+ return NULL;
+ scale = *p;
+ if ((scale & 3) != 3) {
if (++p == q)
return NULL;
- if ((*p & 0x7f) < 100)
- pwr->param[i] += (*p & 0x7f) * scale / 100;
- else if (*p == 0x7d)
- pwr->flags |= CISTPL_POWER_HIGHZ_OK;
- else if (*p == 0x7e)
- pwr->param[i] = 0;
- else if (*p == 0x7f)
- pwr->flags |= CISTPL_POWER_HIGHZ_REQ;
- else
- return NULL;
- }
- p++;
- }
- return p;
+ timing->wait = SPEED_CVT(*p);
+ timing->waitscale = exponent[scale & 3];
+ } else
+ timing->wait = 0;
+ scale >>= 2;
+ if ((scale & 7) != 7) {
+ if (++p == q)
+ return NULL;
+ timing->ready = SPEED_CVT(*p);
+ timing->rdyscale = exponent[scale & 7];
+ } else
+ timing->ready = 0;
+ scale >>= 3;
+ if (scale != 7) {
+ if (++p == q)
+ return NULL;
+ timing->reserved = SPEED_CVT(*p);
+ timing->rsvscale = exponent[scale];
+ } else
+ timing->reserved = 0;
+ p++;
+ return p;
}
-/*====================================================================*/
-static u_char *parse_timing(u_char *p, u_char *q,
- cistpl_timing_t *timing)
+static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
{
- u_char scale;
+ int i, j, bsz, lsz;
- if (p == q)
- return NULL;
- scale = *p;
- if ((scale & 3) != 3) {
- if (++p == q)
- return NULL;
- timing->wait = SPEED_CVT(*p);
- timing->waitscale = exponent[scale & 3];
- } else
- timing->wait = 0;
- scale >>= 2;
- if ((scale & 7) != 7) {
- if (++p == q)
+ if (p == q)
return NULL;
- timing->ready = SPEED_CVT(*p);
- timing->rdyscale = exponent[scale & 7];
- } else
- timing->ready = 0;
- scale >>= 3;
- if (scale != 7) {
+ io->flags = *p;
+
+ if (!(*p & 0x80)) {
+ io->nwin = 1;
+ io->win[0].base = 0;
+ io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
+ return p+1;
+ }
+
if (++p == q)
return NULL;
- timing->reserved = SPEED_CVT(*p);
- timing->rsvscale = exponent[scale];
- } else
- timing->reserved = 0;
- p++;
- return p;
-}
-
-/*====================================================================*/
+ io->nwin = (*p & 0x0f) + 1;
+ bsz = (*p & 0x30) >> 4;
+ if (bsz == 3)
+ bsz++;
+ lsz = (*p & 0xc0) >> 6;
+ if (lsz == 3)
+ lsz++;
+ p++;
-static u_char *parse_io(u_char *p, u_char *q, cistpl_io_t *io)
-{
- int i, j, bsz, lsz;
-
- if (p == q)
- return NULL;
- io->flags = *p;
-
- if (!(*p & 0x80)) {
- io->nwin = 1;
- io->win[0].base = 0;
- io->win[0].len = (1 << (io->flags & CISTPL_IO_LINES_MASK));
- return p+1;
- }
-
- if (++p == q)
- return NULL;
- io->nwin = (*p & 0x0f) + 1;
- bsz = (*p & 0x30) >> 4;
- if (bsz == 3)
- bsz++;
- lsz = (*p & 0xc0) >> 6;
- if (lsz == 3)
- lsz++;
- p++;
-
- for (i = 0; i < io->nwin; i++) {
- io->win[i].base = 0;
- io->win[i].len = 1;
- for (j = 0; j < bsz; j++, p++) {
- if (p == q)
- return NULL;
- io->win[i].base += *p << (j*8);
- }
- for (j = 0; j < lsz; j++, p++) {
- if (p == q)
- return NULL;
- io->win[i].len += *p << (j*8);
+ for (i = 0; i < io->nwin; i++) {
+ io->win[i].base = 0;
+ io->win[i].len = 1;
+ for (j = 0; j < bsz; j++, p++) {
+ if (p == q)
+ return NULL;
+ io->win[i].base += *p << (j*8);
+ }
+ for (j = 0; j < lsz; j++, p++) {
+ if (p == q)
+ return NULL;
+ io->win[i].len += *p << (j*8);
+ }
}
- }
- return p;
+ return p;
}
-/*====================================================================*/
static u_char *parse_mem(u_char *p, u_char *q, cistpl_mem_t *mem)
{
- int i, j, asz, lsz, has_ha;
- u_int len, ca, ha;
-
- if (p == q)
- return NULL;
-
- mem->nwin = (*p & 0x07) + 1;
- lsz = (*p & 0x18) >> 3;
- asz = (*p & 0x60) >> 5;
- has_ha = (*p & 0x80);
- if (++p == q)
- return NULL;
-
- for (i = 0; i < mem->nwin; i++) {
- len = ca = ha = 0;
- for (j = 0; j < lsz; j++, p++) {
- if (p == q)
- return NULL;
- len += *p << (j*8);
- }
- for (j = 0; j < asz; j++, p++) {
- if (p == q)
- return NULL;
- ca += *p << (j*8);
+ int i, j, asz, lsz, has_ha;
+ u_int len, ca, ha;
+
+ if (p == q)
+ return NULL;
+
+ mem->nwin = (*p & 0x07) + 1;
+ lsz = (*p & 0x18) >> 3;
+ asz = (*p & 0x60) >> 5;
+ has_ha = (*p & 0x80);
+ if (++p == q)
+ return NULL;
+
+ for (i = 0; i < mem->nwin; i++) {
+ len = ca = ha = 0;
+ for (j = 0; j < lsz; j++, p++) {
+ if (p == q)
+ return NULL;
+ len += *p << (j*8);
+ }
+ for (j = 0; j < asz; j++, p++) {
+ if (p == q)
+ return NULL;
+ ca += *p << (j*8);
+ }
+ if (has_ha)
+ for (j = 0; j < asz; j++, p++) {
+ if (p == q)
+ return NULL;
+ ha += *p << (j*8);
+ }
+ mem->win[i].len = len << 8;
+ mem->win[i].card_addr = ca << 8;
+ mem->win[i].host_addr = ha << 8;
}
- if (has_ha)
- for (j = 0; j < asz; j++, p++) {
- if (p == q)
- return NULL;
- ha += *p << (j*8);
- }
- mem->win[i].len = len << 8;
- mem->win[i].card_addr = ca << 8;
- mem->win[i].host_addr = ha << 8;
- }
- return p;
+ return p;
}
-/*====================================================================*/
static u_char *parse_irq(u_char *p, u_char *q, cistpl_irq_t *irq)
{
- if (p == q)
- return NULL;
- irq->IRQInfo1 = *p; p++;
- if (irq->IRQInfo1 & IRQ_INFO2_VALID) {
- if (p+2 > q)
+ if (p == q)
return NULL;
- irq->IRQInfo2 = (p[1]<<8) + p[0];
- p += 2;
- }
- return p;
+ irq->IRQInfo1 = *p; p++;
+ if (irq->IRQInfo1 & IRQ_INFO2_VALID) {
+ if (p+2 > q)
+ return NULL;
+ irq->IRQInfo2 = (p[1]<<8) + p[0];
+ p += 2;
+ }
+ return p;
}
-/*====================================================================*/
static int parse_cftable_entry(tuple_t *tuple,
cistpl_cftable_entry_t *entry)
{
- u_char *p, *q, features;
-
- p = tuple->TupleData;
- q = p + tuple->TupleDataLen;
- entry->index = *p & 0x3f;
- entry->flags = 0;
- if (*p & 0x40)
- entry->flags |= CISTPL_CFTABLE_DEFAULT;
- if (*p & 0x80) {
- if (++p == q)
- return -EINVAL;
- if (*p & 0x10)
- entry->flags |= CISTPL_CFTABLE_BVDS;
- if (*p & 0x20)
- entry->flags |= CISTPL_CFTABLE_WP;
+ u_char *p, *q, features;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+ entry->index = *p & 0x3f;
+ entry->flags = 0;
if (*p & 0x40)
- entry->flags |= CISTPL_CFTABLE_RDYBSY;
- if (*p & 0x80)
- entry->flags |= CISTPL_CFTABLE_MWAIT;
- entry->interface = *p & 0x0f;
- } else
- entry->interface = 0;
-
- /* Process optional features */
- if (++p == q)
- return -EINVAL;
- features = *p; p++;
-
- /* Power options */
- if ((features & 3) > 0) {
- p = parse_power(p, q, &entry->vcc);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->vcc.present = 0;
- if ((features & 3) > 1) {
- p = parse_power(p, q, &entry->vpp1);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->vpp1.present = 0;
- if ((features & 3) > 2) {
- p = parse_power(p, q, &entry->vpp2);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->vpp2.present = 0;
+ entry->flags |= CISTPL_CFTABLE_DEFAULT;
+ if (*p & 0x80) {
+ if (++p == q)
+ return -EINVAL;
+ if (*p & 0x10)
+ entry->flags |= CISTPL_CFTABLE_BVDS;
+ if (*p & 0x20)
+ entry->flags |= CISTPL_CFTABLE_WP;
+ if (*p & 0x40)
+ entry->flags |= CISTPL_CFTABLE_RDYBSY;
+ if (*p & 0x80)
+ entry->flags |= CISTPL_CFTABLE_MWAIT;
+ entry->interface = *p & 0x0f;
+ } else
+ entry->interface = 0;
- /* Timing options */
- if (features & 0x04) {
- p = parse_timing(p, q, &entry->timing);
- if (p == NULL)
- return -EINVAL;
- } else {
- entry->timing.wait = 0;
- entry->timing.ready = 0;
- entry->timing.reserved = 0;
- }
-
- /* I/O window options */
- if (features & 0x08) {
- p = parse_io(p, q, &entry->io);
- if (p == NULL)
+ /* Process optional features */
+ if (++p == q)
return -EINVAL;
- } else
- entry->io.nwin = 0;
+ features = *p; p++;
- /* Interrupt options */
- if (features & 0x10) {
- p = parse_irq(p, q, &entry->irq);
- if (p == NULL)
- return -EINVAL;
- } else
- entry->irq.IRQInfo1 = 0;
-
- switch (features & 0x60) {
- case 0x00:
- entry->mem.nwin = 0;
- break;
- case 0x20:
- entry->mem.nwin = 1;
- entry->mem.win[0].len = get_unaligned_le16(p) << 8;
- entry->mem.win[0].card_addr = 0;
- entry->mem.win[0].host_addr = 0;
- p += 2;
- if (p > q)
- return -EINVAL;
- break;
- case 0x40:
- entry->mem.nwin = 1;
- entry->mem.win[0].len = get_unaligned_le16(p) << 8;
- entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
- entry->mem.win[0].host_addr = 0;
- p += 4;
- if (p > q)
- return -EINVAL;
- break;
- case 0x60:
- p = parse_mem(p, q, &entry->mem);
- if (p == NULL)
- return -EINVAL;
- break;
- }
+ /* Power options */
+ if ((features & 3) > 0) {
+ p = parse_power(p, q, &entry->vcc);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->vcc.present = 0;
+ if ((features & 3) > 1) {
+ p = parse_power(p, q, &entry->vpp1);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->vpp1.present = 0;
+ if ((features & 3) > 2) {
+ p = parse_power(p, q, &entry->vpp2);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->vpp2.present = 0;
- /* Misc features */
- if (features & 0x80) {
- if (p == q)
- return -EINVAL;
- entry->flags |= (*p << 8);
- while (*p & 0x80)
- if (++p == q)
- return -EINVAL;
- p++;
- }
+ /* Timing options */
+ if (features & 0x04) {
+ p = parse_timing(p, q, &entry->timing);
+ if (p == NULL)
+ return -EINVAL;
+ } else {
+ entry->timing.wait = 0;
+ entry->timing.ready = 0;
+ entry->timing.reserved = 0;
+ }
- entry->subtuples = q-p;
+ /* I/O window options */
+ if (features & 0x08) {
+ p = parse_io(p, q, &entry->io);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->io.nwin = 0;
+
+ /* Interrupt options */
+ if (features & 0x10) {
+ p = parse_irq(p, q, &entry->irq);
+ if (p == NULL)
+ return -EINVAL;
+ } else
+ entry->irq.IRQInfo1 = 0;
+
+ switch (features & 0x60) {
+ case 0x00:
+ entry->mem.nwin = 0;
+ break;
+ case 0x20:
+ entry->mem.nwin = 1;
+ entry->mem.win[0].len = get_unaligned_le16(p) << 8;
+ entry->mem.win[0].card_addr = 0;
+ entry->mem.win[0].host_addr = 0;
+ p += 2;
+ if (p > q)
+ return -EINVAL;
+ break;
+ case 0x40:
+ entry->mem.nwin = 1;
+ entry->mem.win[0].len = get_unaligned_le16(p) << 8;
+ entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8;
+ entry->mem.win[0].host_addr = 0;
+ p += 4;
+ if (p > q)
+ return -EINVAL;
+ break;
+ case 0x60:
+ p = parse_mem(p, q, &entry->mem);
+ if (p == NULL)
+ return -EINVAL;
+ break;
+ }
+
+ /* Misc features */
+ if (features & 0x80) {
+ if (p == q)
+ return -EINVAL;
+ entry->flags |= (*p << 8);
+ while (*p & 0x80)
+ if (++p == q)
+ return -EINVAL;
+ p++;
+ }
+
+ entry->subtuples = q-p;
- return 0;
+ return 0;
}
-/*====================================================================*/
static int parse_device_geo(tuple_t *tuple, cistpl_device_geo_t *geo)
{
- u_char *p, *q;
- int n;
+ u_char *p, *q;
+ int n;
- p = (u_char *)tuple->TupleData;
- q = p + tuple->TupleDataLen;
+ p = (u_char *)tuple->TupleData;
+ q = p + tuple->TupleDataLen;
- for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
- if (p > q-6)
- break;
- geo->geo[n].buswidth = p[0];
- geo->geo[n].erase_block = 1 << (p[1]-1);
- geo->geo[n].read_block = 1 << (p[2]-1);
- geo->geo[n].write_block = 1 << (p[3]-1);
- geo->geo[n].partition = 1 << (p[4]-1);
- geo->geo[n].interleave = 1 << (p[5]-1);
- p += 6;
- }
- geo->ngeo = n;
- return 0;
+ for (n = 0; n < CISTPL_MAX_DEVICES; n++) {
+ if (p > q-6)
+ break;
+ geo->geo[n].buswidth = p[0];
+ geo->geo[n].erase_block = 1 << (p[1]-1);
+ geo->geo[n].read_block = 1 << (p[2]-1);
+ geo->geo[n].write_block = 1 << (p[3]-1);
+ geo->geo[n].partition = 1 << (p[4]-1);
+ geo->geo[n].interleave = 1 << (p[5]-1);
+ p += 6;
+ }
+ geo->ngeo = n;
+ return 0;
}
-/*====================================================================*/
static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2)
{
- u_char *p, *q;
-
- if (tuple->TupleDataLen < 10)
- return -EINVAL;
-
- p = tuple->TupleData;
- q = p + tuple->TupleDataLen;
-
- v2->vers = p[0];
- v2->comply = p[1];
- v2->dindex = get_unaligned_le16(p + 2);
- v2->vspec8 = p[6];
- v2->vspec9 = p[7];
- v2->nhdr = p[8];
- p += 9;
- return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
+ u_char *p, *q;
+
+ if (tuple->TupleDataLen < 10)
+ return -EINVAL;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+
+ v2->vers = p[0];
+ v2->comply = p[1];
+ v2->dindex = get_unaligned_le16(p + 2);
+ v2->vspec8 = p[6];
+ v2->vspec9 = p[7];
+ v2->nhdr = p[8];
+ p += 9;
+ return parse_strings(p, q, 2, v2->str, &v2->vendor, NULL);
}
-/*====================================================================*/
static int parse_org(tuple_t *tuple, cistpl_org_t *org)
{
- u_char *p, *q;
- int i;
-
- p = tuple->TupleData;
- q = p + tuple->TupleDataLen;
- if (p == q)
- return -EINVAL;
- org->data_org = *p;
- if (++p == q)
- return -EINVAL;
- for (i = 0; i < 30; i++) {
- org->desc[i] = *p;
- if (*p == '\0')
- break;
+ u_char *p, *q;
+ int i;
+
+ p = tuple->TupleData;
+ q = p + tuple->TupleDataLen;
+ if (p == q)
+ return -EINVAL;
+ org->data_org = *p;
if (++p == q)
return -EINVAL;
- }
- return 0;
+ for (i = 0; i < 30; i++) {
+ org->desc[i] = *p;
+ if (*p == '\0')
+ break;
+ if (++p == q)
+ return -EINVAL;
+ }
+ return 0;
}
-/*====================================================================*/
static int parse_format(tuple_t *tuple, cistpl_format_t *fmt)
{
- u_char *p;
+ u_char *p;
- if (tuple->TupleDataLen < 10)
- return -EINVAL;
+ if (tuple->TupleDataLen < 10)
+ return -EINVAL;
- p = tuple->TupleData;
+ p = tuple->TupleData;
- fmt->type = p[0];
- fmt->edc = p[1];
- fmt->offset = get_unaligned_le32(p + 2);
- fmt->length = get_unaligned_le32(p + 6);
+ fmt->type = p[0];
+ fmt->edc = p[1];
+ fmt->offset = get_unaligned_le32(p + 2);
+ fmt->length = get_unaligned_le32(p + 6);
- return 0;
+ return 0;
}
-/*====================================================================*/
int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse)
{
- int ret = 0;
-
- if (tuple->TupleDataLen > tuple->TupleDataMax)
- return -EINVAL;
- switch (tuple->TupleCode) {
- case CISTPL_DEVICE:
- case CISTPL_DEVICE_A:
- ret = parse_device(tuple, &parse->device);
- break;
- case CISTPL_CHECKSUM:
- ret = parse_checksum(tuple, &parse->checksum);
- break;
- case CISTPL_LONGLINK_A:
- case CISTPL_LONGLINK_C:
- ret = parse_longlink(tuple, &parse->longlink);
- break;
- case CISTPL_LONGLINK_MFC:
- ret = parse_longlink_mfc(tuple, &parse->longlink_mfc);
- break;
- case CISTPL_VERS_1:
- ret = parse_vers_1(tuple, &parse->version_1);
- break;
- case CISTPL_ALTSTR:
- ret = parse_altstr(tuple, &parse->altstr);
- break;
- case CISTPL_JEDEC_A:
- case CISTPL_JEDEC_C:
- ret = parse_jedec(tuple, &parse->jedec);
- break;
- case CISTPL_MANFID:
- ret = parse_manfid(tuple, &parse->manfid);
- break;
- case CISTPL_FUNCID:
- ret = parse_funcid(tuple, &parse->funcid);
- break;
- case CISTPL_FUNCE:
- ret = parse_funce(tuple, &parse->funce);
- break;
- case CISTPL_CONFIG:
- ret = parse_config(tuple, &parse->config);
- break;
- case CISTPL_CFTABLE_ENTRY:
- ret = parse_cftable_entry(tuple, &parse->cftable_entry);
- break;
- case CISTPL_DEVICE_GEO:
- case CISTPL_DEVICE_GEO_A:
- ret = parse_device_geo(tuple, &parse->device_geo);
- break;
- case CISTPL_VERS_2:
- ret = parse_vers_2(tuple, &parse->vers_2);
- break;
- case CISTPL_ORG:
- ret = parse_org(tuple, &parse->org);
- break;
- case CISTPL_FORMAT:
- case CISTPL_FORMAT_A:
- ret = parse_format(tuple, &parse->format);
- break;
- case CISTPL_NO_LINK:
- case CISTPL_LINKTARGET:
- ret = 0;
- break;
- default:
- ret = -EINVAL;
- break;
- }
- if (ret)
- pr_debug("parse_tuple failed %d\n", ret);
- return ret;
+ int ret = 0;
+
+ if (tuple->TupleDataLen > tuple->TupleDataMax)
+ return -EINVAL;
+ switch (tuple->TupleCode) {
+ case CISTPL_DEVICE:
+ case CISTPL_DEVICE_A:
+ ret = parse_device(tuple, &parse->device);
+ break;
+ case CISTPL_CHECKSUM:
+ ret = parse_checksum(tuple, &parse->checksum);
+ break;
+ case CISTPL_LONGLINK_A:
+ case CISTPL_LONGLINK_C:
+ ret = parse_longlink(tuple, &parse->longlink);
+ break;
+ case CISTPL_LONGLINK_MFC:
+ ret = parse_longlink_mfc(tuple, &parse->longlink_mfc);
+ break;
+ case CISTPL_VERS_1:
+ ret = parse_vers_1(tuple, &parse->version_1);
+ break;
+ case CISTPL_ALTSTR:
+ ret = parse_altstr(tuple, &parse->altstr);
+ break;
+ case CISTPL_JEDEC_A:
+ case CISTPL_JEDEC_C:
+ ret = parse_jedec(tuple, &parse->jedec);
+ break;
+ case CISTPL_MANFID:
+ ret = parse_manfid(tuple, &parse->manfid);
+ break;
+ case CISTPL_FUNCID:
+ ret = parse_funcid(tuple, &parse->funcid);
+ break;
+ case CISTPL_FUNCE:
+ ret = parse_funce(tuple, &parse->funce);
+ break;
+ case CISTPL_CONFIG:
+ ret = parse_config(tuple, &parse->config);
+ break;
+ case CISTPL_CFTABLE_ENTRY:
+ ret = parse_cftable_entry(tuple, &parse->cftable_entry);
+ break;
+ case CISTPL_DEVICE_GEO:
+ case CISTPL_DEVICE_GEO_A:
+ ret = parse_device_geo(tuple, &parse->device_geo);
+ break;
+ case CISTPL_VERS_2:
+ ret = parse_vers_2(tuple, &parse->vers_2);
+ break;
+ case CISTPL_ORG:
+ ret = parse_org(tuple, &parse->org);
+ break;
+ case CISTPL_FORMAT:
+ case CISTPL_FORMAT_A:
+ ret = parse_format(tuple, &parse->format);
+ break;
+ case CISTPL_NO_LINK:
+ case CISTPL_LINKTARGET:
+ ret = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ if (ret)
+ pr_debug("parse_tuple failed %d\n", ret);
+ return ret;
}
EXPORT_SYMBOL(pcmcia_parse_tuple);
-/*======================================================================
- This is used internally by Card Services to look up CIS stuff.
-
-======================================================================*/
-
-int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function, cisdata_t code, void *parse)
+/**
+ * pccard_read_tuple() - internal CIS tuple access
+ * @s: the struct pcmcia_socket where the card is inserted
+ * @function: the device function we look for
+ * @code: which CIS code shall we look for?
+ * @parse: buffer where the tuple shall be parsed (or NULL, if no parse)
+ *
+ * pccard_read_tuple() reads out one tuple and attempts to parse it
+ */
+int pccard_read_tuple(struct pcmcia_socket *s, unsigned int function,
+ cisdata_t code, void *parse)
{
- tuple_t tuple;
- cisdata_t *buf;
- int ret;
-
- buf = kmalloc(256, GFP_KERNEL);
- if (buf == NULL) {
- dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
- return -ENOMEM;
- }
- tuple.DesiredTuple = code;
- tuple.Attributes = 0;
- if (function == BIND_FN_ALL)
- tuple.Attributes = TUPLE_RETURN_COMMON;
- ret = pccard_get_first_tuple(s, function, &tuple);
- if (ret != 0)
- goto done;
- tuple.TupleData = buf;
- tuple.TupleOffset = 0;
- tuple.TupleDataMax = 255;
- ret = pccard_get_tuple_data(s, &tuple);
- if (ret != 0)
- goto done;
- ret = pcmcia_parse_tuple(&tuple, parse);
+ tuple_t tuple;
+ cisdata_t *buf;
+ int ret;
+
+ buf = kmalloc(256, GFP_KERNEL);
+ if (buf == NULL) {
+ dev_printk(KERN_WARNING, &s->dev, "no memory to read tuple\n");
+ return -ENOMEM;
+ }
+ tuple.DesiredTuple = code;
+ tuple.Attributes = 0;
+ if (function == BIND_FN_ALL)
+ tuple.Attributes = TUPLE_RETURN_COMMON;
+ ret = pccard_get_first_tuple(s, function, &tuple);
+ if (ret != 0)
+ goto done;
+ tuple.TupleData = buf;
+ tuple.TupleOffset = 0;
+ tuple.TupleDataMax = 255;
+ ret = pccard_get_tuple_data(s, &tuple);
+ if (ret != 0)
+ goto done;
+ ret = pcmcia_parse_tuple(&tuple, parse);
done:
- kfree(buf);
- return ret;
+ kfree(buf);
+ return ret;
}
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 3889cf07d6c..9254ab0b29b 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -42,7 +42,6 @@ struct db1x_pcmcia_sock {
int nr; /* socket number */
void *virt_io;
- /* the "pseudo" addresses of the PCMCIA space. */
phys_addr_t phys_io;
phys_addr_t phys_attr;
phys_addr_t phys_mem;
@@ -437,7 +436,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
* This includes IRQs for Carddetection/ejection, the card
* itself and optional status change detection.
* Also, the memory areas covered by a socket. For these
- * we require the 32bit "pseudo" addresses (see the au1000.h
+ * we require the real 36bit addresses (see the au1000.h
* header for more information).
*/
@@ -459,11 +458,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
ret = -ENODEV;
- /*
- * pseudo-attr: The 32bit address of the PCMCIA attribute space
- * for this socket (usually the 36bit address shifted 4 to the
- * right).
- */
+ /* 36bit PCMCIA Attribute area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
if (!r) {
printk(KERN_ERR "pcmcia%d has no 'pseudo-attr' resource!\n",
@@ -472,10 +467,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
}
sock->phys_attr = r->start;
- /*
- * pseudo-mem: The 32bit address of the PCMCIA memory space for
- * this socket (usually the 36bit address shifted 4 to the right)
- */
+ /* 36bit PCMCIA Memory area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
if (!r) {
printk(KERN_ERR "pcmcia%d has no 'pseudo-mem' resource!\n",
@@ -484,10 +476,7 @@ static int __devinit db1x_pcmcia_socket_probe(struct platform_device *pdev)
}
sock->phys_mem = r->start;
- /*
- * pseudo-io: The 32bit address of the PCMCIA IO space for this
- * socket (usually the 36bit address shifted 4 to the right).
- */
+ /* 36bit PCMCIA IO area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
if (!r) {
printk(KERN_ERR "pcmcia%d has no 'pseudo-io' resource!\n",
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index e1741cd875a..7c204910a77 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -48,23 +48,13 @@ MODULE_AUTHOR("Jun Komuro <komurojun-mbn@nifty.com>");
* Specifies the interrupt delivery mode. The default (1) is to use PCI
* interrupts; a value of 0 selects ISA interrupts. This must be set for
* correct operation of PCI card readers.
- *
- * irq_list=i,j,...
- * This list limits the set of interrupts that can be used by PCMCIA
- * cards.
- * The default list is 3,4,5,7,9,10,11.
- * (irq_list parameter is not used, if irq_mode = 1)
*/
static int irq_mode = 1; /* 0 = ISA interrupt, 1 = PCI interrupt */
-static int irq_list[16];
-static unsigned int irq_list_count = 0;
module_param(irq_mode, int, 0444);
-module_param_array(irq_list, int, &irq_list_count, 0444);
MODULE_PARM_DESC(irq_mode,
"interrupt delivery mode. 0 = ISA, 1 = PCI. default is 1");
-MODULE_PARM_DESC(irq_list, "interrupts that can be used by PCMCIA cards");
static DEFINE_SPINLOCK(port_lock);
@@ -605,13 +595,7 @@ static u_int __devinit pd6729_isa_scan(void)
return 0;
}
- if (irq_list_count == 0)
- mask0 = 0xffff;
- else
- for (i = mask0 = 0; i < irq_list_count; i++)
- mask0 |= (1<<irq_list[i]);
-
- mask0 &= PD67_MASK;
+ mask0 = PD67_MASK;
/* just find interrupts that aren't in use */
for (i = 0; i < 16; i++)
diff --git a/drivers/pcmcia/rsrc_mgr.c b/drivers/pcmcia/rsrc_mgr.c
index e6f7d410aed..452c83b512c 100644
--- a/drivers/pcmcia/rsrc_mgr.c
+++ b/drivers/pcmcia/rsrc_mgr.c
@@ -79,9 +79,8 @@ static resource_size_t pcmcia_align(void *align_data,
#ifdef CONFIG_X86
if (res->flags & IORESOURCE_IO) {
- if (start & 0x300) {
+ if (start & 0x300)
start = (start + 0x3ff) & ~0x3ff;
- }
}
#endif
diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c
index 61560cd6e28..f9009d34254 100644
--- a/drivers/pcmcia/xxs1500_ss.c
+++ b/drivers/pcmcia/xxs1500_ss.c
@@ -218,11 +218,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
ret = -ENODEV;
- /*
- * pseudo-attr: The 32bit address of the PCMCIA attribute space
- * for this socket (usually the 36bit address shifted 4 to the
- * right).
- */
+ /* 36bit PCMCIA Attribute area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-attr");
if (!r) {
dev_err(&pdev->dev, "missing 'pcmcia-attr' resource!\n");
@@ -230,10 +226,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
}
sock->phys_attr = r->start;
- /*
- * pseudo-mem: The 32bit address of the PCMCIA memory space for
- * this socket (usually the 36bit address shifted 4 to the right)
- */
+ /* 36bit PCMCIA Memory area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-mem");
if (!r) {
dev_err(&pdev->dev, "missing 'pcmcia-mem' resource!\n");
@@ -241,10 +234,7 @@ static int __devinit xxs1500_pcmcia_probe(struct platform_device *pdev)
}
sock->phys_mem = r->start;
- /*
- * pseudo-io: The 32bit address of the PCMCIA IO space for this
- * socket (usually the 36bit address shifted 4 to the right).
- */
+ /* 36bit PCMCIA IO area address */
r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pcmcia-io");
if (!r) {
dev_err(&pdev->dev, "missing 'pcmcia-io' resource!\n");
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index b85375f8762..967c766f53b 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1408,10 +1408,10 @@ static struct pci_device_id yenta_table[] = {
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7510, TI12XX),
CB_ID(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_7610, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, TI12XX),
- CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, TI12XX),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_710, ENE),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_712, ENE),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_720, ENE),
+ CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_722, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1211, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1225, ENE),
CB_ID(PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_1410, ENE),
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 6848f213eb5..cd2ee6fce1b 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -59,6 +59,8 @@ config ASUS_LAPTOP
select NEW_LEDS
select BACKLIGHT_CLASS_DEVICE
depends on INPUT
+ depends on RFKILL || RFKILL = n
+ select INPUT_SPARSEKMAP
---help---
This is the new Linux driver for Asus laptops. It may also support some
MEDION, JVC or VICTOR laptops. It makes all the extra buttons generate
@@ -177,6 +179,7 @@ config COMPAL_LAPTOP
tristate "Compal Laptop Extras"
depends on ACPI
depends on BACKLIGHT_CLASS_DEVICE
+ depends on RFKILL
---help---
This is a driver for laptops built by Compal:
@@ -320,9 +323,15 @@ config THINKPAD_ACPI_VIDEO
server running, phase of the moon, and the current mood of
Schroedinger's cat. If you can use X.org's RandR to control
your ThinkPad's video output ports instead of this feature,
- don't think twice: do it and say N here to save some memory.
+ don't think twice: do it and say N here to save memory and avoid
+ bad interactions with X.org.
+
+ NOTE: access to this feature is limited to processes with the
+ CAP_SYS_ADMIN capability, to avoid local DoS issues on platforms
+ where it interacts badly with X.org.
- If you are not sure, say Y here.
+ If you are not sure, say Y here but do try to check if you could
+ be using X.org RandR instead.
config THINKPAD_ACPI_HOTKEY_POLL
bool "Support NVRAM polling for hot keys"
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 61a1c750365..791fcf32150 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -45,58 +45,23 @@
#include <linux/fb.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
+#include <linux/uaccess.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/rfkill.h>
#include <acpi/acpi_drivers.h>
#include <acpi/acpi_bus.h>
-#include <asm/uaccess.h>
-#include <linux/input.h>
-
-#define ASUS_LAPTOP_VERSION "0.42"
-
-#define ASUS_HOTK_NAME "Asus Laptop Support"
-#define ASUS_HOTK_CLASS "hotkey"
-#define ASUS_HOTK_DEVICE_NAME "Hotkey"
-#define ASUS_HOTK_FILE KBUILD_MODNAME
-#define ASUS_HOTK_PREFIX "\\_SB.ATKD."
+#define ASUS_LAPTOP_VERSION "0.42"
-/*
- * Some events we use, same for all Asus
- */
-#define ATKD_BR_UP 0x10
-#define ATKD_BR_DOWN 0x20
-#define ATKD_LCD_ON 0x33
-#define ATKD_LCD_OFF 0x34
-
-/*
- * Known bits returned by \_SB.ATKD.HWRS
- */
-#define WL_HWRS 0x80
-#define BT_HWRS 0x100
-
-/*
- * Flags for hotk status
- * WL_ON and BT_ON are also used for wireless_status()
- */
-#define WL_ON 0x01 /* internal Wifi */
-#define BT_ON 0x02 /* internal Bluetooth */
-#define MLED_ON 0x04 /* mail LED */
-#define TLED_ON 0x08 /* touchpad LED */
-#define RLED_ON 0x10 /* Record LED */
-#define PLED_ON 0x20 /* Phone LED */
-#define GLED_ON 0x40 /* Gaming LED */
-#define LCD_ON 0x80 /* LCD backlight */
-#define GPS_ON 0x100 /* GPS */
-#define KEY_ON 0x200 /* Keyboard backlight */
-
-#define ASUS_LOG ASUS_HOTK_FILE ": "
-#define ASUS_ERR KERN_ERR ASUS_LOG
-#define ASUS_WARNING KERN_WARNING ASUS_LOG
-#define ASUS_NOTICE KERN_NOTICE ASUS_LOG
-#define ASUS_INFO KERN_INFO ASUS_LOG
-#define ASUS_DEBUG KERN_DEBUG ASUS_LOG
+#define ASUS_LAPTOP_NAME "Asus Laptop Support"
+#define ASUS_LAPTOP_CLASS "hotkey"
+#define ASUS_LAPTOP_DEVICE_NAME "Hotkey"
+#define ASUS_LAPTOP_FILE KBUILD_MODNAME
+#define ASUS_LAPTOP_PREFIX "\\_SB.ATKD."
MODULE_AUTHOR("Julien Lerouge, Karol Kozimor, Corentin Chary");
-MODULE_DESCRIPTION(ASUS_HOTK_NAME);
+MODULE_DESCRIPTION(ASUS_LAPTOP_NAME);
MODULE_LICENSE("GPL");
/*
@@ -113,225 +78,209 @@ static uint wapf = 1;
module_param(wapf, uint, 0644);
MODULE_PARM_DESC(wapf, "WAPF value");
-#define ASUS_HANDLE(object, paths...) \
- static acpi_handle object##_handle = NULL; \
- static char *object##_paths[] = { paths }
+static uint wlan_status = 1;
+static uint bluetooth_status = 1;
+
+module_param(wlan_status, uint, 0644);
+MODULE_PARM_DESC(wlan_status, "Set the wireless status on boot "
+ "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+ "default is 1");
+
+module_param(bluetooth_status, uint, 0644);
+MODULE_PARM_DESC(bluetooth_status, "Set the bluetooth status on boot "
+ "(0 = disabled, 1 = enabled, -1 = don't do anything). "
+ "default is 1");
+
+/*
+ * Some events we use, same for all Asus
+ */
+#define ATKD_BR_UP 0x10 /* (event & ~ATKD_BR_UP) = brightness level */
+#define ATKD_BR_DOWN 0x20 /* (event & ~ATKD_BR_DOWN) = brightness level */
+#define ATKD_BR_MIN ATKD_BR_UP
+#define ATKD_BR_MAX (ATKD_BR_DOWN | 0xF) /* 0x2f */
+#define ATKD_LCD_ON 0x33
+#define ATKD_LCD_OFF 0x34
+
+/*
+ * Known bits returned by \_SB.ATKD.HWRS
+ */
+#define WL_HWRS 0x80
+#define BT_HWRS 0x100
+
+/*
+ * Flags for hotk status
+ * WL_ON and BT_ON are also used for wireless_status()
+ */
+#define WL_RSTS 0x01 /* internal Wifi */
+#define BT_RSTS 0x02 /* internal Bluetooth */
/* LED */
-ASUS_HANDLE(mled_set, ASUS_HOTK_PREFIX "MLED");
-ASUS_HANDLE(tled_set, ASUS_HOTK_PREFIX "TLED");
-ASUS_HANDLE(rled_set, ASUS_HOTK_PREFIX "RLED"); /* W1JC */
-ASUS_HANDLE(pled_set, ASUS_HOTK_PREFIX "PLED"); /* A7J */
-ASUS_HANDLE(gled_set, ASUS_HOTK_PREFIX "GLED"); /* G1, G2 (probably) */
+#define METHOD_MLED "MLED"
+#define METHOD_TLED "TLED"
+#define METHOD_RLED "RLED" /* W1JC */
+#define METHOD_PLED "PLED" /* A7J */
+#define METHOD_GLED "GLED" /* G1, G2 (probably) */
/* LEDD */
-ASUS_HANDLE(ledd_set, ASUS_HOTK_PREFIX "SLCM");
+#define METHOD_LEDD "SLCM"
/*
* Bluetooth and WLAN
* WLED and BLED are not handled like other XLED, because in some dsdt
* they also control the WLAN/Bluetooth device.
*/
-ASUS_HANDLE(wl_switch, ASUS_HOTK_PREFIX "WLED");
-ASUS_HANDLE(bt_switch, ASUS_HOTK_PREFIX "BLED");
-ASUS_HANDLE(wireless_status, ASUS_HOTK_PREFIX "RSTS"); /* All new models */
+#define METHOD_WLAN "WLED"
+#define METHOD_BLUETOOTH "BLED"
+#define METHOD_WL_STATUS "RSTS"
/* Brightness */
-ASUS_HANDLE(brightness_set, ASUS_HOTK_PREFIX "SPLV");
-ASUS_HANDLE(brightness_get, ASUS_HOTK_PREFIX "GPLV");
+#define METHOD_BRIGHTNESS_SET "SPLV"
+#define METHOD_BRIGHTNESS_GET "GPLV"
/* Backlight */
-ASUS_HANDLE(lcd_switch, "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
- "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
- "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
- "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
- "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
- "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
- "\\_SB.PCI0.PX40.Q10", /* S1x */
- "\\Q10"); /* A2x, L2D, L3D, M2E */
+static acpi_handle lcd_switch_handle;
+static const char *lcd_switch_paths[] = {
+ "\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
+ "\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
+ "\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
+ "\\_SB.PCI0.PX40.EC0.Q10", /* M1A */
+ "\\_SB.PCI0.LPCB.EC0._Q10", /* P30 */
+ "\\_SB.PCI0.LPCB.EC0._Q0E", /* P30/P35 */
+ "\\_SB.PCI0.PX40.Q10", /* S1x */
+ "\\Q10"}; /* A2x, L2D, L3D, M2E */
/* Display */
-ASUS_HANDLE(display_set, ASUS_HOTK_PREFIX "SDSP");
-ASUS_HANDLE(display_get,
- /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
- "\\_SB.PCI0.P0P1.VGA.GETD",
- /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
- "\\_SB.PCI0.P0P2.VGA.GETD",
- /* A6V A6Q */
- "\\_SB.PCI0.P0P3.VGA.GETD",
- /* A6T, A6M */
- "\\_SB.PCI0.P0PA.VGA.GETD",
- /* L3C */
- "\\_SB.PCI0.PCI1.VGAC.NMAP",
- /* Z96F */
- "\\_SB.PCI0.VGA.GETD",
- /* A2D */
- "\\ACTD",
- /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
- "\\ADVG",
- /* P30 */
- "\\DNXT",
- /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
- "\\INFB",
- /* A3F A6F A3N A3L M6N W3N W6A */
- "\\SSTE");
-
-ASUS_HANDLE(ls_switch, ASUS_HOTK_PREFIX "ALSC"); /* Z71A Z71V */
-ASUS_HANDLE(ls_level, ASUS_HOTK_PREFIX "ALSL"); /* Z71A Z71V */
+#define METHOD_SWITCH_DISPLAY "SDSP"
+
+static acpi_handle display_get_handle;
+static const char *display_get_paths[] = {
+ /* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
+ "\\_SB.PCI0.P0P1.VGA.GETD",
+ /* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
+ "\\_SB.PCI0.P0P2.VGA.GETD",
+ /* A6V A6Q */
+ "\\_SB.PCI0.P0P3.VGA.GETD",
+ /* A6T, A6M */
+ "\\_SB.PCI0.P0PA.VGA.GETD",
+ /* L3C */
+ "\\_SB.PCI0.PCI1.VGAC.NMAP",
+ /* Z96F */
+ "\\_SB.PCI0.VGA.GETD",
+ /* A2D */
+ "\\ACTD",
+ /* A4G Z71A W1N W5A W5F M2N M3N M5N M6N S1N S5N */
+ "\\ADVG",
+ /* P30 */
+ "\\DNXT",
+ /* A2H D1 L2D L3D L3H L2E L5D L5C M1A M2E L4L W3V */
+ "\\INFB",
+ /* A3F A6F A3N A3L M6N W3N W6A */
+ "\\SSTE"};
+
+#define METHOD_ALS_CONTROL "ALSC" /* Z71A Z71V */
+#define METHOD_ALS_LEVEL "ALSL" /* Z71A Z71V */
/* GPS */
/* R2H use different handle for GPS on/off */
-ASUS_HANDLE(gps_on, ASUS_HOTK_PREFIX "SDON"); /* R2H */
-ASUS_HANDLE(gps_off, ASUS_HOTK_PREFIX "SDOF"); /* R2H */
-ASUS_HANDLE(gps_status, ASUS_HOTK_PREFIX "GPST");
+#define METHOD_GPS_ON "SDON"
+#define METHOD_GPS_OFF "SDOF"
+#define METHOD_GPS_STATUS "GPST"
/* Keyboard light */
-ASUS_HANDLE(kled_set, ASUS_HOTK_PREFIX "SLKB");
-ASUS_HANDLE(kled_get, ASUS_HOTK_PREFIX "GLKB");
+#define METHOD_KBD_LIGHT_SET "SLKB"
+#define METHOD_KBD_LIGHT_GET "GLKB"
/*
- * This is the main structure, we can use it to store anything interesting
- * about the hotk device
+ * Define a specific led structure to keep the main structure clean
*/
-struct asus_hotk {
- char *name; /* laptop name */
- struct acpi_device *device; /* the device we are in */
- acpi_handle handle; /* the handle of the hotk device */
- char status; /* status of the hotk, for LEDs, ... */
- u32 ledd_status; /* status of the LED display */
- u8 light_level; /* light sensor level */
- u8 light_switch; /* light sensor switch value */
- u16 event_count[128]; /* count for each event TODO make this better */
- struct input_dev *inputdev;
- u16 *keycode_map;
+struct asus_led {
+ int wk;
+ struct work_struct work;
+ struct led_classdev led;
+ struct asus_laptop *asus;
+ const char *method;
};
/*
- * This header is made available to allow proper configuration given model,
- * revision number , ... this info cannot go in struct asus_hotk because it is
- * available before the hotk
- */
-static struct acpi_table_header *asus_info;
-
-/* The actual device the driver binds to */
-static struct asus_hotk *hotk;
-
-/*
- * The hotkey driver declaration
+ * This is the main structure, we can use it to store anything interesting
+ * about the hotk device
*/
-static const struct acpi_device_id asus_device_ids[] = {
- {"ATK0100", 0},
- {"ATK0101", 0},
- {"", 0},
-};
-MODULE_DEVICE_TABLE(acpi, asus_device_ids);
+struct asus_laptop {
+ char *name; /* laptop name */
-static int asus_hotk_add(struct acpi_device *device);
-static int asus_hotk_remove(struct acpi_device *device, int type);
-static void asus_hotk_notify(struct acpi_device *device, u32 event);
+ struct acpi_table_header *dsdt_info;
+ struct platform_device *platform_device;
+ struct acpi_device *device; /* the device we are in */
+ struct backlight_device *backlight_device;
-static struct acpi_driver asus_hotk_driver = {
- .name = ASUS_HOTK_NAME,
- .class = ASUS_HOTK_CLASS,
- .owner = THIS_MODULE,
- .ids = asus_device_ids,
- .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
- .ops = {
- .add = asus_hotk_add,
- .remove = asus_hotk_remove,
- .notify = asus_hotk_notify,
- },
-};
+ struct input_dev *inputdev;
+ struct key_entry *keymap;
-/* The backlight device /sys/class/backlight */
-static struct backlight_device *asus_backlight_device;
+ struct asus_led mled;
+ struct asus_led tled;
+ struct asus_led rled;
+ struct asus_led pled;
+ struct asus_led gled;
+ struct asus_led kled;
+ struct workqueue_struct *led_workqueue;
-/*
- * The backlight class declaration
- */
-static int read_brightness(struct backlight_device *bd);
-static int update_bl_status(struct backlight_device *bd);
-static struct backlight_ops asusbl_ops = {
- .get_brightness = read_brightness,
- .update_status = update_bl_status,
-};
+ int wireless_status;
+ bool have_rsts;
+ int lcd_state;
-/*
- * These functions actually update the LED's, and are called from a
- * workqueue. By doing this as separate work rather than when the LED
- * subsystem asks, we avoid messing with the Asus ACPI stuff during a
- * potentially bad time, such as a timer interrupt.
- */
-static struct workqueue_struct *led_workqueue;
-
-#define ASUS_LED(object, ledname, max) \
- static void object##_led_set(struct led_classdev *led_cdev, \
- enum led_brightness value); \
- static enum led_brightness object##_led_get( \
- struct led_classdev *led_cdev); \
- static void object##_led_update(struct work_struct *ignored); \
- static int object##_led_wk; \
- static DECLARE_WORK(object##_led_work, object##_led_update); \
- static struct led_classdev object##_led = { \
- .name = "asus::" ledname, \
- .brightness_set = object##_led_set, \
- .brightness_get = object##_led_get, \
- .max_brightness = max \
- }
+ struct rfkill *gps_rfkill;
-ASUS_LED(mled, "mail", 1);
-ASUS_LED(tled, "touchpad", 1);
-ASUS_LED(rled, "record", 1);
-ASUS_LED(pled, "phone", 1);
-ASUS_LED(gled, "gaming", 1);
-ASUS_LED(kled, "kbd_backlight", 3);
-
-struct key_entry {
- char type;
- u8 code;
- u16 keycode;
+ acpi_handle handle; /* the handle of the hotk device */
+ u32 ledd_status; /* status of the LED display */
+ u8 light_level; /* light sensor level */
+ u8 light_switch; /* light sensor switch value */
+ u16 event_count[128]; /* count for each event TODO make this better */
+ u16 *keycode_map;
};
-enum { KE_KEY, KE_END };
-
-static struct key_entry asus_keymap[] = {
- {KE_KEY, 0x02, KEY_SCREENLOCK},
- {KE_KEY, 0x05, KEY_WLAN},
- {KE_KEY, 0x08, KEY_F13},
- {KE_KEY, 0x17, KEY_ZOOM},
- {KE_KEY, 0x1f, KEY_BATTERY},
- {KE_KEY, 0x30, KEY_VOLUMEUP},
- {KE_KEY, 0x31, KEY_VOLUMEDOWN},
- {KE_KEY, 0x32, KEY_MUTE},
- {KE_KEY, 0x33, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x34, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x40, KEY_PREVIOUSSONG},
- {KE_KEY, 0x41, KEY_NEXTSONG},
- {KE_KEY, 0x43, KEY_STOPCD},
- {KE_KEY, 0x45, KEY_PLAYPAUSE},
- {KE_KEY, 0x4c, KEY_MEDIA},
- {KE_KEY, 0x50, KEY_EMAIL},
- {KE_KEY, 0x51, KEY_WWW},
- {KE_KEY, 0x55, KEY_CALC},
- {KE_KEY, 0x5C, KEY_SCREENLOCK}, /* Screenlock */
- {KE_KEY, 0x5D, KEY_WLAN},
- {KE_KEY, 0x5E, KEY_WLAN},
- {KE_KEY, 0x5F, KEY_WLAN},
- {KE_KEY, 0x60, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x61, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x62, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x63, KEY_SWITCHVIDEOMODE},
- {KE_KEY, 0x6B, KEY_F13}, /* Lock Touchpad */
- {KE_KEY, 0x82, KEY_CAMERA},
- {KE_KEY, 0x88, KEY_WLAN },
- {KE_KEY, 0x8A, KEY_PROG1},
- {KE_KEY, 0x95, KEY_MEDIA},
- {KE_KEY, 0x99, KEY_PHONE},
- {KE_KEY, 0xc4, KEY_KBDILLUMUP},
- {KE_KEY, 0xc5, KEY_KBDILLUMDOWN},
+static const struct key_entry asus_keymap[] = {
+ /* Lenovo SL Specific keycodes */
+ {KE_KEY, 0x02, { KEY_SCREENLOCK } },
+ {KE_KEY, 0x05, { KEY_WLAN } },
+ {KE_KEY, 0x08, { KEY_F13 } },
+ {KE_KEY, 0x17, { KEY_ZOOM } },
+ {KE_KEY, 0x1f, { KEY_BATTERY } },
+ /* End of Lenovo SL Specific keycodes */
+ {KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ {KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ {KE_KEY, 0x32, { KEY_MUTE } },
+ {KE_KEY, 0x33, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x34, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x40, { KEY_PREVIOUSSONG } },
+ {KE_KEY, 0x41, { KEY_NEXTSONG } },
+ {KE_KEY, 0x43, { KEY_STOPCD } },
+ {KE_KEY, 0x45, { KEY_PLAYPAUSE } },
+ {KE_KEY, 0x4c, { KEY_MEDIA } },
+ {KE_KEY, 0x50, { KEY_EMAIL } },
+ {KE_KEY, 0x51, { KEY_WWW } },
+ {KE_KEY, 0x55, { KEY_CALC } },
+ {KE_KEY, 0x5C, { KEY_SCREENLOCK } }, /* Screenlock */
+ {KE_KEY, 0x5D, { KEY_WLAN } },
+ {KE_KEY, 0x5E, { KEY_WLAN } },
+ {KE_KEY, 0x5F, { KEY_WLAN } },
+ {KE_KEY, 0x60, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x61, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x62, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x63, { KEY_SWITCHVIDEOMODE } },
+ {KE_KEY, 0x6B, { KEY_F13 } }, /* Lock Touchpad */
+ {KE_KEY, 0x7E, { KEY_BLUETOOTH } },
+ {KE_KEY, 0x7D, { KEY_BLUETOOTH } },
+ {KE_KEY, 0x82, { KEY_CAMERA } },
+ {KE_KEY, 0x88, { KEY_WLAN } },
+ {KE_KEY, 0x8A, { KEY_PROG1 } },
+ {KE_KEY, 0x95, { KEY_MEDIA } },
+ {KE_KEY, 0x99, { KEY_PHONE } },
+ {KE_KEY, 0xc4, { KEY_KBDILLUMUP } },
+ {KE_KEY, 0xc5, { KEY_KBDILLUMDOWN } },
{KE_END, 0},
};
+
/*
* This function evaluates an ACPI method, given an int as parameter, the
* method is searched within the scope of the handle, can be NULL. The output
@@ -339,8 +288,8 @@ static struct key_entry asus_keymap[] = {
*
* returns 0 if write is successful, -1 else.
*/
-static int write_acpi_int(acpi_handle handle, const char *method, int val,
- struct acpi_buffer *output)
+static int write_acpi_int_ret(acpi_handle handle, const char *method, int val,
+ struct acpi_buffer *output)
{
struct acpi_object_list params; /* list of input parameters (an int) */
union acpi_object in_obj; /* the only param we use */
@@ -361,102 +310,82 @@ static int write_acpi_int(acpi_handle handle, const char *method, int val,
return -1;
}
-static int read_wireless_status(int mask)
+static int write_acpi_int(acpi_handle handle, const char *method, int val)
{
- unsigned long long status;
- acpi_status rv = AE_OK;
+ return write_acpi_int_ret(handle, method, val, NULL);
+}
+
+static int acpi_check_handle(acpi_handle handle, const char *method,
+ acpi_handle *ret)
+{
+ acpi_status status;
- if (!wireless_status_handle)
- return (hotk->status & mask) ? 1 : 0;
+ if (method == NULL)
+ return -ENODEV;
- rv = acpi_evaluate_integer(wireless_status_handle, NULL, NULL, &status);
- if (ACPI_FAILURE(rv))
- pr_warning("Error reading Wireless status\n");
- else
- return (status & mask) ? 1 : 0;
+ if (ret)
+ status = acpi_get_handle(handle, (char *)method,
+ ret);
+ else {
+ acpi_handle dummy;
- return (hotk->status & mask) ? 1 : 0;
+ status = acpi_get_handle(handle, (char *)method,
+ &dummy);
+ }
+
+ if (status != AE_OK) {
+ if (ret)
+ pr_warning("Error finding %s\n", method);
+ return -ENODEV;
+ }
+ return 0;
}
-static int read_gps_status(void)
+/* Generic LED function */
+static int asus_led_set(struct asus_laptop *asus, const char *method,
+ int value)
{
- unsigned long long status;
- acpi_status rv = AE_OK;
-
- rv = acpi_evaluate_integer(gps_status_handle, NULL, NULL, &status);
- if (ACPI_FAILURE(rv))
- pr_warning("Error reading GPS status\n");
+ if (!strcmp(method, METHOD_MLED))
+ value = !value;
+ else if (!strcmp(method, METHOD_GLED))
+ value = !value + 1;
else
- return status ? 1 : 0;
+ value = !!value;
- return (hotk->status & GPS_ON) ? 1 : 0;
+ return write_acpi_int(asus->handle, method, value);
}
-/* Generic LED functions */
-static int read_status(int mask)
+/*
+ * LEDs
+ */
+/* /sys/class/led handlers */
+static void asus_led_cdev_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
{
- /* There is a special method for both wireless devices */
- if (mask == BT_ON || mask == WL_ON)
- return read_wireless_status(mask);
- else if (mask == GPS_ON)
- return read_gps_status();
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
- return (hotk->status & mask) ? 1 : 0;
+ led->wk = !!value;
+ queue_work(asus->led_workqueue, &led->work);
}
-static void write_status(acpi_handle handle, int out, int mask)
+static void asus_led_cdev_update(struct work_struct *work)
{
- hotk->status = (out) ? (hotk->status | mask) : (hotk->status & ~mask);
-
- switch (mask) {
- case MLED_ON:
- out = !(out & 0x1);
- break;
- case GLED_ON:
- out = (out & 0x1) + 1;
- break;
- case GPS_ON:
- handle = (out) ? gps_on_handle : gps_off_handle;
- out = 0x02;
- break;
- default:
- out &= 0x1;
- break;
- }
+ struct asus_led *led = container_of(work, struct asus_led, work);
+ struct asus_laptop *asus = led->asus;
- if (write_acpi_int(handle, NULL, out, NULL))
- pr_warning(" write failed %x\n", mask);
+ asus_led_set(asus, led->method, led->wk);
}
-/* /sys/class/led handlers */
-#define ASUS_LED_HANDLER(object, mask) \
- static void object##_led_set(struct led_classdev *led_cdev, \
- enum led_brightness value) \
- { \
- object##_led_wk = (value > 0) ? 1 : 0; \
- queue_work(led_workqueue, &object##_led_work); \
- } \
- static void object##_led_update(struct work_struct *ignored) \
- { \
- int value = object##_led_wk; \
- write_status(object##_set_handle, value, (mask)); \
- } \
- static enum led_brightness object##_led_get( \
- struct led_classdev *led_cdev) \
- { \
- return led_cdev->brightness; \
- }
-
-ASUS_LED_HANDLER(mled, MLED_ON);
-ASUS_LED_HANDLER(pled, PLED_ON);
-ASUS_LED_HANDLER(rled, RLED_ON);
-ASUS_LED_HANDLER(tled, TLED_ON);
-ASUS_LED_HANDLER(gled, GLED_ON);
+static enum led_brightness asus_led_cdev_get(struct led_classdev *led_cdev)
+{
+ return led_cdev->brightness;
+}
/*
- * Keyboard backlight
+ * Keyboard backlight (also a LED)
*/
-static int get_kled_lvl(void)
+static int asus_kled_lvl(struct asus_laptop *asus)
{
unsigned long long kblv;
struct acpi_object_list params;
@@ -468,75 +397,183 @@ static int get_kled_lvl(void)
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = 2;
- rv = acpi_evaluate_integer(kled_get_handle, NULL, &params, &kblv);
+ rv = acpi_evaluate_integer(asus->handle, METHOD_KBD_LIGHT_GET,
+ &params, &kblv);
if (ACPI_FAILURE(rv)) {
pr_warning("Error reading kled level\n");
- return 0;
+ return -ENODEV;
}
return kblv;
}
-static int set_kled_lvl(int kblv)
+static int asus_kled_set(struct asus_laptop *asus, int kblv)
{
if (kblv > 0)
kblv = (1 << 7) | (kblv & 0x7F);
else
kblv = 0;
- if (write_acpi_int(kled_set_handle, NULL, kblv, NULL)) {
+ if (write_acpi_int(asus->handle, METHOD_KBD_LIGHT_SET, kblv)) {
pr_warning("Keyboard LED display write failed\n");
return -EINVAL;
}
return 0;
}
-static void kled_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
+static void asus_kled_cdev_set(struct led_classdev *led_cdev,
+ enum led_brightness value)
{
- kled_led_wk = value;
- queue_work(led_workqueue, &kled_led_work);
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ led->wk = value;
+ queue_work(asus->led_workqueue, &led->work);
}
-static void kled_led_update(struct work_struct *ignored)
+static void asus_kled_cdev_update(struct work_struct *work)
{
- set_kled_lvl(kled_led_wk);
+ struct asus_led *led = container_of(work, struct asus_led, work);
+ struct asus_laptop *asus = led->asus;
+
+ asus_kled_set(asus, led->wk);
}
-static enum led_brightness kled_led_get(struct led_classdev *led_cdev)
+static enum led_brightness asus_kled_cdev_get(struct led_classdev *led_cdev)
{
- return get_kled_lvl();
+ struct asus_led *led = container_of(led_cdev, struct asus_led, led);
+ struct asus_laptop *asus = led->asus;
+
+ return asus_kled_lvl(asus);
}
-static int get_lcd_state(void)
+static void asus_led_exit(struct asus_laptop *asus)
{
- return read_status(LCD_ON);
+ if (asus->mled.led.dev)
+ led_classdev_unregister(&asus->mled.led);
+ if (asus->tled.led.dev)
+ led_classdev_unregister(&asus->tled.led);
+ if (asus->pled.led.dev)
+ led_classdev_unregister(&asus->pled.led);
+ if (asus->rled.led.dev)
+ led_classdev_unregister(&asus->rled.led);
+ if (asus->gled.led.dev)
+ led_classdev_unregister(&asus->gled.led);
+ if (asus->kled.led.dev)
+ led_classdev_unregister(&asus->kled.led);
+ if (asus->led_workqueue) {
+ destroy_workqueue(asus->led_workqueue);
+ asus->led_workqueue = NULL;
+ }
}
-static int set_lcd_state(int value)
+/* Ugly macro, need to fix that later */
+static int asus_led_register(struct asus_laptop *asus,
+ struct asus_led *led,
+ const char *name, const char *method)
+{
+ struct led_classdev *led_cdev = &led->led;
+
+ if (!method || acpi_check_handle(asus->handle, method, NULL))
+ return 0; /* Led not present */
+
+ led->asus = asus;
+ led->method = method;
+
+ INIT_WORK(&led->work, asus_led_cdev_update);
+ led_cdev->name = name;
+ led_cdev->brightness_set = asus_led_cdev_set;
+ led_cdev->brightness_get = asus_led_cdev_get;
+ led_cdev->max_brightness = 1;
+ return led_classdev_register(&asus->platform_device->dev, led_cdev);
+}
+
+static int asus_led_init(struct asus_laptop *asus)
+{
+ int r;
+
+ /*
+ * Functions that actually update the LED's are called from a
+ * workqueue. By doing this as separate work rather than when the LED
+ * subsystem asks, we avoid messing with the Asus ACPI stuff during a
+ * potentially bad time, such as a timer interrupt.
+ */
+ asus->led_workqueue = create_singlethread_workqueue("led_workqueue");
+ if (!asus->led_workqueue)
+ return -ENOMEM;
+
+ r = asus_led_register(asus, &asus->mled, "asus::mail", METHOD_MLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->tled, "asus::touchpad", METHOD_TLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->rled, "asus::record", METHOD_RLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->pled, "asus::phone", METHOD_PLED);
+ if (r)
+ goto error;
+ r = asus_led_register(asus, &asus->gled, "asus::gaming", METHOD_GLED);
+ if (r)
+ goto error;
+ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_GET, NULL)) {
+ struct asus_led *led = &asus->kled;
+ struct led_classdev *cdev = &led->led;
+
+ led->asus = asus;
+
+ INIT_WORK(&led->work, asus_kled_cdev_update);
+ cdev->name = "asus::kbd_backlight";
+ cdev->brightness_set = asus_kled_cdev_set;
+ cdev->brightness_get = asus_kled_cdev_get;
+ cdev->max_brightness = 3;
+ r = led_classdev_register(&asus->platform_device->dev, cdev);
+ }
+error:
+ if (r)
+ asus_led_exit(asus);
+ return r;
+}
+
+/*
+ * Backlight device
+ */
+static int asus_lcd_status(struct asus_laptop *asus)
+{
+ return asus->lcd_state;
+}
+
+static int asus_lcd_set(struct asus_laptop *asus, int value)
{
int lcd = 0;
acpi_status status = 0;
- lcd = value ? 1 : 0;
+ lcd = !!value;
- if (lcd == get_lcd_state())
+ if (lcd == asus_lcd_status(asus))
return 0;
- if (lcd_switch_handle) {
- status = acpi_evaluate_object(lcd_switch_handle,
- NULL, NULL, NULL);
+ if (!lcd_switch_handle)
+ return -ENODEV;
+
+ status = acpi_evaluate_object(lcd_switch_handle,
+ NULL, NULL, NULL);
- if (ACPI_FAILURE(status))
- pr_warning("Error switching LCD\n");
+ if (ACPI_FAILURE(status)) {
+ pr_warning("Error switching LCD\n");
+ return -ENODEV;
}
- write_status(NULL, lcd, LCD_ON);
+ asus->lcd_state = lcd;
return 0;
}
-static void lcd_blank(int blank)
+static void lcd_blank(struct asus_laptop *asus, int blank)
{
- struct backlight_device *bd = asus_backlight_device;
+ struct backlight_device *bd = asus->backlight_device;
+
+ asus->lcd_state = (blank == FB_BLANK_UNBLANK);
if (bd) {
bd->props.power = blank;
@@ -544,44 +581,91 @@ static void lcd_blank(int blank)
}
}
-static int read_brightness(struct backlight_device *bd)
+static int asus_read_brightness(struct backlight_device *bd)
{
+ struct asus_laptop *asus = bl_get_data(bd);
unsigned long long value;
acpi_status rv = AE_OK;
- rv = acpi_evaluate_integer(brightness_get_handle, NULL, NULL, &value);
+ rv = acpi_evaluate_integer(asus->handle, METHOD_BRIGHTNESS_GET,
+ NULL, &value);
if (ACPI_FAILURE(rv))
pr_warning("Error reading brightness\n");
return value;
}
-static int set_brightness(struct backlight_device *bd, int value)
+static int asus_set_brightness(struct backlight_device *bd, int value)
{
- int ret = 0;
-
- value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
- /* 0 <= value <= 15 */
+ struct asus_laptop *asus = bl_get_data(bd);
- if (write_acpi_int(brightness_set_handle, NULL, value, NULL)) {
+ if (write_acpi_int(asus->handle, METHOD_BRIGHTNESS_SET, value)) {
pr_warning("Error changing brightness\n");
- ret = -EIO;
+ return -EIO;
}
-
- return ret;
+ return 0;
}
static int update_bl_status(struct backlight_device *bd)
{
+ struct asus_laptop *asus = bl_get_data(bd);
int rv;
int value = bd->props.brightness;
- rv = set_brightness(bd, value);
+ rv = asus_set_brightness(bd, value);
if (rv)
return rv;
value = (bd->props.power == FB_BLANK_UNBLANK) ? 1 : 0;
- return set_lcd_state(value);
+ return asus_lcd_set(asus, value);
+}
+
+static struct backlight_ops asusbl_ops = {
+ .get_brightness = asus_read_brightness,
+ .update_status = update_bl_status,
+};
+
+static int asus_backlight_notify(struct asus_laptop *asus)
+{
+ struct backlight_device *bd = asus->backlight_device;
+ int old = bd->props.brightness;
+
+ backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
+
+ return old;
+}
+
+static int asus_backlight_init(struct asus_laptop *asus)
+{
+ struct backlight_device *bd;
+ struct device *dev = &asus->platform_device->dev;
+
+ if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) &&
+ lcd_switch_handle) {
+ bd = backlight_device_register(ASUS_LAPTOP_FILE, dev,
+ asus, &asusbl_ops);
+ if (IS_ERR(bd)) {
+ pr_err("Could not register asus backlight device\n");
+ asus->backlight_device = NULL;
+ return PTR_ERR(bd);
+ }
+
+ asus->backlight_device = bd;
+
+ bd->props.max_brightness = 15;
+ bd->props.power = FB_BLANK_UNBLANK;
+ bd->props.brightness = asus_read_brightness(bd);
+ backlight_update_status(bd);
+ }
+ return 0;
+}
+
+static void asus_backlight_exit(struct asus_laptop *asus)
+{
+ if (asus->backlight_device)
+ backlight_device_unregister(asus->backlight_device);
+ asus->backlight_device = NULL;
}
/*
@@ -596,25 +680,26 @@ static int update_bl_status(struct backlight_device *bd)
static ssize_t show_infos(struct device *dev,
struct device_attribute *attr, char *page)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int len = 0;
unsigned long long temp;
char buf[16]; /* enough for all info */
acpi_status rv = AE_OK;
/*
- * We use the easy way, we don't care of off and count, so we don't set eof
- * to 1
+ * We use the easy way, we don't care about off and count,
+ * so we don't set eof to 1
*/
- len += sprintf(page, ASUS_HOTK_NAME " " ASUS_LAPTOP_VERSION "\n");
- len += sprintf(page + len, "Model reference : %s\n", hotk->name);
+ len += sprintf(page, ASUS_LAPTOP_NAME " " ASUS_LAPTOP_VERSION "\n");
+ len += sprintf(page + len, "Model reference : %s\n", asus->name);
/*
* The SFUN method probably allows the original driver to get the list
* of features supported by a given model. For now, 0x0100 or 0x0800
* bit signifies that the laptop is equipped with a Wi-Fi MiniPCI card.
* The significance of others is yet to be found.
*/
- rv = acpi_evaluate_integer(hotk->handle, "SFUN", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "SFUN", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "SFUN value : %#x\n",
(uint) temp);
@@ -624,7 +709,7 @@ static ssize_t show_infos(struct device *dev,
* The significance of others is yet to be found.
* If we don't find the method, we assume the device are present.
*/
- rv = acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "HRWS", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "HRWS value : %#x\n",
(uint) temp);
@@ -635,26 +720,26 @@ static ssize_t show_infos(struct device *dev,
* Note: since not all the laptops provide this method, errors are
* silently ignored.
*/
- rv = acpi_evaluate_integer(hotk->handle, "ASYM", NULL, &temp);
+ rv = acpi_evaluate_integer(asus->handle, "ASYM", NULL, &temp);
if (!ACPI_FAILURE(rv))
len += sprintf(page + len, "ASYM value : %#x\n",
(uint) temp);
- if (asus_info) {
- snprintf(buf, 16, "%d", asus_info->length);
+ if (asus->dsdt_info) {
+ snprintf(buf, 16, "%d", asus->dsdt_info->length);
len += sprintf(page + len, "DSDT length : %s\n", buf);
- snprintf(buf, 16, "%d", asus_info->checksum);
+ snprintf(buf, 16, "%d", asus->dsdt_info->checksum);
len += sprintf(page + len, "DSDT checksum : %s\n", buf);
- snprintf(buf, 16, "%d", asus_info->revision);
+ snprintf(buf, 16, "%d", asus->dsdt_info->revision);
len += sprintf(page + len, "DSDT revision : %s\n", buf);
- snprintf(buf, 7, "%s", asus_info->oem_id);
+ snprintf(buf, 7, "%s", asus->dsdt_info->oem_id);
len += sprintf(page + len, "OEM id : %s\n", buf);
- snprintf(buf, 9, "%s", asus_info->oem_table_id);
+ snprintf(buf, 9, "%s", asus->dsdt_info->oem_table_id);
len += sprintf(page + len, "OEM table id : %s\n", buf);
- snprintf(buf, 16, "%x", asus_info->oem_revision);
+ snprintf(buf, 16, "%x", asus->dsdt_info->oem_revision);
len += sprintf(page + len, "OEM revision : 0x%s\n", buf);
- snprintf(buf, 5, "%s", asus_info->asl_compiler_id);
+ snprintf(buf, 5, "%s", asus->dsdt_info->asl_compiler_id);
len += sprintf(page + len, "ASL comp vendor id : %s\n", buf);
- snprintf(buf, 16, "%x", asus_info->asl_compiler_revision);
+ snprintf(buf, 16, "%x", asus->dsdt_info->asl_compiler_revision);
len += sprintf(page + len, "ASL comp revision : 0x%s\n", buf);
}
@@ -672,8 +757,9 @@ static int parse_arg(const char *buf, unsigned long count, int *val)
return count;
}
-static ssize_t store_status(const char *buf, size_t count,
- acpi_handle handle, int mask)
+static ssize_t sysfs_acpi_set(struct asus_laptop *asus,
+ const char *buf, size_t count,
+ const char *method)
{
int rv, value;
int out = 0;
@@ -682,8 +768,8 @@ static ssize_t store_status(const char *buf, size_t count,
if (rv > 0)
out = value ? 1 : 0;
- write_status(handle, out, mask);
-
+ if (write_acpi_int(asus->handle, method, value))
+ return -ENODEV;
return rv;
}
@@ -693,67 +779,116 @@ static ssize_t store_status(const char *buf, size_t count,
static ssize_t show_ledd(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "0x%08x\n", hotk->ledd_status);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%08x\n", asus->ledd_status);
}
static ssize_t store_ledd(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0) {
- if (write_acpi_int(ledd_set_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_LEDD, value))
pr_warning("LED display write failed\n");
else
- hotk->ledd_status = (u32) value;
+ asus->ledd_status = (u32) value;
}
return rv;
}
/*
+ * Wireless
+ */
+static int asus_wireless_status(struct asus_laptop *asus, int mask)
+{
+ unsigned long long status;
+ acpi_status rv = AE_OK;
+
+ if (!asus->have_rsts)
+ return (asus->wireless_status & mask) ? 1 : 0;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_WL_STATUS,
+ NULL, &status);
+ if (ACPI_FAILURE(rv)) {
+ pr_warning("Error reading Wireless status\n");
+ return -EINVAL;
+ }
+ return !!(status & mask);
+}
+
+/*
* WLAN
*/
+static int asus_wlan_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_WLAN, !!status)) {
+ pr_warning("Error setting wlan status to %d", status);
+ return -EIO;
+ }
+ return 0;
+}
+
static ssize_t show_wlan(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_status(WL_ON));
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, WL_RSTS));
}
static ssize_t store_wlan(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- return store_status(buf, count, wl_switch_handle, WL_ON);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_WLAN);
}
/*
* Bluetooth
*/
+static int asus_bluetooth_set(struct asus_laptop *asus, int status)
+{
+ if (write_acpi_int(asus->handle, METHOD_BLUETOOTH, !!status)) {
+ pr_warning("Error setting bluetooth status to %d", status);
+ return -EIO;
+ }
+ return 0;
+}
+
static ssize_t show_bluetooth(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_status(BT_ON));
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_wireless_status(asus, BT_RSTS));
}
static ssize_t store_bluetooth(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
{
- return store_status(buf, count, bt_switch_handle, BT_ON);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sysfs_acpi_set(asus, buf, count, METHOD_BLUETOOTH);
}
/*
* Display
*/
-static void set_display(int value)
+static void asus_set_display(struct asus_laptop *asus, int value)
{
/* no sanity check needed for now */
- if (write_acpi_int(display_set_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_SWITCH_DISPLAY, value))
pr_warning("Error setting display\n");
return;
}
-static int read_display(void)
+static int read_display(struct asus_laptop *asus)
{
unsigned long long value = 0;
acpi_status rv = AE_OK;
@@ -769,7 +904,7 @@ static int read_display(void)
pr_warning("Error reading display status\n");
}
- value &= 0x0F; /* needed for some models, shouldn't hurt others */
+ value &= 0x0F; /* needed for some models, shouldn't hurt others */
return value;
}
@@ -781,7 +916,11 @@ static int read_display(void)
static ssize_t show_disp(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_display());
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ if (!display_get_handle)
+ return -ENODEV;
+ return sprintf(buf, "%d\n", read_display(asus));
}
/*
@@ -794,65 +933,72 @@ static ssize_t show_disp(struct device *dev,
static ssize_t store_disp(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_display(value);
+ asus_set_display(asus, value);
return rv;
}
/*
* Light Sens
*/
-static void set_light_sens_switch(int value)
+static void asus_als_switch(struct asus_laptop *asus, int value)
{
- if (write_acpi_int(ls_switch_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_ALS_CONTROL, value))
pr_warning("Error setting light sensor switch\n");
- hotk->light_switch = value;
+ asus->light_switch = value;
}
static ssize_t show_lssw(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", hotk->light_switch);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus->light_switch);
}
static ssize_t store_lssw(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0)
- set_light_sens_switch(value ? 1 : 0);
+ asus_als_switch(asus, value ? 1 : 0);
return rv;
}
-static void set_light_sens_level(int value)
+static void asus_als_level(struct asus_laptop *asus, int value)
{
- if (write_acpi_int(ls_level_handle, NULL, value, NULL))
+ if (write_acpi_int(asus->handle, METHOD_ALS_LEVEL, value))
pr_warning("Error setting light sensor level\n");
- hotk->light_level = value;
+ asus->light_level = value;
}
static ssize_t show_lslvl(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", hotk->light_level);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus->light_level);
}
static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
+ struct asus_laptop *asus = dev_get_drvdata(dev);
int rv, value;
rv = parse_arg(buf, count, &value);
if (rv > 0) {
value = (0 < value) ? ((15 < value) ? 15 : value) : 0;
/* 0 <= value <= 15 */
- set_light_sens_level(value);
+ asus_als_level(asus, value);
}
return rv;
@@ -861,197 +1007,309 @@ static ssize_t store_lslvl(struct device *dev, struct device_attribute *attr,
/*
* GPS
*/
+static int asus_gps_status(struct asus_laptop *asus)
+{
+ unsigned long long status;
+ acpi_status rv = AE_OK;
+
+ rv = acpi_evaluate_integer(asus->handle, METHOD_GPS_STATUS,
+ NULL, &status);
+ if (ACPI_FAILURE(rv)) {
+ pr_warning("Error reading GPS status\n");
+ return -ENODEV;
+ }
+ return !!status;
+}
+
+static int asus_gps_switch(struct asus_laptop *asus, int status)
+{
+ const char *meth = status ? METHOD_GPS_ON : METHOD_GPS_OFF;
+
+ if (write_acpi_int(asus->handle, meth, 0x02))
+ return -ENODEV;
+ return 0;
+}
+
static ssize_t show_gps(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", read_status(GPS_ON));
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%d\n", asus_gps_status(asus));
}
static ssize_t store_gps(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- return store_status(buf, count, NULL, GPS_ON);
+ struct asus_laptop *asus = dev_get_drvdata(dev);
+ int rv, value;
+ int ret;
+
+ rv = parse_arg(buf, count, &value);
+ if (rv <= 0)
+ return -EINVAL;
+ ret = asus_gps_switch(asus, !!value);
+ if (ret)
+ return ret;
+ rfkill_set_sw_state(asus->gps_rfkill, !value);
+ return rv;
}
/*
- * Hotkey functions
+ * rfkill
*/
-static struct key_entry *asus_get_entry_by_scancode(int code)
+static int asus_gps_rfkill_set(void *data, bool blocked)
{
- struct key_entry *key;
-
- for (key = asus_keymap; key->type != KE_END; key++)
- if (code == key->code)
- return key;
+ acpi_handle handle = data;
- return NULL;
+ return asus_gps_switch(handle, !blocked);
}
-static struct key_entry *asus_get_entry_by_keycode(int code)
-{
- struct key_entry *key;
-
- for (key = asus_keymap; key->type != KE_END; key++)
- if (code == key->keycode && key->type == KE_KEY)
- return key;
+static const struct rfkill_ops asus_gps_rfkill_ops = {
+ .set_block = asus_gps_rfkill_set,
+};
- return NULL;
+static void asus_rfkill_exit(struct asus_laptop *asus)
+{
+ if (asus->gps_rfkill) {
+ rfkill_unregister(asus->gps_rfkill);
+ rfkill_destroy(asus->gps_rfkill);
+ asus->gps_rfkill = NULL;
+ }
}
-static int asus_getkeycode(struct input_dev *dev, int scancode, int *keycode)
+static int asus_rfkill_init(struct asus_laptop *asus)
{
- struct key_entry *key = asus_get_entry_by_scancode(scancode);
+ int result;
- if (key && key->type == KE_KEY) {
- *keycode = key->keycode;
+ if (acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) ||
+ acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL))
return 0;
+
+ asus->gps_rfkill = rfkill_alloc("asus-gps", &asus->platform_device->dev,
+ RFKILL_TYPE_GPS,
+ &asus_gps_rfkill_ops, NULL);
+ if (!asus->gps_rfkill)
+ return -EINVAL;
+
+ result = rfkill_register(asus->gps_rfkill);
+ if (result) {
+ rfkill_destroy(asus->gps_rfkill);
+ asus->gps_rfkill = NULL;
}
- return -EINVAL;
+ return result;
}
-static int asus_setkeycode(struct input_dev *dev, int scancode, int keycode)
+/*
+ * Input device (i.e. hotkeys)
+ */
+static void asus_input_notify(struct asus_laptop *asus, int event)
{
- struct key_entry *key;
- int old_keycode;
+ if (asus->inputdev)
+ sparse_keymap_report_event(asus->inputdev, event, 1, true);
+}
- if (keycode < 0 || keycode > KEY_MAX)
- return -EINVAL;
+static int asus_input_init(struct asus_laptop *asus)
+{
+ struct input_dev *input;
+ int error;
- key = asus_get_entry_by_scancode(scancode);
- if (key && key->type == KE_KEY) {
- old_keycode = key->keycode;
- key->keycode = keycode;
- set_bit(keycode, dev->keybit);
- if (!asus_get_entry_by_keycode(old_keycode))
- clear_bit(old_keycode, dev->keybit);
+ input = input_allocate_device();
+ if (!input) {
+ pr_info("Unable to allocate input device\n");
return 0;
}
+ input->name = "Asus Laptop extra buttons";
+ input->phys = ASUS_LAPTOP_FILE "/input0";
+ input->id.bustype = BUS_HOST;
+ input->dev.parent = &asus->platform_device->dev;
+ input_set_drvdata(input, asus);
+
+ error = sparse_keymap_setup(input, asus_keymap, NULL);
+ if (error) {
+ pr_err("Unable to setup input device keymap\n");
+ goto err_keymap;
+ }
+ error = input_register_device(input);
+ if (error) {
+ pr_info("Unable to register input device\n");
+ goto err_device;
+ }
+
+ asus->inputdev = input;
+ return 0;
- return -EINVAL;
+err_keymap:
+ sparse_keymap_free(input);
+err_device:
+ input_free_device(input);
+ return error;
}
-static void asus_hotk_notify(struct acpi_device *device, u32 event)
+static void asus_input_exit(struct asus_laptop *asus)
{
- static struct key_entry *key;
- u16 count;
+ if (asus->inputdev) {
+ sparse_keymap_free(asus->inputdev);
+ input_unregister_device(asus->inputdev);
+ }
+}
- /* TODO Find a better way to handle events count. */
- if (!hotk)
- return;
+/*
+ * ACPI driver
+ */
+static void asus_acpi_notify(struct acpi_device *device, u32 event)
+{
+ struct asus_laptop *asus = acpi_driver_data(device);
+ u16 count;
/*
* We need to tell the backlight device when the backlight power is
* switched
*/
- if (event == ATKD_LCD_ON) {
- write_status(NULL, 1, LCD_ON);
- lcd_blank(FB_BLANK_UNBLANK);
- } else if (event == ATKD_LCD_OFF) {
- write_status(NULL, 0, LCD_ON);
- lcd_blank(FB_BLANK_POWERDOWN);
- }
+ if (event == ATKD_LCD_ON)
+ lcd_blank(asus, FB_BLANK_UNBLANK);
+ else if (event == ATKD_LCD_OFF)
+ lcd_blank(asus, FB_BLANK_POWERDOWN);
- count = hotk->event_count[event % 128]++;
- acpi_bus_generate_proc_event(hotk->device, event, count);
- acpi_bus_generate_netlink_event(hotk->device->pnp.device_class,
- dev_name(&hotk->device->dev), event,
+ /* TODO Find a better way to handle events count. */
+ count = asus->event_count[event % 128]++;
+ acpi_bus_generate_proc_event(asus->device, event, count);
+ acpi_bus_generate_netlink_event(asus->device->pnp.device_class,
+ dev_name(&asus->device->dev), event,
count);
- if (hotk->inputdev) {
- key = asus_get_entry_by_scancode(event);
- if (!key)
- return ;
-
- switch (key->type) {
- case KE_KEY:
- input_report_key(hotk->inputdev, key->keycode, 1);
- input_sync(hotk->inputdev);
- input_report_key(hotk->inputdev, key->keycode, 0);
- input_sync(hotk->inputdev);
- break;
+ /* Brightness events are special */
+ if (event >= ATKD_BR_MIN && event <= ATKD_BR_MAX) {
+
+ /* Ignore them completely if the acpi video driver is used */
+ if (asus->backlight_device != NULL) {
+ /* Update the backlight device. */
+ asus_backlight_notify(asus);
}
+ return ;
}
+ asus_input_notify(asus, event);
}
-#define ASUS_CREATE_DEVICE_ATTR(_name) \
- struct device_attribute dev_attr_##_name = { \
- .attr = { \
- .name = __stringify(_name), \
- .mode = 0 }, \
- .show = NULL, \
- .store = NULL, \
+static DEVICE_ATTR(infos, S_IRUGO, show_infos, NULL);
+static DEVICE_ATTR(wlan, S_IRUGO | S_IWUSR, show_wlan, store_wlan);
+static DEVICE_ATTR(bluetooth, S_IRUGO | S_IWUSR, show_bluetooth,
+ store_bluetooth);
+static DEVICE_ATTR(display, S_IRUGO | S_IWUSR, show_disp, store_disp);
+static DEVICE_ATTR(ledd, S_IRUGO | S_IWUSR, show_ledd, store_ledd);
+static DEVICE_ATTR(ls_level, S_IRUGO | S_IWUSR, show_lslvl, store_lslvl);
+static DEVICE_ATTR(ls_switch, S_IRUGO | S_IWUSR, show_lssw, store_lssw);
+static DEVICE_ATTR(gps, S_IRUGO | S_IWUSR, show_gps, store_gps);
+
+static void asus_sysfs_exit(struct asus_laptop *asus)
+{
+ struct platform_device *device = asus->platform_device;
+
+ device_remove_file(&device->dev, &dev_attr_infos);
+ device_remove_file(&device->dev, &dev_attr_wlan);
+ device_remove_file(&device->dev, &dev_attr_bluetooth);
+ device_remove_file(&device->dev, &dev_attr_display);
+ device_remove_file(&device->dev, &dev_attr_ledd);
+ device_remove_file(&device->dev, &dev_attr_ls_switch);
+ device_remove_file(&device->dev, &dev_attr_ls_level);
+ device_remove_file(&device->dev, &dev_attr_gps);
+}
+
+static int asus_sysfs_init(struct asus_laptop *asus)
+{
+ struct platform_device *device = asus->platform_device;
+ int err;
+
+ err = device_create_file(&device->dev, &dev_attr_infos);
+ if (err)
+ return err;
+
+ if (!acpi_check_handle(asus->handle, METHOD_WLAN, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_wlan);
+ if (err)
+ return err;
}
-#define ASUS_SET_DEVICE_ATTR(_name, _mode, _show, _store) \
- do { \
- dev_attr_##_name.attr.mode = _mode; \
- dev_attr_##_name.show = _show; \
- dev_attr_##_name.store = _store; \
- } while(0)
-
-static ASUS_CREATE_DEVICE_ATTR(infos);
-static ASUS_CREATE_DEVICE_ATTR(wlan);
-static ASUS_CREATE_DEVICE_ATTR(bluetooth);
-static ASUS_CREATE_DEVICE_ATTR(display);
-static ASUS_CREATE_DEVICE_ATTR(ledd);
-static ASUS_CREATE_DEVICE_ATTR(ls_switch);
-static ASUS_CREATE_DEVICE_ATTR(ls_level);
-static ASUS_CREATE_DEVICE_ATTR(gps);
-
-static struct attribute *asuspf_attributes[] = {
- &dev_attr_infos.attr,
- &dev_attr_wlan.attr,
- &dev_attr_bluetooth.attr,
- &dev_attr_display.attr,
- &dev_attr_ledd.attr,
- &dev_attr_ls_switch.attr,
- &dev_attr_ls_level.attr,
- &dev_attr_gps.attr,
- NULL
-};
+ if (!acpi_check_handle(asus->handle, METHOD_BLUETOOTH, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_bluetooth);
+ if (err)
+ return err;
+ }
-static struct attribute_group asuspf_attribute_group = {
- .attrs = asuspf_attributes
-};
+ if (!acpi_check_handle(asus->handle, METHOD_SWITCH_DISPLAY, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_display);
+ if (err)
+ return err;
+ }
-static struct platform_driver asuspf_driver = {
- .driver = {
- .name = ASUS_HOTK_FILE,
- .owner = THIS_MODULE,
- }
-};
+ if (!acpi_check_handle(asus->handle, METHOD_LEDD, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_ledd);
+ if (err)
+ return err;
+ }
-static struct platform_device *asuspf_device;
+ if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_ls_switch);
+ if (err)
+ return err;
+ err = device_create_file(&device->dev, &dev_attr_ls_level);
+ if (err)
+ return err;
+ }
-static void asus_hotk_add_fs(void)
-{
- ASUS_SET_DEVICE_ATTR(infos, 0444, show_infos, NULL);
+ if (!acpi_check_handle(asus->handle, METHOD_GPS_ON, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_GPS_OFF, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_GPS_STATUS, NULL)) {
+ err = device_create_file(&device->dev, &dev_attr_gps);
+ if (err)
+ return err;
+ }
- if (wl_switch_handle)
- ASUS_SET_DEVICE_ATTR(wlan, 0644, show_wlan, store_wlan);
+ return err;
+}
+
+static int asus_platform_init(struct asus_laptop *asus)
+{
+ int err;
- if (bt_switch_handle)
- ASUS_SET_DEVICE_ATTR(bluetooth, 0644,
- show_bluetooth, store_bluetooth);
+ asus->platform_device = platform_device_alloc(ASUS_LAPTOP_FILE, -1);
+ if (!asus->platform_device)
+ return -ENOMEM;
+ platform_set_drvdata(asus->platform_device, asus);
- if (display_set_handle && display_get_handle)
- ASUS_SET_DEVICE_ATTR(display, 0644, show_disp, store_disp);
- else if (display_set_handle)
- ASUS_SET_DEVICE_ATTR(display, 0200, NULL, store_disp);
+ err = platform_device_add(asus->platform_device);
+ if (err)
+ goto fail_platform_device;
- if (ledd_set_handle)
- ASUS_SET_DEVICE_ATTR(ledd, 0644, show_ledd, store_ledd);
+ err = asus_sysfs_init(asus);
+ if (err)
+ goto fail_sysfs;
+ return 0;
- if (ls_switch_handle && ls_level_handle) {
- ASUS_SET_DEVICE_ATTR(ls_level, 0644, show_lslvl, store_lslvl);
- ASUS_SET_DEVICE_ATTR(ls_switch, 0644, show_lssw, store_lssw);
- }
+fail_sysfs:
+ asus_sysfs_exit(asus);
+ platform_device_del(asus->platform_device);
+fail_platform_device:
+ platform_device_put(asus->platform_device);
+ return err;
+}
- if (gps_status_handle && gps_on_handle && gps_off_handle)
- ASUS_SET_DEVICE_ATTR(gps, 0644, show_gps, store_gps);
+static void asus_platform_exit(struct asus_laptop *asus)
+{
+ asus_sysfs_exit(asus);
+ platform_device_unregister(asus->platform_device);
}
+static struct platform_driver platform_driver = {
+ .driver = {
+ .name = ASUS_LAPTOP_FILE,
+ .owner = THIS_MODULE,
+ }
+};
+
static int asus_handle_init(char *name, acpi_handle * handle,
char **paths, int num_paths)
{
@@ -1073,10 +1331,11 @@ static int asus_handle_init(char *name, acpi_handle * handle,
ARRAY_SIZE(object##_paths))
/*
- * This function is used to initialize the hotk with right values. In this
- * method, we can make all the detection we want, and modify the hotk struct
+ * This function is used to initialize the context with the right values. In this
+ * method, we can make all the detection we want, and modify the asus_laptop
+ * struct
*/
-static int asus_hotk_get_info(void)
+static int asus_laptop_get_info(struct asus_laptop *asus)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *model = NULL;
@@ -1089,22 +1348,21 @@ static int asus_hotk_get_info(void)
* models, but late enough to allow acpi_bus_register_driver() to fail
* before doing anything ACPI-specific. Should we encounter a machine,
* which needs special handling (i.e. its hotkey device has a different
- * HID), this bit will be moved. A global variable asus_info contains
- * the DSDT header.
+ * HID), this bit will be moved.
*/
- status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus_info);
+ status = acpi_get_table(ACPI_SIG_DSDT, 1, &asus->dsdt_info);
if (ACPI_FAILURE(status))
pr_warning("Couldn't get the DSDT table header\n");
/* We have to write 0 on init this far for all ASUS models */
- if (write_acpi_int(hotk->handle, "INIT", 0, &buffer)) {
+ if (write_acpi_int_ret(asus->handle, "INIT", 0, &buffer)) {
pr_err("Hotkey initialization failed\n");
return -ENODEV;
}
/* This needs to be called for some laptops to init properly */
status =
- acpi_evaluate_integer(hotk->handle, "BSTS", NULL, &bsts_result);
+ acpi_evaluate_integer(asus->handle, "BSTS", NULL, &bsts_result);
if (ACPI_FAILURE(status))
pr_warning("Error calling BSTS\n");
else if (bsts_result)
@@ -1112,8 +1370,8 @@ static int asus_hotk_get_info(void)
(uint) bsts_result);
/* This too ... */
- write_acpi_int(hotk->handle, "CWAP", wapf, NULL);
-
+ if (write_acpi_int(asus->handle, "CWAP", wapf))
+ pr_err("Error calling CWAP(%d)\n", wapf);
/*
* Try to match the object returned by INIT to the specific model.
* Handle every possible object (or the lack thereof) the DSDT
@@ -1134,397 +1392,210 @@ static int asus_hotk_get_info(void)
break;
}
}
- hotk->name = kstrdup(string, GFP_KERNEL);
- if (!hotk->name)
+ asus->name = kstrdup(string, GFP_KERNEL);
+ if (!asus->name)
return -ENOMEM;
if (*string)
pr_notice(" %s model detected\n", string);
- ASUS_HANDLE_INIT(mled_set);
- ASUS_HANDLE_INIT(tled_set);
- ASUS_HANDLE_INIT(rled_set);
- ASUS_HANDLE_INIT(pled_set);
- ASUS_HANDLE_INIT(gled_set);
-
- ASUS_HANDLE_INIT(ledd_set);
-
- ASUS_HANDLE_INIT(kled_set);
- ASUS_HANDLE_INIT(kled_get);
-
/*
* The HWRS method returns information about the hardware.
* 0x80 bit is for WLAN, 0x100 for Bluetooth.
* The significance of others is yet to be found.
- * If we don't find the method, we assume the device are present.
*/
status =
- acpi_evaluate_integer(hotk->handle, "HRWS", NULL, &hwrs_result);
- if (ACPI_FAILURE(status))
- hwrs_result = WL_HWRS | BT_HWRS;
-
- if (hwrs_result & WL_HWRS)
- ASUS_HANDLE_INIT(wl_switch);
- if (hwrs_result & BT_HWRS)
- ASUS_HANDLE_INIT(bt_switch);
-
- ASUS_HANDLE_INIT(wireless_status);
+ acpi_evaluate_integer(asus->handle, "HRWS", NULL, &hwrs_result);
+ if (!ACPI_FAILURE(status))
+ pr_notice(" HRWS returned %x", (int)hwrs_result);
- ASUS_HANDLE_INIT(brightness_set);
- ASUS_HANDLE_INIT(brightness_get);
+ if (!acpi_check_handle(asus->handle, METHOD_WL_STATUS, NULL))
+ asus->have_rsts = true;
+ /* Scheduled for removal */
ASUS_HANDLE_INIT(lcd_switch);
-
- ASUS_HANDLE_INIT(display_set);
ASUS_HANDLE_INIT(display_get);
- /*
- * There is a lot of models with "ALSL", but a few get
- * a real light sens, so we need to check it.
- */
- if (!ASUS_HANDLE_INIT(ls_switch))
- ASUS_HANDLE_INIT(ls_level);
-
- ASUS_HANDLE_INIT(gps_on);
- ASUS_HANDLE_INIT(gps_off);
- ASUS_HANDLE_INIT(gps_status);
-
kfree(model);
return AE_OK;
}
-static int asus_input_init(void)
-{
- const struct key_entry *key;
- int result;
+static bool asus_device_present;
- hotk->inputdev = input_allocate_device();
- if (!hotk->inputdev) {
- pr_info("Unable to allocate input device\n");
- return 0;
- }
- hotk->inputdev->name = "Asus Laptop extra buttons";
- hotk->inputdev->phys = ASUS_HOTK_FILE "/input0";
- hotk->inputdev->id.bustype = BUS_HOST;
- hotk->inputdev->getkeycode = asus_getkeycode;
- hotk->inputdev->setkeycode = asus_setkeycode;
-
- for (key = asus_keymap; key->type != KE_END; key++) {
- switch (key->type) {
- case KE_KEY:
- set_bit(EV_KEY, hotk->inputdev->evbit);
- set_bit(key->keycode, hotk->inputdev->keybit);
- break;
- }
- }
- result = input_register_device(hotk->inputdev);
- if (result) {
- pr_info("Unable to register input device\n");
- input_free_device(hotk->inputdev);
- }
- return result;
-}
-
-static int asus_hotk_check(void)
+static int __devinit asus_acpi_init(struct asus_laptop *asus)
{
int result = 0;
- result = acpi_bus_get_status(hotk->device);
+ result = acpi_bus_get_status(asus->device);
if (result)
return result;
-
- if (hotk->device->status.present) {
- result = asus_hotk_get_info();
- } else {
+ if (!asus->device->status.present) {
pr_err("Hotkey device not present, aborting\n");
- return -EINVAL;
+ return -ENODEV;
}
- return result;
-}
-
-static int asus_hotk_found;
-
-static int asus_hotk_add(struct acpi_device *device)
-{
- int result;
-
- pr_notice("Asus Laptop Support version %s\n",
- ASUS_LAPTOP_VERSION);
-
- hotk = kzalloc(sizeof(struct asus_hotk), GFP_KERNEL);
- if (!hotk)
- return -ENOMEM;
-
- hotk->handle = device->handle;
- strcpy(acpi_device_name(device), ASUS_HOTK_DEVICE_NAME);
- strcpy(acpi_device_class(device), ASUS_HOTK_CLASS);
- device->driver_data = hotk;
- hotk->device = device;
-
- result = asus_hotk_check();
+ result = asus_laptop_get_info(asus);
if (result)
- goto end;
-
- asus_hotk_add_fs();
-
- asus_hotk_found = 1;
+ return result;
/* WLED and BLED are on by default */
- write_status(bt_switch_handle, 1, BT_ON);
- write_status(wl_switch_handle, 1, WL_ON);
-
- /* If the h/w switch is off, we need to check the real status */
- write_status(NULL, read_status(BT_ON), BT_ON);
- write_status(NULL, read_status(WL_ON), WL_ON);
+ if (bluetooth_status >= 0)
+ asus_bluetooth_set(asus, !!bluetooth_status);
- /* LCD Backlight is on by default */
- write_status(NULL, 1, LCD_ON);
+ if (wlan_status >= 0)
+ asus_wlan_set(asus, !!wlan_status);
/* Keyboard Backlight is on by default */
- if (kled_set_handle)
- set_kled_lvl(1);
+ if (!acpi_check_handle(asus->handle, METHOD_KBD_LIGHT_SET, NULL))
+ asus_kled_set(asus, 1);
/* LED display is off by default */
- hotk->ledd_status = 0xFFF;
+ asus->ledd_status = 0xFFF;
/* Set initial values of light sensor and level */
- hotk->light_switch = 0; /* Default to light sensor disabled */
- hotk->light_level = 5; /* level 5 for sensor sensitivity */
+ asus->light_switch = 0; /* Default to light sensor disabled */
+ asus->light_level = 5; /* level 5 for sensor sensitivity */
- if (ls_switch_handle)
- set_light_sens_switch(hotk->light_switch);
-
- if (ls_level_handle)
- set_light_sens_level(hotk->light_level);
-
- /* GPS is on by default */
- write_status(NULL, 1, GPS_ON);
-
-end:
- if (result) {
- kfree(hotk->name);
- kfree(hotk);
+ if (!acpi_check_handle(asus->handle, METHOD_ALS_CONTROL, NULL) &&
+ !acpi_check_handle(asus->handle, METHOD_ALS_LEVEL, NULL)) {
+ asus_als_switch(asus, asus->light_switch);
+ asus_als_level(asus, asus->light_level);
}
+ asus->lcd_state = 1; /* LCD should be on when the module loads */
return result;
}
-static int asus_hotk_remove(struct acpi_device *device, int type)
-{
- kfree(hotk->name);
- kfree(hotk);
-
- return 0;
-}
-
-static void asus_backlight_exit(void)
+static int __devinit asus_acpi_add(struct acpi_device *device)
{
- if (asus_backlight_device)
- backlight_device_unregister(asus_backlight_device);
-}
-
-#define ASUS_LED_UNREGISTER(object) \
- if (object##_led.dev) \
- led_classdev_unregister(&object##_led)
+ struct asus_laptop *asus;
+ int result;
-static void asus_led_exit(void)
-{
- destroy_workqueue(led_workqueue);
- ASUS_LED_UNREGISTER(mled);
- ASUS_LED_UNREGISTER(tled);
- ASUS_LED_UNREGISTER(pled);
- ASUS_LED_UNREGISTER(rled);
- ASUS_LED_UNREGISTER(gled);
- ASUS_LED_UNREGISTER(kled);
-}
+ pr_notice("Asus Laptop Support version %s\n",
+ ASUS_LAPTOP_VERSION);
+ asus = kzalloc(sizeof(struct asus_laptop), GFP_KERNEL);
+ if (!asus)
+ return -ENOMEM;
+ asus->handle = device->handle;
+ strcpy(acpi_device_name(device), ASUS_LAPTOP_DEVICE_NAME);
+ strcpy(acpi_device_class(device), ASUS_LAPTOP_CLASS);
+ device->driver_data = asus;
+ asus->device = device;
-static void asus_input_exit(void)
-{
- if (hotk->inputdev)
- input_unregister_device(hotk->inputdev);
-}
+ result = asus_acpi_init(asus);
+ if (result)
+ goto fail_platform;
-static void __exit asus_laptop_exit(void)
-{
- asus_backlight_exit();
- asus_led_exit();
- asus_input_exit();
+ /*
+ * Register the platform device first. It is used as a parent for the
+ * sub-devices below.
+ */
+ result = asus_platform_init(asus);
+ if (result)
+ goto fail_platform;
- acpi_bus_unregister_driver(&asus_hotk_driver);
- sysfs_remove_group(&asuspf_device->dev.kobj, &asuspf_attribute_group);
- platform_device_unregister(asuspf_device);
- platform_driver_unregister(&asuspf_driver);
-}
+ if (!acpi_video_backlight_support()) {
+ result = asus_backlight_init(asus);
+ if (result)
+ goto fail_backlight;
+ } else
+ pr_info("Backlight controlled by ACPI video driver\n");
-static int asus_backlight_init(struct device *dev)
-{
- struct backlight_device *bd;
+ result = asus_input_init(asus);
+ if (result)
+ goto fail_input;
- if (brightness_set_handle && lcd_switch_handle) {
- bd = backlight_device_register(ASUS_HOTK_FILE, dev,
- NULL, &asusbl_ops);
- if (IS_ERR(bd)) {
- pr_err("Could not register asus backlight device\n");
- asus_backlight_device = NULL;
- return PTR_ERR(bd);
- }
+ result = asus_led_init(asus);
+ if (result)
+ goto fail_led;
- asus_backlight_device = bd;
+ result = asus_rfkill_init(asus);
+ if (result)
+ goto fail_rfkill;
- bd->props.max_brightness = 15;
- bd->props.brightness = read_brightness(NULL);
- bd->props.power = FB_BLANK_UNBLANK;
- backlight_update_status(bd);
- }
+ asus_device_present = true;
return 0;
-}
-static int asus_led_register(acpi_handle handle,
- struct led_classdev *ldev, struct device *dev)
-{
- if (!handle)
- return 0;
+fail_rfkill:
+ asus_led_exit(asus);
+fail_led:
+ asus_input_exit(asus);
+fail_input:
+ asus_backlight_exit(asus);
+fail_backlight:
+ asus_platform_exit(asus);
+fail_platform:
+ kfree(asus->name);
+ kfree(asus);
- return led_classdev_register(dev, ldev);
+ return result;
}
-#define ASUS_LED_REGISTER(object, device) \
- asus_led_register(object##_set_handle, &object##_led, device)
-
-static int asus_led_init(struct device *dev)
+static int asus_acpi_remove(struct acpi_device *device, int type)
{
- int rv;
-
- rv = ASUS_LED_REGISTER(mled, dev);
- if (rv)
- goto out;
-
- rv = ASUS_LED_REGISTER(tled, dev);
- if (rv)
- goto out1;
-
- rv = ASUS_LED_REGISTER(rled, dev);
- if (rv)
- goto out2;
-
- rv = ASUS_LED_REGISTER(pled, dev);
- if (rv)
- goto out3;
-
- rv = ASUS_LED_REGISTER(gled, dev);
- if (rv)
- goto out4;
+ struct asus_laptop *asus = acpi_driver_data(device);
- if (kled_set_handle && kled_get_handle)
- rv = ASUS_LED_REGISTER(kled, dev);
- if (rv)
- goto out5;
-
- led_workqueue = create_singlethread_workqueue("led_workqueue");
- if (!led_workqueue)
- goto out6;
+ asus_backlight_exit(asus);
+ asus_rfkill_exit(asus);
+ asus_led_exit(asus);
+ asus_input_exit(asus);
+ asus_platform_exit(asus);
+ kfree(asus->name);
+ kfree(asus);
return 0;
-out6:
- rv = -ENOMEM;
- ASUS_LED_UNREGISTER(kled);
-out5:
- ASUS_LED_UNREGISTER(gled);
-out4:
- ASUS_LED_UNREGISTER(pled);
-out3:
- ASUS_LED_UNREGISTER(rled);
-out2:
- ASUS_LED_UNREGISTER(tled);
-out1:
- ASUS_LED_UNREGISTER(mled);
-out:
- return rv;
}
+static const struct acpi_device_id asus_device_ids[] = {
+ {"ATK0100", 0},
+ {"ATK0101", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, asus_device_ids);
+
+static struct acpi_driver asus_acpi_driver = {
+ .name = ASUS_LAPTOP_NAME,
+ .class = ASUS_LAPTOP_CLASS,
+ .owner = THIS_MODULE,
+ .ids = asus_device_ids,
+ .flags = ACPI_DRIVER_ALL_NOTIFY_EVENTS,
+ .ops = {
+ .add = asus_acpi_add,
+ .remove = asus_acpi_remove,
+ .notify = asus_acpi_notify,
+ },
+};
+
static int __init asus_laptop_init(void)
{
int result;
- result = acpi_bus_register_driver(&asus_hotk_driver);
+ result = platform_driver_register(&platform_driver);
if (result < 0)
return result;
- /*
- * This is a bit of a kludge. We only want this module loaded
- * for ASUS systems, but there's currently no way to probe the
- * ACPI namespace for ASUS HIDs. So we just return failure if
- * we didn't find one, which will cause the module to be
- * unloaded.
- */
- if (!asus_hotk_found) {
- acpi_bus_unregister_driver(&asus_hotk_driver);
- return -ENODEV;
- }
-
- result = asus_input_init();
- if (result)
- goto fail_input;
-
- /* Register platform stuff */
- result = platform_driver_register(&asuspf_driver);
- if (result)
- goto fail_platform_driver;
-
- asuspf_device = platform_device_alloc(ASUS_HOTK_FILE, -1);
- if (!asuspf_device) {
- result = -ENOMEM;
- goto fail_platform_device1;
+ result = acpi_bus_register_driver(&asus_acpi_driver);
+ if (result < 0)
+ goto fail_acpi_driver;
+ if (!asus_device_present) {
+ result = -ENODEV;
+ goto fail_no_device;
}
-
- result = platform_device_add(asuspf_device);
- if (result)
- goto fail_platform_device2;
-
- result = sysfs_create_group(&asuspf_device->dev.kobj,
- &asuspf_attribute_group);
- if (result)
- goto fail_sysfs;
-
- result = asus_led_init(&asuspf_device->dev);
- if (result)
- goto fail_led;
-
- if (!acpi_video_backlight_support()) {
- result = asus_backlight_init(&asuspf_device->dev);
- if (result)
- goto fail_backlight;
- } else
- pr_info("Brightness ignored, must be controlled by "
- "ACPI video driver\n");
-
return 0;
-fail_backlight:
- asus_led_exit();
-
-fail_led:
- sysfs_remove_group(&asuspf_device->dev.kobj,
- &asuspf_attribute_group);
-
-fail_sysfs:
- platform_device_del(asuspf_device);
-
-fail_platform_device2:
- platform_device_put(asuspf_device);
-
-fail_platform_device1:
- platform_driver_unregister(&asuspf_driver);
-
-fail_platform_driver:
- asus_input_exit();
-
-fail_input:
-
+fail_no_device:
+ acpi_bus_unregister_driver(&asus_acpi_driver);
+fail_acpi_driver:
+ platform_driver_unregister(&platform_driver);
return result;
}
+static void __exit asus_laptop_exit(void)
+{
+ acpi_bus_unregister_driver(&asus_acpi_driver);
+ platform_driver_unregister(&platform_driver);
+}
+
module_init(asus_laptop_init);
module_exit(asus_laptop_exit);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index c1d2aeeea94..1381430e110 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1225,9 +1225,8 @@ static int asus_model_match(char *model)
else if (strncmp(model, "M2N", 3) == 0 ||
strncmp(model, "M3N", 3) == 0 ||
strncmp(model, "M5N", 3) == 0 ||
- strncmp(model, "M6N", 3) == 0 ||
strncmp(model, "S1N", 3) == 0 ||
- strncmp(model, "S5N", 3) == 0 || strncmp(model, "W1N", 3) == 0)
+ strncmp(model, "S5N", 3) == 0)
return xxN;
else if (strncmp(model, "M1", 2) == 0)
return M1A;
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 8cb20e45bad..035a7dd65a3 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -507,6 +507,10 @@ static int cmpc_keys_codes[] = {
KEY_BRIGHTNESSDOWN,
KEY_BRIGHTNESSUP,
KEY_VENDOR,
+ KEY_UNKNOWN,
+ KEY_CAMERA,
+ KEY_BACK,
+ KEY_FORWARD,
KEY_MAX
};
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index b7f4d270591..ef614979afe 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -132,8 +132,8 @@ static struct dmi_system_id __devinitdata dell_blacklist[] = {
};
static struct calling_interface_buffer *buffer;
-struct page *bufferpage;
-DEFINE_MUTEX(buffer_mutex);
+static struct page *bufferpage;
+static DEFINE_MUTEX(buffer_mutex);
static int hwswitch_state;
@@ -580,6 +580,7 @@ static int __init dell_init(void)
fail_backlight:
i8042_remove_filter(dell_laptop_i8042_filter);
+ cancel_delayed_work_sync(&dell_rfkill_work);
fail_filter:
dell_cleanup_rfkill();
fail_rfkill:
@@ -597,12 +598,12 @@ fail_platform_driver:
static void __exit dell_exit(void)
{
- cancel_delayed_work_sync(&dell_rfkill_work);
i8042_remove_filter(dell_laptop_i8042_filter);
+ cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
dell_cleanup_rfkill();
if (platform_device) {
- platform_device_del(platform_device);
+ platform_device_unregister(platform_device);
platform_driver_unregister(&platform_driver);
}
kfree(da_tokens);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index e2be6bb33d9..9a844caa375 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -578,6 +578,8 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
struct pci_dev *dev;
struct pci_bus *bus;
bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
+ bool absent;
+ u32 l;
if (eeepc->wlan_rfkill)
rfkill_set_sw_state(eeepc->wlan_rfkill, blocked);
@@ -591,6 +593,22 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
goto out_unlock;
}
+ if (pci_bus_read_config_dword(bus, 0, PCI_VENDOR_ID, &l)) {
+ pr_err("Unable to read PCI config space?\n");
+ goto out_unlock;
+ }
+ absent = (l == 0xffffffff);
+
+ if (blocked != absent) {
+ pr_warning("BIOS says wireless lan is %s, "
+ "but the pci device is %s\n",
+ blocked ? "blocked" : "unblocked",
+ absent ? "absent" : "present");
+ pr_warning("skipped wireless hotplug as probably "
+ "inappropriate for this model\n");
+ goto out_unlock;
+ }
+
if (!blocked) {
dev = pci_get_slot(bus, 0);
if (dev) {
@@ -1277,7 +1295,8 @@ static void eeepc_dmi_check(struct eeepc_laptop *eeepc)
* hotplug code. In fact, current hotplug code seems to unplug another
* device...
*/
- if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0) {
+ if (strcmp(model, "1005HA") == 0 || strcmp(model, "1201N") == 0 ||
+ strcmp(model, "1005PE") == 0) {
eeepc->hotplug_disabled = true;
pr_info("wlan hotplug disabled\n");
}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index eb603f1d55c..e7b0c3bcef8 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -286,6 +286,7 @@ struct ibm_init_struct {
char param[32];
int (*init) (struct ibm_init_struct *);
+ mode_t base_procfs_mode;
struct ibm_struct *data;
};
@@ -2082,6 +2083,7 @@ static struct attribute_set *hotkey_dev_attributes;
static void tpacpi_driver_event(const unsigned int hkey_event);
static void hotkey_driver_event(const unsigned int scancode);
+static void hotkey_poll_setup(const bool may_warn);
/* HKEY.MHKG() return bits */
#define TP_HOTKEY_TABLET_MASK (1 << 3)
@@ -2264,6 +2266,8 @@ static int tpacpi_hotkey_driver_mask_set(const u32 mask)
rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
~hotkey_source_mask);
+ hotkey_poll_setup(true);
+
mutex_unlock(&hotkey_mutex);
return rc;
@@ -2548,7 +2552,7 @@ static void hotkey_poll_stop_sync(void)
}
/* call with hotkey_mutex held */
-static void hotkey_poll_setup(bool may_warn)
+static void hotkey_poll_setup(const bool may_warn)
{
const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
@@ -2579,7 +2583,7 @@ static void hotkey_poll_setup(bool may_warn)
}
}
-static void hotkey_poll_setup_safe(bool may_warn)
+static void hotkey_poll_setup_safe(const bool may_warn)
{
mutex_lock(&hotkey_mutex);
hotkey_poll_setup(may_warn);
@@ -2597,7 +2601,11 @@ static void hotkey_poll_set_freq(unsigned int freq)
#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
-static void hotkey_poll_setup_safe(bool __unused)
+static void hotkey_poll_setup(const bool __unused)
+{
+}
+
+static void hotkey_poll_setup_safe(const bool __unused)
{
}
@@ -2607,16 +2615,11 @@ static int hotkey_inputdev_open(struct input_dev *dev)
{
switch (tpacpi_lifecycle) {
case TPACPI_LIFE_INIT:
- /*
- * hotkey_init will call hotkey_poll_setup_safe
- * at the appropriate moment
- */
- return 0;
- case TPACPI_LIFE_EXITING:
- return -EBUSY;
case TPACPI_LIFE_RUNNING:
hotkey_poll_setup_safe(false);
return 0;
+ case TPACPI_LIFE_EXITING:
+ return -EBUSY;
}
/* Should only happen if tpacpi_lifecycle is corrupt */
@@ -2627,7 +2630,7 @@ static int hotkey_inputdev_open(struct input_dev *dev)
static void hotkey_inputdev_close(struct input_dev *dev)
{
/* disable hotkey polling when possible */
- if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
+ if (tpacpi_lifecycle != TPACPI_LIFE_EXITING &&
!(hotkey_source_mask & hotkey_driver_mask))
hotkey_poll_setup_safe(false);
}
@@ -3655,13 +3658,19 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
break;
case 3:
/* 0x3000-0x3FFF: bay-related wakeups */
- if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
+ switch (hkey) {
+ case TP_HKEY_EV_BAYEJ_ACK:
hotkey_autosleep_ack = 1;
printk(TPACPI_INFO
"bay ejected\n");
hotkey_wakeup_hotunplug_complete_notify_change();
known_ev = true;
- } else {
+ break;
+ case TP_HKEY_EV_OPTDRV_EJ:
+ /* FIXME: kick libata if SATA link offline */
+ known_ev = true;
+ break;
+ default:
known_ev = false;
}
break;
@@ -3870,7 +3879,7 @@ enum {
TP_ACPI_BLUETOOTH_HWPRESENT = 0x01, /* Bluetooth hw available */
TP_ACPI_BLUETOOTH_RADIOSSW = 0x02, /* Bluetooth radio enabled */
TP_ACPI_BLUETOOTH_RESUMECTRL = 0x04, /* Bluetooth state at resume:
- off / last state */
+ 0 = disable, 1 = enable */
};
enum {
@@ -3916,10 +3925,11 @@ static int bluetooth_set_status(enum tpacpi_rfkill_state state)
}
#endif
- /* We make sure to keep TP_ACPI_BLUETOOTH_RESUMECTRL off */
- status = TP_ACPI_BLUETOOTH_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_BLUETOOTH_RADIOSSW;
+ status = TP_ACPI_BLUETOOTH_RADIOSSW
+ | TP_ACPI_BLUETOOTH_RESUMECTRL;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SBDC", "vd", status))
return -EIO;
@@ -4070,7 +4080,7 @@ enum {
TP_ACPI_WANCARD_HWPRESENT = 0x01, /* Wan hw available */
TP_ACPI_WANCARD_RADIOSSW = 0x02, /* Wan radio enabled */
TP_ACPI_WANCARD_RESUMECTRL = 0x04, /* Wan state at resume:
- off / last state */
+ 0 = disable, 1 = enable */
};
#define TPACPI_RFK_WWAN_SW_NAME "tpacpi_wwan_sw"
@@ -4107,10 +4117,11 @@ static int wan_set_status(enum tpacpi_rfkill_state state)
}
#endif
- /* We make sure to set TP_ACPI_WANCARD_RESUMECTRL */
- status = TP_ACPI_WANCARD_RESUMECTRL;
if (state == TPACPI_RFK_RADIO_ON)
- status |= TP_ACPI_WANCARD_RADIOSSW;
+ status = TP_ACPI_WANCARD_RADIOSSW
+ | TP_ACPI_WANCARD_RESUMECTRL;
+ else
+ status = 0;
if (!acpi_evalf(hkey_handle, NULL, "SWAN", "vd", status))
return -EIO;
@@ -4619,6 +4630,10 @@ static int video_read(struct seq_file *m)
return 0;
}
+ /* Even reads can crash X.org, so... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
status = video_outputsw_get();
if (status < 0)
return status;
@@ -4652,6 +4667,10 @@ static int video_write(char *buf)
if (video_supported == TPACPI_VIDEO_NONE)
return -ENODEV;
+ /* Even reads can crash X.org, let alone writes... */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
enable = 0;
disable = 0;
@@ -6133,13 +6152,13 @@ static const struct tpacpi_quirk brightness_quirk_table[] __initconst = {
TPACPI_Q_IBM('1', 'Y', TPACPI_BRGHT_Q_EC), /* T43/p ATI */
/* Models with ATI GPUs that can use ECNVRAM */
- TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('1', 'R', TPACPI_BRGHT_Q_EC), /* R50,51 T40-42 */
TPACPI_Q_IBM('1', 'Q', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
- TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
+ TPACPI_Q_IBM('7', '6', TPACPI_BRGHT_Q_EC), /* R52 */
TPACPI_Q_IBM('7', '8', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
/* Models with Intel Extreme Graphics 2 */
- TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC),
+ TPACPI_Q_IBM('1', 'U', TPACPI_BRGHT_Q_NOEC), /* X40 */
TPACPI_Q_IBM('1', 'V', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
TPACPI_Q_IBM('1', 'W', TPACPI_BRGHT_Q_ASK|TPACPI_BRGHT_Q_EC),
@@ -6522,7 +6541,8 @@ static int volume_set_status(const u8 status)
return volume_set_status_ec(status);
}
-static int volume_set_mute_ec(const bool mute)
+/* returns < 0 on error, 0 on no change, 1 on change */
+static int __volume_set_mute_ec(const bool mute)
{
int rc;
u8 s, n;
@@ -6537,22 +6557,37 @@ static int volume_set_mute_ec(const bool mute)
n = (mute) ? s | TP_EC_AUDIO_MUTESW_MSK :
s & ~TP_EC_AUDIO_MUTESW_MSK;
- if (n != s)
+ if (n != s) {
rc = volume_set_status_ec(n);
+ if (!rc)
+ rc = 1;
+ }
unlock:
mutex_unlock(&volume_mutex);
return rc;
}
+static int volume_alsa_set_mute(const bool mute)
+{
+ dbg_printk(TPACPI_DBG_MIXER, "ALSA: trying to %smute\n",
+ (mute) ? "" : "un");
+ return __volume_set_mute_ec(mute);
+}
+
static int volume_set_mute(const bool mute)
{
+ int rc;
+
dbg_printk(TPACPI_DBG_MIXER, "trying to %smute\n",
(mute) ? "" : "un");
- return volume_set_mute_ec(mute);
+
+ rc = __volume_set_mute_ec(mute);
+ return (rc < 0) ? rc : 0;
}
-static int volume_set_volume_ec(const u8 vol)
+/* returns < 0 on error, 0 on no change, 1 on change */
+static int __volume_set_volume_ec(const u8 vol)
{
int rc;
u8 s, n;
@@ -6569,19 +6604,22 @@ static int volume_set_volume_ec(const u8 vol)
n = (s & ~TP_EC_AUDIO_LVL_MSK) | vol;
- if (n != s)
+ if (n != s) {
rc = volume_set_status_ec(n);
+ if (!rc)
+ rc = 1;
+ }
unlock:
mutex_unlock(&volume_mutex);
return rc;
}
-static int volume_set_volume(const u8 vol)
+static int volume_alsa_set_volume(const u8 vol)
{
dbg_printk(TPACPI_DBG_MIXER,
- "trying to set volume level to %hu\n", vol);
- return volume_set_volume_ec(vol);
+ "ALSA: trying to set volume level to %hu\n", vol);
+ return __volume_set_volume_ec(vol);
}
static void volume_alsa_notify_change(void)
@@ -6628,7 +6666,7 @@ static int volume_alsa_vol_get(struct snd_kcontrol *kcontrol,
static int volume_alsa_vol_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- return volume_set_volume(ucontrol->value.integer.value[0]);
+ return volume_alsa_set_volume(ucontrol->value.integer.value[0]);
}
#define volume_alsa_mute_info snd_ctl_boolean_mono_info
@@ -6651,7 +6689,7 @@ static int volume_alsa_mute_get(struct snd_kcontrol *kcontrol,
static int volume_alsa_mute_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- return volume_set_mute(!ucontrol->value.integer.value[0]);
+ return volume_alsa_set_mute(!ucontrol->value.integer.value[0]);
}
static struct snd_kcontrol_new volume_alsa_control_vol __devinitdata = {
@@ -8477,9 +8515,10 @@ static int __init ibm_init(struct ibm_init_struct *iibm)
"%s installed\n", ibm->name);
if (ibm->read) {
- mode_t mode;
+ mode_t mode = iibm->base_procfs_mode;
- mode = S_IRUGO;
+ if (!mode)
+ mode = S_IRUGO;
if (ibm->write)
mode |= S_IWUSR;
entry = proc_create_data(ibm->name, mode, proc_dir,
@@ -8670,6 +8709,7 @@ static struct ibm_init_struct ibms_init[] __initdata = {
#ifdef CONFIG_THINKPAD_ACPI_VIDEO
{
.init = video_init,
+ .base_procfs_mode = S_IRUSR,
.data = &video_driver_data,
},
#endif
@@ -9032,6 +9072,9 @@ static int __init thinkpad_acpi_module_init(void)
return ret;
}
}
+
+ tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
+
ret = input_register_device(tpacpi_inputdev);
if (ret < 0) {
printk(TPACPI_ERR "unable to register input device\n");
@@ -9041,7 +9084,6 @@ static int __init thinkpad_acpi_module_init(void)
tp_features.input_device_registered = 1;
}
- tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
return 0;
}
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 26c211724ac..405b969734d 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -814,21 +814,23 @@ static void toshiba_acpi_notify(acpi_handle handle, u32 event, void *context)
if (hci_result == HCI_SUCCESS) {
if (value == 0x100)
continue;
- else if (value & 0x80) {
- key = toshiba_acpi_get_entry_by_scancode
- (value & ~0x80);
- if (!key) {
- printk(MY_INFO "Unknown key %x\n",
- value & ~0x80);
- continue;
- }
- input_report_key(toshiba_acpi.hotkey_dev,
- key->keycode, 1);
- input_sync(toshiba_acpi.hotkey_dev);
- input_report_key(toshiba_acpi.hotkey_dev,
- key->keycode, 0);
- input_sync(toshiba_acpi.hotkey_dev);
+ /* act on key press; ignore key release */
+ if (value & 0x80)
+ continue;
+
+ key = toshiba_acpi_get_entry_by_scancode
+ (value);
+ if (!key) {
+ printk(MY_INFO "Unknown key %x\n",
+ value);
+ continue;
}
+ input_report_key(toshiba_acpi.hotkey_dev,
+ key->keycode, 1);
+ input_sync(toshiba_acpi.hotkey_dev);
+ input_report_key(toshiba_acpi.hotkey_dev,
+ key->keycode, 0);
+ input_sync(toshiba_acpi.hotkey_dev);
} else if (hci_result == HCI_NOT_SUPPORTED) {
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index d4b3d67f054..bf146721395 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -98,10 +98,10 @@ config BATTERY_WM97XX
Say Y to enable support for battery measured by WM97xx aux port.
config BATTERY_BQ27x00
- tristate "BQ27200 battery driver"
+ tristate "BQ27x00 battery driver"
depends on I2C
help
- Say Y here to enable support for batteries with BQ27200(I2C) chip.
+ Say Y here to enable support for batteries with BQ27x00 (I2C) chips.
config BATTERY_DA9030
tristate "DA9030 battery driver"
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
index 62bb98124e2..bece33ed873 100644
--- a/drivers/power/bq27x00_battery.c
+++ b/drivers/power/bq27x00_battery.c
@@ -26,13 +26,22 @@
#include <linux/i2c.h>
#include <asm/unaligned.h>
-#define DRIVER_VERSION "1.0.0"
+#define DRIVER_VERSION "1.1.0"
#define BQ27x00_REG_TEMP 0x06
#define BQ27x00_REG_VOLT 0x08
-#define BQ27x00_REG_RSOC 0x0B /* Relative State-of-Charge */
#define BQ27x00_REG_AI 0x14
#define BQ27x00_REG_FLAGS 0x0A
+#define BQ27x00_REG_TTE 0x16
+#define BQ27x00_REG_TTF 0x18
+#define BQ27x00_REG_TTECP 0x26
+
+#define BQ27000_REG_RSOC 0x0B /* Relative State-of-Charge */
+#define BQ27000_FLAG_CHGS BIT(7)
+
+#define BQ27500_REG_SOC 0x2c
+#define BQ27500_FLAG_DSC BIT(0)
+#define BQ27500_FLAG_FC BIT(9)
/* If the system has several batteries we need a different name for each
* of them...
@@ -46,25 +55,28 @@ struct bq27x00_access_methods {
struct bq27x00_device_info *di);
};
+enum bq27x00_chip { BQ27000, BQ27500 };
+
struct bq27x00_device_info {
struct device *dev;
int id;
- int voltage_uV;
- int current_uA;
- int temp_C;
- int charge_rsoc;
struct bq27x00_access_methods *bus;
struct power_supply bat;
+ enum bq27x00_chip chip;
struct i2c_client *client;
};
static enum power_supply_property bq27x00_battery_props[] = {
+ POWER_SUPPLY_PROP_STATUS,
POWER_SUPPLY_PROP_PRESENT,
POWER_SUPPLY_PROP_VOLTAGE_NOW,
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_TEMP,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW,
+ POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG,
+ POWER_SUPPLY_PROP_TIME_TO_FULL_NOW,
};
/*
@@ -74,16 +86,11 @@ static enum power_supply_property bq27x00_battery_props[] = {
static int bq27x00_read(u8 reg, int *rt_value, int b_single,
struct bq27x00_device_info *di)
{
- int ret;
-
- ret = di->bus->read(reg, rt_value, b_single, di);
- *rt_value = be16_to_cpu(*rt_value);
-
- return ret;
+ return di->bus->read(reg, rt_value, b_single, di);
}
/*
- * Return the battery temperature in Celsius degrees
+ * Return the battery temperature in tenths of degree Celsius
* Or < 0 if something fails.
*/
static int bq27x00_battery_temperature(struct bq27x00_device_info *di)
@@ -97,7 +104,10 @@ static int bq27x00_battery_temperature(struct bq27x00_device_info *di)
return ret;
}
- return (temp >> 2) - 273;
+ if (di->chip == BQ27500)
+ return temp - 2731;
+ else
+ return ((temp >> 2) - 273) * 10;
}
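/*
 * Worked example of the conversions above (units assumed from the code): a
 * bq27000/bq27200 raw reading of 1192 (0.25 K steps) gives
 * (1192 >> 2) - 273 = 25 degC, reported as 250 tenths of a degree; a bq27500
 * raw reading of 2981 (0.1 K steps) gives 2981 - 2731 = 250.
 */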
/*
@@ -115,7 +125,7 @@ static int bq27x00_battery_voltage(struct bq27x00_device_info *di)
return ret;
}
- return volt;
+ return volt * 1000;
}
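/*
 * The power-supply core reports voltages in microvolts, so the register
 * value (assumed by the code above to be in millivolts) is scaled by 1000.
 */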
/*
@@ -134,16 +144,23 @@ static int bq27x00_battery_current(struct bq27x00_device_info *di)
dev_err(di->dev, "error reading current\n");
return 0;
}
- ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
- if (ret < 0) {
- dev_err(di->dev, "error reading flags\n");
- return 0;
- }
- if ((flags & (1 << 7)) != 0) {
- dev_dbg(di->dev, "negative current!\n");
- return -curr;
+
+ if (di->chip == BQ27500) {
+ /* bq27500 returns signed value */
+ curr = (int)(s16)curr;
+ } else {
+ ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
+ if (ret < 0) {
+ dev_err(di->dev, "error reading flags\n");
+ return 0;
+ }
+ if (flags & BQ27000_FLAG_CHGS) {
+ dev_dbg(di->dev, "negative current!\n");
+ curr = -curr;
+ }
}
- return curr;
+
+ return curr * 1000;
}
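/*
 * Likewise for current: the raw value is treated as milliamps and scaled to
 * microamps for the power-supply core. The bq27500 register is already
 * signed; on bq27000/bq27200 the sign is derived from the CHGS flag above.
 */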
/*
@@ -155,13 +172,70 @@ static int bq27x00_battery_rsoc(struct bq27x00_device_info *di)
int ret;
int rsoc = 0;
- ret = bq27x00_read(BQ27x00_REG_RSOC, &rsoc, 1, di);
+ if (di->chip == BQ27500)
+ ret = bq27x00_read(BQ27500_REG_SOC, &rsoc, 0, di);
+ else
+ ret = bq27x00_read(BQ27000_REG_RSOC, &rsoc, 1, di);
if (ret) {
dev_err(di->dev, "error reading relative State-of-Charge\n");
return ret;
}
- return rsoc >> 8;
+ return rsoc;
+}
+
+static int bq27x00_battery_status(struct bq27x00_device_info *di,
+ union power_supply_propval *val)
+{
+ int flags = 0;
+ int status;
+ int ret;
+
+ ret = bq27x00_read(BQ27x00_REG_FLAGS, &flags, 0, di);
+ if (ret < 0) {
+ dev_err(di->dev, "error reading flags\n");
+ return ret;
+ }
+
+ if (di->chip == BQ27500) {
+ if (flags & BQ27500_FLAG_FC)
+ status = POWER_SUPPLY_STATUS_FULL;
+ else if (flags & BQ27500_FLAG_DSC)
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ } else {
+ if (flags & BQ27000_FLAG_CHGS)
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ else
+ status = POWER_SUPPLY_STATUS_DISCHARGING;
+ }
+
+ val->intval = status;
+ return 0;
+}
+
+/*
+ * Read a time register.
+ * Return < 0 if something fails.
+ */
+static int bq27x00_battery_time(struct bq27x00_device_info *di, int reg,
+ union power_supply_propval *val)
+{
+ int tval = 0;
+ int ret;
+
+ ret = bq27x00_read(reg, &tval, 0, di);
+ if (ret) {
+ dev_err(di->dev, "error reading register %02x\n", reg);
+ return ret;
+ }
+
+ if (tval == 65535)
+ return -ENODATA;
+
+ val->intval = tval * 60;
+ return 0;
}
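/*
 * Convention used above: a raw value of 65535 means "no estimate" and is
 * mapped to -ENODATA; otherwise the register value (in minutes) is converted
 * to seconds, since the POWER_SUPPLY_PROP_TIME_TO_* properties use seconds.
 */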
#define to_bq27x00_device_info(x) container_of((x), \
@@ -171,9 +245,13 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
enum power_supply_property psp,
union power_supply_propval *val)
{
+ int ret = 0;
struct bq27x00_device_info *di = to_bq27x00_device_info(psy);
switch (psp) {
+ case POWER_SUPPLY_PROP_STATUS:
+ ret = bq27x00_battery_status(di, val);
+ break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
case POWER_SUPPLY_PROP_PRESENT:
val->intval = bq27x00_battery_voltage(di);
@@ -189,11 +267,20 @@ static int bq27x00_battery_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_TEMP:
val->intval = bq27x00_battery_temperature(di);
break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_NOW:
+ ret = bq27x00_battery_time(di, BQ27x00_REG_TTE, val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG:
+ ret = bq27x00_battery_time(di, BQ27x00_REG_TTECP, val);
+ break;
+ case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW:
+ ret = bq27x00_battery_time(di, BQ27x00_REG_TTF, val);
+ break;
default:
return -EINVAL;
}
- return 0;
+ return ret;
}
static void bq27x00_powersupply_init(struct bq27x00_device_info *di)
@@ -206,10 +293,10 @@ static void bq27x00_powersupply_init(struct bq27x00_device_info *di)
}
/*
- * BQ27200 specific code
+ * i2c specific code
*/
-static int bq27200_read(u8 reg, int *rt_value, int b_single,
+static int bq27x00_read_i2c(u8 reg, int *rt_value, int b_single,
struct bq27x00_device_info *di)
{
struct i2c_client *client = di->client;
@@ -238,7 +325,7 @@ static int bq27200_read(u8 reg, int *rt_value, int b_single,
err = i2c_transfer(client->adapter, msg, 1);
if (err >= 0) {
if (!b_single)
- *rt_value = get_unaligned_be16(data);
+ *rt_value = get_unaligned_le16(data);
else
*rt_value = data[0];
@@ -248,7 +335,7 @@ static int bq27200_read(u8 reg, int *rt_value, int b_single,
return err;
}
-static int bq27200_battery_probe(struct i2c_client *client,
+static int bq27x00_battery_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
char *name;
@@ -267,7 +354,7 @@ static int bq27200_battery_probe(struct i2c_client *client,
if (retval < 0)
return retval;
- name = kasprintf(GFP_KERNEL, "bq27200-%d", num);
+ name = kasprintf(GFP_KERNEL, "%s-%d", id->name, num);
if (!name) {
dev_err(&client->dev, "failed to allocate device name\n");
retval = -ENOMEM;
@@ -281,6 +368,7 @@ static int bq27200_battery_probe(struct i2c_client *client,
goto batt_failed_2;
}
di->id = num;
+ di->chip = id->driver_data;
bus = kzalloc(sizeof(*bus), GFP_KERNEL);
if (!bus) {
@@ -293,7 +381,7 @@ static int bq27200_battery_probe(struct i2c_client *client,
i2c_set_clientdata(client, di);
di->dev = &client->dev;
di->bat.name = name;
- bus->read = &bq27200_read;
+ bus->read = &bq27x00_read_i2c;
di->bus = bus;
di->client = client;
@@ -323,7 +411,7 @@ batt_failed_1:
return retval;
}
-static int bq27200_battery_remove(struct i2c_client *client)
+static int bq27x00_battery_remove(struct i2c_client *client)
{
struct bq27x00_device_info *di = i2c_get_clientdata(client);
@@ -344,27 +432,28 @@ static int bq27200_battery_remove(struct i2c_client *client)
* Module stuff
*/
-static const struct i2c_device_id bq27200_id[] = {
- { "bq27200", 0 },
+static const struct i2c_device_id bq27x00_id[] = {
+ { "bq27200", BQ27000 }, /* bq27200 is same as bq27000, but with i2c */
+ { "bq27500", BQ27500 },
{},
};
-static struct i2c_driver bq27200_battery_driver = {
+static struct i2c_driver bq27x00_battery_driver = {
.driver = {
- .name = "bq27200-battery",
+ .name = "bq27x00-battery",
},
- .probe = bq27200_battery_probe,
- .remove = bq27200_battery_remove,
- .id_table = bq27200_id,
+ .probe = bq27x00_battery_probe,
+ .remove = bq27x00_battery_remove,
+ .id_table = bq27x00_id,
};
static int __init bq27x00_battery_init(void)
{
int ret;
- ret = i2c_add_driver(&bq27200_battery_driver);
+ ret = i2c_add_driver(&bq27x00_battery_driver);
if (ret)
- printk(KERN_ERR "Unable to register BQ27200 driver\n");
+ printk(KERN_ERR "Unable to register BQ27x00 driver\n");
return ret;
}
@@ -372,7 +461,7 @@ module_init(bq27x00_battery_init);
static void __exit bq27x00_battery_exit(void)
{
- i2c_del_driver(&bq27200_battery_driver);
+ i2c_del_driver(&bq27x00_battery_driver);
}
module_exit(bq27x00_battery_exit);
diff --git a/drivers/power/da9030_battery.c b/drivers/power/da9030_battery.c
index 3364198134a..a2e71f7b27f 100644
--- a/drivers/power/da9030_battery.c
+++ b/drivers/power/da9030_battery.c
@@ -509,7 +509,7 @@ static int da9030_battery_probe(struct platform_device *pdev)
charger->master = pdev->dev.parent;
- /* 10 seconds between monotor runs unless platfrom defines other
+ /* 10 seconds between monitor runs unless platform defines other
interval */
charger->interval = msecs_to_jiffies(
(pdata->batmon_interval ? : 10) * 1000);
diff --git a/drivers/power/wm97xx_battery.c b/drivers/power/wm97xx_battery.c
index 6ea3cb5837c..23eed356a85 100644
--- a/drivers/power/wm97xx_battery.c
+++ b/drivers/power/wm97xx_battery.c
@@ -26,7 +26,7 @@
static DEFINE_MUTEX(bat_lock);
static struct work_struct bat_work;
-struct mutex work_lock;
+static struct mutex work_lock;
static int bat_status = POWER_SUPPLY_STATUS_UNKNOWN;
static struct wm97xx_batt_info *gpdata;
static enum power_supply_property *prop;
@@ -203,7 +203,7 @@ static int __devinit wm97xx_bat_probe(struct platform_device *dev)
goto err2;
ret = request_irq(gpio_to_irq(pdata->charge_gpio),
wm97xx_chrg_irq, IRQF_DISABLED,
- "AC Detect", 0);
+ "AC Detect", dev);
if (ret)
goto err2;
props++; /* POWER_SUPPLY_PROP_STATUS */
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 262f62eec83..834b4844182 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -27,6 +27,17 @@ config REGULATOR_DEBUG
help
Say yes here to enable debugging support.
+config REGULATOR_DUMMY
+ bool "Provide a dummy regulator if regulator lookups fail"
+ help
+ If this option is enabled then when a regulator lookup fails
+ and the board has not specified that it has provided full
+ constraints, the regulator core will provide an always-enabled
+ dummy regulator, allowing consumer drivers to continue.
+
+ A warning will be generated when this substitution is done.
+
config REGULATOR_FIXED_VOLTAGE
tristate "Fixed voltage regulator support"
help
@@ -69,6 +80,13 @@ config REGULATOR_MAX1586
regulator via I2C bus. The provided regulator is suitable
for PXA27x chips to control VCC_CORE and VCC_USIM voltages.
+config REGULATOR_MAX8649
+ tristate "Maxim 8649 voltage regulator"
+ depends on I2C
+ help
+ This driver controls a Maxim 8649 voltage output regulator via
+ I2C bus.
+
config REGULATOR_MAX8660
tristate "Maxim 8660/8661 voltage regulator"
depends on I2C
@@ -91,19 +109,26 @@ config REGULATOR_WM831X
of PMIC devices.
config REGULATOR_WM8350
- tristate "Wolfson Microelectroncis WM8350 AudioPlus PMIC"
+ tristate "Wolfson Microelectronics WM8350 AudioPlus PMIC"
depends on MFD_WM8350
help
This driver provides support for the voltage and current regulators
of the WM8350 AudioPlus PMIC.
config REGULATOR_WM8400
- tristate "Wolfson Microelectroncis WM8400 AudioPlus PMIC"
+ tristate "Wolfson Microelectronics WM8400 AudioPlus PMIC"
depends on MFD_WM8400
help
This driver provides support for the voltage regulators of the
WM8400 AudioPlus PMIC.
+config REGULATOR_WM8994
+ tristate "Wolfson Microelectronics WM8994 CODEC"
+ depends on MFD_WM8994
+ help
+ This driver provides support for the voltage regulators on the
+ WM8994 CODEC.
+
config REGULATOR_DA903X
tristate "Support regulators on Dialog Semiconductor DA9030/DA9034 PMIC"
depends on PMIC_DA903X
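The REGULATOR_DUMMY substitution only happens on boards that have not declared full constraints. A minimal board-side sketch of the opt-out, using the existing machine API (the wrapper function is illustrative):

#include <linux/regulator/machine.h>

static void __init example_board_regulators_declared(void)
{
	/* Every supply on this board is described, so a failed lookup
	 * should fail outright rather than be given the dummy. */
	regulator_has_full_constraints();
}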
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index b3c806c7941..e845b66ad59 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -9,15 +9,18 @@ obj-$(CONFIG_REGULATOR_VIRTUAL_CONSUMER) += virtual.o
obj-$(CONFIG_REGULATOR_USERSPACE_CONSUMER) += userspace-consumer.o
obj-$(CONFIG_REGULATOR_BQ24022) += bq24022.o
+obj-$(CONFIG_REGULATOR_DUMMY) += dummy.o
obj-$(CONFIG_REGULATOR_LP3971) += lp3971.o
obj-$(CONFIG_REGULATOR_MAX1586) += max1586.o
obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o
+obj-$(CONFIG_REGULATOR_MAX8649) += max8649.o
obj-$(CONFIG_REGULATOR_MAX8660) += max8660.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o
obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o
obj-$(CONFIG_REGULATOR_WM8350) += wm8350-regulator.o
obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
+obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
obj-$(CONFIG_REGULATOR_DA903X) += da903x.o
obj-$(CONFIG_REGULATOR_PCF50633) += pcf50633-regulator.o
obj-$(CONFIG_REGULATOR_PCAP) += pcap-regulator.o
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index b349db4504b..7de950959ed 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -561,7 +561,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = {
* for all the different regulators.
*/
-static int __init ab3100_regulators_probe(struct platform_device *pdev)
+static int __devinit ab3100_regulators_probe(struct platform_device *pdev)
{
struct ab3100_platform_data *plfdata = pdev->dev.platform_data;
struct ab3100 *ab3100 = platform_get_drvdata(pdev);
@@ -641,7 +641,7 @@ static int __init ab3100_regulators_probe(struct platform_device *pdev)
return 0;
}
-static int __exit ab3100_regulators_remove(struct platform_device *pdev)
+static int __devexit ab3100_regulators_remove(struct platform_device *pdev)
{
int i;
@@ -659,7 +659,7 @@ static struct platform_driver ab3100_regulators_driver = {
.owner = THIS_MODULE,
},
.probe = ab3100_regulators_probe,
- .remove = __exit_p(ab3100_regulators_remove),
+ .remove = __devexit_p(ab3100_regulators_remove),
};
static __init int ab3100_regulators_init(void)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b60a4c9f8f1..c7bbe30010f 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -19,10 +19,13 @@
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/suspend.h>
+#include <linux/delay.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
+#include "dummy.h"
+
#define REGULATOR_VERSION "0.5"
static DEFINE_MUTEX(regulator_list_mutex);
@@ -1084,6 +1087,13 @@ overflow_err:
return NULL;
}
+static int _regulator_get_enable_time(struct regulator_dev *rdev)
+{
+ if (!rdev->desc->ops->enable_time)
+ return 0;
+ return rdev->desc->ops->enable_time(rdev);
+}
+
/* Internal regulator request function */
static struct regulator *_regulator_get(struct device *dev, const char *id,
int exclusive)
@@ -1115,6 +1125,22 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
goto found;
}
}
+
+#ifdef CONFIG_REGULATOR_DUMMY
+ if (!devname)
+ devname = "deviceless";
+
+ /* If the board didn't flag that it was fully constrained then
+ * substitute in a dummy regulator so consumers can continue.
+ */
+ if (!has_full_constraints) {
+ pr_warning("%s supply %s not found, using dummy regulator\n",
+ devname, id);
+ rdev = dummy_regulator_rdev;
+ goto found;
+ }
+#endif
+
mutex_unlock(&regulator_list_mutex);
return regulator;
@@ -1251,7 +1277,7 @@ static int _regulator_can_change_status(struct regulator_dev *rdev)
/* locks held by regulator_enable() */
static int _regulator_enable(struct regulator_dev *rdev)
{
- int ret;
+ int ret, delay;
/* do we need to enable the supply regulator first */
if (rdev->supply) {
@@ -1275,13 +1301,34 @@ static int _regulator_enable(struct regulator_dev *rdev)
if (!_regulator_can_change_status(rdev))
return -EPERM;
- if (rdev->desc->ops->enable) {
- ret = rdev->desc->ops->enable(rdev);
- if (ret < 0)
- return ret;
- } else {
+ if (!rdev->desc->ops->enable)
return -EINVAL;
+
+ /* Query before enabling in case the enable time is
+ * configuration dependent. */
+ ret = _regulator_get_enable_time(rdev);
+ if (ret >= 0) {
+ delay = ret;
+ } else {
+ printk(KERN_WARNING
+ "%s: enable_time() failed for %s: %d\n",
+ __func__, rdev_get_name(rdev),
+ ret);
+ delay = 0;
}
+
+ /* Allow the regulator to ramp; it would be useful
+ * to extend this for bulk operations so that the
+ * regulators can ramp together. */
+ ret = rdev->desc->ops->enable(rdev);
+ if (ret < 0)
+ return ret;
+
+ if (delay >= 1000)
+ mdelay(delay / 1000);
+ else if (delay)
+ udelay(delay);
+
} else if (ret < 0) {
printk(KERN_ERR "%s: is_enabled() failed for %s: %d\n",
__func__, rdev_get_name(rdev), ret);
@@ -1341,6 +1388,9 @@ static int _regulator_disable(struct regulator_dev *rdev)
__func__, rdev_get_name(rdev));
return ret;
}
+
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
}
/* decrease our supplies ref count and disable if required */
@@ -1399,8 +1449,8 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
return ret;
}
/* notify other consumers that power has been forced off */
- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE,
- NULL);
+ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+ REGULATOR_EVENT_DISABLE, NULL);
}
/* decrease our supplies ref count and disable if required */
@@ -1434,9 +1484,9 @@ EXPORT_SYMBOL_GPL(regulator_force_disable);
static int _regulator_is_enabled(struct regulator_dev *rdev)
{
- /* sanity check */
+ /* If we don't know then assume that the regulator is always on */
if (!rdev->desc->ops->is_enabled)
- return -EINVAL;
+ return 1;
return rdev->desc->ops->is_enabled(rdev);
}
@@ -2451,8 +2501,15 @@ EXPORT_SYMBOL_GPL(regulator_get_init_drvdata);
static int __init regulator_init(void)
{
+ int ret;
+
printk(KERN_INFO "regulator: core version %s\n", REGULATOR_VERSION);
- return class_register(&regulator_class);
+
+ ret = class_register(&regulator_class);
+
+ regulator_dummy_init();
+
+ return ret;
}
/* init early to allow our consumers to complete system booting */
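From the consumer side, the enable path above now also waits out the ramp time reported by enable_time() before returning. A minimal consumer sketch against the existing API (the device and supply name are illustrative):

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_use_supply(struct device *dev)
{
	struct regulator *supply;
	int ret;

	supply = regulator_get(dev, "vcc");	/* supply name is illustrative */
	if (IS_ERR(supply))
		return PTR_ERR(supply);

	/* Does not return until the driver-reported enable time has elapsed */
	ret = regulator_enable(supply);
	if (ret == 0) {
		/* ... use the hardware behind the supply ... */
		regulator_disable(supply);
	}

	regulator_put(supply);
	return ret;
}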
diff --git a/drivers/regulator/dummy.c b/drivers/regulator/dummy.c
new file mode 100644
index 00000000000..c7410bde7b5
--- /dev/null
+++ b/drivers/regulator/dummy.c
@@ -0,0 +1,66 @@
+/*
+ * dummy.c
+ *
+ * Copyright 2010 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This is useful for systems with mixed controllable and
+ * non-controllable regulators, as well as for allowing testing on
+ * systems with no controllable regulators.
+ */
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+
+#include "dummy.h"
+
+struct regulator_dev *dummy_regulator_rdev;
+
+static struct regulator_init_data dummy_initdata;
+
+static struct regulator_ops dummy_ops;
+
+static struct regulator_desc dummy_desc = {
+ .name = "dummy",
+ .id = -1,
+ .type = REGULATOR_VOLTAGE,
+ .owner = THIS_MODULE,
+ .ops = &dummy_ops,
+};
+
+static struct platform_device *dummy_pdev;
+
+void __init regulator_dummy_init(void)
+{
+ int ret;
+
+ dummy_pdev = platform_device_alloc("reg-dummy", -1);
+ if (!dummy_pdev) {
+ pr_err("Failed to allocate dummy regulator device\n");
+ return;
+ }
+
+ ret = platform_device_add(dummy_pdev);
+ if (ret != 0) {
+ pr_err("Failed to register dummy regulator device: %d\n", ret);
+ platform_device_put(dummy_pdev);
+ return;
+ }
+
+ dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
+ &dummy_initdata, NULL);
+ if (IS_ERR(dummy_regulator_rdev)) {
+ ret = PTR_ERR(dummy_regulator_rdev);
+ pr_err("Failed to register regulator: %d\n", ret);
+ platform_device_unregister(dummy_pdev);
+ return;
+ }
+}
diff --git a/drivers/regulator/dummy.h b/drivers/regulator/dummy.h
new file mode 100644
index 00000000000..3921c0e2424
--- /dev/null
+++ b/drivers/regulator/dummy.h
@@ -0,0 +1,31 @@
+/*
+ * dummy.h
+ *
+ * Copyright 2010 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This is useful for systems with mixed controllable and
+ * non-controllable regulators, as well as for allowing testing on
+ * systems with no controllable regulators.
+ */
+
+#ifndef _DUMMY_H
+#define _DUMMY_H
+
+struct regulator_dev;
+
+extern struct regulator_dev *dummy_regulator_rdev;
+
+#ifdef CONFIG_REGULATOR_DUMMY
+void __init regulator_dummy_init(void);
+#else
+static inline void regulator_dummy_init(void) { }
+#endif
+
+#endif
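Taken together, a failed lookup on a board without full constraints now logs the message added to _regulator_get() and hands back this dummy; for a consumer with no struct device asking for an illustrative "vcc" supply, the output is along the lines of:

	deviceless supply vcc not found, using dummy regulator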
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index f9f516a3028..d11f7622430 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -24,14 +24,16 @@
#include <linux/regulator/driver.h>
#include <linux/regulator/fixed.h>
#include <linux/gpio.h>
+#include <linux/delay.h>
struct fixed_voltage_data {
struct regulator_desc desc;
struct regulator_dev *dev;
int microvolts;
int gpio;
- unsigned enable_high:1;
- unsigned is_enabled:1;
+ unsigned startup_delay;
+ bool enable_high;
+ bool is_enabled;
};
static int fixed_voltage_is_enabled(struct regulator_dev *dev)
@@ -47,7 +49,7 @@ static int fixed_voltage_enable(struct regulator_dev *dev)
if (gpio_is_valid(data->gpio)) {
gpio_set_value_cansleep(data->gpio, data->enable_high);
- data->is_enabled = 1;
+ data->is_enabled = true;
}
return 0;
@@ -59,12 +61,19 @@ static int fixed_voltage_disable(struct regulator_dev *dev)
if (gpio_is_valid(data->gpio)) {
gpio_set_value_cansleep(data->gpio, !data->enable_high);
- data->is_enabled = 0;
+ data->is_enabled = false;
}
return 0;
}
+static int fixed_voltage_enable_time(struct regulator_dev *dev)
+{
+ struct fixed_voltage_data *data = rdev_get_drvdata(dev);
+
+ return data->startup_delay;
+}
+
static int fixed_voltage_get_voltage(struct regulator_dev *dev)
{
struct fixed_voltage_data *data = rdev_get_drvdata(dev);
@@ -87,11 +96,12 @@ static struct regulator_ops fixed_voltage_ops = {
.is_enabled = fixed_voltage_is_enabled,
.enable = fixed_voltage_enable,
.disable = fixed_voltage_disable,
+ .enable_time = fixed_voltage_enable_time,
.get_voltage = fixed_voltage_get_voltage,
.list_voltage = fixed_voltage_list_voltage,
};
-static int regulator_fixed_voltage_probe(struct platform_device *pdev)
+static int __devinit reg_fixed_voltage_probe(struct platform_device *pdev)
{
struct fixed_voltage_config *config = pdev->dev.platform_data;
struct fixed_voltage_data *drvdata;
@@ -117,6 +127,7 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
drvdata->microvolts = config->microvolts;
drvdata->gpio = config->gpio;
+ drvdata->startup_delay = config->startup_delay;
if (gpio_is_valid(config->gpio)) {
drvdata->enable_high = config->enable_high;
@@ -163,7 +174,7 @@ static int regulator_fixed_voltage_probe(struct platform_device *pdev)
/* Regulator without GPIO control is considered
* always enabled
*/
- drvdata->is_enabled = 1;
+ drvdata->is_enabled = true;
}
drvdata->dev = regulator_register(&drvdata->desc, &pdev->dev,
@@ -191,7 +202,7 @@ err:
return ret;
}
-static int regulator_fixed_voltage_remove(struct platform_device *pdev)
+static int __devexit reg_fixed_voltage_remove(struct platform_device *pdev)
{
struct fixed_voltage_data *drvdata = platform_get_drvdata(pdev);
@@ -205,10 +216,11 @@ static int regulator_fixed_voltage_remove(struct platform_device *pdev)
}
static struct platform_driver regulator_fixed_voltage_driver = {
- .probe = regulator_fixed_voltage_probe,
- .remove = regulator_fixed_voltage_remove,
+ .probe = reg_fixed_voltage_probe,
+ .remove = __devexit_p(reg_fixed_voltage_remove),
.driver = {
.name = "reg-fixed-voltage",
+ .owner = THIS_MODULE,
},
};
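A sketch of platform data exercising the new startup_delay field, using only the fixed_voltage_config members visible in the hunks above (the GPIO number and voltages are illustrative; the usual init_data constraints are omitted for brevity):

#include <linux/platform_device.h>
#include <linux/regulator/fixed.h>

/* Illustrative: a 3.3 V rail gated by GPIO 42 that needs ~2 ms to come up */
static struct fixed_voltage_config example_vcc_cfg = {
	.microvolts	= 3300000,
	.gpio		= 42,
	.enable_high	= 1,
	.startup_delay	= 2000,	/* microseconds, returned by enable_time() */
};

static struct platform_device example_vcc_dev = {
	.name	= "reg-fixed-voltage",
	.id	= 0,
	.dev	= {
		.platform_data = &example_vcc_cfg,
	},
};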
diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c
index 4f33a0f4a17..f5532ed7927 100644
--- a/drivers/regulator/lp3971.c
+++ b/drivers/regulator/lp3971.c
@@ -54,7 +54,7 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val);
#define LP3971_BUCK2_BASE 0x29
#define LP3971_BUCK3_BASE 0x32
-const static int buck_base_addr[] = {
+static const int buck_base_addr[] = {
LP3971_BUCK1_BASE,
LP3971_BUCK2_BASE,
LP3971_BUCK3_BASE,
@@ -63,7 +63,7 @@ const static int buck_base_addr[] = {
#define LP3971_BUCK_TARGET_VOL1_REG(x) (buck_base_addr[x])
#define LP3971_BUCK_TARGET_VOL2_REG(x) (buck_base_addr[x]+1)
-const static int buck_voltage_map[] = {
+static const int buck_voltage_map[] = {
0, 800, 850, 900, 950, 1000, 1050, 1100,
1150, 1200, 1250, 1300, 1350, 1400, 1450, 1500,
1550, 1600, 1650, 1700, 1800, 1900, 2500, 2800,
@@ -96,17 +96,17 @@ const static int buck_voltage_map[] = {
#define LDO_VOL_CONTR_SHIFT(x) ((x & 1) << 2)
#define LDO_VOL_CONTR_MASK 0x0f
-const static int ldo45_voltage_map[] = {
+static const int ldo45_voltage_map[] = {
1000, 1050, 1100, 1150, 1200, 1250, 1300, 1350,
1400, 1500, 1800, 1900, 2500, 2800, 3000, 3300,
};
-const static int ldo123_voltage_map[] = {
+static const int ldo123_voltage_map[] = {
1800, 1900, 2000, 2100, 2200, 2300, 2400, 2500,
2600, 2700, 2800, 2900, 3000, 3100, 3200, 3300,
};
-const static int *ldo_voltage_map[] = {
+static const int *ldo_voltage_map[] = {
ldo123_voltage_map, /* LDO1 */
ldo123_voltage_map, /* LDO2 */
ldo123_voltage_map, /* LDO3 */
@@ -431,20 +431,20 @@ static int lp3971_set_bits(struct lp3971 *lp3971, u8 reg, u16 mask, u16 val)
return ret;
}
-static int setup_regulators(struct lp3971 *lp3971,
- struct lp3971_platform_data *pdata)
+static int __devinit setup_regulators(struct lp3971 *lp3971,
+ struct lp3971_platform_data *pdata)
{
int i, err;
- int num_regulators = pdata->num_regulators;
- lp3971->num_regulators = num_regulators;
- lp3971->rdev = kzalloc(sizeof(struct regulator_dev *) * num_regulators,
- GFP_KERNEL);
+
+ lp3971->num_regulators = pdata->num_regulators;
+ lp3971->rdev = kcalloc(pdata->num_regulators,
+ sizeof(struct regulator_dev *), GFP_KERNEL);
/* Instantiate the regulators */
- for (i = 0; i < num_regulators; i++) {
- int id = pdata->regulators[i].id;
- lp3971->rdev[i] = regulator_register(&regulators[id],
- lp3971->dev, pdata->regulators[i].initdata, lp3971);
+ for (i = 0; i < pdata->num_regulators; i++) {
+ struct lp3971_regulator_subdev *reg = &pdata->regulators[i];
+ lp3971->rdev[i] = regulator_register(&regulators[reg->id],
+ lp3971->dev, reg->initdata, lp3971);
if (IS_ERR(lp3971->rdev[i])) {
err = PTR_ERR(lp3971->rdev[i]);
@@ -455,10 +455,10 @@ static int setup_regulators(struct lp3971 *lp3971,
}
return 0;
+
error:
- for (i = 0; i < num_regulators; i++)
- if (lp3971->rdev[i])
- regulator_unregister(lp3971->rdev[i]);
+ while (--i >= 0)
+ regulator_unregister(lp3971->rdev[i]);
kfree(lp3971->rdev);
lp3971->rdev = NULL;
return err;
@@ -472,15 +472,17 @@ static int __devinit lp3971_i2c_probe(struct i2c_client *i2c,
int ret;
u16 val;
- lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL);
- if (lp3971 == NULL) {
- ret = -ENOMEM;
- goto err;
+ if (!pdata) {
+ dev_dbg(&i2c->dev, "No platform init data supplied\n");
+ return -ENODEV;
}
+ lp3971 = kzalloc(sizeof(struct lp3971), GFP_KERNEL);
+ if (lp3971 == NULL)
+ return -ENOMEM;
+
lp3971->i2c = i2c;
lp3971->dev = &i2c->dev;
- i2c_set_clientdata(i2c, lp3971);
mutex_init(&lp3971->io_lock);
@@ -493,19 +495,15 @@ static int __devinit lp3971_i2c_probe(struct i2c_client *i2c,
goto err_detect;
}
- if (pdata) {
- ret = setup_regulators(lp3971, pdata);
- if (ret < 0)
- goto err_detect;
- } else
- dev_warn(lp3971->dev, "No platform init data supplied\n");
+ ret = setup_regulators(lp3971, pdata);
+ if (ret < 0)
+ goto err_detect;
+ i2c_set_clientdata(i2c, lp3971);
return 0;
err_detect:
- i2c_set_clientdata(i2c, NULL);
kfree(lp3971);
-err:
return ret;
}
@@ -513,11 +511,13 @@ static int __devexit lp3971_i2c_remove(struct i2c_client *i2c)
{
struct lp3971 *lp3971 = i2c_get_clientdata(i2c);
int i;
+
+ i2c_set_clientdata(i2c, NULL);
+
for (i = 0; i < lp3971->num_regulators; i++)
- if (lp3971->rdev[i])
- regulator_unregister(lp3971->rdev[i]);
+ regulator_unregister(lp3971->rdev[i]);
+
kfree(lp3971->rdev);
- i2c_set_clientdata(i2c, NULL);
kfree(lp3971);
return 0;
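The error path above switches to the usual partial-registration unwind: only the entries that were actually registered get torn down, and the same shape reappears in the tps65023/tps6507x hunks further down. A generic sketch of the idiom, with illustrative names:

struct example_chip;
int example_register_one(struct example_chip *chip, int i);
void example_unregister_one(struct example_chip *chip, int i);

static int example_register_all(struct example_chip *chip, int n)
{
	int i, err;

	for (i = 0; i < n; i++) {
		err = example_register_one(chip, i);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* i is the entry that failed; undo 0..i-1 only */
	while (--i >= 0)
		example_unregister_one(chip, i);
	return err;
}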
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 2c082d3ef48..a49fc952c9a 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -179,8 +179,8 @@ static struct regulator_desc max1586_reg[] = {
},
};
-static int max1586_pmic_probe(struct i2c_client *client,
- const struct i2c_device_id *i2c_id)
+static int __devinit max1586_pmic_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
{
struct regulator_dev **rdev;
struct max1586_platform_data *pdata = client->dev.platform_data;
@@ -235,7 +235,7 @@ out:
return ret;
}
-static int max1586_pmic_remove(struct i2c_client *client)
+static int __devexit max1586_pmic_remove(struct i2c_client *client)
{
struct regulator_dev **rdev = i2c_get_clientdata(client);
int i;
@@ -257,9 +257,10 @@ MODULE_DEVICE_TABLE(i2c, max1586_id);
static struct i2c_driver max1586_pmic_driver = {
.probe = max1586_pmic_probe,
- .remove = max1586_pmic_remove,
+ .remove = __devexit_p(max1586_pmic_remove),
.driver = {
.name = "max1586",
+ .owner = THIS_MODULE,
},
.id_table = max1586_id,
};
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
new file mode 100644
index 00000000000..3ebdf698c64
--- /dev/null
+++ b/drivers/regulator/max8649.c
@@ -0,0 +1,408 @@
+/*
+ * Regulators driver for Maxim max8649
+ *
+ * Copyright (C) 2009-2010 Marvell International Ltd.
+ * Haojian Zhuang <haojian.zhuang@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/max8649.h>
+
+#define MAX8649_DCDC_VMIN 750000 /* uV */
+#define MAX8649_DCDC_VMAX 1380000 /* uV */
+#define MAX8649_DCDC_STEP 10000 /* uV */
+#define MAX8649_VOL_MASK 0x3f
+
+/* Registers */
+#define MAX8649_MODE0 0x00
+#define MAX8649_MODE1 0x01
+#define MAX8649_MODE2 0x02
+#define MAX8649_MODE3 0x03
+#define MAX8649_CONTROL 0x04
+#define MAX8649_SYNC 0x05
+#define MAX8649_RAMP 0x06
+#define MAX8649_CHIP_ID1 0x08
+#define MAX8649_CHIP_ID2 0x09
+
+/* Bits */
+#define MAX8649_EN_PD (1 << 7)
+#define MAX8649_VID0_PD (1 << 6)
+#define MAX8649_VID1_PD (1 << 5)
+#define MAX8649_VID_MASK (3 << 5)
+
+#define MAX8649_FORCE_PWM (1 << 7)
+#define MAX8649_SYNC_EXTCLK (1 << 6)
+
+#define MAX8649_EXT_MASK (3 << 6)
+
+#define MAX8649_RAMP_MASK (7 << 5)
+#define MAX8649_RAMP_DOWN (1 << 1)
+
+struct max8649_regulator_info {
+ struct regulator_dev *regulator;
+ struct i2c_client *i2c;
+ struct device *dev;
+ struct mutex io_lock;
+
+ int vol_reg;
+ unsigned mode:2; /* bit[1:0] = VID1, VID0 */
+ unsigned extclk_freq:2;
+ unsigned extclk:1;
+ unsigned ramp_timing:3;
+ unsigned ramp_down:1;
+};
+
+/* I2C operations */
+
+static inline int max8649_read_device(struct i2c_client *i2c,
+ int reg, int bytes, void *dest)
+{
+ unsigned char data;
+ int ret;
+
+ data = (unsigned char)reg;
+ ret = i2c_master_send(i2c, &data, 1);
+ if (ret < 0)
+ return ret;
+ ret = i2c_master_recv(i2c, dest, bytes);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static inline int max8649_write_device(struct i2c_client *i2c,
+ int reg, int bytes, void *src)
+{
+ unsigned char buf[bytes + 1];
+ int ret;
+
+ buf[0] = (unsigned char)reg;
+ memcpy(&buf[1], src, bytes);
+
+ ret = i2c_master_send(i2c, buf, bytes + 1);
+ if (ret < 0)
+ return ret;
+ return 0;
+}
+
+static int max8649_reg_read(struct i2c_client *i2c, int reg)
+{
+ struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
+ unsigned char data;
+ int ret;
+
+ mutex_lock(&info->io_lock);
+ ret = max8649_read_device(i2c, reg, 1, &data);
+ mutex_unlock(&info->io_lock);
+
+ if (ret < 0)
+ return ret;
+ return (int)data;
+}
+
+static int max8649_set_bits(struct i2c_client *i2c, int reg,
+ unsigned char mask, unsigned char data)
+{
+ struct max8649_regulator_info *info = i2c_get_clientdata(i2c);
+ unsigned char value;
+ int ret;
+
+ mutex_lock(&info->io_lock);
+ ret = max8649_read_device(i2c, reg, 1, &value);
+ if (ret < 0)
+ goto out;
+ value &= ~mask;
+ value |= data;
+ ret = max8649_write_device(i2c, reg, 1, &value);
+out:
+ mutex_unlock(&info->io_lock);
+ return ret;
+}
+
+static inline int check_range(int min_uV, int max_uV)
+{
+ if ((min_uV < MAX8649_DCDC_VMIN) || (max_uV > MAX8649_DCDC_VMAX)
+ || (min_uV > max_uV))
+ return -EINVAL;
+ return 0;
+}
+
+static int max8649_list_voltage(struct regulator_dev *rdev, unsigned index)
+{
+ return (MAX8649_DCDC_VMIN + index * MAX8649_DCDC_STEP);
+}
+
+static int max8649_get_voltage(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned char data;
+ int ret;
+
+ ret = max8649_reg_read(info->i2c, info->vol_reg);
+ if (ret < 0)
+ return ret;
+ data = (unsigned char)ret & MAX8649_VOL_MASK;
+ return max8649_list_voltage(rdev, data);
+}
+
+static int max8649_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ unsigned char data, mask;
+
+ if (check_range(min_uV, max_uV)) {
+ dev_err(info->dev, "invalid voltage range (%d, %d) uV\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+ data = (min_uV - MAX8649_DCDC_VMIN + MAX8649_DCDC_STEP - 1)
+ / MAX8649_DCDC_STEP;
+ mask = MAX8649_VOL_MASK;
+
+ return max8649_set_bits(info->i2c, info->vol_reg, mask, data);
+}
+
+/* EN_PD means pulldown on EN input */
+static int max8649_enable(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD, 0);
+}
+
+/*
+ * Disable by asserting the internal pulldown resistor on the EN
+ * input pin; an external pulldown on EN would be preferable.
+ */
+static int max8649_disable(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ return max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_EN_PD,
+ MAX8649_EN_PD);
+}
+
+static int max8649_is_enabled(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = max8649_reg_read(info->i2c, MAX8649_CONTROL);
+ if (ret < 0)
+ return ret;
+ return !((unsigned char)ret & MAX8649_EN_PD);
+}
+
+static int max8649_enable_time(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ int voltage, rate, ret;
+
+ /* get voltage */
+ ret = max8649_reg_read(info->i2c, info->vol_reg);
+ if (ret < 0)
+ return ret;
+ ret &= MAX8649_VOL_MASK;
+ voltage = max8649_list_voltage(rdev, (unsigned char)ret); /* uV */
+
+ /* get rate */
+ ret = max8649_reg_read(info->i2c, MAX8649_RAMP);
+ if (ret < 0)
+ return ret;
+ ret = (ret & MAX8649_RAMP_MASK) >> 5;
+ rate = (32 * 1000) >> ret; /* uV/uS */
+
+ return (voltage / rate);
+}
+
+static int max8649_set_mode(struct regulator_dev *rdev, unsigned int mode)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+
+ switch (mode) {
+ case REGULATOR_MODE_FAST:
+ max8649_set_bits(info->i2c, info->vol_reg, MAX8649_FORCE_PWM,
+ MAX8649_FORCE_PWM);
+ break;
+ case REGULATOR_MODE_NORMAL:
+ max8649_set_bits(info->i2c, info->vol_reg,
+ MAX8649_FORCE_PWM, 0);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static unsigned int max8649_get_mode(struct regulator_dev *rdev)
+{
+ struct max8649_regulator_info *info = rdev_get_drvdata(rdev);
+ int ret;
+
+ ret = max8649_reg_read(info->i2c, info->vol_reg);
+ if (ret & MAX8649_FORCE_PWM)
+ return REGULATOR_MODE_FAST;
+ return REGULATOR_MODE_NORMAL;
+}
+
+static struct regulator_ops max8649_dcdc_ops = {
+ .set_voltage = max8649_set_voltage,
+ .get_voltage = max8649_get_voltage,
+ .list_voltage = max8649_list_voltage,
+ .enable = max8649_enable,
+ .disable = max8649_disable,
+ .is_enabled = max8649_is_enabled,
+ .enable_time = max8649_enable_time,
+ .set_mode = max8649_set_mode,
+ .get_mode = max8649_get_mode,
+
+};
+
+static struct regulator_desc dcdc_desc = {
+ .name = "max8649",
+ .ops = &max8649_dcdc_ops,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = 1 << 6,
+ .owner = THIS_MODULE,
+};
+
+static int __devinit max8649_regulator_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max8649_platform_data *pdata = client->dev.platform_data;
+ struct max8649_regulator_info *info = NULL;
+ unsigned char data;
+ int ret;
+
+ info = kzalloc(sizeof(struct max8649_regulator_info), GFP_KERNEL);
+ if (!info) {
+ dev_err(&client->dev, "Not enough memory\n");
+ return -ENOMEM;
+ }
+
+ info->i2c = client;
+ info->dev = &client->dev;
+ mutex_init(&info->io_lock);
+ i2c_set_clientdata(client, info);
+
+ info->mode = pdata->mode;
+ switch (info->mode) {
+ case 0:
+ info->vol_reg = MAX8649_MODE0;
+ break;
+ case 1:
+ info->vol_reg = MAX8649_MODE1;
+ break;
+ case 2:
+ info->vol_reg = MAX8649_MODE2;
+ break;
+ case 3:
+ info->vol_reg = MAX8649_MODE3;
+ break;
+ default:
+ break;
+ }
+
+ ret = max8649_reg_read(info->i2c, MAX8649_CHIP_ID1);
+ if (ret < 0) {
+ dev_err(info->dev, "Failed to detect ID of MAX8649:%d\n",
+ ret);
+ goto out;
+ }
+ dev_info(info->dev, "Detected MAX8649 (ID:%x)\n", ret);
+
+ /* enable VID0 & VID1 */
+ max8649_set_bits(info->i2c, MAX8649_CONTROL, MAX8649_VID_MASK, 0);
+
+ /* enable/disable external clock synchronization */
+ info->extclk = pdata->extclk;
+ data = (info->extclk) ? MAX8649_SYNC_EXTCLK : 0;
+ max8649_set_bits(info->i2c, info->vol_reg, MAX8649_SYNC_EXTCLK, data);
+ if (info->extclk) {
+ /* set external clock frequency */
+ info->extclk_freq = pdata->extclk_freq;
+ max8649_set_bits(info->i2c, MAX8649_SYNC, MAX8649_EXT_MASK,
+ info->extclk_freq);
+ }
+
+ if (pdata->ramp_timing) {
+ info->ramp_timing = pdata->ramp_timing;
+ max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_MASK,
+ info->ramp_timing << 5);
+ }
+
+ info->ramp_down = pdata->ramp_down;
+ if (info->ramp_down) {
+ max8649_set_bits(info->i2c, MAX8649_RAMP, MAX8649_RAMP_DOWN,
+ MAX8649_RAMP_DOWN);
+ }
+
+ info->regulator = regulator_register(&dcdc_desc, &client->dev,
+ pdata->regulator, info);
+ if (IS_ERR(info->regulator)) {
+ dev_err(info->dev, "failed to register regulator %s\n",
+ dcdc_desc.name);
+ ret = PTR_ERR(info->regulator);
+ goto out;
+ }
+
+ dev_info(info->dev, "Max8649 regulator device is detected.\n");
+ return 0;
+out:
+ kfree(info);
+ return ret;
+}
+
+static int __devexit max8649_regulator_remove(struct i2c_client *client)
+{
+ struct max8649_regulator_info *info = i2c_get_clientdata(client);
+
+ if (info) {
+ if (info->regulator)
+ regulator_unregister(info->regulator);
+ kfree(info);
+ }
+ i2c_set_clientdata(client, NULL);
+
+ return 0;
+}
+
+static const struct i2c_device_id max8649_id[] = {
+ { "max8649", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max8649_id);
+
+static struct i2c_driver max8649_driver = {
+ .probe = max8649_regulator_probe,
+ .remove = __devexit_p(max8649_regulator_remove),
+ .driver = {
+ .name = "max8649",
+ },
+ .id_table = max8649_id,
+};
+
+static int __init max8649_init(void)
+{
+ return i2c_add_driver(&max8649_driver);
+}
+subsys_initcall(max8649_init);
+
+static void __exit max8649_exit(void)
+{
+ i2c_del_driver(&max8649_driver);
+}
+module_exit(max8649_exit);
+
+/* Module information */
+MODULE_DESCRIPTION("MAXIM 8649 voltage regulator driver");
+MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
+MODULE_LICENSE("GPL");
+
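Hooking the new driver up from board code takes an i2c_board_info entry plus the platform data fields consumed in probe() above (mode, extclk, ramp_timing, ramp_down, regulator); a sketch with illustrative constraint values, names and bus address. On the enable_time() arithmetic: at the 1.38 V maximum output and the fastest ramp ((32 * 1000) >> 0 uV/us, i.e. 32 mV/us) it reports roughly 1380000 / 32000 ~= 43 us.

#include <linux/i2c.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/max8649.h>

static struct regulator_init_data example_max8649_init = {
	.constraints = {
		.min_uV		= 750000,
		.max_uV		= 1380000,
		.valid_ops_mask	= REGULATOR_CHANGE_VOLTAGE,
	},
};

static struct max8649_platform_data example_max8649_pdata = {
	.mode		= 2,	/* selects MAX8649_MODE2 as the VID register */
	.extclk		= 0,
	.ramp_timing	= 0,	/* 0 leaves the power-on ramp setting alone */
	.ramp_down	= 1,
	.regulator	= &example_max8649_init,
};

static struct i2c_board_info example_i2c_devs[] __initdata = {
	{
		I2C_BOARD_INFO("max8649", 0x60),	/* address is illustrative */
		.platform_data = &example_max8649_pdata,
	},
};

Board init would then pass the array to i2c_register_board_info() for the relevant bus.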
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index acc2fb7b608..f12f1bb6213 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -345,8 +345,8 @@ static struct regulator_desc max8660_reg[] = {
},
};
-static int max8660_probe(struct i2c_client *client,
- const struct i2c_device_id *i2c_id)
+static int __devinit max8660_probe(struct i2c_client *client,
+ const struct i2c_device_id *i2c_id)
{
struct regulator_dev **rdev;
struct max8660_platform_data *pdata = client->dev.platform_data;
@@ -354,7 +354,7 @@ static int max8660_probe(struct i2c_client *client,
int boot_on, i, id, ret = -EINVAL;
if (pdata->num_subdevs > MAX8660_V_END) {
- dev_err(&client->dev, "Too much regulators found!\n");
+ dev_err(&client->dev, "Too many regulators found!\n");
goto out;
}
@@ -462,7 +462,7 @@ out:
return ret;
}
-static int max8660_remove(struct i2c_client *client)
+static int __devexit max8660_remove(struct i2c_client *client)
{
struct regulator_dev **rdev = i2c_get_clientdata(client);
int i;
@@ -485,9 +485,10 @@ MODULE_DEVICE_TABLE(i2c, max8660_id);
static struct i2c_driver max8660_driver = {
.probe = max8660_probe,
- .remove = max8660_remove,
+ .remove = __devexit_p(max8660_remove),
.driver = {
.name = "max8660",
+ .owner = THIS_MODULE,
},
.id_table = max8660_id,
};
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c
index 39c49530004..f7b81845a19 100644
--- a/drivers/regulator/mc13783-regulator.c
+++ b/drivers/regulator/mc13783-regulator.c
@@ -2,6 +2,7 @@
* Regulator Driver for Freescale MC13783 PMIC
*
* Copyright (C) 2008 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
+ * Copyright 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -16,11 +17,44 @@
#include <linux/init.h>
#include <linux/err.h>
-#define MC13783_REG_SWITCHERS4 28
-#define MC13783_REG_SWITCHERS4_PLLEN (1 << 18)
-
#define MC13783_REG_SWITCHERS5 29
#define MC13783_REG_SWITCHERS5_SW3EN (1 << 20)
+#define MC13783_REG_SWITCHERS5_SW3VSEL 18
+#define MC13783_REG_SWITCHERS5_SW3VSEL_M (3 << 18)
+
+#define MC13783_REG_REGULATORSETTING0 30
+#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL 2
+#define MC13783_REG_REGULATORSETTING0_VDIGVSEL 4
+#define MC13783_REG_REGULATORSETTING0_VGENVSEL 6
+#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL 9
+#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL 11
+#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL 13
+#define MC13783_REG_REGULATORSETTING0_VSIMVSEL 14
+#define MC13783_REG_REGULATORSETTING0_VESIMVSEL 15
+#define MC13783_REG_REGULATORSETTING0_VCAMVSEL 16
+
+#define MC13783_REG_REGULATORSETTING0_VIOLOVSEL_M (3 << 2)
+#define MC13783_REG_REGULATORSETTING0_VDIGVSEL_M (3 << 4)
+#define MC13783_REG_REGULATORSETTING0_VGENVSEL_M (7 << 6)
+#define MC13783_REG_REGULATORSETTING0_VRFDIGVSEL_M (3 << 9)
+#define MC13783_REG_REGULATORSETTING0_VRFREFVSEL_M (3 << 11)
+#define MC13783_REG_REGULATORSETTING0_VRFCPVSEL_M (1 << 13)
+#define MC13783_REG_REGULATORSETTING0_VSIMVSEL_M (1 << 14)
+#define MC13783_REG_REGULATORSETTING0_VESIMVSEL_M (1 << 15)
+#define MC13783_REG_REGULATORSETTING0_VCAMVSEL_M (7 << 16)
+
+#define MC13783_REG_REGULATORSETTING1 31
+#define MC13783_REG_REGULATORSETTING1_VVIBVSEL 0
+#define MC13783_REG_REGULATORSETTING1_VRF1VSEL 2
+#define MC13783_REG_REGULATORSETTING1_VRF2VSEL 4
+#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL 6
+#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL 9
+
+#define MC13783_REG_REGULATORSETTING1_VVIBVSEL_M (3 << 0)
+#define MC13783_REG_REGULATORSETTING1_VRF1VSEL_M (3 << 2)
+#define MC13783_REG_REGULATORSETTING1_VRF2VSEL_M (3 << 4)
+#define MC13783_REG_REGULATORSETTING1_VMMC1VSEL_M (7 << 6)
+#define MC13783_REG_REGULATORSETTING1_VMMC2VSEL_M (7 << 9)
#define MC13783_REG_REGULATORMODE0 32
#define MC13783_REG_REGULATORMODE0_VAUDIOEN (1 << 0)
@@ -48,19 +82,107 @@
#define MC13783_REG_POWERMISC_GPO2EN (1 << 8)
#define MC13783_REG_POWERMISC_GPO3EN (1 << 10)
#define MC13783_REG_POWERMISC_GPO4EN (1 << 12)
+#define MC13783_REG_POWERMISC_PWGT1SPIEN (1 << 15)
+#define MC13783_REG_POWERMISC_PWGT2SPIEN (1 << 16)
+
+#define MC13783_REG_POWERMISC_PWGTSPI_M (3 << 15)
+
struct mc13783_regulator {
struct regulator_desc desc;
int reg;
int enable_bit;
+ int vsel_reg;
+ int vsel_shift;
+ int vsel_mask;
+ int const *voltages;
+};
+
+/* Voltage Values */
+static const int mc13783_sw3_val[] = {
+ 5000000, 5000000, 5000000, 5500000,
+};
+
+static const int mc13783_vaudio_val[] = {
+ 2775000,
+};
+
+static const int mc13783_viohi_val[] = {
+ 2775000,
+};
+
+static const int mc13783_violo_val[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static const int mc13783_vdig_val[] = {
+ 1200000, 1300000, 1500000, 1800000,
+};
+
+static const int mc13783_vgen_val[] = {
+ 1200000, 1300000, 1500000, 1800000,
+ 1100000, 2000000, 2775000, 2400000,
+};
+
+static const int mc13783_vrfdig_val[] = {
+ 1200000, 1500000, 1800000, 1875000,
+};
+
+static const int mc13783_vrfref_val[] = {
+ 2475000, 2600000, 2700000, 2775000,
+};
+
+static const int mc13783_vrfcp_val[] = {
+ 2700000, 2775000,
+};
+
+static const int mc13783_vsim_val[] = {
+ 1800000, 2900000, 3000000,
+};
+
+static const int mc13783_vesim_val[] = {
+ 1800000, 2900000,
+};
+
+static const int mc13783_vcam_val[] = {
+ 1500000, 1800000, 2500000, 2550000,
+ 2600000, 2750000, 2800000, 3000000,
+};
+
+static const int mc13783_vrfbg_val[] = {
+ 1250000,
+};
+
+static const int mc13783_vvib_val[] = {
+ 1300000, 1800000, 2000000, 3000000,
+};
+
+static const int mc13783_vmmc_val[] = {
+ 1600000, 1800000, 2000000, 2600000,
+ 2700000, 2800000, 2900000, 3000000,
+};
+
+static const int mc13783_vrf_val[] = {
+ 1500000, 1875000, 2700000, 2775000,
+};
+
+static const int mc13783_gpo_val[] = {
+ 3100000,
+};
+
+static const int mc13783_pwgtdrv_val[] = {
+ 5500000,
};
static struct regulator_ops mc13783_regulator_ops;
+static struct regulator_ops mc13783_fixed_regulator_ops;
+static struct regulator_ops mc13783_gpo_regulator_ops;
-#define MC13783_DEFINE(prefix, _name, _reg) \
+#define MC13783_DEFINE(prefix, _name, _reg, _vsel_reg, _voltages) \
[MC13783_ ## prefix ## _ ## _name] = { \
.desc = { \
.name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
.ops = &mc13783_regulator_ops, \
.type = REGULATOR_VOLTAGE, \
.id = MC13783_ ## prefix ## _ ## _name, \
@@ -68,40 +190,92 @@ static struct regulator_ops mc13783_regulator_ops;
}, \
.reg = MC13783_REG_ ## _reg, \
.enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ .vsel_reg = MC13783_REG_ ## _vsel_reg, \
+ .vsel_shift = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL,\
+ .vsel_mask = MC13783_REG_ ## _vsel_reg ## _ ## _name ## VSEL_M,\
+ .voltages = _voltages, \
+ }
+
+#define MC13783_FIXED_DEFINE(prefix, _name, _reg, _voltages) \
+ [MC13783_ ## prefix ## _ ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
+ .ops = &mc13783_fixed_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MC13783_ ## prefix ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = MC13783_REG_ ## _reg, \
+ .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ .voltages = _voltages, \
}
-#define MC13783_DEFINE_SW(_name, _reg) MC13783_DEFINE(SW, _name, _reg)
-#define MC13783_DEFINE_REGU(_name, _reg) MC13783_DEFINE(REGU, _name, _reg)
+#define MC13783_GPO_DEFINE(prefix, _name, _reg, _voltages) \
+ [MC13783_ ## prefix ## _ ## _name] = { \
+ .desc = { \
+ .name = #prefix "_" #_name, \
+ .n_voltages = ARRAY_SIZE(_voltages), \
+ .ops = &mc13783_gpo_regulator_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .id = MC13783_ ## prefix ## _ ## _name, \
+ .owner = THIS_MODULE, \
+ }, \
+ .reg = MC13783_REG_ ## _reg, \
+ .enable_bit = MC13783_REG_ ## _reg ## _ ## _name ## EN, \
+ .voltages = _voltages, \
+ }
+
+#define MC13783_DEFINE_SW(_name, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(SW, _name, _reg, _vsel_reg, _voltages)
+#define MC13783_DEFINE_REGU(_name, _reg, _vsel_reg, _voltages) \
+ MC13783_DEFINE(REGU, _name, _reg, _vsel_reg, _voltages)
static struct mc13783_regulator mc13783_regulators[] = {
- MC13783_DEFINE_SW(SW3, SWITCHERS5),
- MC13783_DEFINE_SW(PLL, SWITCHERS4),
-
- MC13783_DEFINE_REGU(VAUDIO, REGULATORMODE0),
- MC13783_DEFINE_REGU(VIOHI, REGULATORMODE0),
- MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0),
- MC13783_DEFINE_REGU(VDIG, REGULATORMODE0),
- MC13783_DEFINE_REGU(VGEN, REGULATORMODE0),
- MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0),
- MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0),
- MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0),
- MC13783_DEFINE_REGU(VSIM, REGULATORMODE1),
- MC13783_DEFINE_REGU(VESIM, REGULATORMODE1),
- MC13783_DEFINE_REGU(VCAM, REGULATORMODE1),
- MC13783_DEFINE_REGU(VRFBG, REGULATORMODE1),
- MC13783_DEFINE_REGU(VVIB, REGULATORMODE1),
- MC13783_DEFINE_REGU(VRF1, REGULATORMODE1),
- MC13783_DEFINE_REGU(VRF2, REGULATORMODE1),
- MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1),
- MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1),
- MC13783_DEFINE_REGU(GPO1, POWERMISC),
- MC13783_DEFINE_REGU(GPO2, POWERMISC),
- MC13783_DEFINE_REGU(GPO3, POWERMISC),
- MC13783_DEFINE_REGU(GPO4, POWERMISC),
+ MC13783_DEFINE_SW(SW3, SWITCHERS5, SWITCHERS5, mc13783_sw3_val),
+
+ MC13783_FIXED_DEFINE(REGU, VAUDIO, REGULATORMODE0, mc13783_vaudio_val),
+ MC13783_FIXED_DEFINE(REGU, VIOHI, REGULATORMODE0, mc13783_viohi_val),
+ MC13783_DEFINE_REGU(VIOLO, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_violo_val),
+ MC13783_DEFINE_REGU(VDIG, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vdig_val),
+ MC13783_DEFINE_REGU(VGEN, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vgen_val),
+ MC13783_DEFINE_REGU(VRFDIG, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vrfdig_val),
+ MC13783_DEFINE_REGU(VRFREF, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vrfref_val),
+ MC13783_DEFINE_REGU(VRFCP, REGULATORMODE0, REGULATORSETTING0, \
+ mc13783_vrfcp_val),
+ MC13783_DEFINE_REGU(VSIM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13783_vsim_val),
+ MC13783_DEFINE_REGU(VESIM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13783_vesim_val),
+ MC13783_DEFINE_REGU(VCAM, REGULATORMODE1, REGULATORSETTING0, \
+ mc13783_vcam_val),
+ MC13783_FIXED_DEFINE(REGU, VRFBG, REGULATORMODE1, mc13783_vrfbg_val),
+ MC13783_DEFINE_REGU(VVIB, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vvib_val),
+ MC13783_DEFINE_REGU(VRF1, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vrf_val),
+ MC13783_DEFINE_REGU(VRF2, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vrf_val),
+ MC13783_DEFINE_REGU(VMMC1, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vmmc_val),
+ MC13783_DEFINE_REGU(VMMC2, REGULATORMODE1, REGULATORSETTING1, \
+ mc13783_vmmc_val),
+ MC13783_GPO_DEFINE(REGU, GPO1, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REGU, GPO2, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REGU, GPO3, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REGU, GPO4, POWERMISC, mc13783_gpo_val),
+ MC13783_GPO_DEFINE(REGU, PWGT1SPI, POWERMISC, mc13783_pwgtdrv_val),
+ MC13783_GPO_DEFINE(REGU, PWGT2SPI, POWERMISC, mc13783_pwgtdrv_val),
};
struct mc13783_regulator_priv {
struct mc13783 *mc13783;
+ u32 powermisc_pwgt_state;
struct regulator_dev *regulators[];
};
@@ -154,10 +328,241 @@ static int mc13783_regulator_is_enabled(struct regulator_dev *rdev)
return (val & mc13783_regulators[id].enable_bit) != 0;
}
+static int mc13783_regulator_list_voltage(struct regulator_dev *rdev,
+ unsigned selector)
+{
+ int id = rdev_get_id(rdev);
+
+ if (selector >= mc13783_regulators[id].desc.n_voltages)
+ return -EINVAL;
+
+ return mc13783_regulators[id].voltages[selector];
+}
+
+static int mc13783_get_best_voltage_index(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int reg_id = rdev_get_id(rdev);
+ int i;
+ int bestmatch;
+ int bestindex;
+
+ /*
+ * Locate the minimum voltage fitting the criteria on
+ * this regulator. The selectable voltages are not in
+ * strictly ascending order, so check them all for the
+ * best match.
+ */
+ bestmatch = INT_MAX;
+ bestindex = -1;
+ for (i = 0; i < mc13783_regulators[reg_id].desc.n_voltages; i++) {
+ if (mc13783_regulators[reg_id].voltages[i] >= min_uV &&
+ mc13783_regulators[reg_id].voltages[i] < bestmatch) {
+ bestmatch = mc13783_regulators[reg_id].voltages[i];
+ bestindex = i;
+ }
+ }
+
+ if (bestindex < 0 || bestmatch > max_uV) {
+ dev_warn(&rdev->dev, "no possible value for %d<=x<=%d uV\n",
+ min_uV, max_uV);
+ return -EINVAL;
+ }
+ return bestindex;
+}
+
+static int mc13783_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int value, id = rdev_get_id(rdev);
+ int ret;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ /* Find the best index */
+ value = mc13783_get_best_voltage_index(rdev, min_uV, max_uV);
+ dev_dbg(rdev_get_dev(rdev), "%s best value: %d \n", __func__, value);
+ if (value < 0)
+ return value;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_rmw(priv->mc13783, mc13783_regulators[id].vsel_reg,
+ mc13783_regulators[id].vsel_mask,
+ value << mc13783_regulators[id].vsel_shift);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_read(priv->mc13783,
+ mc13783_regulators[id].vsel_reg, &val);
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ val = (val & mc13783_regulators[id].vsel_mask)
+ >> mc13783_regulators[id].vsel_shift;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val);
+
+ BUG_ON(val >= mc13783_regulators[id].desc.n_voltages);
+
+ return mc13783_regulators[id].voltages[val];
+}
+
static struct regulator_ops mc13783_regulator_ops = {
.enable = mc13783_regulator_enable,
.disable = mc13783_regulator_disable,
.is_enabled = mc13783_regulator_is_enabled,
+ .list_voltage = mc13783_regulator_list_voltage,
+ .set_voltage = mc13783_regulator_set_voltage,
+ .get_voltage = mc13783_regulator_get_voltage,
+};
+
+static int mc13783_fixed_regulator_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ int id = rdev_get_id(rdev);
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n",
+ __func__, id, min_uV, max_uV);
+
+ if (min_uV <= mc13783_regulators[id].voltages[0] &&
+ mc13783_regulators[id].voltages[0] <= max_uV)
+ return 0;
+ else
+ return -EINVAL;
+}
+
+static int mc13783_fixed_regulator_get_voltage(struct regulator_dev *rdev)
+{
+ int id = rdev_get_id(rdev);
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ return mc13783_regulators[id].voltages[0];
+}
+
+static struct regulator_ops mc13783_fixed_regulator_ops = {
+ .enable = mc13783_regulator_enable,
+ .disable = mc13783_regulator_disable,
+ .is_enabled = mc13783_regulator_is_enabled,
+ .list_voltage = mc13783_regulator_list_voltage,
+ .set_voltage = mc13783_fixed_regulator_set_voltage,
+ .get_voltage = mc13783_fixed_regulator_get_voltage,
+};
+
+int mc13783_powermisc_rmw(struct mc13783_regulator_priv *priv, u32 mask,
+ u32 val)
+{
+ struct mc13783 *mc13783 = priv->mc13783;
+ int ret;
+ u32 valread;
+
+ BUG_ON(val & ~mask);
+
+ ret = mc13783_reg_read(mc13783, MC13783_REG_POWERMISC, &valread);
+ if (ret)
+ return ret;
+
+ /* Update the stored state for Power Gates. */
+ priv->powermisc_pwgt_state =
+ (priv->powermisc_pwgt_state & ~mask) | val;
+ priv->powermisc_pwgt_state &= MC13783_REG_POWERMISC_PWGTSPI_M;
+
+ /* Construct the new register value */
+ valread = (valread & ~mask) | val;
+ /* Overwrite the PWGTxEN with the stored version */
+ valread = (valread & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
+ priv->powermisc_pwgt_state;
+
+ return mc13783_reg_write(mc13783, MC13783_REG_POWERMISC, valread);
+}
+
+static int mc13783_gpo_regulator_enable(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+ u32 en_val = mc13783_regulators[id].enable_bit;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ /* Power Gate enable value is 0 */
+ if (id == MC13783_REGU_PWGT1SPI ||
+ id == MC13783_REGU_PWGT2SPI)
+ en_val = 0;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+ en_val);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_gpo_regulator_disable(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int id = rdev_get_id(rdev);
+ int ret;
+ u32 dis_val = 0;
+
+ dev_dbg(rdev_get_dev(rdev), "%s id: %d\n", __func__, id);
+
+ /* Power Gate disable value is 1 */
+ if (id == MC13783_REGU_PWGT1SPI ||
+ id == MC13783_REGU_PWGT2SPI)
+ dis_val = mc13783_regulators[id].enable_bit;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_powermisc_rmw(priv, mc13783_regulators[id].enable_bit,
+ dis_val);
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
+static int mc13783_gpo_regulator_is_enabled(struct regulator_dev *rdev)
+{
+ struct mc13783_regulator_priv *priv = rdev_get_drvdata(rdev);
+ int ret, id = rdev_get_id(rdev);
+ unsigned int val;
+
+ mc13783_lock(priv->mc13783);
+ ret = mc13783_reg_read(priv->mc13783, mc13783_regulators[id].reg, &val);
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ /* The Power Gate state is stored in powermisc_pwgt_state
+ * with inverted bit meaning: 0 = enabled */
+ val = (val & ~MC13783_REG_POWERMISC_PWGTSPI_M) |
+ (priv->powermisc_pwgt_state ^ MC13783_REG_POWERMISC_PWGTSPI_M);
+
+ return (val & mc13783_regulators[id].enable_bit) != 0;
+}
+
+static struct regulator_ops mc13783_gpo_regulator_ops = {
+ .enable = mc13783_gpo_regulator_enable,
+ .disable = mc13783_gpo_regulator_disable,
+ .is_enabled = mc13783_gpo_regulator_is_enabled,
+ .list_voltage = mc13783_regulator_list_voltage,
+ .set_voltage = mc13783_fixed_regulator_set_voltage,
+ .get_voltage = mc13783_fixed_regulator_get_voltage,
};
static int __devinit mc13783_regulator_probe(struct platform_device *pdev)
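As a worked example of the selection loop above: a request for 1500000-2000000 uV against mc13783_vgen_val (1200000, 1300000, 1500000, 1800000, 1100000, 2000000, 2775000, 2400000) has to visit every entry because the table is unsorted, keeps the smallest value still >= min_uV (1500000, index 2), confirms it is <= max_uV and returns index 2; a request for 2800000-2850000 uV matches nothing and fails with -EINVAL.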
diff --git a/drivers/regulator/pcap-regulator.c b/drivers/regulator/pcap-regulator.c
index 33d7d899e03..29d0566379a 100644
--- a/drivers/regulator/pcap-regulator.c
+++ b/drivers/regulator/pcap-regulator.c
@@ -288,16 +288,18 @@ static int __devexit pcap_regulator_remove(struct platform_device *pdev)
struct regulator_dev *rdev = platform_get_drvdata(pdev);
regulator_unregister(rdev);
+ platform_set_drvdata(pdev, NULL);
return 0;
}
static struct platform_driver pcap_regulator_driver = {
.driver = {
- .name = "pcap-regulator",
+ .name = "pcap-regulator",
+ .owner = THIS_MODULE,
},
- .probe = pcap_regulator_probe,
- .remove = __devexit_p(pcap_regulator_remove),
+ .probe = pcap_regulator_probe,
+ .remove = __devexit_p(pcap_regulator_remove),
};
static int __init pcap_regulator_init(void)
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c
index 07fda0a75ad..1f183543bdb 100644
--- a/drivers/regulator/tps65023-regulator.c
+++ b/drivers/regulator/tps65023-regulator.c
@@ -457,8 +457,8 @@ static struct regulator_ops tps65023_ldo_ops = {
.list_voltage = tps65023_ldo_list_voltage,
};
-static
-int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int __devinit tps_65023_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
static int desc_id;
const struct tps_info *info = (void *)id->driver_data;
@@ -466,6 +466,7 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct regulator_dev *rdev;
struct tps_pmic *tps;
int i;
+ int error;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
return -EIO;
@@ -475,7 +476,6 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
* coming from the board-evm file.
*/
init_data = client->dev.platform_data;
-
if (!init_data)
return -EIO;
@@ -502,21 +502,12 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
/* Register the regulators */
rdev = regulator_register(&tps->desc[i], &client->dev,
- init_data, tps);
+ init_data, tps);
if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
-
- /* Unregister */
- while (i)
- regulator_unregister(tps->rdev[--i]);
-
- tps->client = NULL;
-
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
- kfree(tps);
- return PTR_ERR(rdev);
+ error = PTR_ERR(rdev);
+ goto fail;
}
/* Save regulator for cleanup */
@@ -526,6 +517,13 @@ int tps_65023_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, tps);
return 0;
+
+ fail:
+ while (--i >= 0)
+ regulator_unregister(tps->rdev[i]);
+
+ kfree(tps);
+ return error;
}
/**
@@ -539,13 +537,12 @@ static int __devexit tps_65023_remove(struct i2c_client *client)
struct tps_pmic *tps = i2c_get_clientdata(client);
int i;
+ /* clear the client data in i2c */
+ i2c_set_clientdata(client, NULL);
+
for (i = 0; i < TPS65023_NUM_REGULATOR; i++)
regulator_unregister(tps->rdev[i]);
- tps->client = NULL;
-
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
kfree(tps);
return 0;
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c
index f8a6dfbef75..c2a9539acd7 100644
--- a/drivers/regulator/tps6507x-regulator.c
+++ b/drivers/regulator/tps6507x-regulator.c
@@ -538,8 +538,8 @@ static struct regulator_ops tps6507x_ldo_ops = {
.list_voltage = tps6507x_ldo_list_voltage,
};
-static
-int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
+static int __devinit tps_6507x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
static int desc_id;
const struct tps_info *info = (void *)id->driver_data;
@@ -547,6 +547,7 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
struct regulator_dev *rdev;
struct tps_pmic *tps;
int i;
+ int error;
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_BYTE_DATA))
@@ -557,7 +558,6 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
* coming from the board-evm file.
*/
init_data = client->dev.platform_data;
-
if (!init_data)
return -EIO;
@@ -586,18 +586,8 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (IS_ERR(rdev)) {
dev_err(&client->dev, "failed to register %s\n",
id->name);
-
- /* Unregister */
- while (i)
- regulator_unregister(tps->rdev[--i]);
-
- tps->client = NULL;
-
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
-
- kfree(tps);
- return PTR_ERR(rdev);
+ error = PTR_ERR(rdev);
+ goto fail;
}
/* Save regulator for cleanup */
@@ -607,6 +597,13 @@ int tps_6507x_probe(struct i2c_client *client, const struct i2c_device_id *id)
i2c_set_clientdata(client, tps);
return 0;
+
+fail:
+ while (--i >= 0)
+ regulator_unregister(tps->rdev[i]);
+
+ kfree(tps);
+ return error;
}
/**
@@ -620,13 +617,12 @@ static int __devexit tps_6507x_remove(struct i2c_client *client)
struct tps_pmic *tps = i2c_get_clientdata(client);
int i;
+ /* clear the client data in i2c */
+ i2c_set_clientdata(client, NULL);
+
for (i = 0; i < TPS6507X_NUM_REGULATOR; i++)
regulator_unregister(tps->rdev[i]);
- tps->client = NULL;
-
- /* clear the client data in i2c */
- i2c_set_clientdata(client, NULL);
kfree(tps);
return 0;
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c
index 7e674859bd5..9729d760fb4 100644
--- a/drivers/regulator/twl-regulator.c
+++ b/drivers/regulator/twl-regulator.c
@@ -519,19 +519,19 @@ static struct twlreg_info twl_regs[] = {
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
verified since the specification is not public */
- TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x08),
- TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x08),
- TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x08),
- TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x08),
- TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x08),
- TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x08)
+ TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 2, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 3, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 4, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 5, 0, 0x21),
+ TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 7, 0, 0x21),
+ TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21),
+ TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21),
+ TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21),
+ TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21)
};
-static int twlreg_probe(struct platform_device *pdev)
+static int __devinit twlreg_probe(struct platform_device *pdev)
{
int i;
struct twlreg_info *info;
diff --git a/drivers/regulator/virtual.c b/drivers/regulator/virtual.c
index addc032c84b..d96cecaac73 100644
--- a/drivers/regulator/virtual.c
+++ b/drivers/regulator/virtual.c
@@ -19,7 +19,7 @@
struct virtual_consumer_data {
struct mutex lock;
struct regulator *regulator;
- int enabled;
+ bool enabled;
int min_uV;
int max_uV;
int min_uA;
@@ -49,7 +49,7 @@ static void update_voltage_constraints(struct device *dev,
dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
- data->enabled = 1;
+ data->enabled = true;
else
dev_err(dev, "regulator_enable() failed: %d\n",
ret);
@@ -59,7 +59,7 @@ static void update_voltage_constraints(struct device *dev,
dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
- data->enabled = 0;
+ data->enabled = false;
else
dev_err(dev, "regulator_disable() failed: %d\n",
ret);
@@ -89,7 +89,7 @@ static void update_current_limit_constraints(struct device *dev,
dev_dbg(dev, "Enabling regulator\n");
ret = regulator_enable(data->regulator);
if (ret == 0)
- data->enabled = 1;
+ data->enabled = true;
else
dev_err(dev, "regulator_enable() failed: %d\n",
ret);
@@ -99,7 +99,7 @@ static void update_current_limit_constraints(struct device *dev,
dev_dbg(dev, "Disabling regulator\n");
ret = regulator_disable(data->regulator);
if (ret == 0)
- data->enabled = 0;
+ data->enabled = false;
else
dev_err(dev, "regulator_disable() failed: %d\n",
ret);
@@ -270,24 +270,28 @@ static DEVICE_ATTR(min_microamps, 0666, show_min_uA, set_min_uA);
static DEVICE_ATTR(max_microamps, 0666, show_max_uA, set_max_uA);
static DEVICE_ATTR(mode, 0666, show_mode, set_mode);
-static struct device_attribute *attributes[] = {
- &dev_attr_min_microvolts,
- &dev_attr_max_microvolts,
- &dev_attr_min_microamps,
- &dev_attr_max_microamps,
- &dev_attr_mode,
+static struct attribute *regulator_virtual_attributes[] = {
+ &dev_attr_min_microvolts.attr,
+ &dev_attr_max_microvolts.attr,
+ &dev_attr_min_microamps.attr,
+ &dev_attr_max_microamps.attr,
+ &dev_attr_mode.attr,
+ NULL
};
-static int regulator_virtual_consumer_probe(struct platform_device *pdev)
+static const struct attribute_group regulator_virtual_attr_group = {
+ .attrs = regulator_virtual_attributes,
+};
+
+static int __devinit regulator_virtual_probe(struct platform_device *pdev)
{
char *reg_id = pdev->dev.platform_data;
struct virtual_consumer_data *drvdata;
- int ret, i;
+ int ret;
drvdata = kzalloc(sizeof(struct virtual_consumer_data), GFP_KERNEL);
- if (drvdata == NULL) {
+ if (drvdata == NULL)
return -ENOMEM;
- }
mutex_init(&drvdata->lock);
@@ -299,13 +303,12 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
goto err;
}
- for (i = 0; i < ARRAY_SIZE(attributes); i++) {
- ret = device_create_file(&pdev->dev, attributes[i]);
- if (ret != 0) {
- dev_err(&pdev->dev, "Failed to create attr %d: %d\n",
- i, ret);
- goto err_regulator;
- }
+ ret = sysfs_create_group(&pdev->dev.kobj,
+ &regulator_virtual_attr_group);
+ if (ret != 0) {
+ dev_err(&pdev->dev,
+ "Failed to create attribute group: %d\n", ret);
+ goto err_regulator;
}
drvdata->mode = regulator_get_mode(drvdata->regulator);
@@ -317,37 +320,36 @@ static int regulator_virtual_consumer_probe(struct platform_device *pdev)
err_regulator:
regulator_put(drvdata->regulator);
err:
- for (i = 0; i < ARRAY_SIZE(attributes); i++)
- device_remove_file(&pdev->dev, attributes[i]);
kfree(drvdata);
return ret;
}
-static int regulator_virtual_consumer_remove(struct platform_device *pdev)
+static int __devexit regulator_virtual_remove(struct platform_device *pdev)
{
struct virtual_consumer_data *drvdata = platform_get_drvdata(pdev);
- int i;
- for (i = 0; i < ARRAY_SIZE(attributes); i++)
- device_remove_file(&pdev->dev, attributes[i]);
+ sysfs_remove_group(&pdev->dev.kobj, &regulator_virtual_attr_group);
+
if (drvdata->enabled)
regulator_disable(drvdata->regulator);
regulator_put(drvdata->regulator);
kfree(drvdata);
+ platform_set_drvdata(pdev, NULL);
+
return 0;
}
static struct platform_driver regulator_virtual_consumer_driver = {
- .probe = regulator_virtual_consumer_probe,
- .remove = regulator_virtual_consumer_remove,
+ .probe = regulator_virtual_probe,
+ .remove = __devexit_p(regulator_virtual_remove),
.driver = {
.name = "reg-virt-consumer",
+ .owner = THIS_MODULE,
},
};
-
static int __init regulator_virtual_consumer_init(void)
{
return platform_driver_register(&regulator_virtual_consumer_driver);
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index 0a6577577e8..6e18e56d850 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -600,6 +600,8 @@ static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
struct wm831x *wm831x = dcdc->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "HC"), dcdc);
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
@@ -615,6 +617,7 @@ static struct platform_driver wm831x_buckv_driver = {
.remove = __devexit_p(wm831x_buckv_remove),
.driver = {
.name = "wm831x-buckv",
+ .owner = THIS_MODULE,
},
};
@@ -769,6 +772,8 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
struct wm831x *wm831x = dcdc->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
kfree(dcdc);
@@ -781,6 +786,7 @@ static struct platform_driver wm831x_buckp_driver = {
.remove = __devexit_p(wm831x_buckp_remove),
.driver = {
.name = "wm831x-buckp",
+ .owner = THIS_MODULE,
},
};
@@ -895,6 +901,8 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
struct wm831x *wm831x = dcdc->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), dcdc);
regulator_unregister(dcdc->regulator);
kfree(dcdc);
@@ -907,6 +915,7 @@ static struct platform_driver wm831x_boostp_driver = {
.remove = __devexit_p(wm831x_boostp_remove),
.driver = {
.name = "wm831x-boostp",
+ .owner = THIS_MODULE,
},
};
@@ -979,6 +988,8 @@ static __devexit int wm831x_epe_remove(struct platform_device *pdev)
{
struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
+
regulator_unregister(dcdc->regulator);
kfree(dcdc);
@@ -990,6 +1001,7 @@ static struct platform_driver wm831x_epe_driver = {
.remove = __devexit_p(wm831x_epe_remove),
.driver = {
.name = "wm831x-epe",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index 48857008758..ca0f6b6c384 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -222,6 +222,8 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
struct wm831x_isink *isink = platform_get_drvdata(pdev);
struct wm831x *wm831x = isink->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq(pdev, 0), isink);
regulator_unregister(isink->regulator);
@@ -235,6 +237,7 @@ static struct platform_driver wm831x_isink_driver = {
.remove = __devexit_p(wm831x_isink_remove),
.driver = {
.name = "wm831x-isink",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index 61e02ac2fda..d2406c1519a 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -371,6 +371,8 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
struct wm831x *wm831x = ldo->wm831x;
+ platform_set_drvdata(pdev, NULL);
+
wm831x_free_irq(wm831x, platform_get_irq_byname(pdev, "UV"), ldo);
regulator_unregister(ldo->regulator);
kfree(ldo);
@@ -383,6 +385,7 @@ static struct platform_driver wm831x_gp_ldo_driver = {
.remove = __devexit_p(wm831x_gp_ldo_remove),
.driver = {
.name = "wm831x-ldo",
+ .owner = THIS_MODULE,
},
};
@@ -640,6 +643,7 @@ static struct platform_driver wm831x_aldo_driver = {
.remove = __devexit_p(wm831x_aldo_remove),
.driver = {
.name = "wm831x-aldo",
+ .owner = THIS_MODULE,
},
};
@@ -811,6 +815,7 @@ static struct platform_driver wm831x_alive_ldo_driver = {
.remove = __devexit_p(wm831x_alive_ldo_remove),
.driver = {
.name = "wm831x-alive-ldo",
+ .owner = THIS_MODULE,
},
};
diff --git a/drivers/regulator/wm8350-regulator.c b/drivers/regulator/wm8350-regulator.c
index e7b89e704af..94227dd6ba7 100644
--- a/drivers/regulator/wm8350-regulator.c
+++ b/drivers/regulator/wm8350-regulator.c
@@ -290,6 +290,51 @@ static int wm8350_isink_is_enabled(struct regulator_dev *rdev)
return -EINVAL;
}
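+/*
+ * Report how long the current sink takes to ramp up after being enabled, in
+ * microseconds, derived from the flash-mode and on-ramp bits of the CSA/CSB
+ * flash control register.
+ */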
+static int wm8350_isink_enable_time(struct regulator_dev *rdev)
+{
+ struct wm8350 *wm8350 = rdev_get_drvdata(rdev);
+ int isink = rdev_get_id(rdev);
+ int reg;
+
+ switch (isink) {
+ case WM8350_ISINK_A:
+ reg = wm8350_reg_read(wm8350, WM8350_CSA_FLASH_CONTROL);
+ break;
+ case WM8350_ISINK_B:
+ reg = wm8350_reg_read(wm8350, WM8350_CSB_FLASH_CONTROL);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (reg & WM8350_CS1_FLASH_MODE) {
+ switch (reg & WM8350_CS1_ON_RAMP_MASK) {
+ case 0:
+ return 0;
+ case 1:
+ return 1950;
+ case 2:
+ return 3910;
+ case 3:
+ return 7800;
+ }
+ } else {
+ switch (reg & WM8350_CS1_ON_RAMP_MASK) {
+ case 0:
+ return 0;
+ case 1:
+ return 250000;
+ case 2:
+ return 500000;
+ case 3:
+ return 1000000;
+ }
+ }
+
+ return -EINVAL;
+}
+
int wm8350_isink_set_flash(struct wm8350 *wm8350, int isink, u16 mode,
u16 trigger, u16 duration, u16 on_ramp, u16 off_ramp,
u16 drive)
@@ -1221,6 +1266,7 @@ static struct regulator_ops wm8350_isink_ops = {
.enable = wm8350_isink_enable,
.disable = wm8350_isink_disable,
.is_enabled = wm8350_isink_is_enabled,
+ .enable_time = wm8350_isink_enable_time,
};
static struct regulator_desc wm8350_reg[NUM_WM8350_REGULATORS] = {
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c
index d9a2c988c6e..924c7eb29ee 100644
--- a/drivers/regulator/wm8400-regulator.c
+++ b/drivers/regulator/wm8400-regulator.c
@@ -317,14 +317,17 @@ static struct regulator_desc regulators[] = {
static int __devinit wm8400_regulator_probe(struct platform_device *pdev)
{
+ struct wm8400 *wm8400 = container_of(pdev, struct wm8400, regulators[pdev->id]);
struct regulator_dev *rdev;
rdev = regulator_register(&regulators[pdev->id], &pdev->dev,
- pdev->dev.platform_data, dev_get_drvdata(&pdev->dev));
+ pdev->dev.platform_data, wm8400);
if (IS_ERR(rdev))
return PTR_ERR(rdev);
+ platform_set_drvdata(pdev, rdev);
+
return 0;
}
@@ -332,6 +335,7 @@ static int __devexit wm8400_regulator_remove(struct platform_device *pdev)
{
struct regulator_dev *rdev = platform_get_drvdata(pdev);
+ platform_set_drvdata(pdev, NULL);
regulator_unregister(rdev);
return 0;
@@ -370,7 +374,6 @@ int wm8400_register_regulator(struct device *dev, int reg,
wm8400->regulators[reg].id = reg;
wm8400->regulators[reg].dev.parent = dev;
wm8400->regulators[reg].dev.platform_data = initdata;
- dev_set_drvdata(&wm8400->regulators[reg].dev, wm8400);
return platform_device_register(&wm8400->regulators[reg]);
}
diff --git a/drivers/regulator/wm8994-regulator.c b/drivers/regulator/wm8994-regulator.c
new file mode 100644
index 00000000000..95454a4637b
--- /dev/null
+++ b/drivers/regulator/wm8994-regulator.c
@@ -0,0 +1,307 @@
+/*
+ * wm8994-regulator.c -- Regulator driver for the WM8994
+ *
+ * Copyright 2009 Wolfson Microelectronics PLC.
+ *
+ * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/driver.h>
+#include <linux/gpio.h>
+
+#include <linux/mfd/wm8994/core.h>
+#include <linux/mfd/wm8994/registers.h>
+#include <linux/mfd/wm8994/pdata.h>
+
+struct wm8994_ldo {
+ int enable;
+ bool is_enabled;
+ struct regulator_dev *regulator;
+ struct wm8994 *wm8994;
+};
+
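+/* LDO1 selects 2.4V to 3.1V and LDO2 0.9V to 1.2V, both in 100mV steps. */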
+#define WM8994_LDO1_MAX_SELECTOR 0x7
+#define WM8994_LDO2_MAX_SELECTOR 0x3
+
+static int wm8994_ldo_enable(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
+ /* If we have no soft control assume that the LDO is always enabled. */
+ if (!ldo->enable)
+ return 0;
+
+ gpio_set_value(ldo->enable, 1);
+ ldo->is_enabled = true;
+
+ return 0;
+}
+
+static int wm8994_ldo_disable(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
+ /* If we have no soft control assume that the LDO is always enabled. */
+ if (!ldo->enable)
+ return -EINVAL;
+
+ gpio_set_value(ldo->enable, 0);
+ ldo->is_enabled = false;
+
+ return 0;
+}
+
+static int wm8994_ldo_is_enabled(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+
+ return ldo->is_enabled;
+}
+
+static int wm8994_ldo_enable_time(struct regulator_dev *rdev)
+{
+ /* 3ms is fairly conservative but this shouldn't be too performance
+ * critical; can be tweaked per-system if required. */
+ return 3000;
+}
+
+static int wm8994_ldo1_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector > WM8994_LDO1_MAX_SELECTOR)
+ return -EINVAL;
+
+ return (selector * 100000) + 2400000;
+}
+
+static int wm8994_ldo1_get_voltage(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+ int val;
+
+ val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_1);
+ if (val < 0)
+ return val;
+
+ val = (val & WM8994_LDO1_VSEL_MASK) >> WM8994_LDO1_VSEL_SHIFT;
+
+ return wm8994_ldo1_list_voltage(rdev, val);
+}
+
+static int wm8994_ldo1_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+ int selector, v;
+
+ selector = (min_uV - 2400000) / 100000;
+ v = wm8994_ldo1_list_voltage(rdev, selector);
+ if (v < 0 || v > max_uV)
+ return -EINVAL;
+
+ selector <<= WM8994_LDO1_VSEL_SHIFT;
+
+ return wm8994_set_bits(ldo->wm8994, WM8994_LDO_1,
+ WM8994_LDO1_VSEL_MASK, selector);
+}
+
+static struct regulator_ops wm8994_ldo1_ops = {
+ .enable = wm8994_ldo_enable,
+ .disable = wm8994_ldo_disable,
+ .is_enabled = wm8994_ldo_is_enabled,
+ .enable_time = wm8994_ldo_enable_time,
+
+ .list_voltage = wm8994_ldo1_list_voltage,
+ .get_voltage = wm8994_ldo1_get_voltage,
+ .set_voltage = wm8994_ldo1_set_voltage,
+};
+
+static int wm8994_ldo2_list_voltage(struct regulator_dev *rdev,
+ unsigned int selector)
+{
+ if (selector > WM8994_LDO2_MAX_SELECTOR)
+ return -EINVAL;
+
+ return (selector * 100000) + 900000;
+}
+
+static int wm8994_ldo2_get_voltage(struct regulator_dev *rdev)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+ int val;
+
+ val = wm8994_reg_read(ldo->wm8994, WM8994_LDO_2);
+ if (val < 0)
+ return val;
+
+ val = (val & WM8994_LDO2_VSEL_MASK) >> WM8994_LDO2_VSEL_SHIFT;
+
+ return wm8994_ldo2_list_voltage(rdev, val);
+}
+
+static int wm8994_ldo2_set_voltage(struct regulator_dev *rdev,
+ int min_uV, int max_uV)
+{
+ struct wm8994_ldo *ldo = rdev_get_drvdata(rdev);
+ int selector, v;
+
+ selector = (min_uV - 900000) / 100000;
+ v = wm8994_ldo2_list_voltage(rdev, selector);
+ if (v < 0 || v > max_uV)
+ return -EINVAL;
+
+ selector <<= WM8994_LDO2_VSEL_SHIFT;
+
+ return wm8994_set_bits(ldo->wm8994, WM8994_LDO_2,
+ WM8994_LDO2_VSEL_MASK, selector);
+}
+
+static struct regulator_ops wm8994_ldo2_ops = {
+ .enable = wm8994_ldo_enable,
+ .disable = wm8994_ldo_disable,
+ .is_enabled = wm8994_ldo_is_enabled,
+ .enable_time = wm8994_ldo_enable_time,
+
+ .list_voltage = wm8994_ldo2_list_voltage,
+ .get_voltage = wm8994_ldo2_get_voltage,
+ .set_voltage = wm8994_ldo2_set_voltage,
+};
+
+static struct regulator_desc wm8994_ldo_desc[] = {
+ {
+ .name = "LDO1",
+ .id = 1,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO1_MAX_SELECTOR + 1,
+ .ops = &wm8994_ldo1_ops,
+ .owner = THIS_MODULE,
+ },
+ {
+ .name = "LDO2",
+ .id = 2,
+ .type = REGULATOR_VOLTAGE,
+ .n_voltages = WM8994_LDO2_MAX_SELECTOR + 1,
+ .ops = &wm8994_ldo2_ops,
+ .owner = THIS_MODULE,
+ },
+};
+
+static __devinit int wm8994_ldo_probe(struct platform_device *pdev)
+{
+ struct wm8994 *wm8994 = dev_get_drvdata(pdev->dev.parent);
+ struct wm8994_pdata *pdata = wm8994->dev->platform_data;
+ int id = pdev->id % ARRAY_SIZE(pdata->ldo);
+ struct wm8994_ldo *ldo;
+ int ret;
+
+ dev_dbg(&pdev->dev, "Probing LDO%d\n", id + 1);
+
+ if (!pdata)
+ return -ENODEV;
+
+ ldo = kzalloc(sizeof(struct wm8994_ldo), GFP_KERNEL);
+ if (ldo == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate private data\n");
+ return -ENOMEM;
+ }
+
+ ldo->wm8994 = wm8994;
+
+ ldo->is_enabled = true;
+
+ if (pdata->ldo[id].enable && gpio_is_valid(pdata->ldo[id].enable)) {
+ ldo->enable = pdata->ldo[id].enable;
+
+ ret = gpio_request(ldo->enable, "WM8994 LDO enable");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get enable GPIO: %d\n",
+ ret);
+ goto err;
+ }
+
+ ret = gpio_direction_output(ldo->enable, ldo->is_enabled);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to set GPIO up: %d\n",
+ ret);
+ goto err_gpio;
+ }
+ }
+
+ ldo->regulator = regulator_register(&wm8994_ldo_desc[id], &pdev->dev,
+ pdata->ldo[id].init_data, ldo);
+ if (IS_ERR(ldo->regulator)) {
+ ret = PTR_ERR(ldo->regulator);
+ dev_err(wm8994->dev, "Failed to register LDO%d: %d\n",
+ id + 1, ret);
+ goto err_gpio;
+ }
+
+ platform_set_drvdata(pdev, ldo);
+
+ return 0;
+
+err_gpio:
+ if (gpio_is_valid(ldo->enable))
+ gpio_free(ldo->enable);
+err:
+ kfree(ldo);
+ return ret;
+}
+
+static __devexit int wm8994_ldo_remove(struct platform_device *pdev)
+{
+ struct wm8994_ldo *ldo = platform_get_drvdata(pdev);
+
+ platform_set_drvdata(pdev, NULL);
+
+ regulator_unregister(ldo->regulator);
+ if (gpio_is_valid(ldo->enable))
+ gpio_free(ldo->enable);
+ kfree(ldo);
+
+ return 0;
+}
+
+static struct platform_driver wm8994_ldo_driver = {
+ .probe = wm8994_ldo_probe,
+ .remove = __devexit_p(wm8994_ldo_remove),
+ .driver = {
+ .name = "wm8994-ldo",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init wm8994_ldo_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&wm8994_ldo_driver);
+ if (ret != 0)
+ pr_err("Failed to register Wm8994 GP LDO driver: %d\n", ret);
+
+ return ret;
+}
+subsys_initcall(wm8994_ldo_init);
+
+static void __exit wm8994_ldo_exit(void)
+{
+ platform_driver_unregister(&wm8994_ldo_driver);
+}
+module_exit(wm8994_ldo_exit);
+
+/* Module information */
+MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
+MODULE_DESCRIPTION("WM8994 LDO driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:wm8994-ldo");
diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c
index be5a6b73e60..40845c7e932 100644
--- a/drivers/rtc/class.c
+++ b/drivers/rtc/class.c
@@ -226,6 +226,7 @@ static void __exit rtc_exit(void)
{
rtc_dev_exit();
class_destroy(rtc_class);
+ idr_destroy(&rtc_idr);
}
subsys_initcall(rtc_init);
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c
index 86c61f14351..78a018b5c94 100644
--- a/drivers/rtc/rtc-at91sam9.c
+++ b/drivers/rtc/rtc-at91sam9.c
@@ -161,7 +161,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
if (offset == 0)
return -EILSEQ;
- memset(alrm, 0, sizeof(alrm));
+ memset(alrm, 0, sizeof(*alrm));
if (alarm != ALARM_DISABLED && offset != 0) {
rtc_time_to_tm(offset + alarm, tm);
diff --git a/drivers/rtc/rtc-coh901331.c b/drivers/rtc/rtc-coh901331.c
index 03ea530981d..44c4399ee71 100644
--- a/drivers/rtc/rtc-coh901331.c
+++ b/drivers/rtc/rtc-coh901331.c
@@ -271,12 +271,13 @@ static int coh901331_resume(struct platform_device *pdev)
{
struct coh901331_port *rtap = dev_get_drvdata(&pdev->dev);
- if (device_may_wakeup(&pdev->dev))
+ if (device_may_wakeup(&pdev->dev)) {
disable_irq_wake(rtap->irq);
- else
+ } else {
clk_enable(rtap->clk);
writel(rtap->irqmaskstore, rtap->virtbase + COH901331_IRQ_MASK);
clk_disable(rtap->clk);
+ }
return 0;
}
#else
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 9da02d108b7..91bde976bc0 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -115,6 +115,15 @@ static ssize_t ep93xx_rtc_show_comp_delete(struct device *dev,
}
static DEVICE_ATTR(comp_delete, S_IRUGO, ep93xx_rtc_show_comp_delete, NULL);
+static struct attribute *ep93xx_rtc_attrs[] = {
+ &dev_attr_comp_preload.attr,
+ &dev_attr_comp_delete.attr,
+ NULL
+};
+
+static const struct attribute_group ep93xx_rtc_sysfs_files = {
+ .attrs = ep93xx_rtc_attrs,
+};
static int __init ep93xx_rtc_probe(struct platform_device *pdev)
{
@@ -123,27 +132,22 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
struct rtc_device *rtc;
int err;
- ep93xx_rtc = kzalloc(sizeof(struct ep93xx_rtc), GFP_KERNEL);
- if (ep93xx_rtc == NULL)
+ ep93xx_rtc = devm_kzalloc(&pdev->dev, sizeof(*ep93xx_rtc), GFP_KERNEL);
+ if (!ep93xx_rtc)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (res == NULL) {
- err = -ENXIO;
- goto fail_free;
- }
+ if (!res)
+ return -ENXIO;
- res = request_mem_region(res->start, resource_size(res), pdev->name);
- if (res == NULL) {
- err = -EBUSY;
- goto fail_free;
- }
+ if (!devm_request_mem_region(&pdev->dev, res->start,
+ resource_size(res), pdev->name))
+ return -EBUSY;
- ep93xx_rtc->mmio_base = ioremap(res->start, resource_size(res));
- if (ep93xx_rtc->mmio_base == NULL) {
- err = -ENXIO;
- goto fail;
- }
+ ep93xx_rtc->mmio_base = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!ep93xx_rtc->mmio_base)
+ return -ENXIO;
pdev->dev.platform_data = ep93xx_rtc;
@@ -151,53 +155,34 @@ static int __init ep93xx_rtc_probe(struct platform_device *pdev)
&pdev->dev, &ep93xx_rtc_ops, THIS_MODULE);
if (IS_ERR(rtc)) {
err = PTR_ERR(rtc);
- goto fail;
+ goto exit;
}
platform_set_drvdata(pdev, rtc);
- err = device_create_file(&pdev->dev, &dev_attr_comp_preload);
+ err = sysfs_create_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
if (err)
goto fail;
- err = device_create_file(&pdev->dev, &dev_attr_comp_delete);
- if (err) {
- device_remove_file(&pdev->dev, &dev_attr_comp_preload);
- goto fail;
- }
return 0;
fail:
- if (ep93xx_rtc->mmio_base) {
- iounmap(ep93xx_rtc->mmio_base);
- pdev->dev.platform_data = NULL;
- }
- release_mem_region(res->start, resource_size(res));
-fail_free:
- kfree(ep93xx_rtc);
+ platform_set_drvdata(pdev, NULL);
+ rtc_device_unregister(rtc);
+exit:
+ pdev->dev.platform_data = NULL;
return err;
}
static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
{
struct rtc_device *rtc = platform_get_drvdata(pdev);
- struct ep93xx_rtc *ep93xx_rtc = pdev->dev.platform_data;
- struct resource *res;
-
- /* cleanup sysfs */
- device_remove_file(&pdev->dev, &dev_attr_comp_delete);
- device_remove_file(&pdev->dev, &dev_attr_comp_preload);
+ sysfs_remove_group(&pdev->dev.kobj, &ep93xx_rtc_sysfs_files);
+ platform_set_drvdata(pdev, NULL);
rtc_device_unregister(rtc);
-
- iounmap(ep93xx_rtc->mmio_base);
pdev->dev.platform_data = NULL;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
- platform_set_drvdata(pdev, NULL);
-
return 0;
}
diff --git a/drivers/rtc/rtc-mc13783.c b/drivers/rtc/rtc-mc13783.c
index 850f983c039..d60c81b7b69 100644
--- a/drivers/rtc/rtc-mc13783.c
+++ b/drivers/rtc/rtc-mc13783.c
@@ -28,6 +28,34 @@ struct mc13783_rtc {
int valid;
};
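+/*
+ * Mask or unmask the given RTC interrupt; the caller must already hold the
+ * mc13783 lock.  Fails with -ENODATA while the RTC contents are not valid.
+ */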
+static int mc13783_rtc_irq_enable_unlocked(struct device *dev,
+ unsigned int enabled, int irq)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ int (*func)(struct mc13783 *mc13783, int irq);
+
+ if (!priv->valid)
+ return -ENODATA;
+
+ func = enabled ? mc13783_irq_unmask : mc13783_irq_mask;
+ return func(priv->mc13783, irq);
+}
+
+static int mc13783_rtc_irq_enable(struct device *dev,
+ unsigned int enabled, int irq)
+{
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ int ret;
+
+ mc13783_lock(priv->mc13783);
+
+ ret = mc13783_rtc_irq_enable_unlocked(dev, enabled, irq);
+
+ mc13783_unlock(priv->mc13783);
+
+ return ret;
+}
+
static int mc13783_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
struct mc13783_rtc *priv = dev_get_drvdata(dev);
@@ -78,6 +106,7 @@ static int mc13783_rtc_set_mmss(struct device *dev, unsigned long secs)
{
struct mc13783_rtc *priv = dev_get_drvdata(dev);
unsigned int seconds, days;
+ unsigned int alarmseconds;
int ret;
seconds = secs % 86400;
@@ -86,7 +115,22 @@ static int mc13783_rtc_set_mmss(struct device *dev, unsigned long secs)
mc13783_lock(priv->mc13783);
/*
- * first write seconds=0 to prevent a day switch between writing days
+ * temporarily invalidate the alarm to prevent it from triggering while the
+ * day is already updated but the time is not yet.
+ */
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCTODA, &alarmseconds);
+ if (unlikely(ret))
+ goto out;
+
+ if (alarmseconds < 86400) {
+ ret = mc13783_reg_write(priv->mc13783,
+ MC13783_RTCTODA, 0x1ffff);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ /*
+ * write seconds=0 to prevent a day switch between writing days
* and seconds below
*/
ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTOD, 0);
@@ -101,11 +145,19 @@ static int mc13783_rtc_set_mmss(struct device *dev, unsigned long secs)
if (unlikely(ret))
goto out;
- ret = mc13783_ackirq(priv->mc13783, MC13783_IRQ_RTCRST);
+ /* restore alarm */
+ if (alarmseconds < 86400) {
+ ret = mc13783_reg_write(priv->mc13783,
+ MC13783_RTCTODA, alarmseconds);
+ if (unlikely(ret))
+ goto out;
+ }
+
+ ret = mc13783_irq_ack(priv->mc13783, MC13783_IRQ_RTCRST);
if (unlikely(ret))
goto out;
- ret = mc13783_unmask(priv->mc13783, MC13783_IRQ_RTCRST);
+ ret = mc13783_irq_unmask(priv->mc13783, MC13783_IRQ_RTCRST);
out:
priv->valid = !ret;
@@ -114,41 +166,139 @@ out:
return ret;
}
-static irqreturn_t mc13783_rtc_update_handler(int irq, void *dev)
+static int mc13783_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
- struct mc13783_rtc *priv = dev;
- struct mc13783 *mc13783 = priv->mc13783;
+ struct mc13783_rtc *priv = dev_get_drvdata(dev);
+ unsigned seconds, days;
+ unsigned long s1970;
+ int enabled, pending;
+ int ret;
- dev_dbg(&priv->rtc->dev, "1HZ\n");
+ mc13783_lock(priv->mc13783);
- rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCTODA, &seconds);
+ if (unlikely(ret))
+ goto out;
+ if (seconds >= 86400) {
+ ret = -ENODATA;
+ goto out;
+ }
+
+ ret = mc13783_reg_read(priv->mc13783, MC13783_RTCDAY, &days);
+ if (unlikely(ret))
+ goto out;
- mc13783_ackirq(mc13783, irq);
+ ret = mc13783_irq_status(priv->mc13783, MC13783_IRQ_TODA,
+ &enabled, &pending);
- return IRQ_HANDLED;
+out:
+ mc13783_unlock(priv->mc13783);
+
+ if (ret)
+ return ret;
+
+ alarm->enabled = enabled;
+ alarm->pending = pending;
+
+ s1970 = days * 86400 + seconds;
+
+ rtc_time_to_tm(s1970, &alarm->time);
+ dev_dbg(dev, "%s: %lu\n", __func__, s1970);
+
+ return 0;
}
-static int mc13783_rtc_update_irq_enable(struct device *dev,
- unsigned int enabled)
+static int mc13783_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
struct mc13783_rtc *priv = dev_get_drvdata(dev);
- int ret = -ENODATA;
+ unsigned long s1970;
+ unsigned seconds, days;
+ int ret;
mc13783_lock(priv->mc13783);
- if (!priv->valid)
+
+ /* disable alarm to prevent false triggering */
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTODA, 0x1ffff);
+ if (unlikely(ret))
goto out;
- ret = (enabled ? mc13783_unmask : mc13783_mask)(priv->mc13783,
- MC13783_IRQ_1HZ);
+ ret = mc13783_irq_ack(priv->mc13783, MC13783_IRQ_TODA);
+ if (unlikely(ret))
+ goto out;
+
+ ret = rtc_tm_to_time(&alarm->time, &s1970);
+ if (unlikely(ret))
+ goto out;
+
+ dev_dbg(dev, "%s: o%2.s %lu\n", __func__, alarm->enabled ? "n" : "ff",
+ s1970);
+
+ ret = mc13783_rtc_irq_enable_unlocked(dev, alarm->enabled,
+ MC13783_IRQ_TODA);
+ if (unlikely(ret))
+ goto out;
+
+ seconds = s1970 % 86400;
+ days = s1970 / 86400;
+
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCDAYA, days);
+ if (unlikely(ret))
+ goto out;
+
+ ret = mc13783_reg_write(priv->mc13783, MC13783_RTCTODA, seconds);
+
out:
mc13783_unlock(priv->mc13783);
return ret;
}
+static irqreturn_t mc13783_rtc_alarm_handler(int irq, void *dev)
+{
+ struct mc13783_rtc *priv = dev;
+ struct mc13783 *mc13783 = priv->mc13783;
+
+ dev_dbg(&priv->rtc->dev, "Alarm\n");
+
+ rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_AF);
+
+ mc13783_irq_ack(mc13783, irq);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mc13783_rtc_update_handler(int irq, void *dev)
+{
+ struct mc13783_rtc *priv = dev;
+ struct mc13783 *mc13783 = priv->mc13783;
+
+ dev_dbg(&priv->rtc->dev, "1HZ\n");
+
+ rtc_update_irq(priv->rtc, 1, RTC_IRQF | RTC_UF);
+
+ mc13783_irq_ack(mc13783, irq);
+
+ return IRQ_HANDLED;
+}
+
+static int mc13783_rtc_update_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ return mc13783_rtc_irq_enable(dev, enabled, MC13783_IRQ_1HZ);
+}
+
+static int mc13783_rtc_alarm_irq_enable(struct device *dev,
+ unsigned int enabled)
+{
+ return mc13783_rtc_irq_enable(dev, enabled, MC13783_IRQ_TODA);
+}
+
static const struct rtc_class_ops mc13783_rtc_ops = {
.read_time = mc13783_rtc_read_time,
.set_mmss = mc13783_rtc_set_mmss,
+ .read_alarm = mc13783_rtc_read_alarm,
+ .set_alarm = mc13783_rtc_set_alarm,
+ .alarm_irq_enable = mc13783_rtc_alarm_irq_enable,
.update_irq_enable = mc13783_rtc_update_irq_enable,
};
@@ -160,7 +310,7 @@ static irqreturn_t mc13783_rtc_reset_handler(int irq, void *dev)
dev_dbg(&priv->rtc->dev, "RTCRST\n");
priv->valid = 0;
- mc13783_mask(mc13783, irq);
+ mc13783_irq_mask(mc13783, irq);
return IRQ_HANDLED;
}
@@ -169,6 +319,7 @@ static int __devinit mc13783_rtc_probe(struct platform_device *pdev)
{
int ret;
struct mc13783_rtc *priv;
+ int rtcrst_pending;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
@@ -177,8 +328,6 @@ static int __devinit mc13783_rtc_probe(struct platform_device *pdev)
priv->mc13783 = dev_get_drvdata(pdev->dev.parent);
platform_set_drvdata(pdev, priv);
- priv->valid = 1;
-
mc13783_lock(priv->mc13783);
ret = mc13783_irq_request(priv->mc13783, MC13783_IRQ_RTCRST,
@@ -186,33 +335,45 @@ static int __devinit mc13783_rtc_probe(struct platform_device *pdev)
if (ret)
goto err_reset_irq_request;
+ ret = mc13783_irq_status(priv->mc13783, MC13783_IRQ_RTCRST,
+ NULL, &rtcrst_pending);
+ if (ret)
+ goto err_reset_irq_status;
+
+ priv->valid = !rtcrst_pending;
+
ret = mc13783_irq_request_nounmask(priv->mc13783, MC13783_IRQ_1HZ,
mc13783_rtc_update_handler, DRIVER_NAME, priv);
if (ret)
goto err_update_irq_request;
- mc13783_unlock(priv->mc13783);
+ ret = mc13783_irq_request_nounmask(priv->mc13783, MC13783_IRQ_TODA,
+ mc13783_rtc_alarm_handler, DRIVER_NAME, priv);
+ if (ret)
+ goto err_alarm_irq_request;
priv->rtc = rtc_device_register(pdev->name,
&pdev->dev, &mc13783_rtc_ops, THIS_MODULE);
-
if (IS_ERR(priv->rtc)) {
ret = PTR_ERR(priv->rtc);
- mc13783_lock(priv->mc13783);
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_TODA, priv);
+err_alarm_irq_request:
mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
err_update_irq_request:
+err_reset_irq_status:
+
mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
err_reset_irq_request:
- mc13783_unlock(priv->mc13783);
-
platform_set_drvdata(pdev, NULL);
kfree(priv);
}
+ mc13783_unlock(priv->mc13783);
+
return ret;
}
@@ -220,10 +381,11 @@ static int __devexit mc13783_rtc_remove(struct platform_device *pdev)
{
struct mc13783_rtc *priv = platform_get_drvdata(pdev);
- rtc_device_unregister(priv->rtc);
-
mc13783_lock(priv->mc13783);
+ rtc_device_unregister(priv->rtc);
+
+ mc13783_irq_free(priv->mc13783, MC13783_IRQ_TODA, priv);
mc13783_irq_free(priv->mc13783, MC13783_IRQ_1HZ, priv);
mc13783_irq_free(priv->mc13783, MC13783_IRQ_RTCRST, priv);
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c
index 6bd5072d4eb..8710f9415d9 100644
--- a/drivers/rtc/rtc-mxc.c
+++ b/drivers/rtc/rtc-mxc.c
@@ -396,8 +396,11 @@ static int __init mxc_rtc_probe(struct platform_device *pdev)
pdata->ioaddr = ioremap(res->start, resource_size(res));
clk = clk_get(&pdev->dev, "ckil");
- if (IS_ERR(clk))
- return PTR_ERR(clk);
+ if (IS_ERR(clk)) {
+ iounmap(pdata->ioaddr);
+ ret = PTR_ERR(clk);
+ goto exit_free_pdata;
+ }
rate = clk_get_rate(clk);
clk_put(clk);
diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c
index e75df9d50e2..2ceb365533b 100644
--- a/drivers/rtc/rtc-pcf2123.c
+++ b/drivers/rtc/rtc-pcf2123.c
@@ -315,7 +315,7 @@ kfree_exit:
return ret;
}
-static int pcf2123_remove(struct spi_device *spi)
+static int __devexit pcf2123_remove(struct spi_device *spi)
{
struct pcf2123_plat_data *pdata = spi->dev.platform_data;
int i;
diff --git a/drivers/rtc/rtc-twl.c b/drivers/rtc/rtc-twl.c
index c6a83a2a722..ed1b8682812 100644
--- a/drivers/rtc/rtc-twl.c
+++ b/drivers/rtc/rtc-twl.c
@@ -57,7 +57,7 @@ enum {
REG_RTC_COMP_LSB_REG,
REG_RTC_COMP_MSB_REG,
};
-const static u8 twl4030_rtc_reg_map[] = {
+static const u8 twl4030_rtc_reg_map[] = {
[REG_SECONDS_REG] = 0x00,
[REG_MINUTES_REG] = 0x01,
[REG_HOURS_REG] = 0x02,
@@ -80,7 +80,7 @@ const static u8 twl4030_rtc_reg_map[] = {
[REG_RTC_COMP_LSB_REG] = 0x10,
[REG_RTC_COMP_MSB_REG] = 0x11,
};
-const static u8 twl6030_rtc_reg_map[] = {
+static const u8 twl6030_rtc_reg_map[] = {
[REG_SECONDS_REG] = 0x00,
[REG_MINUTES_REG] = 0x01,
[REG_HOURS_REG] = 0x02,
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index b232693378c..a3ac4456e0b 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -649,6 +649,7 @@ struct qeth_card_options {
int performance_stats;
int rx_sg_cb;
enum qeth_ipa_isolation_modes isolation;
+ int sniffer;
};
/*
@@ -737,6 +738,7 @@ struct qeth_card {
struct qeth_discipline discipline;
atomic_t force_alloc_skb;
struct service_level qeth_service_level;
+ struct qdio_ssqd_desc ssqd;
};
struct qeth_card_list_struct {
@@ -811,7 +813,8 @@ int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *,
enum qeth_ipa_cmds, enum qeth_prot_versions);
int qeth_query_setadapterparms(struct qeth_card *);
-int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, const char *);
+int qeth_check_qdio_errors(struct qeth_card *, struct qdio_buffer *,
+ unsigned int, const char *);
void qeth_queue_input_buffer(struct qeth_card *, int);
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *,
struct qdio_buffer *, struct qdio_buffer_element **, int *,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index d34804d5ece..fa8a519218a 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -269,6 +269,7 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
card->qdio.init_pool.buf_count = bufcnt;
return qeth_alloc_buffer_pool(card);
}
+EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
static int qeth_issue_next_read(struct qeth_card *card)
{
@@ -350,8 +351,10 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
if (IS_IPA(iob->data)) {
cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
if (IS_IPA_REPLY(cmd)) {
- if (cmd->hdr.command < IPA_CMD_SETCCID ||
- cmd->hdr.command > IPA_CMD_MODCCID)
+ if (cmd->hdr.command != IPA_CMD_SETCCID &&
+ cmd->hdr.command != IPA_CMD_DELCCID &&
+ cmd->hdr.command != IPA_CMD_MODCCID &&
+ cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
qeth_issue_ipa_msg(cmd,
cmd->hdr.return_code, card);
return cmd;
@@ -1100,11 +1103,6 @@ static int qeth_setup_card(struct qeth_card *card)
card->thread_running_mask = 0;
INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
INIT_LIST_HEAD(&card->ip_list);
- card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
- if (!card->ip_tbd_list) {
- QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
- return -ENOMEM;
- }
INIT_LIST_HEAD(card->ip_tbd_list);
INIT_LIST_HEAD(&card->cmd_waiter_list);
init_waitqueue_head(&card->wait_q);
@@ -1138,21 +1136,30 @@ static struct qeth_card *qeth_alloc_card(void)
QETH_DBF_TEXT(SETUP, 2, "alloccrd");
card = kzalloc(sizeof(struct qeth_card), GFP_DMA|GFP_KERNEL);
if (!card)
- return NULL;
+ goto out;
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- if (qeth_setup_channel(&card->read)) {
- kfree(card);
- return NULL;
- }
- if (qeth_setup_channel(&card->write)) {
- qeth_clean_channel(&card->read);
- kfree(card);
- return NULL;
+ card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_KERNEL);
+ if (!card->ip_tbd_list) {
+ QETH_DBF_TEXT(SETUP, 0, "iptbdnom");
+ goto out_card;
}
+ if (qeth_setup_channel(&card->read))
+ goto out_ip;
+ if (qeth_setup_channel(&card->write))
+ goto out_channel;
card->options.layer2 = -1;
card->qeth_service_level.seq_print = qeth_core_sl_print;
register_service_level(&card->qeth_service_level);
return card;
+
+out_channel:
+ qeth_clean_channel(&card->read);
+out_ip:
+ kfree(card->ip_tbd_list);
+out_card:
+ kfree(card);
+out:
+ return NULL;
}
static int qeth_determine_card_type(struct qeth_card *card)
@@ -1355,26 +1362,29 @@ static int qeth_read_conf_data(struct qeth_card *card, void **buffer,
return ret;
}
-static int qeth_get_unitaddr(struct qeth_card *card)
+static void qeth_configure_unitaddr(struct qeth_card *card, char *prcd)
{
- int length;
- char *prcd;
- int rc;
-
- QETH_DBF_TEXT(SETUP, 2, "getunit");
- rc = qeth_read_conf_data(card, (void **) &prcd, &length);
- if (rc) {
- QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
- dev_name(&card->gdev->dev), rc);
- return rc;
- }
+ QETH_DBF_TEXT(SETUP, 2, "cfgunit");
card->info.chpid = prcd[30];
card->info.unit_addr2 = prcd[31];
card->info.cula = prcd[63];
card->info.guestlan = ((prcd[0x10] == _ascebc['V']) &&
(prcd[0x11] == _ascebc['M']));
- kfree(prcd);
- return 0;
+}
+
+static void qeth_configure_blkt_default(struct qeth_card *card, char *prcd)
+{
+ QETH_DBF_TEXT(SETUP, 2, "cfgblkt");
+
+ if (prcd[74] == 0xF0 && prcd[75] == 0xF0 && prcd[76] == 0xF5) {
+ card->info.blkt.time_total = 250;
+ card->info.blkt.inter_packet = 5;
+ card->info.blkt.inter_packet_jumbo = 15;
+ } else {
+ card->info.blkt.time_total = 0;
+ card->info.blkt.inter_packet = 0;
+ card->info.blkt.inter_packet_jumbo = 0;
+ }
}
static void qeth_init_tokens(struct qeth_card *card)
@@ -2573,8 +2583,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
}
EXPORT_SYMBOL_GPL(qeth_query_setadapterparms);
-int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
- const char *dbftext)
+int qeth_check_qdio_errors(struct qeth_card *card, struct qdio_buffer *buf,
+ unsigned int qdio_error, const char *dbftext)
{
if (qdio_error) {
QETH_DBF_TEXT(TRACE, 2, dbftext);
@@ -2584,7 +2594,11 @@ int qeth_check_qdio_errors(struct qdio_buffer *buf, unsigned int qdio_error,
QETH_DBF_TEXT_(QERR, 2, " F14=%02X",
buf->element[14].flags & 0xff);
QETH_DBF_TEXT_(QERR, 2, " qerr=%X", qdio_error);
- return 1;
+ if ((buf->element[15].flags & 0xff) == 0x12) {
+ card->stats.rx_dropped++;
+ return 0;
+ } else
+ return 1;
}
return 0;
}
@@ -2667,7 +2681,7 @@ static int qeth_handle_send_error(struct qeth_card *card,
qdio_err = 1;
}
}
- qeth_check_qdio_errors(buffer->buffer, qdio_err, "qouterr");
+ qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");
if (!qdio_err)
return QETH_SEND_ERROR_NONE;
@@ -3509,6 +3523,7 @@ void qeth_tx_timeout(struct net_device *dev)
{
struct qeth_card *card;
+ QETH_DBF_TEXT(TRACE, 4, "txtimeo");
card = dev->ml_priv;
card->stats.tx_errors++;
qeth_schedule_recovery(card);
@@ -3847,9 +3862,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
int qeth_core_hardsetup_card(struct qeth_card *card)
{
- struct qdio_ssqd_desc *ssqd;
int retries = 0;
- int mpno = 0;
int rc;
QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
@@ -3882,31 +3895,6 @@ retriable:
else
goto retry;
}
-
- rc = qeth_get_unitaddr(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- return rc;
- }
-
- ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL);
- if (!ssqd) {
- rc = -ENOMEM;
- goto out;
- }
- rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd);
- if (rc == 0)
- mpno = ssqd->pcnt;
- kfree(ssqd);
-
- if (mpno)
- mpno = min(mpno - 1, QETH_MAX_PORTNO);
- if (card->info.portno > mpno) {
- QETH_DBF_MESSAGE(2, "Device %s does not offer port number %d"
- "\n.", CARD_BUS_ID(card), card->info.portno);
- rc = -ENODEV;
- goto out;
- }
qeth_init_tokens(card);
qeth_init_func_level(card);
rc = qeth_idx_activate_channel(&card->read, qeth_idx_read_cb);
@@ -3990,7 +3978,7 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
struct qdio_buffer_element *element = *__element;
int offset = *__offset;
struct sk_buff *skb = NULL;
- int skb_len;
+ int skb_len = 0;
void *data_ptr;
int data_len;
int headroom = 0;
@@ -4009,20 +3997,24 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
*hdr = element->addr + offset;
offset += sizeof(struct qeth_hdr);
- if (card->options.layer2) {
- if (card->info.type == QETH_CARD_TYPE_OSN) {
- skb_len = (*hdr)->hdr.osn.pdu_length;
- headroom = sizeof(struct qeth_hdr);
- } else {
- skb_len = (*hdr)->hdr.l2.pkt_length;
- }
- } else {
+ switch ((*hdr)->hdr.l2.id) {
+ case QETH_HEADER_TYPE_LAYER2:
+ skb_len = (*hdr)->hdr.l2.pkt_length;
+ break;
+ case QETH_HEADER_TYPE_LAYER3:
skb_len = (*hdr)->hdr.l3.length;
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
(card->info.link_type == QETH_LINK_TYPE_HSTR))
headroom = TR_HLEN;
else
headroom = ETH_HLEN;
+ break;
+ case QETH_HEADER_TYPE_OSN:
+ skb_len = (*hdr)->hdr.osn.pdu_length;
+ headroom = sizeof(struct qeth_hdr);
+ break;
+ default:
+ break;
}
if (!skb_len)
@@ -4177,6 +4169,41 @@ void qeth_core_free_discipline(struct qeth_card *card)
card->discipline.ccwgdriver = NULL;
}
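+/*
+ * Temporarily set the data device online to read the configuration record
+ * and the QDIO SSQD descriptor; cache the unit address, default blocking-time
+ * values and SSQD information in the card structure before going offline
+ * again.
+ */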
+static void qeth_determine_capabilities(struct qeth_card *card)
+{
+ int rc;
+ int length;
+ char *prcd;
+
+ QETH_DBF_TEXT(SETUP, 2, "detcapab");
+ rc = ccw_device_set_online(CARD_DDEV(card));
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ goto out;
+ }
+
+ rc = qeth_read_conf_data(card, (void **) &prcd, &length);
+ if (rc) {
+ QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n",
+ dev_name(&card->gdev->dev), rc);
+ QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ goto out_offline;
+ }
+ qeth_configure_unitaddr(card, prcd);
+ qeth_configure_blkt_default(card, prcd);
+ kfree(prcd);
+
+ rc = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+
+out_offline:
+ ccw_device_set_offline(CARD_DDEV(card));
+out:
+ return;
+}
+
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
struct qeth_card *card;
@@ -4242,6 +4269,8 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
write_lock_irqsave(&qeth_core_card_list.rwlock, flags);
list_add_tail(&card->list, &qeth_core_card_list.list);
write_unlock_irqrestore(&qeth_core_card_list.rwlock, flags);
+
+ qeth_determine_capabilities(card);
return 0;
err_card:
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 1ba51152f66..104a3351e02 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -156,6 +156,8 @@ enum qeth_ipa_return_codes {
IPA_RC_IP_TABLE_FULL = 0x0002,
IPA_RC_UNKNOWN_ERROR = 0x0003,
IPA_RC_UNSUPPORTED_COMMAND = 0x0004,
+ IPA_RC_TRACE_ALREADY_ACTIVE = 0x0005,
+ IPA_RC_INVALID_FORMAT = 0x0006,
IPA_RC_DUP_IPV6_REMOTE = 0x0008,
IPA_RC_DUP_IPV6_HOME = 0x0010,
IPA_RC_UNREGISTERED_ADDR = 0x0011,
@@ -196,6 +198,11 @@ enum qeth_ipa_return_codes {
IPA_RC_INVALID_IP_VERSION2 = 0xf001,
IPA_RC_FFFF = 0xffff
};
+/* for DELIP */
+#define IPA_RC_IP_ADDRESS_NOT_DEFINED IPA_RC_PRIMARY_ALREADY_DEFINED
+/* for SET_DIAGNOSTIC_ASSIST */
+#define IPA_RC_INVALID_SUBCMD IPA_RC_IP_TABLE_FULL
+#define IPA_RC_HARDWARE_AUTH_ERROR IPA_RC_UNKNOWN_ERROR
/* IPA function flags; each flag marks availability of respective function */
enum qeth_ipa_funcs {
@@ -246,6 +253,7 @@ enum qeth_ipa_setadp_cmd {
IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
+ IPA_SETADP_SET_DIAG_ASSIST = 0x00002000L,
IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
};
enum qeth_ipa_mac_ops {
@@ -424,6 +432,40 @@ struct qeth_create_destroy_address {
__u8 unique_id[8];
} __attribute__ ((packed));
+/* SET DIAGNOSTIC ASSIST IPA Command: *************************************/
+
+enum qeth_diags_cmds {
+ QETH_DIAGS_CMD_QUERY = 0x0001,
+ QETH_DIAGS_CMD_TRAP = 0x0002,
+ QETH_DIAGS_CMD_TRACE = 0x0004,
+ QETH_DIAGS_CMD_NOLOG = 0x0008,
+ QETH_DIAGS_CMD_DUMP = 0x0010,
+};
+
+enum qeth_diags_trace_types {
+ QETH_DIAGS_TYPE_HIPERSOCKET = 0x02,
+};
+
+enum qeth_diags_trace_cmds {
+ QETH_DIAGS_CMD_TRACE_ENABLE = 0x0001,
+ QETH_DIAGS_CMD_TRACE_DISABLE = 0x0002,
+ QETH_DIAGS_CMD_TRACE_MODIFY = 0x0004,
+ QETH_DIAGS_CMD_TRACE_REPLACE = 0x0008,
+ QETH_DIAGS_CMD_TRACE_QUERY = 0x0010,
+};
+
+struct qeth_ipacmd_diagass {
+ __u32 host_tod2;
+ __u32:32;
+ __u16 subcmd_len;
+ __u16:16;
+ __u32 subcmd;
+ __u8 type;
+ __u8 action;
+ __u16 options;
+ __u32:32;
+} __attribute__ ((packed));
+
/* Header for each IPA command */
struct qeth_ipacmd_hdr {
__u8 command;
@@ -452,6 +494,7 @@ struct qeth_ipa_cmd {
struct qeth_create_destroy_address create_destroy_addr;
struct qeth_ipacmd_setadpparms setadapterparms;
struct qeth_set_routing setrtg;
+ struct qeth_ipacmd_diagass diagass;
} data;
} __attribute__ ((packed));
@@ -469,7 +512,6 @@ enum qeth_ipa_arp_return_codes {
QETH_IPA_ARP_RC_Q_NO_DATA = 0x0008,
};
-
extern char *qeth_get_ipa_msg(enum qeth_ipa_return_codes rc);
extern char *qeth_get_ipa_cmd_name(enum qeth_ipa_cmds cmd);
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 9ff2b36fdc4..88ae4357136 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -118,7 +118,7 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
{
struct qeth_card *card = dev_get_drvdata(dev);
char *tmp;
- unsigned int portno;
+ unsigned int portno, limit;
if (!card)
return -EINVAL;
@@ -128,9 +128,11 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
return -EPERM;
portno = simple_strtoul(buf, &tmp, 16);
- if (portno > QETH_MAX_PORTNO) {
+ if (portno > QETH_MAX_PORTNO)
+ return -EINVAL;
+ limit = (card->ssqd.pcnt ? card->ssqd.pcnt - 1 : card->ssqd.pcnt);
+ if (portno > limit)
return -EINVAL;
- }
card->info.portno = portno;
return count;
@@ -537,7 +539,7 @@ static ssize_t qeth_dev_blkt_total_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
- &card->info.blkt.time_total, 1000);
+ &card->info.blkt.time_total, 5000);
}
@@ -559,7 +561,7 @@ static ssize_t qeth_dev_blkt_inter_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
- &card->info.blkt.inter_packet, 100);
+ &card->info.blkt.inter_packet, 1000);
}
static DEVICE_ATTR(inter, 0644, qeth_dev_blkt_inter_show,
@@ -580,7 +582,7 @@ static ssize_t qeth_dev_blkt_inter_jumbo_store(struct device *dev,
struct qeth_card *card = dev_get_drvdata(dev);
return qeth_dev_blkt_store(card, buf, count,
- &card->info.blkt.inter_packet_jumbo, 100);
+ &card->info.blkt.inter_packet_jumbo, 1000);
}
static DEVICE_ATTR(inter_jumbo, 0644, qeth_dev_blkt_inter_jumbo_show,
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0b763396d5d..51fde6f2e0b 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -486,22 +486,14 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
case IPA_RC_L2_DUP_MAC:
case IPA_RC_L2_DUP_LAYER3_MAC:
dev_warn(&card->gdev->dev,
- "MAC address "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "already exists\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ "MAC address %pM already exists\n",
+ card->dev->dev_addr);
break;
case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
dev_warn(&card->gdev->dev,
- "MAC address "
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "is not authorized\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5]);
+ "MAC address %pM is not authorized\n",
+ card->dev->dev_addr);
break;
default:
break;
@@ -512,12 +504,8 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card,
memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
OSA_ADDR_LEN);
dev_info(&card->gdev->dev,
- "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x "
- "successfully registered on device %s\n",
- card->dev->dev_addr[0], card->dev->dev_addr[1],
- card->dev->dev_addr[2], card->dev->dev_addr[3],
- card->dev->dev_addr[4], card->dev->dev_addr[5],
- card->dev->name);
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
}
return 0;
}
@@ -634,7 +622,7 @@ static void qeth_l2_set_multicast_list(struct net_device *dev)
for (dm = dev->mc_list; dm; dm = dm->next)
qeth_l2_add_mc(card, dm->da_addr, 0);
- list_for_each_entry(ha, &dev->uc.list, list)
+ netdev_for_each_uc_addr(ha, dev)
qeth_l2_add_mc(card, ha->addr, 1);
spin_unlock_bh(&card->mclock);
@@ -781,7 +769,8 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev,
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!(qdio_err &&
- qeth_check_qdio_errors(buffer->buffer, qdio_err, "qinerr")))
+ qeth_check_qdio_errors(card, buffer->buffer, qdio_err,
+ "qinerr")))
qeth_l2_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
qeth_put_buffer_pool_entry(card, buffer->pool_entry);
@@ -938,7 +927,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 321988fa9f7..8447d233d0b 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -13,6 +13,8 @@
#include "qeth_core.h"
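+/* qdioac2 flag: device may act as a HiperSockets network traffic analyzer */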
+#define QETH_SNIFF_AVAIL 0x0008
+
struct qeth_ipaddr {
struct list_head entry;
enum qeth_ip_types type;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index fd1b6ed3721..5475834ab91 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -242,6 +242,8 @@ static int __qeth_l3_insert_ip_todo(struct qeth_card *card,
struct qeth_ipaddr *tmp, *t;
int found = 0;
+ if (card->options.sniffer)
+ return 0;
list_for_each_entry_safe(tmp, t, card->ip_tbd_list, entry) {
if ((addr->type == QETH_IP_TYPE_DEL_ALL_MC) &&
(tmp->type == QETH_IP_TYPE_DEL_ALL_MC))
@@ -457,6 +459,8 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
QETH_DBF_TEXT(TRACE, 2, "sdiplist");
QETH_DBF_HEX(TRACE, 2, &card, sizeof(void *));
+ if (card->options.sniffer)
+ return;
spin_lock_irqsave(&card->ip_lock, flags);
tbd_list = card->ip_tbd_list;
card->ip_tbd_list = kmalloc(sizeof(struct list_head), GFP_ATOMIC);
@@ -495,7 +499,7 @@ static void qeth_l3_set_ip_addr_list(struct qeth_card *card)
spin_unlock_irqrestore(&card->ip_lock, flags);
rc = qeth_l3_deregister_addr_entry(card, addr);
spin_lock_irqsave(&card->ip_lock, flags);
- if (!rc || (rc == IPA_RC_PRIMARY_ALREADY_DEFINED))
+ if (!rc || (rc == IPA_RC_IP_ADDRESS_NOT_DEFINED))
kfree(addr);
else
list_add_tail(&addr->entry, &card->ip_list);
@@ -513,6 +517,8 @@ static void qeth_l3_clear_ip_list(struct qeth_card *card, int clean,
unsigned long flags;
QETH_DBF_TEXT(TRACE, 4, "clearip");
+ if (recover && card->options.sniffer)
+ return;
spin_lock_irqsave(&card->ip_lock, flags);
/* clear todo list */
list_for_each_entry_safe(addr, tmp, card->ip_tbd_list, entry) {
@@ -1674,6 +1680,76 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
return rc;
}
+static int
+qeth_diags_trace_cb(struct qeth_card *card, struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd;
+ __u16 rc;
+
+ QETH_DBF_TEXT(SETUP, 2, "diastrcb");
+
+ cmd = (struct qeth_ipa_cmd *)data;
+ rc = cmd->hdr.return_code;
+ if (rc) {
+ QETH_DBF_TEXT_(TRACE, 2, "dxter%x", rc);
+ if (cmd->data.diagass.action == QETH_DIAGS_CMD_TRACE_ENABLE) {
+ switch (rc) {
+ case IPA_RC_HARDWARE_AUTH_ERROR:
+ dev_warn(&card->gdev->dev, "The device is not "
+ "authorized to run as a HiperSockets "
+ "network traffic analyzer\n");
+ break;
+ case IPA_RC_TRACE_ALREADY_ACTIVE:
+ dev_warn(&card->gdev->dev, "A HiperSockets "
+ "network traffic analyzer is already "
+ "active in the HiperSockets LAN\n");
+ break;
+ default:
+ break;
+ }
+ }
+ return 0;
+ }
+
+ switch (cmd->data.diagass.action) {
+ case QETH_DIAGS_CMD_TRACE_QUERY:
+ break;
+ case QETH_DIAGS_CMD_TRACE_DISABLE:
+ card->info.promisc_mode = SET_PROMISC_MODE_OFF;
+ dev_info(&card->gdev->dev, "The HiperSockets network traffic "
+ "analyzer is deactivated\n");
+ break;
+ case QETH_DIAGS_CMD_TRACE_ENABLE:
+ card->info.promisc_mode = SET_PROMISC_MODE_ON;
+ dev_info(&card->gdev->dev, "The HiperSockets network traffic "
+ "analyzer is activated\n");
+ break;
+ default:
+ QETH_DBF_MESSAGE(2, "Unknown sniffer action (0x%04x) on %s\n",
+ cmd->data.diagass.action, QETH_CARD_IFNAME(card));
+ }
+
+ return 0;
+}
+
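+/*
+ * Build and send a SET_DIAGNOSTIC_ASSIST IPA command that enables, disables
+ * or queries the HiperSockets trace (sniffer) facility.
+ */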
+static int
+qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
+{
+ struct qeth_cmd_buffer *iob;
+ struct qeth_ipa_cmd *cmd;
+
+ QETH_DBF_TEXT(SETUP, 2, "diagtrac");
+
+ iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.diagass.subcmd_len = 16;
+ cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
+ cmd->data.diagass.type = QETH_DIAGS_TYPE_HIPERSOCKET;
+ cmd->data.diagass.action = diags_cmd;
+ return qeth_send_ipa_cmd(card, iob, qeth_diags_trace_cb, NULL);
+}
+
static void qeth_l3_get_mac_for_ipm(__u32 ipm, char *mac,
struct net_device *dev)
{
@@ -1951,7 +2027,10 @@ static inline __u16 qeth_l3_rebuild_skb(struct qeth_card *card,
case QETH_CAST_ANYCAST:
case QETH_CAST_NOCAST:
default:
- skb->pkt_type = PACKET_HOST;
+ if (card->options.sniffer)
+ skb->pkt_type = PACKET_OTHERHOST;
+ else
+ skb->pkt_type = PACKET_HOST;
memcpy(tg_addr, card->dev->dev_addr,
card->dev->addr_len);
}
@@ -2007,7 +2086,6 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
int offset;
__u16 vlan_tag = 0;
unsigned int len;
-
/* get first element of current buffer */
element = (struct qdio_buffer_element *)&buf->buffer->element[0];
offset = 0;
@@ -2026,7 +2104,7 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
case QETH_HEADER_TYPE_LAYER3:
vlan_tag = qeth_l3_rebuild_skb(card, skb, hdr);
len = skb->len;
- if (vlan_tag)
+ if (vlan_tag && !card->options.sniffer)
if (card->vlangrp)
vlan_hwaccel_rx(skb, card->vlangrp,
vlan_tag);
@@ -2037,6 +2115,16 @@ static void qeth_l3_process_inbound_buffer(struct qeth_card *card,
else
netif_rx(skb);
break;
+ case QETH_HEADER_TYPE_LAYER2: /* for HiperSockets sniffer */
+ skb->pkt_type = PACKET_HOST;
+ skb->protocol = eth_type_trans(skb, skb->dev);
+ if (card->options.checksum_type == NO_CHECKSUMMING)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+ len = skb->len;
+ netif_receive_skb(skb);
+ break;
default:
dev_kfree_skb_any(skb);
QETH_DBF_TEXT(TRACE, 3, "inbunkno");
@@ -2118,6 +2206,9 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
qeth_set_allowed_threads(card, 0, 1);
+ if (card->options.sniffer &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_ON))
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
if (card->read.state == CH_STATE_UP &&
card->write.state == CH_STATE_UP &&
(card->state == CARD_STATE_UP)) {
@@ -2162,6 +2253,36 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode)
return rc;
}
+/*
+ * Test for and switch promiscuous mode (on or off),
+ * either for guestlan or the HiperSockets sniffer.
+ */
+static void
+qeth_l3_handle_promisc_mode(struct qeth_card *card)
+{
+ struct net_device *dev = card->dev;
+
+ if (((dev->flags & IFF_PROMISC) &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_ON)) ||
+ (!(dev->flags & IFF_PROMISC) &&
+ (card->info.promisc_mode == SET_PROMISC_MODE_OFF)))
+ return;
+
+ if (card->info.guestlan) { /* Guestlan trace */
+ if (qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ qeth_setadp_promisc_mode(card);
+ } else if (card->options.sniffer && /* HiperSockets trace */
+ qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+ if (dev->flags & IFF_PROMISC) {
+ QETH_DBF_TEXT(TRACE, 3, "+promisc");
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_ENABLE);
+ } else {
+ QETH_DBF_TEXT(TRACE, 3, "-promisc");
+ qeth_diags_trace(card, QETH_DIAGS_CMD_TRACE_DISABLE);
+ }
+ }
+}
+
static void qeth_l3_set_multicast_list(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
@@ -2170,15 +2291,17 @@ static void qeth_l3_set_multicast_list(struct net_device *dev)
if (qeth_threads_running(card, QETH_RECOVER_THREAD) &&
(card->state != CARD_STATE_UP))
return;
- qeth_l3_delete_mc_addresses(card);
- qeth_l3_add_multicast_ipv4(card);
+ if (!card->options.sniffer) {
+ qeth_l3_delete_mc_addresses(card);
+ qeth_l3_add_multicast_ipv4(card);
#ifdef CONFIG_QETH_IPV6
- qeth_l3_add_multicast_ipv6(card);
+ qeth_l3_add_multicast_ipv6(card);
#endif
- qeth_l3_set_ip_addr_list(card);
- if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
- return;
- qeth_setadp_promisc_mode(card);
+ qeth_l3_set_ip_addr_list(card);
+ if (!qeth_adp_supported(card, IPA_SETADP_SET_PROMISC_MODE))
+ return;
+ }
+ qeth_l3_handle_promisc_mode(card);
}
static const char *qeth_l3_arp_get_error_cause(int *rc)
@@ -2778,8 +2901,9 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
int nr_frags;
if ((card->info.type == QETH_CARD_TYPE_IQD) &&
- (skb->protocol != htons(ETH_P_IPV6)) &&
- (skb->protocol != htons(ETH_P_IP)))
+ (((skb->protocol != htons(ETH_P_IPV6)) &&
+ (skb->protocol != htons(ETH_P_IP))) ||
+ card->options.sniffer))
goto tx_drop;
if ((card->state != CARD_STATE_UP) || !card->lan_online) {
@@ -3155,7 +3279,7 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev,
index = i % QDIO_MAX_BUFFERS_PER_Q;
buffer = &card->qdio.in_q->bufs[index];
if (!(qdio_err &&
- qeth_check_qdio_errors(buffer->buffer,
+ qeth_check_qdio_errors(card, buffer->buffer,
qdio_err, "qinerr")))
qeth_l3_process_inbound_buffer(card, buffer, index);
/* clear buffer and give back to hardware */
@@ -3214,8 +3338,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
QETH_DBF_TEXT(SETUP, 2, "setonlin");
QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));
- qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
-
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
@@ -3250,20 +3372,22 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
goto out_remove;
} else
card->lan_online = 1;
- qeth_l3_set_large_send(card, card->options.large_send);
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
- rc = qeth_l3_start_ipassists(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
- rc = qeth_l3_setrouting_v4(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
- rc = qeth_l3_setrouting_v6(card);
- if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ if (!card->options.sniffer) {
+ rc = qeth_l3_start_ipassists(card);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc);
+ qeth_l3_set_large_send(card, card->options.large_send);
+ rc = qeth_l3_setrouting_v4(card);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ rc = qeth_l3_setrouting_v6(card);
+ if (rc)
+ QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ }
netif_tx_disable(card->dev);
rc = qeth_init_qdio_queues(card);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 3360b0941aa..3f08b11274a 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -319,6 +319,61 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
qeth_l3_dev_checksum_store);
+static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+
+ if (!card)
+ return -EINVAL;
+
+ return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
+}
+
+static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct qeth_card *card = dev_get_drvdata(dev);
+ int ret;
+ unsigned long i;
+
+ if (!card)
+ return -EINVAL;
+
+ if (card->info.type != QETH_CARD_TYPE_IQD)
+ return -EPERM;
+
+ if ((card->state != CARD_STATE_DOWN) &&
+ (card->state != CARD_STATE_RECOVER))
+ return -EPERM;
+
+ ret = strict_strtoul(buf, 16, &i);
+ if (ret)
+ return -EINVAL;
+ switch (i) {
+ case 0:
+ card->options.sniffer = i;
+ break;
+ case 1:
+ ret = qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
+ if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
+ card->options.sniffer = i;
+ if (card->qdio.init_pool.buf_count !=
+ QETH_IN_BUF_COUNT_MAX)
+ qeth_realloc_buffer_pool(card,
+ QETH_IN_BUF_COUNT_MAX);
+ break;
+ } else
+ return -EPERM;
+	default:
+ return -EINVAL;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
+ qeth_l3_dev_sniffer_store);
+
static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -373,6 +428,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
&dev_attr_broadcast_mode.attr,
&dev_attr_canonical_macaddr.attr,
&dev_attr_checksumming.attr,
+ &dev_attr_sniffer.attr,
&dev_attr_large_send.attr,
NULL,
};
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c
index d935b2d04f9..ae0251ef6f4 100644
--- a/drivers/serial/68328serial.c
+++ b/drivers/serial/68328serial.c
@@ -153,8 +153,6 @@ static int baud_table[] = {
0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200, 0 };
-#define BAUD_TABLE_SIZE (sizeof(baud_table)/sizeof(baud_table[0]))
-
/* Sets or clears DTR/RTS on the requested line */
static inline void m68k_rtsdtr(struct m68k_serial *ss, int set)
{
@@ -1406,10 +1404,10 @@ static void m68328_set_baud(void)
USTCNT = ustcnt & ~USTCNT_TXEN;
again:
- for (i = 0; i < sizeof(baud_table) / sizeof(baud_table[0]); i++)
+ for (i = 0; i < ARRAY_SIZE(baud_table); i++)
if (baud_table[i] == m68328_console_baud)
break;
- if (i >= sizeof(baud_table) / sizeof(baud_table[0])) {
+ if (i >= ARRAY_SIZE(baud_table)) {
m68328_console_baud = 9600;
goto again;
}
@@ -1435,7 +1433,7 @@ int m68328_console_setup(struct console *cp, char *arg)
if (arg)
n = simple_strtoul(arg,NULL,0);
- for (i = 0; i < BAUD_TABLE_SIZE; i++)
+ for (i = 0; i < ARRAY_SIZE(baud_table); i++)
if (baud_table[i] == n)
break;
-	if (i < BAUD_TABLE_SIZE) {
+	if (i < ARRAY_SIZE(baud_table)) {
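
The hunks above replace the open-coded sizeof ratio with the kernel's ARRAY_SIZE() macro. A minimal user-space sketch of the same idiom, using the baud_table from this file (the 9600 lookup is purely illustrative):

	/* Minimal user-space sketch of the ARRAY_SIZE() idiom adopted above. */
	#include <stdio.h>

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

	static const int baud_table[] = {
		0, 50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
		9600, 19200, 38400, 57600, 115200, 0 };

	int main(void)
	{
		size_t i;

		/* Same lookup pattern as m68328_console_setup(): scan for a match. */
		for (i = 0; i < ARRAY_SIZE(baud_table); i++)
			if (baud_table[i] == 9600)
				break;

		printf("9600 sits at index %zu of %zu entries\n",
		       i, ARRAY_SIZE(baud_table));
		return 0;
	}
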
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index a81ff7bc5fa..7c4ebe6ee18 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -2690,6 +2690,15 @@ static void __init serial8250_isa_init_ports(void)
}
}
+static void
+serial8250_init_fixed_type_port(struct uart_8250_port *up, unsigned int type)
+{
+ up->port.type = type;
+ up->port.fifosize = uart_config[type].fifo_size;
+ up->capabilities = uart_config[type].flags;
+ up->tx_loadsz = uart_config[type].tx_loadsz;
+}
+
static void __init
serial8250_register_ports(struct uart_driver *drv, struct device *dev)
{
@@ -2706,6 +2715,10 @@ serial8250_register_ports(struct uart_driver *drv, struct device *dev)
struct uart_8250_port *up = &serial8250_ports[i];
up->port.dev = dev;
+
+ if (up->port.flags & UPF_FIXED_TYPE)
+ serial8250_init_fixed_type_port(up, up->port.type);
+
uart_add_one_port(drv, &up->port);
}
}
@@ -3118,12 +3131,8 @@ int serial8250_register_port(struct uart_port *port)
if (port->dev)
uart->port.dev = port->dev;
- if (port->flags & UPF_FIXED_TYPE) {
- uart->port.type = port->type;
- uart->port.fifosize = uart_config[port->type].fifo_size;
- uart->capabilities = uart_config[port->type].flags;
- uart->tx_loadsz = uart_config[port->type].tx_loadsz;
- }
+ if (port->flags & UPF_FIXED_TYPE)
+ serial8250_init_fixed_type_port(uart, port->type);
set_io_from_upio(&uart->port);
/* Possibly override default I/O functions. */
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c
index b28af13c45a..01c012da4e2 100644
--- a/drivers/serial/8250_pci.c
+++ b/drivers/serial/8250_pci.c
@@ -760,7 +760,8 @@ static int pci_netmos_init(struct pci_dev *dev)
/* subdevice 0x00PS means <P> parallel, <S> serial */
unsigned int num_serial = dev->subsystem_device & 0xf;
- if (dev->device == PCI_DEVICE_ID_NETMOS_9901)
+ if ((dev->device == PCI_DEVICE_ID_NETMOS_9901) ||
+ (dev->device == PCI_DEVICE_ID_NETMOS_9865))
return 0;
if (dev->subsystem_vendor == PCI_VENDOR_ID_IBM &&
dev->subsystem_device == 0x0299)
@@ -1479,6 +1480,7 @@ enum pci_board_num_t {
pbn_b0_bt_1_115200,
pbn_b0_bt_2_115200,
+ pbn_b0_bt_4_115200,
pbn_b0_bt_8_115200,
pbn_b0_bt_1_460800,
@@ -1703,6 +1705,12 @@ static struct pciserial_board pci_boards[] __devinitdata = {
.base_baud = 115200,
.uart_offset = 8,
},
+ [pbn_b0_bt_4_115200] = {
+ .flags = FL_BASE0|FL_BASE_BARS,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ },
[pbn_b0_bt_8_115200] = {
.flags = FL_BASE0|FL_BASE_BARS,
.num_ports = 8,
@@ -3191,6 +3199,15 @@ static struct pci_device_id serial_pci_tbl[] = {
0x1208, 0x0004, 0, 0,
pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
+ 0x1204, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF2,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
+ { PCI_VENDOR_ID_KORENIX, PCI_DEVICE_ID_KORENIX_JETCARDF3,
+ 0x1208, 0x0004, 0, 0,
+ pbn_b0_4_921600 },
/*
* Dell Remote Access Card 4 - Tim_T_Murphy@Dell.com
*/
@@ -3649,6 +3666,18 @@ static struct pci_device_id serial_pci_tbl[] = {
0, 0, pbn_b0_1_115200 },
/*
+ * Best Connectivity PCI Multi I/O cards
+ */
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x1000,
+ 0, 0, pbn_b0_1_115200 },
+
+ { PCI_VENDOR_ID_NETMOS, PCI_DEVICE_ID_NETMOS_9865,
+ 0xA000, 0x3004,
+ 0, 0, pbn_b0_bt_4_115200 },
+
+ /*
* These entries match devices with class COMMUNICATION_SERIAL,
* COMMUNICATION_MODEM or COMMUNICATION_MULTISERIAL
*/
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 11ebe862457..d6ff7339562 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -1422,42 +1422,37 @@ config SERIAL_BFIN_SPORT
To compile this driver as a module, choose M here: the
module will be called bfin_sport_uart.
-choice
- prompt "Baud rate for Blackfin SPORT UART"
- depends on SERIAL_BFIN_SPORT
- default SERIAL_SPORT_BAUD_RATE_57600
- help
- Choose a baud rate for the SPORT UART, other uart settings are
- 8 bit, 1 stop bit, no parity, no flow control.
-
-config SERIAL_SPORT_BAUD_RATE_115200
- bool "115200"
-
-config SERIAL_SPORT_BAUD_RATE_57600
- bool "57600"
+config SERIAL_BFIN_SPORT_CONSOLE
+ bool "Console on Blackfin sport emulated uart"
+ depends on SERIAL_BFIN_SPORT=y
+ select SERIAL_CORE_CONSOLE
-config SERIAL_SPORT_BAUD_RATE_38400
- bool "38400"
+config SERIAL_BFIN_SPORT0_UART
+ bool "Enable UART over SPORT0"
+ depends on SERIAL_BFIN_SPORT && !(BF542 || BF542M || BF544 || BF544M)
+ help
+ Enable UART over SPORT0
-config SERIAL_SPORT_BAUD_RATE_19200
- bool "19200"
+config SERIAL_BFIN_SPORT1_UART
+ bool "Enable UART over SPORT1"
+ depends on SERIAL_BFIN_SPORT
+ help
+ Enable UART over SPORT1
-config SERIAL_SPORT_BAUD_RATE_9600
- bool "9600"
-endchoice
+config SERIAL_BFIN_SPORT2_UART
+ bool "Enable UART over SPORT2"
+ depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
+ help
+ Enable UART over SPORT2
-config SPORT_BAUD_RATE
- int
- depends on SERIAL_BFIN_SPORT
- default 115200 if (SERIAL_SPORT_BAUD_RATE_115200)
- default 57600 if (SERIAL_SPORT_BAUD_RATE_57600)
- default 38400 if (SERIAL_SPORT_BAUD_RATE_38400)
- default 19200 if (SERIAL_SPORT_BAUD_RATE_19200)
- default 9600 if (SERIAL_SPORT_BAUD_RATE_9600)
+config SERIAL_BFIN_SPORT3_UART
+ bool "Enable UART over SPORT3"
+ depends on SERIAL_BFIN_SPORT && (BF54x || BF538 || BF539)
+ help
+ Enable UART over SPORT3
config SERIAL_TIMBERDALE
tristate "Support for timberdale UART"
- depends on MFD_TIMBERDALE
select SERIAL_CORE
---help---
Add support for UART controller on timberdale.
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c
index 9d948bccafa..2c9bf9b6832 100644
--- a/drivers/serial/atmel_serial.c
+++ b/drivers/serial/atmel_serial.c
@@ -1213,6 +1213,24 @@ static int atmel_verify_port(struct uart_port *port, struct serial_struct *ser)
return ret;
}
+#ifdef CONFIG_CONSOLE_POLL
+static int atmel_poll_get_char(struct uart_port *port)
+{
+ while (!(UART_GET_CSR(port) & ATMEL_US_RXRDY))
+ cpu_relax();
+
+ return UART_GET_CHAR(port);
+}
+
+static void atmel_poll_put_char(struct uart_port *port, unsigned char ch)
+{
+ while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY))
+ cpu_relax();
+
+ UART_PUT_CHAR(port, ch);
+}
+#endif
+
static struct uart_ops atmel_pops = {
.tx_empty = atmel_tx_empty,
.set_mctrl = atmel_set_mctrl,
@@ -1232,6 +1250,10 @@ static struct uart_ops atmel_pops = {
.config_port = atmel_config_port,
.verify_port = atmel_verify_port,
.pm = atmel_serial_pm,
+#ifdef CONFIG_CONSOLE_POLL
+ .poll_get_char = atmel_poll_get_char,
+ .poll_put_char = atmel_poll_put_char,
+#endif
};
/*
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
index 37ad0c44993..a1a0e55d080 100644
--- a/drivers/serial/bcm63xx_uart.c
+++ b/drivers/serial/bcm63xx_uart.c
@@ -35,7 +35,7 @@
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
-#define BCM63XX_NR_UARTS 1
+#define BCM63XX_NR_UARTS 2
static struct uart_port ports[BCM63XX_NR_UARTS];
@@ -784,7 +784,7 @@ static struct uart_driver bcm_uart_driver = {
.dev_name = "ttyS",
.major = TTY_MAJOR,
.minor = 64,
- .nr = 1,
+ .nr = BCM63XX_NR_UARTS,
.cons = BCM63XX_CONSOLE,
};
@@ -826,11 +826,12 @@ static int __devinit bcm_uart_probe(struct platform_device *pdev)
port->dev = &pdev->dev;
port->fifosize = 16;
port->uartclk = clk_get_rate(clk) / 2;
+ port->line = pdev->id;
clk_put(clk);
ret = uart_add_one_port(&bcm_uart_driver, port);
if (ret) {
- kfree(port);
+ ports[pdev->id].membase = 0;
return ret;
}
platform_set_drvdata(pdev, port);
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c
index 50abb7e557f..fcf273e3f48 100644
--- a/drivers/serial/bfin_5xx.c
+++ b/drivers/serial/bfin_5xx.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
@@ -237,7 +238,8 @@ static void bfin_serial_rx_chars(struct bfin_serial_port *uart)
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
- if (kgdb_connected && kgdboc_port_line == uart->port.line)
+ if (kgdb_connected && kgdboc_port_line == uart->port.line
+ && kgdboc_break_enabled)
if (ch == 0x3) {/* Ctrl + C */
kgdb_breakpoint();
return;
@@ -488,6 +490,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
{
int x_pos, pos;
+ dma_disable_irq(uart->tx_dma_channel);
dma_disable_irq(uart->rx_dma_channel);
spin_lock_bh(&uart->port.lock);
@@ -521,6 +524,7 @@ void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart)
}
spin_unlock_bh(&uart->port.lock);
+ dma_enable_irq(uart->tx_dma_channel);
dma_enable_irq(uart->rx_dma_channel);
mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES);
@@ -746,15 +750,6 @@ static int bfin_serial_startup(struct uart_port *port)
Status interrupt.\n");
}
- if (uart->cts_pin >= 0) {
- gpio_request(uart->cts_pin, DRIVER_NAME);
- gpio_direction_output(uart->cts_pin, 1);
- }
- if (uart->rts_pin >= 0) {
- gpio_request(uart->rts_pin, DRIVER_NAME);
- gpio_direction_output(uart->rts_pin, 0);
- }
-
/* CTS RTS PINs are negative assertive. */
UART_PUT_MCR(uart, ACTS);
UART_SET_IER(uart, EDSSI);
@@ -801,10 +796,6 @@ static void bfin_serial_shutdown(struct uart_port *port)
gpio_free(uart->rts_pin);
#endif
#ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS
- if (uart->cts_pin >= 0)
- gpio_free(uart->cts_pin);
- if (uart->rts_pin >= 0)
- gpio_free(uart->rts_pin);
if (UART_GET_IER(uart) && EDSSI)
free_irq(uart->status_irq, uart);
#endif
@@ -1409,8 +1400,7 @@ static int bfin_serial_remove(struct platform_device *dev)
continue;
uart_remove_one_port(&bfin_serial_reg, &bfin_serial_ports[i].port);
bfin_serial_ports[i].port.dev = NULL;
-#if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \
- defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS)
+#if defined(CONFIG_SERIAL_BFIN_CTSRTS)
gpio_free(bfin_serial_ports[i].cts_pin);
gpio_free(bfin_serial_ports[i].rts_pin);
#endif
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c
index 088bb35475f..7c72888fbf9 100644
--- a/drivers/serial/bfin_sport_uart.c
+++ b/drivers/serial/bfin_sport_uart.c
@@ -1,27 +1,11 @@
/*
- * File: linux/drivers/serial/bfin_sport_uart.c
+ * Blackfin On-Chip Sport Emulated UART Driver
*
- * Based on: drivers/serial/bfin_5xx.c by Aubrey Li.
- * Author: Roy Huang <roy.huang@analog.com>
+ * Copyright 2006-2009 Analog Devices Inc.
*
- * Created: Nov 22, 2006
- * Copyright: (c) 2006-2007 Analog Devices Inc.
- * Description: this driver enable SPORTs on Blackfin emulate UART.
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Licensed under the GPL-2 or later.
*/
/*
@@ -29,39 +13,18 @@
* http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
* This application note describe how to implement a UART on a Sharc DSP,
* but this driver is implemented on Blackfin Processor.
+ * Transmit Frame Sync is not used by this driver to transfer data out.
*/
-/* After reset, there is a prelude of low level pulse when transmit data first
- * time. No addtional pulse in following transmit.
- * According to document:
- * The SPORTs are ready to start transmitting or receiving data no later than
- * three serial clock cycles after they are enabled in the SPORTx_TCR1 or
- * SPORTx_RCR1 register. No serial clock cycles are lost from this point on.
- * The first internal frame sync will occur one frame sync delay after the
- * SPORTs are ready. External frame syncs can occur as soon as the SPORT is
- * ready.
- */
+/* #define DEBUG */
-/* Thanks to Axel Alatalo <axel@rubico.se> for fixing sport rx bug. Sometimes
- * sport receives data incorrectly. The following is Axel's words.
- * As EE-191, sport rx samples 3 times of the UART baudrate and takes the
- * middle smaple of every 3 samples as the data bit. For a 8-N-1 UART setting,
- * 30 samples will be required for a byte. If transmitter sends a 1/3 bit short
- * byte due to buadrate drift, then the 30th sample of a byte, this sample is
- * also the third sample of the stop bit, will happens on the immediately
- * following start bit which will be thrown away and missed. Thus since parts
- * of the startbit will be missed and the receiver will begin to drift, the
- * effect accumulates over time until synchronization is lost.
- * If only require 2 samples of the stopbit (by sampling in total 29 samples),
- * then a to short byte as in the case above will be tolerated. Then the 1/3
- * early startbit will trigger a framesync since the last read is complete
- * after only 2/3 stopbit and framesync is active during the last 1/3 looking
- * for a possible early startbit. */
-
-//#define DEBUG
+#define DRV_NAME "bfin-sport-uart"
+#define DEVICE_NAME "ttySS"
+#define pr_fmt(fmt) DRV_NAME ": " fmt
#include <linux/module.h>
#include <linux/ioport.h>
+#include <linux/io.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/sysrq.h>
@@ -75,23 +38,36 @@
#include "bfin_sport_uart.h"
+#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
unsigned short bfin_uart_pin_req_sport0[] =
{P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \
P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0};
-
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
unsigned short bfin_uart_pin_req_sport1[] =
{P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \
P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0};
-
-#define DRV_NAME "bfin-sport-uart"
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT2_UART
+unsigned short bfin_uart_pin_req_sport2[] =
+ {P_SPORT2_TFS, P_SPORT2_DTPRI, P_SPORT2_TSCLK, P_SPORT2_RFS, \
+ P_SPORT2_DRPRI, P_SPORT2_RSCLK, P_SPORT2_DRSEC, P_SPORT2_DTSEC, 0};
+#endif
+#ifdef CONFIG_SERIAL_BFIN_SPORT3_UART
+unsigned short bfin_uart_pin_req_sport3[] =
+ {P_SPORT3_TFS, P_SPORT3_DTPRI, P_SPORT3_TSCLK, P_SPORT3_RFS, \
+ P_SPORT3_DRPRI, P_SPORT3_RSCLK, P_SPORT3_DRSEC, P_SPORT3_DTSEC, 0};
+#endif
struct sport_uart_port {
struct uart_port port;
- char *name;
-
- int tx_irq;
- int rx_irq;
int err_irq;
+ unsigned short csize;
+ unsigned short rxmask;
+ unsigned short txmask1;
+ unsigned short txmask2;
+ unsigned char stopb;
+/* unsigned char parib; */
};
static void sport_uart_tx_chars(struct sport_uart_port *up);
@@ -99,36 +75,42 @@ static void sport_stop_tx(struct uart_port *port);
static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value)
{
- pr_debug("%s value:%x\n", __func__, value);
- /* Place a Start and Stop bit */
+ pr_debug("%s value:%x, mask1=0x%x, mask2=0x%x\n", __func__, value,
+ up->txmask1, up->txmask2);
+
+ /* Place Start and Stop bits */
__asm__ __volatile__ (
- "R2 = b#01111111100;"
- "R3 = b#10000000001;"
- "%0 <<= 2;"
- "%0 = %0 & R2;"
- "%0 = %0 | R3;"
- : "=d"(value)
- : "d"(value)
- : "ASTAT", "R2", "R3"
+ "%[val] <<= 1;"
+ "%[val] = %[val] & %[mask1];"
+ "%[val] = %[val] | %[mask2];"
+ : [val]"+d"(value)
+ : [mask1]"d"(up->txmask1), [mask2]"d"(up->txmask2)
+ : "ASTAT"
);
pr_debug("%s value:%x\n", __func__, value);
SPORT_PUT_TX(up, value);
}
-static inline unsigned int rx_one_byte(struct sport_uart_port *up)
+static inline unsigned char rx_one_byte(struct sport_uart_port *up)
{
- unsigned int value, extract;
+ unsigned int value;
+ unsigned char extract;
u32 tmp_mask1, tmp_mask2, tmp_shift, tmp;
- value = SPORT_GET_RX32(up);
- pr_debug("%s value:%x\n", __func__, value);
+ if ((up->csize + up->stopb) > 7)
+ value = SPORT_GET_RX32(up);
+ else
+ value = SPORT_GET_RX(up);
+
+ pr_debug("%s value:%x, cs=%d, mask=0x%x\n", __func__, value,
+ up->csize, up->rxmask);
- /* Extract 8 bits data */
+ /* Extract data */
__asm__ __volatile__ (
"%[extr] = 0;"
- "%[mask1] = 0x1801(Z);"
- "%[mask2] = 0x0300(Z);"
+ "%[mask1] = %[rxmask];"
+ "%[mask2] = 0x0200(Z);"
"%[shift] = 0;"
"LSETUP(.Lloop_s, .Lloop_e) LC0 = %[lc];"
".Lloop_s:"
@@ -138,9 +120,9 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up)
"%[mask1] = %[mask1] - %[mask2];"
".Lloop_e:"
"%[shift] += 1;"
- : [val]"=d"(value), [extr]"=d"(extract), [shift]"=d"(tmp_shift), [tmp]"=d"(tmp),
- [mask1]"=d"(tmp_mask1), [mask2]"=d"(tmp_mask2)
- : "d"(value), [lc]"a"(8)
+ : [extr]"=&d"(extract), [shift]"=&d"(tmp_shift), [tmp]"=&d"(tmp),
+ [mask1]"=&d"(tmp_mask1), [mask2]"=&d"(tmp_mask2)
+ : [val]"d"(value), [rxmask]"d"(up->rxmask), [lc]"a"(up->csize)
: "ASTAT", "LB0", "LC0", "LT0"
);
@@ -148,29 +130,28 @@ static inline unsigned int rx_one_byte(struct sport_uart_port *up)
return extract;
}
-static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate)
+static int sport_uart_setup(struct sport_uart_port *up, int size, int baud_rate)
{
- int tclkdiv, tfsdiv, rclkdiv;
+ int tclkdiv, rclkdiv;
+ unsigned int sclk = get_sclk();
- /* Set TCR1 and TCR2 */
- SPORT_PUT_TCR1(up, (LATFS | ITFS | TFSR | TLSBIT | ITCLK));
- SPORT_PUT_TCR2(up, 10);
+ /* Set TCR1 and TCR2, TFSR is not enabled for uart */
+ SPORT_PUT_TCR1(up, (ITFS | TLSBIT | ITCLK));
+ SPORT_PUT_TCR2(up, size + 1);
pr_debug("%s TCR1:%x, TCR2:%x\n", __func__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up));
/* Set RCR1 and RCR2 */
SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK));
- SPORT_PUT_RCR2(up, 28);
+ SPORT_PUT_RCR2(up, (size + 1) * 2 - 1);
pr_debug("%s RCR1:%x, RCR2:%x\n", __func__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up));
- tclkdiv = sclk/(2 * baud_rate) - 1;
- tfsdiv = 12;
- rclkdiv = sclk/(2 * baud_rate * 3) - 1;
+ tclkdiv = sclk / (2 * baud_rate) - 1;
+ rclkdiv = sclk / (2 * baud_rate * 2) - 1;
SPORT_PUT_TCLKDIV(up, tclkdiv);
- SPORT_PUT_TFSDIV(up, tfsdiv);
SPORT_PUT_RCLKDIV(up, rclkdiv);
SSYNC();
- pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n",
- __func__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv);
+ pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, rclkdiv:%d\n",
+ __func__, sclk, baud_rate, tclkdiv, rclkdiv);
return 0;
}
@@ -181,23 +162,29 @@ static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id)
struct tty_struct *tty = up->port.state->port.tty;
unsigned int ch;
- do {
+ spin_lock(&up->port.lock);
+
+ while (SPORT_GET_STAT(up) & RXNE) {
ch = rx_one_byte(up);
up->port.icount.rx++;
- if (uart_handle_sysrq_char(&up->port, ch))
- ;
- else
+ if (!uart_handle_sysrq_char(&up->port, ch))
tty_insert_flip_char(tty, ch, TTY_NORMAL);
- } while (SPORT_GET_STAT(up) & RXNE);
+ }
tty_flip_buffer_push(tty);
+ spin_unlock(&up->port.lock);
+
return IRQ_HANDLED;
}
static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id)
{
- sport_uart_tx_chars(dev_id);
+ struct sport_uart_port *up = dev_id;
+
+ spin_lock(&up->port.lock);
+ sport_uart_tx_chars(up);
+ spin_unlock(&up->port.lock);
return IRQ_HANDLED;
}
@@ -208,6 +195,8 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
struct tty_struct *tty = up->port.state->port.tty;
unsigned int stat = SPORT_GET_STAT(up);
+ spin_lock(&up->port.lock);
+
/* Overflow in RX FIFO */
if (stat & ROVF) {
up->port.icount.overrun++;
@@ -216,15 +205,16 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
}
/* These should not happen */
if (stat & (TOVF | TUVF | RUVF)) {
- printk(KERN_ERR "SPORT Error:%s %s %s\n",
- (stat & TOVF)?"TX overflow":"",
- (stat & TUVF)?"TX underflow":"",
- (stat & RUVF)?"RX underflow":"");
+ pr_err("SPORT Error:%s %s %s\n",
+ (stat & TOVF) ? "TX overflow" : "",
+ (stat & TUVF) ? "TX underflow" : "",
+ (stat & RUVF) ? "RX underflow" : "");
SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
}
SSYNC();
+ spin_unlock(&up->port.lock);
return IRQ_HANDLED;
}
@@ -232,60 +222,37 @@ static irqreturn_t sport_uart_err_irq(int irq, void *dev_id)
static int sport_startup(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
- char buffer[20];
- int retval;
+ int ret;
pr_debug("%s enter\n", __func__);
- snprintf(buffer, 20, "%s rx", up->name);
- retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
- if (retval) {
- printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
- return retval;
+ ret = request_irq(up->port.irq, sport_uart_rx_irq, 0,
+ "SPORT_UART_RX", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT RX interrupt\n");
+ return ret;
}
- snprintf(buffer, 20, "%s tx", up->name);
- retval = request_irq(up->tx_irq, sport_uart_tx_irq, IRQF_SAMPLE_RANDOM, buffer, up);
- if (retval) {
- printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
+ ret = request_irq(up->port.irq+1, sport_uart_tx_irq, 0,
+ "SPORT_UART_TX", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT TX interrupt\n");
goto fail1;
}
- snprintf(buffer, 20, "%s err", up->name);
- retval = request_irq(up->err_irq, sport_uart_err_irq, IRQF_SAMPLE_RANDOM, buffer, up);
- if (retval) {
- printk(KERN_ERR "Unable to request interrupt %s\n", buffer);
+ ret = request_irq(up->err_irq, sport_uart_err_irq, 0,
+ "SPORT_UART_STATUS", up);
+ if (ret) {
+ dev_err(port->dev, "unable to request SPORT status interrupt\n");
goto fail2;
}
- if (port->line) {
- if (peripheral_request_list(bfin_uart_pin_req_sport1, DRV_NAME))
- goto fail3;
- } else {
- if (peripheral_request_list(bfin_uart_pin_req_sport0, DRV_NAME))
- goto fail3;
- }
-
- sport_uart_setup(up, get_sclk(), port->uartclk);
-
- /* Enable receive interrupt */
- SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) | RSPEN));
- SSYNC();
-
return 0;
+ fail2:
+ free_irq(up->port.irq+1, up);
+ fail1:
+ free_irq(up->port.irq, up);
-
-fail3:
- printk(KERN_ERR DRV_NAME
- ": Requesting Peripherals failed\n");
-
- free_irq(up->err_irq, up);
-fail2:
- free_irq(up->tx_irq, up);
-fail1:
- free_irq(up->rx_irq, up);
-
- return retval;
-
+ return ret;
}
static void sport_uart_tx_chars(struct sport_uart_port *up)
@@ -344,20 +311,17 @@ static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl)
static void sport_stop_tx(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
- unsigned int stat;
pr_debug("%s enter\n", __func__);
- stat = SPORT_GET_STAT(up);
- while(!(stat & TXHRE)) {
- udelay(1);
- stat = SPORT_GET_STAT(up);
- }
/* Although the hold register is empty, last byte is still in shift
- * register and not sent out yet. If baud rate is lower than default,
- * delay should be longer. For example, if the baud rate is 9600,
- * the delay must be at least 2ms by experience */
- udelay(500);
+	 * register and not sent out yet. So, put dummy data into the TX FIFO.
+	 * Then sport tx stops when the last byte is shifted out and the dummy
+	 * data is moved into the shift register.
+ */
+ SPORT_PUT_TX(up, 0xffff);
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ cpu_relax();
SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
SSYNC();
@@ -370,6 +334,7 @@ static void sport_start_tx(struct uart_port *port)
struct sport_uart_port *up = (struct sport_uart_port *)port;
pr_debug("%s enter\n", __func__);
+
/* Write data into SPORT FIFO before enable SPROT to transmit */
sport_uart_tx_chars(up);
@@ -403,37 +368,24 @@ static void sport_shutdown(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
- pr_debug("%s enter\n", __func__);
+ dev_dbg(port->dev, "%s enter\n", __func__);
/* Disable sport */
SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN));
SSYNC();
- if (port->line) {
- peripheral_free_list(bfin_uart_pin_req_sport1);
- } else {
- peripheral_free_list(bfin_uart_pin_req_sport0);
- }
-
- free_irq(up->rx_irq, up);
- free_irq(up->tx_irq, up);
+ free_irq(up->port.irq, up);
+ free_irq(up->port.irq+1, up);
free_irq(up->err_irq, up);
}
-static void sport_set_termios(struct uart_port *port,
- struct ktermios *termios, struct ktermios *old)
-{
- pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
- uart_update_timeout(port, CS8 ,port->uartclk);
-}
-
static const char *sport_type(struct uart_port *port)
{
struct sport_uart_port *up = (struct sport_uart_port *)port;
pr_debug("%s enter\n", __func__);
- return up->name;
+ return up->port.type == PORT_BFIN_SPORT ? "BFIN-SPORT-UART" : NULL;
}
static void sport_release_port(struct uart_port *port)
@@ -461,6 +413,110 @@ static int sport_verify_port(struct uart_port *port, struct serial_struct *ser)
return 0;
}
+static void sport_set_termios(struct uart_port *port,
+ struct ktermios *termios, struct ktermios *old)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+ unsigned long flags;
+ int i;
+
+ pr_debug("%s enter, c_cflag:%08x\n", __func__, termios->c_cflag);
+
+ switch (termios->c_cflag & CSIZE) {
+ case CS8:
+ up->csize = 8;
+ break;
+ case CS7:
+ up->csize = 7;
+ break;
+ case CS6:
+ up->csize = 6;
+ break;
+ case CS5:
+ up->csize = 5;
+ break;
+	default:
+		/* fall back to 8 data bits for unsupported word lengths */
+		up->csize = 8;
+		pr_warning("requested word length not supported\n");
+ }
+
+ if (termios->c_cflag & CSTOPB) {
+ up->stopb = 1;
+ }
+ if (termios->c_cflag & PARENB) {
+ pr_warning("PAREN bits is not supported yet\n");
+ /* up->parib = 1; */
+ }
+
+ port->read_status_mask = OE;
+ if (termios->c_iflag & INPCK)
+ port->read_status_mask |= (FE | PE);
+ if (termios->c_iflag & (BRKINT | PARMRK))
+ port->read_status_mask |= BI;
+
+ /*
+ * Characters to ignore
+ */
+ port->ignore_status_mask = 0;
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= FE | PE;
+ if (termios->c_iflag & IGNBRK) {
+ port->ignore_status_mask |= BI;
+ /*
+ * If we're ignoring parity and break indicators,
+ * ignore overruns too (for real raw support).
+ */
+ if (termios->c_iflag & IGNPAR)
+ port->ignore_status_mask |= OE;
+ }
+
+ /* RX extract mask */
+ up->rxmask = 0x01 | (((up->csize + up->stopb) * 2 - 1) << 0x8);
+ /* TX masks, 8 bit data and 1 bit stop for example:
+ * mask1 = b#0111111110
+ * mask2 = b#1000000000
+ */
+ for (i = 0, up->txmask1 = 0; i < up->csize; i++)
+ up->txmask1 |= (1<<i);
+ up->txmask2 = (1<<i);
+ if (up->stopb) {
+ ++i;
+ up->txmask2 |= (1<<i);
+ }
+ up->txmask1 <<= 1;
+ up->txmask2 <<= 1;
+ /* uart baud rate */
+ port->uartclk = uart_get_baud_rate(port, termios, old, 0, get_sclk()/16);
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ /* Disable UART */
+ SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN);
+
+ sport_uart_setup(up, up->csize + up->stopb, port->uartclk);
+
+	/* Drive the TX line high after config; one dummy word is
+	 * necessary to stop the sport after shifting out one byte.
+ */
+ SPORT_PUT_TX(up, 0xffff);
+ SPORT_PUT_TX(up, 0xffff);
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ cpu_relax();
+ SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN);
+ SSYNC();
+
+ /* Port speed changed, update the per-port timeout. */
+ uart_update_timeout(port, termios->c_cflag, port->uartclk);
+
+ /* Enable sport rx */
+ SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) | RSPEN);
+ SSYNC();
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
struct uart_ops sport_uart_ops = {
.tx_empty = sport_tx_empty,
.set_mctrl = sport_set_mctrl,
@@ -480,138 +536,319 @@ struct uart_ops sport_uart_ops = {
.verify_port = sport_verify_port,
};
-static struct sport_uart_port sport_uart_ports[] = {
- { /* SPORT 0 */
- .name = "SPORT0",
- .tx_irq = IRQ_SPORT0_TX,
- .rx_irq = IRQ_SPORT0_RX,
- .err_irq= IRQ_SPORT0_ERROR,
- .port = {
- .type = PORT_BFIN_SPORT,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)SPORT0_TCR1,
- .mapbase = SPORT0_TCR1,
- .irq = IRQ_SPORT0_RX,
- .uartclk = CONFIG_SPORT_BAUD_RATE,
- .fifosize = 8,
- .ops = &sport_uart_ops,
- .line = 0,
- },
- }, { /* SPORT 1 */
- .name = "SPORT1",
- .tx_irq = IRQ_SPORT1_TX,
- .rx_irq = IRQ_SPORT1_RX,
- .err_irq= IRQ_SPORT1_ERROR,
- .port = {
- .type = PORT_BFIN_SPORT,
- .iotype = UPIO_MEM,
- .membase = (void __iomem *)SPORT1_TCR1,
- .mapbase = SPORT1_TCR1,
- .irq = IRQ_SPORT1_RX,
- .uartclk = CONFIG_SPORT_BAUD_RATE,
- .fifosize = 8,
- .ops = &sport_uart_ops,
- .line = 1,
- },
+#define BFIN_SPORT_UART_MAX_PORTS 4
+
+static struct sport_uart_port *bfin_sport_uart_ports[BFIN_SPORT_UART_MAX_PORTS];
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+static int __init
+sport_uart_console_setup(struct console *co, char *options)
+{
+ struct sport_uart_port *up;
+ int baud = 57600;
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
+
+ /* Check whether an invalid uart number has been specified */
+ if (co->index < 0 || co->index >= BFIN_SPORT_UART_MAX_PORTS)
+ return -ENODEV;
+
+ up = bfin_sport_uart_ports[co->index];
+ if (!up)
+ return -ENODEV;
+
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
+ return uart_set_options(&up->port, co, baud, parity, bits, flow);
+}
+
+static void sport_uart_console_putchar(struct uart_port *port, int ch)
+{
+ struct sport_uart_port *up = (struct sport_uart_port *)port;
+
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+
+ tx_one_byte(up, ch);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+sport_uart_console_write(struct console *co, const char *s, unsigned int count)
+{
+ struct sport_uart_port *up = bfin_sport_uart_ports[co->index];
+ unsigned long flags;
+
+ spin_lock_irqsave(&up->port.lock, flags);
+
+ if (SPORT_GET_TCR1(up) & TSPEN)
+ uart_console_write(&up->port, s, count, sport_uart_console_putchar);
+ else {
+ /* dummy data to start sport */
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+ SPORT_PUT_TX(up, 0xffff);
+		/* Enable transmit, then an interrupt will be generated */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN));
+ SSYNC();
+
+ uart_console_write(&up->port, s, count, sport_uart_console_putchar);
+
+		/* Although the hold register is empty, the last byte is still in
+		 * the shift register and not sent out yet. So, put dummy data into
+		 * the TX FIFO. Then sport tx stops when the last byte is shifted
+		 * out and the dummy data is moved into the shift register.
+ */
+ while (SPORT_GET_STAT(up) & TXF)
+ barrier();
+ SPORT_PUT_TX(up, 0xffff);
+ while (!(SPORT_GET_STAT(up) & TXHRE))
+ barrier();
+
+ /* Stop sport tx transfer */
+ SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN));
+ SSYNC();
}
+
+ spin_unlock_irqrestore(&up->port.lock, flags);
+}
+
+static struct uart_driver sport_uart_reg;
+
+static struct console sport_uart_console = {
+ .name = DEVICE_NAME,
+ .write = sport_uart_console_write,
+ .device = uart_console_device,
+ .setup = sport_uart_console_setup,
+ .flags = CON_PRINTBUFFER,
+ .index = -1,
+ .data = &sport_uart_reg,
};
+#define SPORT_UART_CONSOLE (&sport_uart_console)
+#else
+#define SPORT_UART_CONSOLE NULL
+#endif /* CONFIG_SERIAL_BFIN_SPORT_CONSOLE */
+
+
static struct uart_driver sport_uart_reg = {
.owner = THIS_MODULE,
- .driver_name = "SPORT-UART",
- .dev_name = "ttySS",
+ .driver_name = DRV_NAME,
+ .dev_name = DEVICE_NAME,
.major = 204,
.minor = 84,
- .nr = ARRAY_SIZE(sport_uart_ports),
- .cons = NULL,
+ .nr = BFIN_SPORT_UART_MAX_PORTS,
+ .cons = SPORT_UART_CONSOLE,
};
-static int sport_uart_suspend(struct platform_device *dev, pm_message_t state)
+#ifdef CONFIG_PM
+static int sport_uart_suspend(struct device *dev)
{
- struct sport_uart_port *sport = platform_get_drvdata(dev);
+ struct sport_uart_port *sport = dev_get_drvdata(dev);
- pr_debug("%s enter\n", __func__);
+ dev_dbg(dev, "%s enter\n", __func__);
if (sport)
uart_suspend_port(&sport_uart_reg, &sport->port);
return 0;
}
-static int sport_uart_resume(struct platform_device *dev)
+static int sport_uart_resume(struct device *dev)
{
- struct sport_uart_port *sport = platform_get_drvdata(dev);
+ struct sport_uart_port *sport = dev_get_drvdata(dev);
- pr_debug("%s enter\n", __func__);
+ dev_dbg(dev, "%s enter\n", __func__);
if (sport)
uart_resume_port(&sport_uart_reg, &sport->port);
return 0;
}
-static int sport_uart_probe(struct platform_device *dev)
+static struct dev_pm_ops bfin_sport_uart_dev_pm_ops = {
+ .suspend = sport_uart_suspend,
+ .resume = sport_uart_resume,
+};
+#endif
+
+static int __devinit sport_uart_probe(struct platform_device *pdev)
{
- pr_debug("%s enter\n", __func__);
- sport_uart_ports[dev->id].port.dev = &dev->dev;
- uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port);
- platform_set_drvdata(dev, &sport_uart_ports[dev->id]);
+ struct resource *res;
+ struct sport_uart_port *sport;
+ int ret = 0;
- return 0;
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+
+ if (pdev->id < 0 || pdev->id >= BFIN_SPORT_UART_MAX_PORTS) {
+ dev_err(&pdev->dev, "Wrong sport uart platform device id.\n");
+ return -ENOENT;
+ }
+
+ if (bfin_sport_uart_ports[pdev->id] == NULL) {
+ bfin_sport_uart_ports[pdev->id] =
+			kzalloc(sizeof(struct sport_uart_port), GFP_KERNEL);
+ sport = bfin_sport_uart_ports[pdev->id];
+ if (!sport) {
+ dev_err(&pdev->dev,
+ "Fail to kmalloc sport_uart_port\n");
+ return -ENOMEM;
+ }
+
+ ret = peripheral_request_list(
+ (unsigned short *)pdev->dev.platform_data, DRV_NAME);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Fail to request SPORT peripherals\n");
+ goto out_error_free_mem;
+ }
+
+ spin_lock_init(&sport->port.lock);
+		sport->port.fifosize = SPORT_TX_FIFO_SIZE;
+ sport->port.ops = &sport_uart_ops;
+ sport->port.line = pdev->id;
+ sport->port.iotype = UPIO_MEM;
+ sport->port.flags = UPF_BOOT_AUTOCONF;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n");
+ ret = -ENOENT;
+ goto out_error_free_peripherals;
+ }
+
+ sport->port.membase = ioremap(res->start,
+			resource_size(res));
+ if (!sport->port.membase) {
+ dev_err(&pdev->dev, "Cannot map sport IO\n");
+ ret = -ENXIO;
+ goto out_error_free_peripherals;
+ }
+
+ sport->port.irq = platform_get_irq(pdev, 0);
+ if (sport->port.irq < 0) {
+ dev_err(&pdev->dev, "No sport RX/TX IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+
+ sport->err_irq = platform_get_irq(pdev, 1);
+ if (sport->err_irq < 0) {
+ dev_err(&pdev->dev, "No sport status IRQ specified\n");
+ ret = -ENOENT;
+ goto out_error_unmap;
+ }
+ }
+
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+ if (!is_early_platform_device(pdev)) {
+#endif
+ sport = bfin_sport_uart_ports[pdev->id];
+ sport->port.dev = &pdev->dev;
+ dev_set_drvdata(&pdev->dev, sport);
+ ret = uart_add_one_port(&sport_uart_reg, &sport->port);
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+ }
+#endif
+ if (!ret)
+ return 0;
+
+ if (sport) {
+out_error_unmap:
+ iounmap(sport->port.membase);
+out_error_free_peripherals:
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+out_error_free_mem:
+ kfree(sport);
+ bfin_sport_uart_ports[pdev->id] = NULL;
+ }
+
+ return ret;
}
-static int sport_uart_remove(struct platform_device *dev)
+static int __devexit sport_uart_remove(struct platform_device *pdev)
{
- struct sport_uart_port *sport = platform_get_drvdata(dev);
+ struct sport_uart_port *sport = platform_get_drvdata(pdev);
- pr_debug("%s enter\n", __func__);
- platform_set_drvdata(dev, NULL);
+ dev_dbg(&pdev->dev, "%s enter\n", __func__);
+ dev_set_drvdata(&pdev->dev, NULL);
- if (sport)
+ if (sport) {
uart_remove_one_port(&sport_uart_reg, &sport->port);
+ iounmap(sport->port.membase);
+ peripheral_free_list(
+ (unsigned short *)pdev->dev.platform_data);
+ kfree(sport);
+ bfin_sport_uart_ports[pdev->id] = NULL;
+ }
return 0;
}
static struct platform_driver sport_uart_driver = {
.probe = sport_uart_probe,
- .remove = sport_uart_remove,
- .suspend = sport_uart_suspend,
- .resume = sport_uart_resume,
+ .remove = __devexit_p(sport_uart_remove),
.driver = {
.name = DRV_NAME,
+#ifdef CONFIG_PM
+ .pm = &bfin_sport_uart_dev_pm_ops,
+#endif
},
};
+#ifdef CONFIG_SERIAL_BFIN_SPORT_CONSOLE
+static __initdata struct early_platform_driver early_sport_uart_driver = {
+ .class_str = DRV_NAME,
+ .pdrv = &sport_uart_driver,
+ .requested_id = EARLY_PLATFORM_ID_UNSET,
+};
+
+static int __init sport_uart_rs_console_init(void)
+{
+ early_platform_driver_register(&early_sport_uart_driver, DRV_NAME);
+
+ early_platform_driver_probe(DRV_NAME, BFIN_SPORT_UART_MAX_PORTS, 0);
+
+ register_console(&sport_uart_console);
+
+ return 0;
+}
+console_initcall(sport_uart_rs_console_init);
+#endif
+
static int __init sport_uart_init(void)
{
int ret;
- pr_debug("%s enter\n", __func__);
+ pr_info("Serial: Blackfin uart over sport driver\n");
+
ret = uart_register_driver(&sport_uart_reg);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register %s:%d\n",
+ if (ret) {
+ pr_err("failed to register %s:%d\n",
sport_uart_reg.driver_name, ret);
return ret;
}
ret = platform_driver_register(&sport_uart_driver);
- if (ret != 0) {
- printk(KERN_ERR "Failed to register sport uart driver:%d\n", ret);
+ if (ret) {
+ pr_err("failed to register sport uart driver:%d\n", ret);
uart_unregister_driver(&sport_uart_reg);
}
-
- pr_debug("%s exit\n", __func__);
return ret;
}
+module_init(sport_uart_init);
static void __exit sport_uart_exit(void)
{
- pr_debug("%s enter\n", __func__);
platform_driver_unregister(&sport_uart_driver);
uart_unregister_driver(&sport_uart_reg);
}
-
-module_init(sport_uart_init);
module_exit(sport_uart_exit);
+MODULE_AUTHOR("Sonic Zhang, Roy Huang");
+MODULE_DESCRIPTION("Blackfin serial over SPORT driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h
index 671d41cc1a3..abe03614e4d 100644
--- a/drivers/serial/bfin_sport_uart.h
+++ b/drivers/serial/bfin_sport_uart.h
@@ -1,29 +1,23 @@
/*
- * File: linux/drivers/serial/bfin_sport_uart.h
+ * Blackfin On-Chip Sport Emulated UART Driver
*
- * Based on: include/asm-blackfin/mach-533/bfin_serial_5xx.h
- * Author: Roy Huang <roy.huang>analog.com>
+ * Copyright 2006-2008 Analog Devices Inc.
*
- * Created: Nov 22, 2006
- * Copyright: (C) Analog Device Inc.
- * Description: this driver enable SPORTs on Blackfin emulate UART.
+ * Enter bugs at http://blackfin.uclinux.org/
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see the file COPYING, or write
- * to the Free Software Foundation, Inc.,
- * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ * Licensed under the GPL-2 or later.
*/
+/*
+ * This driver and the supported hardware are described in ADI application
+ * note EE-191:
+ * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf
+ * The note describes how to implement a UART on a SHARC DSP, but this
+ * driver is implemented for the Blackfin processor.
+ * Transmit Frame Sync is not used by this driver to transfer data out.
+ */
+
+#ifndef _BFIN_SPORT_UART_H
+#define _BFIN_SPORT_UART_H
#define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */
#define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */
@@ -61,3 +55,7 @@
#define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v)
#define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v)
#define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v)
+
+#define SPORT_TX_FIFO_SIZE 8
+
+#endif /* _BFIN_SPORT_UART_H */
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 0028b6f89ce..53a46822705 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -751,7 +751,6 @@ static void recv_interrupt(u16 port_int_reg, struct icom_port *icom_port)
trace(icom_port, "FID_STATUS", status);
count = cpu_to_le16(icom_port->statStg->rcv[rcv_buff].leLength);
- count = tty_buffer_request_room(tty, count);
trace(icom_port, "RCV_COUNT", count);
trace(icom_port, "REAL_COUNT", count);
@@ -1654,4 +1653,6 @@ MODULE_DESCRIPTION("IBM iSeries Serial IOA driver");
MODULE_SUPPORTED_DEVICE
("IBM iSeries 2745, 2771, 2772, 2742, 2793 and 2805 Communications adapters");
MODULE_LICENSE("GPL");
-
+MODULE_FIRMWARE("icom_call_setup.bin");
+MODULE_FIRMWARE("icom_res_dce.bin");
+MODULE_FIRMWARE("icom_asc.bin");
diff --git a/drivers/serial/imx.c b/drivers/serial/imx.c
index 60d665a17a8..d00fcf8e6c7 100644
--- a/drivers/serial/imx.c
+++ b/drivers/serial/imx.c
@@ -1279,7 +1279,7 @@ static int serial_imx_probe(struct platform_device *pdev)
sport->use_irda = 1;
#endif
- if (pdata->init) {
+ if (pdata && pdata->init) {
ret = pdata->init(pdev);
if (ret)
goto clkput;
@@ -1292,7 +1292,7 @@ static int serial_imx_probe(struct platform_device *pdev)
return 0;
deinit:
- if (pdata->exit)
+ if (pdata && pdata->exit)
pdata->exit(pdev);
clkput:
clk_put(sport->clk);
@@ -1321,7 +1321,7 @@ static int serial_imx_remove(struct platform_device *pdev)
clk_disable(sport->clk);
- if (pdata->exit)
+ if (pdata && pdata->exit)
pdata->exit(pdev);
iounmap(sport->port.membase);
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c
index 85dc0410ac1..23ba6b40b3a 100644
--- a/drivers/serial/ioc3_serial.c
+++ b/drivers/serial/ioc3_serial.c
@@ -1411,8 +1411,7 @@ static int receive_chars(struct uart_port *the_port)
read_count = do_read(the_port, ch, MAX_CHARS);
if (read_count > 0) {
flip = 1;
- read_room = tty_buffer_request_room(tty, read_count);
- tty_insert_flip_string(tty, ch, read_room);
+ read_room = tty_insert_flip_string(tty, ch, read_count);
the_port->icount.rx += read_count;
}
spin_unlock_irqrestore(&the_port->lock, pflags);
diff --git a/drivers/serial/jsm/jsm_driver.c b/drivers/serial/jsm/jsm_driver.c
index 108c3e0471f..12cb5e446a4 100644
--- a/drivers/serial/jsm/jsm_driver.c
+++ b/drivers/serial/jsm/jsm_driver.c
@@ -179,6 +179,7 @@ static int __devinit jsm_probe_one(struct pci_dev *pdev, const struct pci_device
return 0;
out_free_irq:
+ jsm_remove_uart_port(brd);
free_irq(brd->irq, brd);
out_iounmap:
iounmap(brd->re_map_membase);
diff --git a/drivers/serial/jsm/jsm_tty.c b/drivers/serial/jsm/jsm_tty.c
index cd95e215550..5673ca9dfdc 100644
--- a/drivers/serial/jsm/jsm_tty.c
+++ b/drivers/serial/jsm/jsm_tty.c
@@ -432,7 +432,7 @@ int __devinit jsm_tty_init(struct jsm_board *brd)
int jsm_uart_port_init(struct jsm_board *brd)
{
- int i;
+ int i, rc;
unsigned int line;
struct jsm_channel *ch;
@@ -467,8 +467,11 @@ int jsm_uart_port_init(struct jsm_board *brd)
} else
set_bit(line, linemap);
brd->channels[i]->uart_port.line = line;
- if (uart_add_one_port (&jsm_uart_driver, &brd->channels[i]->uart_port))
- printk(KERN_INFO "jsm: add device failed\n");
+		rc = uart_add_one_port(&jsm_uart_driver, &brd->channels[i]->uart_port);
+		if (rc) {
+ printk(KERN_INFO "jsm: Port %d failed. Aborting...\n", i);
+ return rc;
+ }
else
printk(KERN_INFO "jsm: Port %d added\n", i);
}
diff --git a/drivers/serial/msm_serial.c b/drivers/serial/msm_serial.c
index b05c5aa02cb..ecdc0facf7e 100644
--- a/drivers/serial/msm_serial.c
+++ b/drivers/serial/msm_serial.c
@@ -691,6 +691,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
struct msm_port *msm_port;
struct resource *resource;
struct uart_port *port;
+ int irq;
if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
return -ENXIO;
@@ -711,9 +712,10 @@ static int __init msm_serial_probe(struct platform_device *pdev)
return -ENXIO;
port->mapbase = resource->start;
- port->irq = platform_get_irq(pdev, 0);
- if (unlikely(port->irq < 0))
+ irq = platform_get_irq(pdev, 0);
+ if (unlikely(irq < 0))
return -ENXIO;
+ port->irq = irq;
platform_set_drvdata(pdev, port);
diff --git a/drivers/serial/timbuart.c b/drivers/serial/timbuart.c
index 34b31da01d0..7bf10264a6a 100644
--- a/drivers/serial/timbuart.c
+++ b/drivers/serial/timbuart.c
@@ -421,7 +421,7 @@ static struct uart_driver timbuart_driver = {
static int timbuart_probe(struct platform_device *dev)
{
- int err;
+ int err, irq;
struct timbuart_port *uart;
struct resource *iomem;
@@ -453,11 +453,12 @@ static int timbuart_probe(struct platform_device *dev)
uart->port.mapbase = iomem->start;
uart->port.membase = NULL;
- uart->port.irq = platform_get_irq(dev, 0);
- if (uart->port.irq < 0) {
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0) {
err = -EINVAL;
goto err_register;
}
+ uart->port.irq = irq;
tasklet_init(&uart->tasklet, timbuart_tasklet, (unsigned long)uart);
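
The msm_serial and timbuart hunks above apply the same fix: platform_get_irq() returns a negative errno, but the irq field of struct uart_port is unsigned, so the result must be checked in a signed local before it is stored. A user-space sketch of the pattern (the fake_ names are stand-ins, not kernel APIs):

	/* Sketch of the signed-temporary check used for platform_get_irq() above. */
	#include <stdio.h>

	struct fake_uart_port {
		unsigned int irq;		/* unsigned, like uart_port.irq */
	};

	static int fake_platform_get_irq(int fail)
	{
		return fail ? -6 /* -ENXIO */ : 42;
	}

	int main(void)
	{
		struct fake_uart_port port;
		int irq;

		/* Buggy form: port.irq = fake_platform_get_irq(1); followed by a
		 * test of (port.irq < 0) can never trigger, since port.irq is
		 * unsigned and the negative errno wraps to a huge value. */

		irq = fake_platform_get_irq(1);
		if (irq < 0) {			/* works: irq is signed */
			printf("probe fails with %d\n", irq);
			return 1;
		}
		port.irq = irq;			/* store only after validation */
		printf("got irq %u\n", port.irq);
		return 0;
	}
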
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0fee95cd9a4..a191fa2be7c 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -181,7 +181,7 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP24xx/OMAP34xx"
- depends on ARCH_OMAP24XX || ARCH_OMAP34XX
+ depends on ARCH_OMAP2 || ARCH_OMAP3
help
SPI master controller for OMAP24xx/OMAP34xx Multichannel SPI
(McSPI) modules.
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index bf5f95a1941..715c518b1b6 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -1014,7 +1014,7 @@ static u8 __initdata spi2_txdma_id[] = {
OMAP24XX_DMA_SPI2_TX1,
};
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX) \
+#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) \
|| defined(CONFIG_ARCH_OMAP4)
static u8 __initdata spi3_rxdma_id[] = {
OMAP24XX_DMA_SPI3_RX0,
diff --git a/drivers/ssb/driver_chipcommon_pmu.c b/drivers/ssb/driver_chipcommon_pmu.c
index 64abd11f6fb..3d551245a4e 100644
--- a/drivers/ssb/driver_chipcommon_pmu.c
+++ b/drivers/ssb/driver_chipcommon_pmu.c
@@ -332,6 +332,12 @@ static void ssb_pmu_pll_init(struct ssb_chipcommon *cc)
case 0x5354:
ssb_pmu0_pllinit_r0(cc, crystalfreq);
break;
+ case 0x4322:
+ if (cc->pmu.rev == 2) {
+ chipco_write32(cc, SSB_CHIPCO_PLLCTL_ADDR, 0x0000000A);
+ chipco_write32(cc, SSB_CHIPCO_PLLCTL_DATA, 0x380005C0);
+ }
+ break;
default:
ssb_printk(KERN_ERR PFX
"ERROR: PLL init unknown for device %04X\n",
@@ -417,6 +423,7 @@ static void ssb_pmu_resources_init(struct ssb_chipcommon *cc)
switch (bus->chip_id) {
case 0x4312:
+ case 0x4322:
/* We keep the default settings:
* min_msk = 0xCBB
* max_msk = 0x7FFFF
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 3c6feed46f6..97efce184a8 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -270,7 +270,6 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
set_irq(dev, irq++);
}
break;
- /* fallthrough */
case SSB_DEV_PCI:
case SSB_DEV_ETHERNET:
case SSB_DEV_ETHERNET_GBIT:
@@ -281,6 +280,10 @@ void ssb_mipscore_init(struct ssb_mipscore *mcore)
set_irq(dev, irq++);
break;
}
+ /* fallthrough */
+ case SSB_DEV_EXTIF:
+ set_irq(dev, 0);
+ break;
}
}
ssb_dprintk(KERN_INFO PFX "after irq reconfiguration\n");
diff --git a/drivers/ssb/ssb_private.h b/drivers/ssb/ssb_private.h
index 56054be4d11..0331139a726 100644
--- a/drivers/ssb/ssb_private.h
+++ b/drivers/ssb/ssb_private.h
@@ -196,7 +196,7 @@ extern int ssb_devices_thaw(struct ssb_freeze_context *ctx);
#ifdef CONFIG_SSB_B43_PCI_BRIDGE
extern int __init b43_pci_ssb_bridge_init(void);
extern void __exit b43_pci_ssb_bridge_exit(void);
-#else /* CONFIG_SSB_B43_PCI_BRIDGR */
+#else /* CONFIG_SSB_B43_PCI_BRIDGE */
static inline int b43_pci_ssb_bridge_init(void)
{
return 0;
@@ -204,6 +204,6 @@ static inline int b43_pci_ssb_bridge_init(void)
static inline void b43_pci_ssb_bridge_exit(void)
{
}
-#endif /* CONFIG_SSB_PCIHOST */
+#endif /* CONFIG_SSB_B43_PCI_BRIDGE */
#endif /* LINUX_SSB_PRIVATE_H_ */
diff --git a/drivers/staging/arlan/arlan-main.c b/drivers/staging/arlan/arlan-main.c
index 921a082487a..88fdd53cf5d 100644
--- a/drivers/staging/arlan/arlan-main.c
+++ b/drivers/staging/arlan/arlan-main.c
@@ -1455,10 +1455,10 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
#ifdef ARLAN_MULTICAST
if (!(dev->flags & IFF_ALLMULTI) &&
!(dev->flags & IFF_PROMISC) &&
- dev->mc_list)
+ !netdev_mc_empty(dev))
{
char hw_dst_addr[6];
- struct dev_mc_list *dmi = dev->mc_list;
+ struct dev_mc_list *dmi;
int i;
memcpy_fromio(hw_dst_addr, arlan->ultimateDestAddress, 6);
@@ -1469,20 +1469,15 @@ static void arlan_rx_interrupt(struct net_device *dev, u_char rxStatus, u_short
printk(KERN_ERR "%s mcast 0x0100 \n", dev->name);
else if (hw_dst_addr[1] == 0x40)
printk(KERN_ERR "%s m/bcast 0x0140 \n", dev->name);
- while (dmi)
- {
- if (dmi->dmi_addrlen == 6) {
- if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
- printk(KERN_ERR "%s mcl %pM\n",
- dev->name, dmi->dmi_addr);
- for (i = 0; i < 6; i++)
- if (dmi->dmi_addr[i] != hw_dst_addr[i])
- break;
- if (i == 6)
+ netdev_for_each_mc_entry(dmi, dev) {
+ if (arlan_debug & ARLAN_DEBUG_HEADER_DUMP)
+ printk(KERN_ERR "%s mcl %pM\n",
+ dev->name, dmi->dmi_addr);
+ for (i = 0; i < 6; i++)
+ if (dmi->dmi_addr[i] != hw_dst_addr[i])
break;
- } else
- printk(KERN_ERR "%s: invalid multicast address length given.\n", dev->name);
- dmi = dmi->next;
+ if (i == 6)
+ break;
}
/* we reach here if multicast filtering is on and packet
* is multicast and not for receive */
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index 24d97b4fa6f..edb78ae9e59 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -411,9 +411,9 @@ void et131x_multicast(struct net_device *netdev)
{
struct et131x_adapter *adapter = netdev_priv(netdev);
uint32_t PacketFilter = 0;
- uint32_t count;
unsigned long flags;
- struct dev_mc_list *mclist = netdev->mc_list;
+ struct dev_mc_list *mclist;
+ int i;
spin_lock_irqsave(&adapter->Lock, flags);
@@ -444,11 +444,11 @@ void et131x_multicast(struct net_device *netdev)
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
- if (netdev->mc_count > NIC_MAX_MCAST_LIST) {
+ if (netdev_mc_count(netdev) > NIC_MAX_MCAST_LIST) {
adapter->PacketFilter |= ET131X_PACKET_TYPE_ALL_MULTICAST;
}
- if (netdev->mc_count < 1) {
+ if (netdev_mc_count(netdev) < 1) {
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_ALL_MULTICAST;
adapter->PacketFilter &= ~ET131X_PACKET_TYPE_MULTICAST;
} else {
@@ -456,12 +456,13 @@ void et131x_multicast(struct net_device *netdev)
}
/* Set values in the private adapter struct */
- adapter->MCAddressCount = netdev->mc_count;
-
- if (netdev->mc_count) {
- count = netdev->mc_count - 1;
- memcpy(adapter->MCList[count], mclist->dmi_addr, ETH_ALEN);
+ i = 0;
+ netdev_for_each_mc_addr(mclist, netdev) {
+ if (i == NIC_MAX_MCAST_LIST)
+ break;
+ memcpy(adapter->MCList[i++], mclist->dmi_addr, ETH_ALEN);
}
+ adapter->MCAddressCount = i;
/* Are the new flags different from the previous ones? If not, then no
* action is required
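
These staging conversions drop direct walks of dev->mc_list and dev->mc_count in favour of the netdev_mc_count(), netdev_mc_empty() and netdev_for_each_mc_addr() helpers. A user-space sketch of the iteration idiom; the structures and macro bodies here are simplified stand-ins, not the kernel's definitions:

	/* Sketch of the multicast-list helpers adopted by the drivers above. */
	#include <stdio.h>

	struct dev_mc_list {
		unsigned char dmi_addr[6];
		struct dev_mc_list *next;
	};

	struct net_device {
		struct dev_mc_list *mc_list;
		int mc_count;
	};

	/* Simplified stand-ins for the kernel helpers. */
	#define netdev_mc_count(dev)	((dev)->mc_count)
	#define netdev_mc_empty(dev)	(netdev_mc_count(dev) == 0)
	#define netdev_for_each_mc_addr(mclist, dev) \
		for (mclist = (dev)->mc_list; mclist; mclist = mclist->next)

	int main(void)
	{
		struct dev_mc_list b = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x02 }, NULL };
		struct dev_mc_list a = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }, &b };
		struct net_device dev = { &a, 2 };
		struct dev_mc_list *mclist;
		int i = 0;

		if (!netdev_mc_empty(&dev))
			netdev_for_each_mc_addr(mclist, &dev)
				i++;	/* e.g. copy mclist->dmi_addr into a HW filter */

		printf("walked %d of %d multicast addresses\n", i, netdev_mc_count(&dev));
		return 0;
	}
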
diff --git a/drivers/staging/netwave/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
index e61e6b9440a..e936717d1f4 100644
--- a/drivers/staging/netwave/netwave_cs.c
+++ b/drivers/staging/netwave/netwave_cs.c
@@ -1341,15 +1341,15 @@ static void set_multicast_list(struct net_device *dev)
#ifdef PCMCIA_DEBUG
{
xstatic int old;
- if (old != dev->mc_count) {
- old = dev->mc_count;
+ if (old != netdev_mc_count(dev)) {
+ old = netdev_mc_count(dev);
pr_debug("%s: setting Rx mode to %d addresses.\n",
- dev->name, dev->mc_count);
+ dev->name, netdev_mc_count(dev));
}
}
#endif
- if (dev->mc_count || (dev->flags & IFF_ALLMULTI)) {
+ if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI)) {
/* Multicast Mode */
rcvMode = rxConfRxEna + rxConfAMP + rxConfBcast;
} else if (dev->flags & IFF_PROMISC) {
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 02b63678811..4a2161f70c7 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -350,7 +350,7 @@ static void cvm_oct_common_set_multicast_list(struct net_device *dev)
control.u64 = 0;
control.s.bcst = 1; /* Allow broadcast MAC addresses */
- if (dev->mc_list || (dev->flags & IFF_ALLMULTI) ||
+ if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI) ||
(dev->flags & IFF_PROMISC))
/* Force accept multicast packets */
control.s.mcst = 2;
diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
index f69b7783027..11fc4d5c43e 100644
--- a/drivers/staging/pohmelfs/inode.c
+++ b/drivers/staging/pohmelfs/inode.c
@@ -969,7 +969,7 @@ int pohmelfs_setattr_raw(struct inode *inode, struct iattr *attr)
if ((attr->ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
(attr->ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
- err = vfs_dq_transfer(inode, attr) ? -EDQUOT : 0;
+ err = dquot_transfer(inode, attr);
if (err)
goto err_out_exit;
}
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211.h b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
index 0d490c164db..9086047c32d 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211.h
@@ -482,15 +482,6 @@ struct ieee80211_header_data {
u16 seq_ctrl;
};
-struct ieee80211_hdr_3addr {
- u16 frame_ctl;
- u16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- u16 seq_ctl;
-} __attribute__ ((packed));
-
struct ieee80211_hdr_4addr {
u16 frame_ctl;
u16 duration_id;
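Dropping the driver's private struct ieee80211_hdr_3addr means the code now uses the shared definition, whose fields are named frame_control and seq_ctrl rather than frame_ctl and seq_ctl; that is what drives the renames in the softmac and core files below. Roughly, the layout the code relies on looks like this (a sketch following include/linux/ieee80211.h from memory, with an illustrative accessor; not copied from the patch):

/* 3-address 802.11 header, fields little-endian on the wire */
struct ieee80211_hdr_3addr {
        __le16 frame_control;           /* was frame_ctl in the private copy */
        __le16 duration_id;
        u8 addr1[ETH_ALEN];
        u8 addr2[ETH_ALEN];
        u8 addr3[ETH_ALEN];
        __le16 seq_ctrl;                /* was seq_ctl */
} __attribute__ ((packed));

/* typical accessor: pull the frame subtype out of frame_control */
static inline u16 example_get_stype(const struct ieee80211_hdr_3addr *hdr)
{
        return le16_to_cpu(hdr->frame_control) & IEEE80211_FCTL_STYPE;
}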
diff --git a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
index c7c645af0eb..a2150670ef5 100644
--- a/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8187se/ieee80211/ieee80211_softmac.c
@@ -203,7 +203,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
enqueue_mgmt(ieee,skb);
}else{
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -220,7 +220,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
spin_unlock_irqrestore(&ieee->lock, flags);
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -246,7 +246,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
if(single){
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -259,7 +259,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
}else{
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -287,7 +287,7 @@ inline struct sk_buff *ieee80211_disassociate_skb(
return NULL;
disass = (struct ieee80211_disassoc_frame *) skb_put(skb,sizeof(struct ieee80211_disassoc_frame));
- disass->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
+ disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
disass->header.duration_id = 0;
memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN);
@@ -905,7 +905,7 @@ struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
assoc = (struct ieee80211_assoc_response_frame *)
skb_put(skb,sizeof(struct ieee80211_assoc_response_frame));
- assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
+ assoc->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
memcpy(assoc->header.addr1, dest,ETH_ALEN);
memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -981,7 +981,7 @@ struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
- hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
(pwr ? IEEE80211_FCTL_PM:0));
@@ -1084,7 +1084,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
skb_put(skb, sizeof(struct ieee80211_assoc_request_frame));
- hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ;
+ hdr->header.frame_control = IEEE80211_STYPE_ASSOC_REQ;
hdr->header.duration_id= 37; //FIXME
memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -1786,11 +1786,11 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
tasklet_schedule(&ieee->ps_task);
- if(WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_PROBE_RESP &&
- WLAN_FC_GET_STYPE(header->frame_ctl) != IEEE80211_STYPE_BEACON)
+ if (WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_PROBE_RESP &&
+ WLAN_FC_GET_STYPE(header->frame_control) != IEEE80211_STYPE_BEACON)
ieee->last_rx_ps_time = jiffies;
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+ switch (WLAN_FC_GET_STYPE(header->frame_control)) {
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
@@ -2064,7 +2064,7 @@ void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
header = (struct ieee80211_hdr_3addr *) skb->data;
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
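Every transmit path above stamps the header with cpu_to_le16(ieee->seq_ctrl[0] << 4) and wraps the counter after 0xFFF. That follows the 802.11 sequence-control layout: the fragment number occupies bits 0-3 and the 12-bit sequence number bits 4-15. A small sketch of the packing and unpacking (macro names are illustrative, not from the patch):

/* sequence-control field: fragment number in bits 0-3, sequence number in bits 4-15 */
#define EXAMPLE_SEQ_CTRL(seq, frag)     cpu_to_le16((((seq) & 0x0FFF) << 4) | ((frag) & 0x000F))
#define EXAMPLE_GET_FRAG(sc)            (le16_to_cpu(sc) & 0x000F)
#define EXAMPLE_GET_SEQ(sc)             (le16_to_cpu(sc) >> 4)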
diff --git a/drivers/staging/rtl8187se/r8180_core.c b/drivers/staging/rtl8187se/r8180_core.c
index e0f13efdb15..1847f38b9f2 100644
--- a/drivers/staging/rtl8187se/r8180_core.c
+++ b/drivers/staging/rtl8187se/r8180_core.c
@@ -1890,7 +1890,7 @@ rate)
struct r8180_priv *priv = (struct r8180_priv *)ieee80211_priv(dev);
int mode;
struct ieee80211_hdr_3addr *h = (struct ieee80211_hdr_3addr *) skb->data;
- short morefrag = (h->frame_ctl) & IEEE80211_FCTL_MOREFRAGS;
+ short morefrag = (h->frame_control) & IEEE80211_FCTL_MOREFRAGS;
unsigned long flags;
int priority;
@@ -2158,7 +2158,7 @@ short rtl8180_tx(struct net_device *dev, u8* txbuf, int len, int priority,
TxDescDuration = ThisFrameTime + aSifsTime + AckTime;
}
- if(!(frag_hdr->frame_ctl & IEEE80211_FCTL_MOREFRAGS)) { //no more fragment
+ if (!(frag_hdr->frame_control & IEEE80211_FCTL_MOREFRAGS)) {
// ThisFrame-ACK.
Duration = aSifsTime + AckTime;
} else { // One or more fragments remained.
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211.h b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
index 9a4c858b066..2b8c85556dc 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211.h
@@ -609,16 +609,6 @@ struct ieee80211_hdr_2addr {
u8 payload[0];
} __attribute__ ((packed));
-struct ieee80211_hdr_3addr {
- __le16 frame_ctl;
- __le16 duration_id;
- u8 addr1[ETH_ALEN];
- u8 addr2[ETH_ALEN];
- u8 addr3[ETH_ALEN];
- __le16 seq_ctl;
- u8 payload[0];
-} __attribute__ ((packed));
-
struct ieee80211_hdr_4addr {
__le16 frame_ctl;
__le16 duration_id;
@@ -1672,7 +1662,7 @@ static inline u8 *ieee80211_get_payload(struct rtl_ieee80211_hdr *hdr)
case IEEE80211_2ADDR_LEN:
return ((struct ieee80211_hdr_2addr *)hdr)->payload;
case IEEE80211_3ADDR_LEN:
- return ((struct ieee80211_hdr_3addr *)hdr)->payload;
+ return (void *)hdr+sizeof(struct ieee80211_hdr_3addr);
case IEEE80211_4ADDR_LEN:
return ((struct ieee80211_hdr_4addr *)hdr)->payload;
}
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h b/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h
index 123abcf0f49..7d6c3bc143a 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_r8192s.h
@@ -201,7 +201,7 @@ typedef union _frameqos {
static inline u8 Frame_QoSTID(u8 *buf)
{
struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr *)buf;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
+ u16 fc = le16_to_cpu(hdr->frame_control);
return (u8)((frameqos *)(buf +
(((fc & IEEE80211_FCTL_TODS) &&
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
index fecfa120ff4..095b8c64314 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_rx.c
@@ -744,7 +744,7 @@ u8 parse_subframe(struct sk_buff *skb,
struct ieee80211_rxb *rxb,u8* src,u8* dst)
{
struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr* )skb->data;
- u16 fc = le16_to_cpu(hdr->frame_ctl);
+ u16 fc = le16_to_cpu(hdr->frame_control);
u16 LLCOffset= sizeof(struct ieee80211_hdr_3addr);
u16 ChkLength;
@@ -756,7 +756,7 @@ u8 parse_subframe(struct sk_buff *skb,
struct sk_buff *sub_skb;
u8 *data_ptr;
/* just for debug purpose */
- SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));
+ SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctrl));
if((IEEE80211_QOS_HAS_SEQ(fc))&&\
(((frameqos *)(skb->data + IEEE80211_3ADDR_LEN))->field.reserved)) {
@@ -2370,7 +2370,7 @@ static inline void ieee80211_process_probe_response(
escape_essid(info_element->data,
info_element->len),
MAC_ARG(beacon->header.addr3),
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ WLAN_FC_GET_STYPE(beacon->header.frame_control) ==
IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
return;
@@ -2387,7 +2387,7 @@ static inline void ieee80211_process_probe_response(
return;
if(ieee->bGlobalDomain)
{
- if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP)
+ if (WLAN_FC_GET_STYPE(beacon->header.frame_control) == IEEE80211_STYPE_PROBE_RESP)
{
// Case 1: Country code
if(IS_COUNTRY_IE_VALID(ieee) )
@@ -2454,7 +2454,7 @@ static inline void ieee80211_process_probe_response(
else
ieee->current_network.buseprotection = false;
}
- if(is_beacon(beacon->header.frame_ctl))
+ if(is_beacon(beacon->header.frame_control))
{
if(ieee->state == IEEE80211_LINKED)
ieee->LinkDetectInfo.NumRecvBcnInPeriod++;
@@ -2496,7 +2496,7 @@ static inline void ieee80211_process_probe_response(
escape_essid(network.ssid,
network.ssid_len),
MAC_ARG(network.bssid),
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ WLAN_FC_GET_STYPE(beacon->header.frame_control) ==
IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
#endif
@@ -2509,7 +2509,7 @@ static inline void ieee80211_process_probe_response(
escape_essid(target->ssid,
target->ssid_len),
MAC_ARG(target->bssid),
- WLAN_FC_GET_STYPE(beacon->header.frame_ctl) ==
+ WLAN_FC_GET_STYPE(beacon->header.frame_control) ==
IEEE80211_STYPE_PROBE_RESP ?
"PROBE RESPONSE" : "BEACON");
@@ -2519,7 +2519,7 @@ static inline void ieee80211_process_probe_response(
*/
renew = !time_after(target->last_scanned + ieee->scan_age, jiffies);
//YJ,add,080819,for hidden ap
- if(is_beacon(beacon->header.frame_ctl) == 0)
+ if(is_beacon(beacon->header.frame_control) == 0)
network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & target->flags);
//if(strncmp(network.ssid, "linksys-c",9) == 0)
// printk("====>2 network.ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network.ssid, network.flags, target->ssid, target->flags);
@@ -2535,7 +2535,7 @@ static inline void ieee80211_process_probe_response(
}
spin_unlock_irqrestore(&ieee->lock, flags);
- if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, &network, ieee)&&\
+ if (is_beacon(beacon->header.frame_control)&&is_same_network(&ieee->current_network, &network, ieee)&&\
(ieee->state == IEEE80211_LINKED)) {
if(ieee->handle_beacon != NULL) {
ieee->handle_beacon(ieee->dev,beacon,&ieee->current_network);
diff --git a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
index 95d4f84dcf3..0ba2a01a06a 100644
--- a/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
+++ b/drivers/staging/rtl8192su/ieee80211/ieee80211_softmac.c
@@ -242,7 +242,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
if(ieee->queue_stop){
enqueue_mgmt(ieee,skb);
}else{
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0]<<4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -260,7 +260,7 @@ inline void softmac_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *ieee
spin_unlock_irqrestore(&ieee->lock, flags);
spin_lock_irqsave(&ieee->mgmt_tx_lock, flags);
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -302,7 +302,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
//printk("=============>%s()\n", __FUNCTION__);
if(single){
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -315,7 +315,7 @@ inline void softmac_ps_mgmt_xmit(struct sk_buff *skb, struct ieee80211_device *i
}else{
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -347,7 +347,7 @@ inline struct sk_buff *ieee80211_probe_req(struct ieee80211_device *ieee)
skb_reserve(skb, ieee->tx_headroom);
req = (struct ieee80211_probe_request *) skb_put(skb,sizeof(struct ieee80211_probe_request));
- req->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
+ req->header.frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_REQ);
req->header.duration_id = 0; //FIXME: is this OK ?
memset(req->header.addr1, 0xff, ETH_ALEN);
@@ -662,8 +662,8 @@ inline struct sk_buff *ieee80211_authentication_req(struct ieee80211_network *be
auth = (struct ieee80211_authentication *)
skb_put(skb, sizeof(struct ieee80211_authentication));
- auth->header.frame_ctl = IEEE80211_STYPE_AUTH;
- if (challengelen) auth->header.frame_ctl |= IEEE80211_FCTL_WEP;
+ auth->header.frame_control = IEEE80211_STYPE_AUTH;
+ if (challengelen) auth->header.frame_control |= IEEE80211_FCTL_WEP;
auth->header.duration_id = 0x013a; //FIXME
@@ -801,7 +801,7 @@ static struct sk_buff* ieee80211_probe_resp(struct ieee80211_device *ieee, u8 *d
beacon_buf->capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
- beacon_buf->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP);
+ beacon_buf->header.frame_control = cpu_to_le16(IEEE80211_STYPE_PROBE_RESP);
beacon_buf->info_element[0].id = MFIE_TYPE_SSID;
beacon_buf->info_element[0].len = ssid_len;
@@ -880,7 +880,7 @@ struct sk_buff* ieee80211_assoc_resp(struct ieee80211_device *ieee, u8 *dest)
assoc = (struct ieee80211_assoc_response_frame *)
skb_put(skb,sizeof(struct ieee80211_assoc_response_frame));
- assoc->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
+ assoc->header.frame_control = cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP);
memcpy(assoc->header.addr1, dest,ETH_ALEN);
memcpy(assoc->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
memcpy(assoc->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -935,7 +935,7 @@ struct sk_buff* ieee80211_auth_resp(struct ieee80211_device *ieee,int status, u8
memcpy(auth->header.addr3, ieee->dev->dev_addr, ETH_ALEN);
memcpy(auth->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(auth->header.addr1, dest, ETH_ALEN);
- auth->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_AUTH);
+ auth->header.frame_control = cpu_to_le16(IEEE80211_STYPE_AUTH);
return skb;
@@ -957,7 +957,7 @@ struct sk_buff* ieee80211_null_func(struct ieee80211_device *ieee,short pwr)
memcpy(hdr->addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(hdr->addr3, ieee->current_network.bssid, ETH_ALEN);
- hdr->frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
+ hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS |
(pwr ? IEEE80211_FCTL_PM:0));
@@ -1083,7 +1083,7 @@ inline struct sk_buff *ieee80211_association_req(struct ieee80211_network *beaco
skb_put(skb, sizeof(struct ieee80211_assoc_request_frame)+2);
- hdr->header.frame_ctl = IEEE80211_STYPE_ASSOC_REQ;
+ hdr->header.frame_control = IEEE80211_STYPE_ASSOC_REQ;
hdr->header.duration_id= 37; //FIXME
memcpy(hdr->header.addr1, beacon->bssid, ETH_ALEN);
memcpy(hdr->header.addr2, ieee->dev->dev_addr, ETH_ALEN);
@@ -1940,13 +1940,13 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
if(!ieee->proto_started)
return 0;
- switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {
+ switch (WLAN_FC_GET_STYPE(header->frame_control)) {
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
IEEE80211_DEBUG_MGMT("received [RE]ASSOCIATION RESPONSE (%d)\n",
- WLAN_FC_GET_STYPE(header->frame_ctl));
+ WLAN_FC_GET_STYPE(header->frame_control));
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
ieee->state == IEEE80211_ASSOCIATING_AUTHENTICATED &&
ieee->iw_mode == IW_MODE_INFRA){
@@ -2088,7 +2088,7 @@ ieee80211_rx_frame_softmac(struct ieee80211_device *ieee, struct sk_buff *skb,
if ((ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) &&
ieee->state == IEEE80211_LINKED &&
ieee->iw_mode == IW_MODE_INFRA){
- printk("==========>received disassoc/deauth(%x) frame, reason code:%x\n",WLAN_FC_GET_STYPE(header->frame_ctl), ((struct ieee80211_disassoc*)skb->data)->reason);
+ printk("==========>received disassoc/deauth(%x) frame, reason code:%x\n",WLAN_FC_GET_STYPE(header->frame_control), ((struct ieee80211_disassoc*)skb->data)->reason);
ieee->state = IEEE80211_ASSOCIATING;
ieee->softmac_stats.reassoc++;
ieee->is_roaming = true;
@@ -2239,7 +2239,7 @@ void ieee80211_rtl_wake_queue(struct ieee80211_device *ieee)
header = (struct ieee80211_hdr_3addr *) skb->data;
- header->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ header->seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -2574,7 +2574,7 @@ struct sk_buff *ieee80211_get_beacon_(struct ieee80211_device *ieee)
return NULL;
b = (struct ieee80211_probe_response *) skb->data;
- b->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_BEACON);
+ b->header.frame_control = cpu_to_le16(IEEE80211_STYPE_BEACON);
return skb;
@@ -2590,7 +2590,7 @@ struct sk_buff *ieee80211_get_beacon(struct ieee80211_device *ieee)
return NULL;
b = (struct ieee80211_probe_response *) skb->data;
- b->header.seq_ctl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
+ b->header.seq_ctrl = cpu_to_le16(ieee->seq_ctrl[0] << 4);
if (ieee->seq_ctrl[0] == 0xFFF)
ieee->seq_ctrl[0] = 0;
@@ -3139,7 +3139,7 @@ inline struct sk_buff *ieee80211_disassociate_skb(
return NULL;
disass = (struct ieee80211_disassoc *) skb_put(skb,sizeof(struct ieee80211_disassoc));
- disass->header.frame_ctl = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
+ disass->header.frame_control = cpu_to_le16(IEEE80211_STYPE_DISASSOC);
disass->header.duration_id = 0;
memcpy(disass->header.addr1, beacon->bssid, ETH_ALEN);
diff --git a/drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c b/drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c
index 8d12ffca18f..c6962450e06 100644
--- a/drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c
+++ b/drivers/staging/rtl8192su/ieee80211/rtl819x_BAProc.c
@@ -136,7 +136,7 @@ static struct sk_buff* ieee80211_ADDBA(struct ieee80211_device* ieee, u8* Dst, P
memcpy(BAReq->addr3, ieee->current_network.bssid, ETH_ALEN);
- BAReq->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
+ BAReq->frame_control = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
//tag += sizeof( struct ieee80211_hdr_3addr); //move to action field
tag = (u8*)skb_put(skb, 9);
@@ -221,7 +221,7 @@ static struct sk_buff* ieee80211_DELBA(
memcpy(Delba->addr1, dst, ETH_ALEN);
memcpy(Delba->addr2, ieee->dev->dev_addr, ETH_ALEN);
memcpy(Delba->addr3, ieee->current_network.bssid, ETH_ALEN);
- Delba->frame_ctl = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
+ Delba->frame_control = cpu_to_le16(IEEE80211_STYPE_MANAGE_ACT); //action frame
tag = (u8*)skb_put(skb, 6);
diff --git a/drivers/staging/rtl8192su/r8192U_core.c b/drivers/staging/rtl8192su/r8192U_core.c
index ccb9d5b8cd4..6f424fe8a23 100644
--- a/drivers/staging/rtl8192su/r8192U_core.c
+++ b/drivers/staging/rtl8192su/r8192U_core.c
@@ -6168,7 +6168,7 @@ void rtl8192_process_phyinfo(struct r8192_priv * priv,u8* buffer, struct ieee802
u16 sc ;
unsigned int frag,seq;
hdr = (struct ieee80211_hdr_3addr *)buffer;
- sc = le16_to_cpu(hdr->seq_ctl);
+ sc = le16_to_cpu(hdr->seq_ctrl);
frag = WLAN_GET_SEQ_FRAG(sc);
seq = WLAN_GET_SEQ_SEQ(sc);
//cosa add 04292008 to record the sequence number
@@ -6827,7 +6827,7 @@ void rtl8192SU_TranslateRxSignalStuff(struct sk_buff *skb,
tmp_buf = (u8*)skb->data;// + get_rxpacket_shiftbytes_819xusb(pstats);
hdr = (struct ieee80211_hdr_3addr *)tmp_buf;
- fc = le16_to_cpu(hdr->frame_ctl);
+ fc = le16_to_cpu(hdr->frame_control);
type = WLAN_FC_GET_TYPE(fc);
praddr = hdr->addr1;
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 5b191afc144..f5cc01ba414 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -1362,25 +1362,17 @@ static void slic_mcast_set_list(struct net_device *dev)
{
struct adapter *adapter = (struct adapter *)netdev_priv(dev);
int status = STATUS_SUCCESS;
- int i;
char *addresses;
- struct dev_mc_list *mc_list = dev->mc_list;
- int mc_count = dev->mc_count;
+ struct dev_mc_list *mc_list;
ASSERT(adapter);
- for (i = 1; i <= mc_count; i++) {
+ netdev_for_each_mc_addr(mc_list, dev) {
addresses = (char *) &mc_list->dmi_addr;
- if (mc_list->dmi_addrlen == 6) {
- status = slic_mcast_add_list(adapter, addresses);
- if (status != STATUS_SUCCESS)
- break;
- } else {
- status = -EINVAL;
+ status = slic_mcast_add_list(adapter, addresses);
+ if (status != STATUS_SUCCESS)
break;
- }
slic_mcast_set_bit(adapter, addresses);
- mc_list = mc_list->next;
}
if (adapter->devflags_prev != dev->flags) {
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index d8992d10d55..f6e34e03c8e 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -144,7 +144,7 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
case USB_SPEED_LOW:
case USB_SPEED_FULL:
case USB_SPEED_HIGH:
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
break;
default:
usbip_uerr("speed %d\n", speed);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 0db8d7b6e79..0dadb765fec 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -3082,8 +3082,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = pDevice->pMgmt;
u32 mc_filter[2];
- int i;
- struct dev_mc_list *mclist;
+ struct dev_mc_list *mclist;
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));
@@ -3093,7 +3092,7 @@ static void device_set_multi(struct net_device *dev) {
/* Unconditionally log net taps. */
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
}
- else if ((dev->mc_count > pDevice->multicast_limit)
+ else if ((netdev_mc_count(dev) > pDevice->multicast_limit)
|| (dev->flags & IFF_ALLMULTI)) {
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, 0xffffffff);
@@ -3103,8 +3102,7 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
- i++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index ef17c4958c6..a8e1adbc959 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -1596,7 +1596,7 @@ static void device_set_multi(struct net_device *dev) {
PSMgmtObject pMgmt = &(pDevice->sMgmtObj);
u32 mc_filter[2];
int ii;
- struct dev_mc_list *mclist;
+ struct dev_mc_list *mclist;
BYTE pbyData[8] = {0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
BYTE byTmpMode = 0;
int rc;
@@ -1619,7 +1619,8 @@ static void device_set_multi(struct net_device *dev) {
// Unconditionally log net taps.
pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
}
- else if ((dev->mc_count > pDevice->multicast_limit) || (dev->flags & IFF_ALLMULTI)) {
+ else if ((netdev_mc_count(dev) > pDevice->multicast_limit) ||
+ (dev->flags & IFF_ALLMULTI)) {
CONTROLnsRequestOut(pDevice,
MESSAGE_TYPE_WRITE,
MAC_REG_MAR0,
@@ -1631,8 +1632,7 @@ static void device_set_multi(struct net_device *dev) {
}
else {
memset(mc_filter, 0, sizeof(mc_filter));
- for (ii = 0, mclist = dev->mc_list; mclist && ii < dev->mc_count;
- ii++, mclist = mclist->next) {
+ netdev_for_each_mc_addr(mclist, dev) {
int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
}
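vt6655 and vt6656 keep the same hash-filter computation while switching to the new iterator: each multicast address is run through ether_crc() and the top six bits of the CRC select one of 64 filter bits, stored as two 32-bit words. A self-contained sketch of that computation (the function name is illustrative; the drivers additionally byte-swap the mask before writing it to the MAC registers):

#include <linux/netdevice.h>
#include <linux/etherdevice.h>  /* ether_crc() */

/* build a 64-bit multicast hash filter as two 32-bit words */
static void example_build_mc_filter(struct net_device *dev, u32 mc_filter[2])
{
        struct dev_mc_list *mclist;

        mc_filter[0] = mc_filter[1] = 0;
        netdev_for_each_mc_addr(mclist, dev) {
                /* top 6 bits of the CRC pick one of 64 filter bits */
                int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;

                mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
        }
}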
diff --git a/drivers/staging/wavelan/wavelan.c b/drivers/staging/wavelan/wavelan.c
index d634b2da3b8..54ca63196fd 100644
--- a/drivers/staging/wavelan/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
@@ -1367,7 +1367,7 @@ static void wavelan_set_multicast_list(struct net_device * dev)
#ifdef DEBUG_IOCTL_INFO
printk(KERN_DEBUG
"%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, dev->mc_count);
+ dev->name, dev->flags, netdev_mc_count(dev));
#endif
/* Are we asking for promiscuous mode,
@@ -1375,7 +1375,7 @@ static void wavelan_set_multicast_list(struct net_device * dev)
* or too many multicast addresses for the hardware filter? */
if ((dev->flags & IFF_PROMISC) ||
(dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > I82586_MAX_MULTICAST_ADDRESSES)) {
+ (netdev_mc_count(dev) > I82586_MAX_MULTICAST_ADDRESSES)) {
/*
* Enable promiscuous mode: receive all packets.
*/
@@ -1387,17 +1387,17 @@ static void wavelan_set_multicast_list(struct net_device * dev)
}
} else
/* Are there multicast addresses to send? */
- if (dev->mc_list != (struct dev_mc_list *) NULL) {
+ if (!netdev_mc_empty(dev)) {
/*
* Disable promiscuous mode, but receive all packets
* in multicast list
*/
#ifdef MULTICAST_AVOID
- if (lp->promiscuous || (dev->mc_count != lp->mc_count))
+ if (lp->promiscuous || (netdev_mc_count(dev) != lp->mc_count))
#endif
{
lp->promiscuous = 0;
- lp->mc_count = dev->mc_count;
+ lp->mc_count = netdev_mc_count(dev);
wv_82586_reconfig(dev);
}
@@ -3531,7 +3531,7 @@ static void wv_82586_config(struct net_device * dev)
/* Any address to set? */
if (lp->mc_count) {
- for (dmi = dev->mc_list; dmi; dmi = dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
outsw(PIOP1(ioaddr), (u16 *) dmi->dmi_addr,
WAVELAN_ADDR_SIZE >> 1);
@@ -3539,7 +3539,7 @@ static void wv_82586_config(struct net_device * dev)
printk(KERN_DEBUG
"%s: wv_82586_config(): set %d multicast addresses:\n",
dev->name, lp->mc_count);
- for (dmi = dev->mc_list; dmi; dmi = dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
#endif
}
diff --git a/drivers/staging/wavelan/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 10c702b5be4..04f691d127b 100644
--- a/drivers/staging/wavelan/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
@@ -1373,7 +1373,7 @@ wavelan_set_multicast_list(struct net_device * dev)
#ifdef DEBUG_IOCTL_INFO
printk(KERN_DEBUG "%s: wavelan_set_multicast_list(): setting Rx mode %02X to %d addresses.\n",
- dev->name, dev->flags, dev->mc_count);
+ dev->name, dev->flags, netdev_mc_count(dev));
#endif
if(dev->flags & IFF_PROMISC)
@@ -1394,7 +1394,7 @@ wavelan_set_multicast_list(struct net_device * dev)
/* If all multicast addresses
* or too much multicast addresses for the hardware filter */
if((dev->flags & IFF_ALLMULTI) ||
- (dev->mc_count > I82593_MAX_MULTICAST_ADDRESSES))
+ (netdev_mc_count(dev) > I82593_MAX_MULTICAST_ADDRESSES))
{
/*
* Disable promiscuous mode, but active the all multicast mode
@@ -1410,20 +1410,19 @@ wavelan_set_multicast_list(struct net_device * dev)
}
else
/* If there is some multicast addresses to send */
- if(dev->mc_list != (struct dev_mc_list *) NULL)
- {
+ if (!netdev_mc_empty(dev)) {
/*
* Disable promiscuous mode, but receive all packets
* in multicast list
*/
#ifdef MULTICAST_AVOID
if(lp->promiscuous || lp->allmulticast ||
- (dev->mc_count != lp->mc_count))
+ (netdev_mc_count(dev) != lp->mc_count))
#endif
{
lp->promiscuous = 0;
lp->allmulticast = 0;
- lp->mc_count = dev->mc_count;
+ lp->mc_count = netdev_mc_count(dev);
wv_82593_reconfig(dev);
}
@@ -3598,13 +3597,13 @@ wv_82593_config(struct net_device * dev)
/* If any multicast address to set */
if(lp->mc_count)
{
- struct dev_mc_list * dmi;
+ struct dev_mc_list *dmi;
int addrs_len = WAVELAN_ADDR_SIZE * lp->mc_count;
#ifdef DEBUG_CONFIG_INFO
printk(KERN_DEBUG "%s: wv_hw_config(): set %d multicast addresses:\n",
dev->name, lp->mc_count);
- for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
printk(KERN_DEBUG " %pM\n", dmi->dmi_addr);
#endif
@@ -3613,7 +3612,7 @@ wv_82593_config(struct net_device * dev)
outb(((TX_BASE >> 8) & PIORH_MASK) | PIORH_SEL_TX, PIORH(base));
outb(addrs_len & 0xff, PIOP(base)); /* byte count lsb */
outb((addrs_len >> 8), PIOP(base)); /* byte count msb */
- for(dmi=dev->mc_list; dmi; dmi=dmi->next)
+ netdev_for_each_mc_addr(dmi, dev)
outsb(PIOP(base), dmi->dmi_addr, dmi->dmi_addrlen);
/* reset transmit DMA pointer */
@@ -3622,7 +3621,8 @@ wv_82593_config(struct net_device * dev)
if(!wv_82593_cmd(dev, "wv_82593_config(): mc-setup",
OP0_MC_SETUP, SR0_MC_SETUP_DONE))
ret = FALSE;
- lp->mc_count = dev->mc_count; /* remember to avoid repeated reset */
+ /* remember to avoid repeated reset */
+ lp->mc_count = netdev_mc_count(dev);
}
/* Job done, clear the flag */
diff --git a/drivers/staging/wlags49_h2/wl_netdev.c b/drivers/staging/wlags49_h2/wl_netdev.c
index ac389024796..c33e225bc0e 100644
--- a/drivers/staging/wlags49_h2/wl_netdev.c
+++ b/drivers/staging/wlags49_h2/wl_netdev.c
@@ -1049,7 +1049,7 @@ void wl_multicast( struct net_device *dev )
//;?seems reasonable that even an AP-only driver could afford this small additional footprint
int x;
- struct dev_mc_list *mclist;
+ struct dev_mc_list *mclist;
struct wl_private *lp = wl_priv(dev);
unsigned long flags;
/*------------------------------------------------------------------------*/
@@ -1070,13 +1070,11 @@ void wl_multicast( struct net_device *dev )
( dev->flags & IFF_MULTICAST ) ? "Multicast " : "",
( dev->flags & IFF_ALLMULTI ) ? "All-Multicast" : "" );
- DBG_PRINT( " mc_count: %d\n", dev->mc_count );
+ DBG_PRINT( " mc_count: %d\n", netdev_mc_count(dev));
- for( x = 0, mclist = dev->mc_list; mclist && x < dev->mc_count;
- x++, mclist = mclist->next ) {
+ netdev_for_each_mc_addr(mclist, dev)
DBG_PRINT( " %s (%d)\n", DbgHwAddr(mclist->dmi_addr),
mclist->dmi_addrlen );
- }
}
#endif /* DBG */
@@ -1103,7 +1101,7 @@ void wl_multicast( struct net_device *dev )
DBG_PRINT( "Enabling Promiscuous mode (IFF_PROMISC)\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
}
- else if(( dev->mc_count > HCF_MAX_MULTICAST ) ||
+ else if ((netdev_mc_count(dev) > HCF_MAX_MULTICAST) ||
( dev->flags & IFF_ALLMULTI )) {
/* Shutting off this filter will enable all multicast frames to
be sent up from the device; however, this is a static RID, so
@@ -1115,17 +1113,15 @@ void wl_multicast( struct net_device *dev )
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
wl_apply( lp );
}
- else if( dev->mc_count != 0 ) {
+ else if (!netdev_mc_empty(dev)) {
/* Set the multicast addresses */
- lp->ltvRecord.len = ( dev->mc_count * 3 ) + 1;
+ lp->ltvRecord.len = ( netdev_mc_count(dev) * 3 ) + 1;
lp->ltvRecord.typ = CFG_GROUP_ADDR;
- for( x = 0, mclist = dev->mc_list;
- ( x < dev->mc_count ) && ( mclist != NULL );
- x++, mclist = mclist->next ) {
- memcpy( &( lp->ltvRecord.u.u8[x * ETH_ALEN] ),
- mclist->dmi_addr, ETH_ALEN );
- }
+ x = 0;
+ netdev_for_each_mc_addr(mclist, dev)
+ memcpy(&(lp->ltvRecord.u.u8[x++ * ETH_ALEN]),
+ mclist->dmi_addr, ETH_ALEN);
DBG_PRINT( "Setting multicast list\n" );
hcf_put_info( &( lp->hcfCtx ), (LTVP)&( lp->ltvRecord ));
} else {
@@ -1194,9 +1190,7 @@ static const struct net_device_ops wl_netdev_ops =
.ndo_stop = &wl_adapter_close,
.ndo_do_ioctl = &wl_ioctl,
-#ifdef HAVE_TX_TIMEOUT
.ndo_tx_timeout = &wl_tx_timeout,
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = wl_poll,
@@ -1270,9 +1264,7 @@ struct net_device * wl_device_alloc( void )
dev->stop = &wl_adapter_close;
dev->do_ioctl = &wl_ioctl;
-#ifdef HAVE_TX_TIMEOUT
dev->tx_timeout = &wl_tx_timeout;
-#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = wl_poll;
@@ -1280,9 +1272,7 @@ struct net_device * wl_device_alloc( void )
#endif // (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,30))
-#ifdef HAVE_TX_TIMEOUT
dev->watchdog_timeo = TX_TIMEOUT;
-#endif
dev->ethtool_ops = &wl_ethtool_ops;
diff --git a/drivers/usb/Kconfig b/drivers/usb/Kconfig
index 81aac7f4ca5..6a58cb1330c 100644
--- a/drivers/usb/Kconfig
+++ b/drivers/usb/Kconfig
@@ -21,6 +21,7 @@ config USB_ARCH_HAS_HCD
default y if USB_ARCH_HAS_EHCI
default y if PCMCIA && !M32R # sl811_cs
default y if ARM # SL-811
+ default y if BLACKFIN # SL-811
default y if SUPERH # r8a66597-hcd
default PCI
@@ -39,6 +40,7 @@ config USB_ARCH_HAS_OHCI
default y if ARCH_PNX4008 && I2C
default y if MFD_TC6393XB
default y if ARCH_W90X900
+ default y if ARCH_DAVINCI_DA8XX
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
@@ -61,7 +63,7 @@ config USB_ARCH_HAS_EHCI
default y if ARCH_W90X900
default y if ARCH_AT91SAM9G45
default y if ARCH_MXC
- default y if ARCH_OMAP34XX
+ default y if ARCH_OMAP3
default PCI
# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.
diff --git a/drivers/usb/Makefile b/drivers/usb/Makefile
index be3c9b80bc9..80b4008c89b 100644
--- a/drivers/usb/Makefile
+++ b/drivers/usb/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_USB_U132_HCD) += host/
obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_HWA_HCD) += host/
obj-$(CONFIG_USB_ISP1760_HCD) += host/
+obj-$(CONFIG_USB_IMX21_HCD) += host/
obj-$(CONFIG_USB_C67X00_HCD) += c67x00/
diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
index 56802d2e994..c89990f5e01 100644
--- a/drivers/usb/atm/cxacru.c
+++ b/drivers/usb/atm/cxacru.c
@@ -5,6 +5,7 @@
* Copyright (C) 2004 David Woodhouse, Duncan Sands, Roman Kagan
* Copyright (C) 2005 Duncan Sands, Roman Kagan (rkagan % mail ! ru)
* Copyright (C) 2007 Simon Arlott
+ * Copyright (C) 2009 Simon Arlott
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -43,7 +44,7 @@
#include "usbatm.h"
#define DRIVER_AUTHOR "Roman Kagan, David Woodhouse, Duncan Sands, Simon Arlott"
-#define DRIVER_VERSION "0.3"
+#define DRIVER_VERSION "0.4"
#define DRIVER_DESC "Conexant AccessRunner ADSL USB modem driver"
static const char cxacru_driver_name[] = "cxacru";
@@ -52,6 +53,7 @@ static const char cxacru_driver_name[] = "cxacru";
#define CXACRU_EP_DATA 0x02 /* Bulk in/out */
#define CMD_PACKET_SIZE 64 /* Should be maxpacket(ep)? */
+#define CMD_MAX_CONFIG ((CMD_PACKET_SIZE / 4 - 1) / 2)
/* Addresses */
#define PLLFCLK_ADDR 0x00350068
@@ -105,6 +107,26 @@ enum cxacru_cm_request {
CM_REQUEST_MAX,
};
+/* commands for interaction with the flash memory
+ *
+ * read: response is the contents of the first 60 bytes of flash memory
+ * write: request contains the 60 bytes of data to write to flash memory
+ * response is the contents of the first 60 bytes of flash memory
+ *
+ * layout: PP PP VV VV MM MM MM MM MM MM ?? ?? SS SS SS SS SS SS SS SS
+ * SS SS SS SS SS SS SS SS 00 00 00 00 00 00 00 00 00 00 00 00
+ * 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00
+ *
+ * P: le16 USB Product ID
+ * V: le16 USB Vendor ID
+ * M: be48 MAC Address
+ * S: le16 ASCII Serial Number
+ */
+enum cxacru_cm_flash {
+ CM_FLASH_READ = 0xa1,
+ CM_FLASH_WRITE = 0xa2
+};
+
/* reply codes to the commands above */
enum cxacru_cm_status {
CM_STATUS_UNDEFINED,
@@ -196,23 +218,32 @@ static DEVICE_ATTR(_name, S_IRUGO, cxacru_sysfs_show_##_name, NULL)
static DEVICE_ATTR(_name, S_IWUSR | S_IRUGO, \
cxacru_sysfs_show_##_name, cxacru_sysfs_store_##_name)
+#define CXACRU_SET_INIT(_name) \
+static DEVICE_ATTR(_name, S_IWUSR, \
+ NULL, cxacru_sysfs_store_##_name)
+
#define CXACRU_ATTR_INIT(_value, _type, _name) \
static ssize_t cxacru_sysfs_show_##_name(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
- struct usb_interface *intf = to_usb_interface(dev); \
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf); \
- struct cxacru_data *instance = usbatm_instance->driver_data; \
+ struct cxacru_data *instance = to_usbatm_driver_data(\
+ to_usb_interface(dev)); \
+\
+ if (instance == NULL) \
+ return -ENODEV; \
+\
return cxacru_sysfs_showattr_##_type(instance->card_info[_value], buf); \
} \
CXACRU__ATTR_INIT(_name)
#define CXACRU_ATTR_CREATE(_v, _t, _name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_CMD_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
+#define CXACRU_SET_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU__ATTR_CREATE(_name) CXACRU_DEVICE_CREATE_FILE(_name)
#define CXACRU_ATTR_REMOVE(_v, _t, _name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU_CMD_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
+#define CXACRU_SET_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
#define CXACRU__ATTR_REMOVE(_name) CXACRU_DEVICE_REMOVE_FILE(_name)
static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf)
@@ -267,12 +298,12 @@ static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf)
static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
{
static char *str[] = {
- NULL,
+ "",
"ANSI T1.413",
"ITU-T G.992.1 (G.DMT)",
"ITU-T G.992.2 (G.LITE)"
};
- if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL))
+ if (unlikely(value >= ARRAY_SIZE(str)))
return snprintf(buf, PAGE_SIZE, "%u\n", value);
return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
}
@@ -288,22 +319,28 @@ static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf)
static ssize_t cxacru_sysfs_show_mac_address(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
- struct atm_dev *atm_dev = usbatm_instance->atm_dev;
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
- return snprintf(buf, PAGE_SIZE, "%pM\n", atm_dev->esi);
+ if (instance == NULL || instance->usbatm->atm_dev == NULL)
+ return -ENODEV;
+
+ return snprintf(buf, PAGE_SIZE, "%pM\n",
+ instance->usbatm->atm_dev->esi);
}
static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
- struct cxacru_data *instance = usbatm_instance->driver_data;
- u32 value = instance->card_info[CXINF_LINE_STARTABLE];
-
static char *str[] = { "running", "stopped" };
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
+ u32 value;
+
+ if (instance == NULL)
+ return -ENODEV;
+
+ value = instance->card_info[CXINF_LINE_STARTABLE];
if (unlikely(value >= ARRAY_SIZE(str)))
return snprintf(buf, PAGE_SIZE, "%u\n", value);
return snprintf(buf, PAGE_SIZE, "%s\n", str[value]);
@@ -312,9 +349,8 @@ static ssize_t cxacru_sysfs_show_adsl_state(struct device *dev,
static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- struct usb_interface *intf = to_usb_interface(dev);
- struct usbatm_data *usbatm_instance = usb_get_intfdata(intf);
- struct cxacru_data *instance = usbatm_instance->driver_data;
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
int ret;
int poll = -1;
char str_cmd[8];
@@ -328,13 +364,16 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
return -EINVAL;
ret = 0;
+ if (instance == NULL)
+ return -ENODEV;
+
if (mutex_lock_interruptible(&instance->adsl_state_serialize))
return -ERESTARTSYS;
if (!strcmp(str_cmd, "stop") || !strcmp(str_cmd, "restart")) {
ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_STOP, NULL, 0, NULL, 0);
if (ret < 0) {
- atm_err(usbatm_instance, "change adsl state:"
+ atm_err(instance->usbatm, "change adsl state:"
" CHIP_ADSL_LINE_STOP returned %d\n", ret);
ret = -EIO;
@@ -354,7 +393,7 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
if (!strcmp(str_cmd, "start") || !strcmp(str_cmd, "restart")) {
ret = cxacru_cm(instance, CM_REQUEST_CHIP_ADSL_LINE_START, NULL, 0, NULL, 0);
if (ret < 0) {
- atm_err(usbatm_instance, "change adsl state:"
+ atm_err(instance->usbatm, "change adsl state:"
" CHIP_ADSL_LINE_START returned %d\n", ret);
ret = -EIO;
@@ -407,6 +446,72 @@ static ssize_t cxacru_sysfs_store_adsl_state(struct device *dev,
return ret;
}
+/* CM_REQUEST_CARD_DATA_GET times out, so no show attribute */
+
+static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct cxacru_data *instance = to_usbatm_driver_data(
+ to_usb_interface(dev));
+ int len = strlen(buf);
+ int ret, pos, num;
+ __le32 data[CMD_PACKET_SIZE / 4];
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EACCES;
+
+ if (instance == NULL)
+ return -ENODEV;
+
+ pos = 0;
+ num = 0;
+ while (pos < len) {
+ int tmp;
+ u32 index;
+ u32 value;
+
+ ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
+ if (ret < 2)
+ return -EINVAL;
+ if (index < 0 || index > 0x7f)
+ return -EINVAL;
+ pos += tmp;
+
+ /* skip trailing newline */
+ if (buf[pos] == '\n' && pos == len-1)
+ pos++;
+
+ data[num * 2 + 1] = cpu_to_le32(index);
+ data[num * 2 + 2] = cpu_to_le32(value);
+ num++;
+
+ /* send config values when data buffer is full
+ * or no more data
+ */
+ if (pos >= len || num >= CMD_MAX_CONFIG) {
+ char log[CMD_MAX_CONFIG * 12 + 1]; /* %02x=%08x */
+
+ data[0] = cpu_to_le32(num);
+ ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
+ (u8 *) data, 4 + num * 8, NULL, 0);
+ if (ret < 0) {
+ atm_err(instance->usbatm,
+ "set card data returned %d\n", ret);
+ return -EIO;
+ }
+
+ for (tmp = 0; tmp < num; tmp++)
+ snprintf(log + tmp*12, 13, " %02x=%08x",
+ le32_to_cpu(data[tmp * 2 + 1]),
+ le32_to_cpu(data[tmp * 2 + 2]));
+ atm_info(instance->usbatm, "config%s\n", log);
+ num = 0;
+ }
+ }
+
+ return len;
+}
+
/*
* All device attributes are included in CXACRU_ALL_FILES
* so that the same list can be used multiple times:
@@ -442,7 +547,8 @@ CXACRU_ATTR_##_action(CXINF_MODULATION, MODU, modulation); \
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND, u32, adsl_headend); \
CXACRU_ATTR_##_action(CXINF_ADSL_HEADEND_ENVIRONMENT, u32, adsl_headend_environment); \
CXACRU_ATTR_##_action(CXINF_CONTROLLER_VERSION, u32, adsl_controller_version); \
-CXACRU_CMD_##_action( adsl_state);
+CXACRU_CMD_##_action( adsl_state); \
+CXACRU_SET_##_action( adsl_config);
CXACRU_ALL_FILES(INIT);
@@ -596,7 +702,7 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ
len = ret / 4;
for (offb = 0; offb < len; ) {
int l = le32_to_cpu(buf[offb++]);
- if (l > stride || l > (len - offb) / 2) {
+ if (l < 0 || l > stride || l > (len - offb) / 2) {
if (printk_ratelimit())
usb_err(instance->usbatm, "invalid data length from cm %#x: %d\n",
cm, l);
@@ -649,9 +755,6 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
{
struct cxacru_data *instance = usbatm_instance->driver_data;
struct usb_interface *intf = usbatm_instance->usb_intf;
- /*
- struct atm_dev *atm_dev = usbatm_instance->atm_dev;
- */
int ret;
int start_polling = 1;
@@ -697,6 +800,9 @@ static int cxacru_atm_start(struct usbatm_data *usbatm_instance,
mutex_unlock(&instance->poll_state_serialize);
mutex_unlock(&instance->adsl_state_serialize);
+ printk(KERN_INFO "%s%d: %s %pM\n", atm_dev->type, atm_dev->number,
+ usbatm_instance->description, atm_dev->esi);
+
if (start_polling)
cxacru_poll_status(&instance->poll_work.work);
return 0;
@@ -873,11 +979,9 @@ cleanup:
static void cxacru_upload_firmware(struct cxacru_data *instance,
const struct firmware *fw,
- const struct firmware *bp,
- const struct firmware *cf)
+ const struct firmware *bp)
{
int ret;
- int off;
struct usbatm_data *usbatm = instance->usbatm;
struct usb_device *usb_dev = usbatm->usb_dev;
__le16 signature[] = { usb_dev->descriptor.idVendor,
@@ -911,6 +1015,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
}
/* Firmware */
+ usb_info(usbatm, "loading firmware\n");
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, FW_ADDR, fw->data, fw->size);
if (ret) {
usb_err(usbatm, "Firmware upload failed: %d\n", ret);
@@ -919,6 +1024,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
/* Boot ROM patch */
if (instance->modem_type->boot_rom_patch) {
+ usb_info(usbatm, "loading boot ROM patch\n");
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_ADDR, bp->data, bp->size);
if (ret) {
usb_err(usbatm, "Boot ROM patching failed: %d\n", ret);
@@ -933,6 +1039,7 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
return;
}
+ usb_info(usbatm, "starting device\n");
if (instance->modem_type->boot_rom_patch) {
val = cpu_to_le32(BR_ADDR);
ret = cxacru_fw(usb_dev, FW_WRITE_MEM, 0x2, 0x0, BR_STACK_ADDR, (u8 *) &val, 4);
@@ -958,26 +1065,6 @@ static void cxacru_upload_firmware(struct cxacru_data *instance,
usb_err(usbatm, "modem failed to initialize: %d\n", ret);
return;
}
-
- /* Load config data (le32), doing one packet at a time */
- if (cf)
- for (off = 0; off < cf->size / 4; ) {
- __le32 buf[CMD_PACKET_SIZE / 4 - 1];
- int i, len = min_t(int, cf->size / 4 - off, CMD_PACKET_SIZE / 4 / 2 - 1);
- buf[0] = cpu_to_le32(len);
- for (i = 0; i < len; i++, off++) {
- buf[i * 2 + 1] = cpu_to_le32(off);
- memcpy(buf + i * 2 + 2, cf->data + off * 4, 4);
- }
- ret = cxacru_cm(instance, CM_REQUEST_CARD_DATA_SET,
- (u8 *) buf, len, NULL, 0);
- if (ret < 0) {
- usb_err(usbatm, "load config data failed: %d\n", ret);
- return;
- }
- }
-
- msleep_interruptible(4000);
}
static int cxacru_find_firmware(struct cxacru_data *instance,
@@ -1003,7 +1090,7 @@ static int cxacru_find_firmware(struct cxacru_data *instance,
static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
struct usb_interface *usb_intf)
{
- const struct firmware *fw, *bp, *cf;
+ const struct firmware *fw, *bp;
struct cxacru_data *instance = usbatm_instance->driver_data;
int ret = cxacru_find_firmware(instance, "fw", &fw);
@@ -1021,13 +1108,8 @@ static int cxacru_heavy_init(struct usbatm_data *usbatm_instance,
}
}
- if (cxacru_find_firmware(instance, "cf", &cf)) /* optional */
- cf = NULL;
-
- cxacru_upload_firmware(instance, fw, bp, cf);
+ cxacru_upload_firmware(instance, fw, bp);
- if (cf)
- release_firmware(cf);
if (instance->modem_type->boot_rom_patch)
release_firmware(bp);
release_firmware(fw);
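The new adsl_config attribute replaces the optional "cf" firmware blob that cxacru_upload_firmware() used to push: index=value pairs parsed from the sysfs write are batched into CM_REQUEST_CARD_DATA_SET packets, each holding a count followed by little-endian (index, value) pairs. A sketch of how one packet is laid out before being handed to cxacru_cm() (buffer size and limit follow the CMD_PACKET_SIZE/CMD_MAX_CONFIG definitions added above; the helper name is illustrative):

/* pack up to CMD_MAX_CONFIG (index, value) pairs for CM_REQUEST_CARD_DATA_SET:
 *      data[0]      = number of pairs
 *      data[2n + 1] = index of pair n
 *      data[2n + 2] = value of pair n
 * all fields are little-endian 32-bit words
 */
static int example_pack_card_data(__le32 data[CMD_PACKET_SIZE / 4],
                                  const u32 *index, const u32 *value, int num)
{
        int n;

        if (num > CMD_MAX_CONFIG)
                return -EINVAL;

        data[0] = cpu_to_le32(num);
        for (n = 0; n < num; n++) {
                data[n * 2 + 1] = cpu_to_le32(index[n]);
                data[n * 2 + 2] = cpu_to_le32(value[n]);
        }
        /* the caller then sends 4 + num * 8 bytes of this buffer */
        return 4 + num * 8;
}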
diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
index fbea8563df1..9b53e8df464 100644
--- a/drivers/usb/atm/usbatm.c
+++ b/drivers/usb/atm/usbatm.c
@@ -1333,6 +1333,7 @@ void usbatm_usb_disconnect(struct usb_interface *intf)
if (instance->atm_dev) {
sysfs_remove_link(&instance->atm_dev->class_dev.kobj, "device");
atm_dev_deregister(instance->atm_dev);
+ instance->atm_dev = NULL;
}
usbatm_put_instance(instance); /* taken in usbatm_usb_probe */
@@ -1348,7 +1349,7 @@ static int __init usbatm_usb_init(void)
{
dbg("%s: driver version %s", __func__, DRIVER_VERSION);
- if (sizeof(struct usbatm_control) > sizeof(((struct sk_buff *) 0)->cb)) {
+ if (sizeof(struct usbatm_control) > FIELD_SIZEOF(struct sk_buff, cb)) {
printk(KERN_ERR "%s unusable with this kernel!\n", usbatm_driver_name);
return -EIO;
}
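FIELD_SIZEOF() is the kernel.h helper for taking the size of a single struct member without needing an instance, so the sanity check above now reads as an explicit comparison against sk_buff's cb[] scratch area. For reference (reproduced from memory, not from this patch):

/* include/linux/kernel.h */
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

/* FIELD_SIZEOF(struct sk_buff, cb) therefore evaluates to the same value as the
 * old open-coded sizeof(((struct sk_buff *) 0)->cb) expression it replaces */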
diff --git a/drivers/usb/atm/usbatm.h b/drivers/usb/atm/usbatm.h
index f6f4508a9d4..0863f85fcc2 100644
--- a/drivers/usb/atm/usbatm.h
+++ b/drivers/usb/atm/usbatm.h
@@ -204,4 +204,19 @@ struct usbatm_data {
struct urb *urbs[0];
};
+static inline void *to_usbatm_driver_data(struct usb_interface *intf)
+{
+ struct usbatm_data *usbatm_instance;
+
+ if (intf == NULL)
+ return NULL;
+
+ usbatm_instance = usb_get_intfdata(intf);
+
+ if (usbatm_instance == NULL) /* set NULL before unbind() */
+ return NULL;
+
+ return usbatm_instance->driver_data; /* set NULL after unbind() */
+}
+
#endif /* _USBATM_H_ */
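All of the cxacru sysfs handlers above funnel through this helper so that an attribute access racing with unbind sees NULL and returns -ENODEV instead of dereferencing stale driver data. A minimal sketch of an attribute built on it, modelled on the adsl_state handler (the function name is illustrative):

static ssize_t example_show_line_state(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        /* dev is the USB interface device the attribute hangs off */
        struct cxacru_data *instance =
                to_usbatm_driver_data(to_usb_interface(dev));

        if (instance == NULL)           /* driver already unbound */
                return -ENODEV;

        return snprintf(buf, PAGE_SIZE, "%u\n",
                        instance->card_info[CXINF_LINE_STARTABLE]);
}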
diff --git a/drivers/usb/c67x00/c67x00-drv.c b/drivers/usb/c67x00/c67x00-drv.c
index 5633bc5c8bf..029ee4a8a1f 100644
--- a/drivers/usb/c67x00/c67x00-drv.c
+++ b/drivers/usb/c67x00/c67x00-drv.c
@@ -137,13 +137,13 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev)
if (!c67x00)
return -ENOMEM;
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
pdev->name)) {
dev_err(&pdev->dev, "Memory region busy\n");
ret = -EBUSY;
goto request_mem_failed;
}
- c67x00->hpi.base = ioremap(res->start, res->end - res->start + 1);
+ c67x00->hpi.base = ioremap(res->start, resource_size(res));
if (!c67x00->hpi.base) {
dev_err(&pdev->dev, "Unable to map HPI registers\n");
ret = -EIO;
@@ -182,7 +182,7 @@ static int __devinit c67x00_drv_probe(struct platform_device *pdev)
request_irq_failed:
iounmap(c67x00->hpi.base);
map_failed:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
request_mem_failed:
kfree(c67x00);
@@ -208,7 +208,7 @@ static int __devexit c67x00_drv_remove(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res)
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
kfree(c67x00);
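resource_size() from linux/ioport.h encapsulates the off-by-one-prone "end - start + 1" arithmetic, so the c67x00 changes above are a pure substitution. For reference, the helper amounts to:

/* include/linux/ioport.h */
static inline resource_size_t resource_size(const struct resource *res)
{
        return res->end - res->start + 1;
}

/* usage, as in c67x00_drv_probe():
 *      request_mem_region(res->start, resource_size(res), pdev->name);
 *      c67x00->hpi.base = ioremap(res->start, resource_size(res));
 */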
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 34d4eb98829..975d556b478 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -170,6 +170,7 @@ static void acm_write_done(struct acm *acm, struct acm_wb *wb)
{
wb->use = 0;
acm->transmitting--;
+ usb_autopm_put_interface_async(acm->control);
}
/*
@@ -211,9 +212,12 @@ static int acm_write_start(struct acm *acm, int wbn)
}
dbg("%s susp_count: %d", __func__, acm->susp_count);
+ usb_autopm_get_interface_async(acm->control);
if (acm->susp_count) {
- acm->delayed_wb = wb;
- schedule_work(&acm->waker);
+ if (!acm->delayed_wb)
+ acm->delayed_wb = wb;
+ else
+ usb_autopm_put_interface_async(acm->control);
spin_unlock_irqrestore(&acm->write_lock, flags);
return 0; /* A white lie */
}
@@ -424,7 +428,6 @@ next_buffer:
throttled = acm->throttle;
spin_unlock_irqrestore(&acm->throttle_lock, flags);
if (!throttled) {
- tty_buffer_request_room(tty, buf->size);
tty_insert_flip_string(tty, buf->base, buf->size);
tty_flip_buffer_push(tty);
} else {
@@ -534,23 +537,6 @@ static void acm_softint(struct work_struct *work)
tty_kref_put(tty);
}
-static void acm_waker(struct work_struct *waker)
-{
- struct acm *acm = container_of(waker, struct acm, waker);
- int rv;
-
- rv = usb_autopm_get_interface(acm->control);
- if (rv < 0) {
- dev_err(&acm->dev->dev, "Autopm failure in %s\n", __func__);
- return;
- }
- if (acm->delayed_wb) {
- acm_start_wb(acm, acm->delayed_wb);
- acm->delayed_wb = NULL;
- }
- usb_autopm_put_interface(acm->control);
-}
-
/*
* TTY handlers
*/
@@ -566,7 +552,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
acm = acm_table[tty->index];
if (!acm || !acm->dev)
- goto err_out;
+ goto out;
else
rv = 0;
@@ -582,8 +568,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
mutex_lock(&acm->mutex);
if (acm->port.count++) {
+ mutex_unlock(&acm->mutex);
usb_autopm_put_interface(acm->control);
- goto done;
+ goto out;
}
acm->ctrlurb->dev = acm->dev;
@@ -612,18 +599,18 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
rv = tty_port_block_til_ready(&acm->port, tty, filp);
tasklet_schedule(&acm->urb_task);
-done:
+
mutex_unlock(&acm->mutex);
-err_out:
+out:
mutex_unlock(&open_mutex);
return rv;
full_bailout:
usb_kill_urb(acm->ctrlurb);
bail_out:
- usb_autopm_put_interface(acm->control);
acm->port.count--;
mutex_unlock(&acm->mutex);
+ usb_autopm_put_interface(acm->control);
early_bail:
mutex_unlock(&open_mutex);
tty_port_tty_set(&acm->port, NULL);
@@ -1023,7 +1010,7 @@ static int acm_probe(struct usb_interface *intf,
case USB_CDC_CALL_MANAGEMENT_TYPE:
call_management_function = buffer[3];
call_interface_num = buffer[4];
- if ((call_management_function & 3) != 3)
+ if ( (quirks & NOT_A_MODEM) == 0 && (call_management_function & 3) != 3)
dev_err(&intf->dev, "This device cannot do calls on its own. It is not a modem.\n");
break;
default:
@@ -1178,7 +1165,6 @@ made_compressed_probe:
acm->urb_task.func = acm_rx_tasklet;
acm->urb_task.data = (unsigned long) acm;
INIT_WORK(&acm->work, acm_softint);
- INIT_WORK(&acm->waker, acm_waker);
init_waitqueue_head(&acm->drain_wait);
spin_lock_init(&acm->throttle_lock);
spin_lock_init(&acm->write_lock);
@@ -1343,7 +1329,6 @@ static void stop_data_traffic(struct acm *acm)
tasklet_enable(&acm->urb_task);
cancel_work_sync(&acm->work);
- cancel_work_sync(&acm->waker);
}
static void acm_disconnect(struct usb_interface *intf)
@@ -1435,6 +1420,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
static int acm_resume(struct usb_interface *intf)
{
struct acm *acm = usb_get_intfdata(intf);
+ struct acm_wb *wb;
int rv = 0;
int cnt;
@@ -1449,6 +1435,21 @@ static int acm_resume(struct usb_interface *intf)
mutex_lock(&acm->mutex);
if (acm->port.count) {
rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
+
+ spin_lock_irq(&acm->write_lock);
+ if (acm->delayed_wb) {
+ wb = acm->delayed_wb;
+ acm->delayed_wb = NULL;
+ spin_unlock_irq(&acm->write_lock);
+ acm_start_wb(acm, wb);
+ } else {
+ spin_unlock_irq(&acm->write_lock);
+ }
+
+ /*
+ * delayed error checking because we must
+ * do the write path at all cost
+ */
if (rv < 0)
goto err_out;
@@ -1460,6 +1461,23 @@ err_out:
return rv;
}
+static int acm_reset_resume(struct usb_interface *intf)
+{
+ struct acm *acm = usb_get_intfdata(intf);
+ struct tty_struct *tty;
+
+ mutex_lock(&acm->mutex);
+ if (acm->port.count) {
+ tty = tty_port_tty_get(&acm->port);
+ if (tty) {
+ tty_hangup(tty);
+ tty_kref_put(tty);
+ }
+ }
+ mutex_unlock(&acm->mutex);
+ return acm_resume(intf);
+}
+
#endif /* CONFIG_PM */
#define NOKIA_PCSUITE_ACM_INFO(x) \
@@ -1471,7 +1489,7 @@ err_out:
* USB driver structure.
*/
-static struct usb_device_id acm_ids[] = {
+static const struct usb_device_id acm_ids[] = {
/* quirky and broken devices */
{ USB_DEVICE(0x0870, 0x0001), /* Metricom GS Modem */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
@@ -1576,6 +1594,11 @@ static struct usb_device_id acm_ids[] = {
/* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! */
+ /* Support Lego NXT using pbLua firmware */
+ { USB_DEVICE(0x0694, 0xff00),
+ .driver_info = NOT_A_MODEM,
+ },
+
/* control interfaces with various AT-command sets */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_ACM_PROTO_AT_V25TER) },
@@ -1602,6 +1625,7 @@ static struct usb_driver acm_driver = {
#ifdef CONFIG_PM
.suspend = acm_suspend,
.resume = acm_resume,
+ .reset_resume = acm_reset_resume,
#endif
.id_table = acm_ids,
#ifdef CONFIG_PM
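With acm_waker gone, a write issued while the device is autosuspended simply parks one write buffer in acm->delayed_wb (keeping the autopm reference acm_write_start() took), and acm_resume() submits it once the hardware is awake. The intended locking is: claim the buffer under write_lock, drop the lock, then submit, so the URB is never submitted with the spinlock held. A sketch of that pattern (not the full resume path):

static void example_flush_delayed_write(struct acm *acm)
{
        struct acm_wb *wb;

        spin_lock_irq(&acm->write_lock);
        wb = acm->delayed_wb;
        acm->delayed_wb = NULL;
        spin_unlock_irq(&acm->write_lock);

        /* pass the local pointer: acm->delayed_wb is already NULL here */
        if (wb)
                acm_start_wb(acm, wb);
}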
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index c4a0ee8ffcc..4a8e87ec6ce 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -112,7 +112,6 @@ struct acm {
struct mutex mutex;
struct usb_cdc_line_coding line; /* bits, stop, parity */
struct work_struct work; /* work queue entry for line discipline waking up */
- struct work_struct waker;
wait_queue_head_t drain_wait; /* close processing */
struct tasklet_struct urb_task; /* rx processing */
spinlock_t throttle_lock; /* synchronize throtteling and read callback */
@@ -137,3 +136,4 @@ struct acm {
#define NO_UNION_NORMAL 1
#define SINGLE_RX_URB 2
#define NO_CAP_LINE 4
+#define NOT_A_MODEM 8
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 3e564bfe17d..18aafcb08fc 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -31,7 +31,7 @@
#define DRIVER_AUTHOR "Oliver Neukum"
#define DRIVER_DESC "USB Abstract Control Model driver for USB WCM Device Management"
-static struct usb_device_id wdm_ids[] = {
+static const struct usb_device_id wdm_ids[] = {
{
.match_flags = USB_DEVICE_ID_MATCH_INT_CLASS |
USB_DEVICE_ID_MATCH_INT_SUBCLASS,
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 9bc112ee780..93b5f85d7ce 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -163,7 +163,6 @@ struct usblp {
unsigned char used; /* True if open */
unsigned char present; /* True if not disconnected */
unsigned char bidir; /* interface is bidirectional */
- unsigned char sleeping; /* interface is suspended */
unsigned char no_paper; /* Paper Out happened */
unsigned char *device_id_string; /* IEEE 1284 DEVICE ID string (ptr) */
/* first 2 bytes are (big-endian) length */
@@ -191,7 +190,6 @@ static void usblp_dump(struct usblp *usblp) {
dbg("quirks=%d", usblp->quirks);
dbg("used=%d", usblp->used);
dbg("bidir=%d", usblp->bidir);
- dbg("sleeping=%d", usblp->sleeping);
dbg("device_id_string=\"%s\"",
usblp->device_id_string ?
usblp->device_id_string + 2 :
@@ -376,7 +374,7 @@ static int usblp_check_status(struct usblp *usblp, int err)
static int handle_bidir (struct usblp *usblp)
{
- if (usblp->bidir && usblp->used && !usblp->sleeping) {
+ if (usblp->bidir && usblp->used) {
if (usblp_submit_read(usblp) < 0)
return -EIO;
}
@@ -503,11 +501,6 @@ static long usblp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
goto done;
}
- if (usblp->sleeping) {
- retval = -ENODEV;
- goto done;
- }
-
dbg("usblp_ioctl: cmd=0x%x (%c nr=%d len=%d dir=%d)", cmd, _IOC_TYPE(cmd),
_IOC_NR(cmd), _IOC_SIZE(cmd), _IOC_DIR(cmd) );
@@ -914,8 +907,6 @@ static int usblp_wtest(struct usblp *usblp, int nonblock)
return 0;
}
spin_unlock_irqrestore(&usblp->lock, flags);
- if (usblp->sleeping)
- return -ENODEV;
if (nonblock)
return -EAGAIN;
return 1;
@@ -968,8 +959,6 @@ static int usblp_rtest(struct usblp *usblp, int nonblock)
return 0;
}
spin_unlock_irqrestore(&usblp->lock, flags);
- if (usblp->sleeping)
- return -ENODEV;
if (nonblock)
return -EAGAIN;
return 1;
@@ -1377,12 +1366,10 @@ static void usblp_disconnect(struct usb_interface *intf)
mutex_unlock (&usblp_mutex);
}
-static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
+static int usblp_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usblp *usblp = usb_get_intfdata (intf);
- /* we take no more IO */
- usblp->sleeping = 1;
usblp_unlink_urbs(usblp);
#if 0 /* XXX Do we want this? What if someone is reading, should we fail? */
/* not strictly necessary, but just in case */
@@ -1393,18 +1380,17 @@ static int usblp_suspend (struct usb_interface *intf, pm_message_t message)
return 0;
}
-static int usblp_resume (struct usb_interface *intf)
+static int usblp_resume(struct usb_interface *intf)
{
struct usblp *usblp = usb_get_intfdata (intf);
int r;
- usblp->sleeping = 0;
r = handle_bidir (usblp);
return r;
}
-static struct usb_device_id usblp_ids [] = {
+static const struct usb_device_id usblp_ids[] = {
{ USB_DEVICE_INFO(7, 1, 1) },
{ USB_DEVICE_INFO(7, 1, 2) },
{ USB_DEVICE_INFO(7, 1, 3) },
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 7c5f4e32c92..8588c0937a8 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -48,7 +48,7 @@
*/
#define USBTMC_MAX_READS_TO_CLEAR_BULK_IN 100
-static struct usb_device_id usbtmc_devices[] = {
+static const struct usb_device_id usbtmc_devices[] = {
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 0), },
{ USB_INTERFACE_INFO(USB_CLASS_APP_SPEC, 3, 1), },
{ 0, } /* terminating entry */
diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig
index ad925946f86..97a819c23ef 100644
--- a/drivers/usb/core/Kconfig
+++ b/drivers/usb/core/Kconfig
@@ -91,8 +91,8 @@ config USB_DYNAMIC_MINORS
If you are unsure about this, say N here.
config USB_SUSPEND
- bool "USB selective suspend/resume and wakeup"
- depends on USB && PM
+ bool "USB runtime power management (suspend/resume and wakeup)"
+ depends on USB && PM_RUNTIME
help
If you say Y here, you can use driver calls or the sysfs
"power/level" file to suspend or resume individual USB
diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
index 355dffcc23b..d41811bfef2 100644
--- a/drivers/usb/core/devices.c
+++ b/drivers/usb/core/devices.c
@@ -117,12 +117,20 @@ static const char *format_endpt =
* However, these will come from functions that return ptrs to each of them.
*/
-static DECLARE_WAIT_QUEUE_HEAD(deviceconndiscwq);
-static unsigned int conndiscevcnt;
-
-/* this struct stores the poll state for <mountpoint>/devices pollers */
-struct usb_device_status {
- unsigned int lastev;
+/*
+ * Wait for a connect/disconnect event to happen. We initialize
+ * the event counter with an odd number, and each event will increment
+ * the event counter by two, so it will always _stay_ odd. That means
+ * that it will never be zero, so "event 0" will never match a current
+ * event, and thus 'poll' will always trigger as readable for the first
+ * time it gets called.
+ */
+static struct device_connect_event {
+ atomic_t count;
+ wait_queue_head_t wait;
+} device_event = {
+ .count = ATOMIC_INIT(1),
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER(device_event.wait)
};
struct class_info {
@@ -156,8 +164,8 @@ static const struct class_info clas_info[] =
void usbfs_conn_disc_event(void)
{
- conndiscevcnt++;
- wake_up(&deviceconndiscwq);
+ atomic_add(2, &device_event.count);
+ wake_up(&device_event.wait);
}
static const char *class_decode(const int class)
@@ -629,55 +637,16 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
static unsigned int usb_device_poll(struct file *file,
struct poll_table_struct *wait)
{
- struct usb_device_status *st = file->private_data;
- unsigned int mask = 0;
-
- lock_kernel();
- if (!st) {
- st = kmalloc(sizeof(struct usb_device_status), GFP_KERNEL);
-
- /* we may have dropped BKL -
- * need to check for having lost the race */
- if (file->private_data) {
- kfree(st);
- st = file->private_data;
- goto lost_race;
- }
- /* we haven't lost - check for allocation failure now */
- if (!st) {
- unlock_kernel();
- return POLLIN;
- }
+ unsigned int event_count;
- /*
- * need to prevent the module from being unloaded, since
- * proc_unregister does not call the release method and
- * we would have a memory leak
- */
- st->lastev = conndiscevcnt;
- file->private_data = st;
- mask = POLLIN;
- }
-lost_race:
- if (file->f_mode & FMODE_READ)
- poll_wait(file, &deviceconndiscwq, wait);
- if (st->lastev != conndiscevcnt)
- mask |= POLLIN;
- st->lastev = conndiscevcnt;
- unlock_kernel();
- return mask;
-}
+ poll_wait(file, &device_event.wait, wait);
-static int usb_device_open(struct inode *inode, struct file *file)
-{
- file->private_data = NULL;
- return 0;
-}
+ event_count = atomic_read(&device_event.count);
+ if (file->f_version != event_count) {
+ file->f_version = event_count;
+ return POLLIN | POLLRDNORM;
+ }
-static int usb_device_release(struct inode *inode, struct file *file)
-{
- kfree(file->private_data);
- file->private_data = NULL;
return 0;
}
@@ -685,7 +654,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- lock_kernel();
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (orig) {
case 0:
@@ -701,7 +670,7 @@ static loff_t usb_device_lseek(struct file *file, loff_t offset, int orig)
ret = -EINVAL;
}
- unlock_kernel();
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return ret;
}
@@ -709,6 +678,4 @@ const struct file_operations usbfs_devices_fops = {
.llseek = usb_device_lseek,
.read = usb_device_read,
.poll = usb_device_poll,
- .open = usb_device_open,
- .release = usb_device_release,
};
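
Restated outside the diff as a minimal, self-contained sketch (the example_* names are illustrative): the poll rewrite above relies on keeping the event counter odd, so it can never equal the zero-initialized file->f_version, and the first poll() on a newly opened file therefore always reports readable.

	static atomic_t example_count = ATOMIC_INIT(1);	/* starts odd, stays odd */
	static DECLARE_WAIT_QUEUE_HEAD(example_wait);

	static void example_event(void)
	{
		atomic_add(2, &example_count);		/* odd + 2 remains odd */
		wake_up(&example_wait);
	}

	static unsigned int example_poll(struct file *file,
					 struct poll_table_struct *wait)
	{
		unsigned int count;

		poll_wait(file, &example_wait, wait);
		count = atomic_read(&example_count);
		if (file->f_version != count) {		/* f_version starts at 0 */
			file->f_version = count;
			return POLLIN | POLLRDNORM;
		}
		return 0;
	}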
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index a678186f218..e909ff7b909 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -122,7 +122,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
{
loff_t ret;
- lock_kernel();
+ mutex_lock(&file->f_dentry->d_inode->i_mutex);
switch (orig) {
case 0:
@@ -138,7 +138,7 @@ static loff_t usbdev_lseek(struct file *file, loff_t offset, int orig)
ret = -EINVAL;
}
- unlock_kernel();
+ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
return ret;
}
@@ -310,7 +310,8 @@ static struct async *async_getpending(struct dev_state *ps,
static void snoop_urb(struct usb_device *udev,
void __user *userurb, int pipe, unsigned length,
- int timeout_or_status, enum snoop_when when)
+ int timeout_or_status, enum snoop_when when,
+ unsigned char *data, unsigned data_len)
{
static const char *types[] = {"isoc", "int", "ctrl", "bulk"};
static const char *dirs[] = {"out", "in"};
@@ -344,6 +345,11 @@ static void snoop_urb(struct usb_device *udev,
"status %d\n",
ep, t, d, length, timeout_or_status);
}
+
+ if (data && data_len > 0) {
+ print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
+ data, data_len, 1);
+ }
}
#define AS_CONTINUATION 1
@@ -410,7 +416,9 @@ static void async_completed(struct urb *urb)
}
snoop(&urb->dev->dev, "urb complete\n");
snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
- as->status, COMPLETE);
+ as->status, COMPLETE,
+ ((urb->transfer_flags & URB_DIR_MASK) == USB_DIR_OUT) ?
+ NULL : urb->transfer_buffer, urb->actual_length);
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
@@ -653,20 +661,20 @@ static int usbdev_open(struct inode *inode, struct file *file)
const struct cred *cred = current_cred();
int ret;
- lock_kernel();
- /* Protect against simultaneous removal or release */
- mutex_lock(&usbfs_mutex);
-
ret = -ENOMEM;
ps = kmalloc(sizeof(struct dev_state), GFP_KERNEL);
if (!ps)
- goto out;
+ goto out_free_ps;
ret = -ENODEV;
+ /* Protect against simultaneous removal or release */
+ mutex_lock(&usbfs_mutex);
+
/* usbdev device-node */
if (imajor(inode) == USB_DEVICE_MAJOR)
dev = usbdev_lookup_by_devt(inode->i_rdev);
+
#ifdef CONFIG_USB_DEVICEFS
/* procfs file */
if (!dev) {
@@ -678,13 +686,19 @@ static int usbdev_open(struct inode *inode, struct file *file)
dev = NULL;
}
#endif
- if (!dev || dev->state == USB_STATE_NOTATTACHED)
- goto out;
+ mutex_unlock(&usbfs_mutex);
+
+ if (!dev)
+ goto out_free_ps;
+
+ usb_lock_device(dev);
+ if (dev->state == USB_STATE_NOTATTACHED)
+ goto out_unlock_device;
+
ret = usb_autoresume_device(dev);
if (ret)
- goto out;
+ goto out_unlock_device;
- ret = 0;
ps->dev = dev;
ps->file = file;
spin_lock_init(&ps->lock);
@@ -702,15 +716,16 @@ static int usbdev_open(struct inode *inode, struct file *file)
smp_wmb();
list_add_tail(&ps->list, &dev->filelist);
file->private_data = ps;
+ usb_unlock_device(dev);
snoop(&dev->dev, "opened by process %d: %s\n", task_pid_nr(current),
current->comm);
- out:
- if (ret) {
- kfree(ps);
- usb_put_dev(dev);
- }
- mutex_unlock(&usbfs_mutex);
- unlock_kernel();
+ return ret;
+
+ out_unlock_device:
+ usb_unlock_device(dev);
+ usb_put_dev(dev);
+ out_free_ps:
+ kfree(ps);
return ret;
}
@@ -724,10 +739,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
usb_lock_device(dev);
usb_hub_release_all_ports(dev, ps);
- /* Protect against simultaneous open */
- mutex_lock(&usbfs_mutex);
list_del_init(&ps->list);
- mutex_unlock(&usbfs_mutex);
for (ifnum = 0; ps->ifclaimed && ifnum < 8*sizeof(ps->ifclaimed);
ifnum++) {
@@ -770,6 +782,13 @@ static int proc_control(struct dev_state *ps, void __user *arg)
if (!tbuf)
return -ENOMEM;
tmo = ctrl.timeout;
+ snoop(&dev->dev, "control urb: bRequestType=%02x "
+ "bRequest=%02x wValue=%04x "
+ "wIndex=%04x wLength=%04x\n",
+ ctrl.bRequestType, ctrl.bRequest,
+ __le16_to_cpup(&ctrl.wValue),
+ __le16_to_cpup(&ctrl.wIndex),
+ __le16_to_cpup(&ctrl.wLength));
if (ctrl.bRequestType & 0x80) {
if (ctrl.wLength && !access_ok(VERIFY_WRITE, ctrl.data,
ctrl.wLength)) {
@@ -777,15 +796,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
return -EINVAL;
}
pipe = usb_rcvctrlpipe(dev, 0);
- snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usb_control_msg(dev, pipe, ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
-
+ snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE,
+ tbuf, i);
if ((i > 0) && ctrl.wLength) {
if (copy_to_user(ctrl.data, tbuf, i)) {
free_page((unsigned long)tbuf);
@@ -800,14 +819,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
}
}
pipe = usb_sndctrlpipe(dev, 0);
- snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT,
+ tbuf, ctrl.wLength);
usb_unlock_device(dev);
i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
+ snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE, NULL, 0);
}
free_page((unsigned long)tbuf);
if (i < 0 && i != -EPIPE) {
@@ -853,12 +873,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
kfree(tbuf);
return -EINVAL;
}
- snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, NULL, 0);
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
+ snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, tbuf, len2);
if (!i && len2) {
if (copy_to_user(bulk.data, tbuf, len2)) {
@@ -873,12 +893,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
return -EFAULT;
}
}
- snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
+ snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT, tbuf, len1);
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
- snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
+ snoop_urb(dev, NULL, pipe, len2, i, COMPLETE, NULL, 0);
}
kfree(tbuf);
if (i < 0)
@@ -1097,6 +1117,13 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
is_in = 0;
uurb->endpoint &= ~USB_DIR_IN;
}
+ snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
+ "bRequest=%02x wValue=%04x "
+ "wIndex=%04x wLength=%04x\n",
+ dr->bRequestType, dr->bRequest,
+ __le16_to_cpup(&dr->wValue),
+ __le16_to_cpup(&dr->wIndex),
+ __le16_to_cpup(&dr->wLength));
break;
case USBDEVFS_URB_TYPE_BULK:
@@ -1104,13 +1131,25 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
return -EINVAL;
- /* allow single-shot interrupt transfers, at bogus rates */
+ case USB_ENDPOINT_XFER_INT:
+ /* allow single-shot interrupt transfers */
+ uurb->type = USBDEVFS_URB_TYPE_INTERRUPT;
+ goto interrupt_urb;
}
uurb->number_of_packets = 0;
if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
return -EINVAL;
break;
+ case USBDEVFS_URB_TYPE_INTERRUPT:
+ if (!usb_endpoint_xfer_int(&ep->desc))
+ return -EINVAL;
+ interrupt_urb:
+ uurb->number_of_packets = 0;
+ if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
+ return -EINVAL;
+ break;
+
case USBDEVFS_URB_TYPE_ISO:
/* arbitrary limit */
if (uurb->number_of_packets < 1 ||
@@ -1143,14 +1182,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
uurb->buffer_length = totlen;
break;
- case USBDEVFS_URB_TYPE_INTERRUPT:
- uurb->number_of_packets = 0;
- if (!usb_endpoint_xfer_int(&ep->desc))
- return -EINVAL;
- if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
- return -EINVAL;
- break;
-
default:
return -EINVAL;
}
@@ -1236,7 +1267,9 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
}
}
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
- as->urb->transfer_buffer_length, 0, SUBMIT);
+ as->urb->transfer_buffer_length, 0, SUBMIT,
+ is_in ? NULL : as->urb->transfer_buffer,
+ uurb->buffer_length);
async_newpending(as);
if (usb_endpoint_xfer_bulk(&ep->desc)) {
@@ -1274,7 +1307,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
dev_printk(KERN_DEBUG, &ps->dev->dev,
"usbfs: usb_submit_urb returned %d\n", ret);
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
- 0, ret, COMPLETE);
+ 0, ret, COMPLETE, NULL, 0);
async_removepending(as);
free_async(as);
return ret;
@@ -1628,7 +1661,10 @@ static int proc_ioctl(struct dev_state *ps, struct usbdevfs_ioctl *ctl)
if (driver == NULL || driver->ioctl == NULL) {
retval = -ENOTTY;
} else {
+ /* keep API that guarantees BKL */
+ lock_kernel();
retval = driver->ioctl(intf, ctl->ioctl_code, buf);
+ unlock_kernel();
if (retval == -ENOIOCTLCMD)
retval = -ENOTTY;
}
@@ -1711,6 +1747,7 @@ static long usbdev_do_ioctl(struct file *file, unsigned int cmd,
if (!(file->f_mode & FMODE_WRITE))
return -EPERM;
+
usb_lock_device(dev);
if (!connected(ps)) {
usb_unlock_device(dev);
@@ -1877,9 +1914,7 @@ static long usbdev_ioctl(struct file *file, unsigned int cmd,
{
int ret;
- lock_kernel();
ret = usbdev_do_ioctl(file, cmd, (void __user *)arg);
- unlock_kernel();
return ret;
}
@@ -1890,9 +1925,7 @@ static long usbdev_compat_ioctl(struct file *file, unsigned int cmd,
{
int ret;
- lock_kernel();
ret = usbdev_do_ioctl(file, cmd, compat_ptr(arg));
- unlock_kernel();
return ret;
}
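
The snoop_urb() changes above add payload dumping via print_hex_dump(). A sketch of the convention they follow (example_snoop_payload is illustrative, not in the patch): OUT data is only meaningful when the URB is submitted, IN data only once the URB has completed.

	static void example_snoop_payload(struct urb *urb, int is_submit)
	{
		int is_out = usb_pipeout(urb->pipe);

		if (is_submit && is_out)		/* dump what we send */
			print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
				       urb->transfer_buffer,
				       urb->transfer_buffer_length, 1);
		else if (!is_submit && !is_out)		/* dump what we received */
			print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, 32, 1,
				       urb->transfer_buffer, urb->actual_length, 1);
	}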
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index f2f055eb683..a7037bf8168 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -25,7 +25,7 @@
#include <linux/device.h>
#include <linux/usb.h>
#include <linux/usb/quirks.h>
-#include <linux/workqueue.h>
+#include <linux/pm_runtime.h>
#include "hcd.h"
#include "usb.h"
@@ -221,7 +221,7 @@ static int usb_probe_device(struct device *dev)
{
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
struct usb_device *udev = to_usb_device(dev);
- int error = -ENODEV;
+ int error = 0;
dev_dbg(dev, "%s\n", __func__);
@@ -230,18 +230,23 @@ static int usb_probe_device(struct device *dev)
/* The device should always appear to be in use
* unless the driver suports autosuspend.
*/
- udev->pm_usage_cnt = !(udriver->supports_autosuspend);
+ if (!udriver->supports_autosuspend)
+ error = usb_autoresume_device(udev);
- error = udriver->probe(udev);
+ if (!error)
+ error = udriver->probe(udev);
return error;
}
/* called from driver core with dev locked */
static int usb_unbind_device(struct device *dev)
{
+ struct usb_device *udev = to_usb_device(dev);
struct usb_device_driver *udriver = to_usb_device_driver(dev->driver);
- udriver->disconnect(to_usb_device(dev));
+ udriver->disconnect(udev);
+ if (!udriver->supports_autosuspend)
+ usb_autosuspend_device(udev);
return 0;
}
@@ -274,60 +279,62 @@ static int usb_probe_interface(struct device *dev)
intf->needs_binding = 0;
if (usb_device_is_owned(udev))
- return -ENODEV;
+ return error;
if (udev->authorized == 0) {
dev_err(&intf->dev, "Device is not authorized for usage\n");
- return -ENODEV;
+ return error;
}
id = usb_match_id(intf, driver->id_table);
if (!id)
id = usb_match_dynamic_id(intf, driver);
- if (id) {
- dev_dbg(dev, "%s - got id\n", __func__);
-
- error = usb_autoresume_device(udev);
- if (error)
- return error;
+ if (!id)
+ return error;
- /* Interface "power state" doesn't correspond to any hardware
- * state whatsoever. We use it to record when it's bound to
- * a driver that may start I/0: it's not frozen/quiesced.
- */
- mark_active(intf);
- intf->condition = USB_INTERFACE_BINDING;
+ dev_dbg(dev, "%s - got id\n", __func__);
- /* The interface should always appear to be in use
- * unless the driver suports autosuspend.
- */
- atomic_set(&intf->pm_usage_cnt, !driver->supports_autosuspend);
-
- /* Carry out a deferred switch to altsetting 0 */
- if (intf->needs_altsetting0) {
- error = usb_set_interface(udev, intf->altsetting[0].
- desc.bInterfaceNumber, 0);
- if (error < 0)
- goto err;
+ error = usb_autoresume_device(udev);
+ if (error)
+ return error;
- intf->needs_altsetting0 = 0;
- }
+ intf->condition = USB_INTERFACE_BINDING;
- error = driver->probe(intf, id);
- if (error)
+ /* Bound interfaces are initially active. They are
+ * runtime-PM-enabled only if the driver has autosuspend support.
+ * They are sensitive to their children's power states.
+ */
+ pm_runtime_set_active(dev);
+ pm_suspend_ignore_children(dev, false);
+ if (driver->supports_autosuspend)
+ pm_runtime_enable(dev);
+
+ /* Carry out a deferred switch to altsetting 0 */
+ if (intf->needs_altsetting0) {
+ error = usb_set_interface(udev, intf->altsetting[0].
+ desc.bInterfaceNumber, 0);
+ if (error < 0)
goto err;
-
- intf->condition = USB_INTERFACE_BOUND;
- usb_autosuspend_device(udev);
+ intf->needs_altsetting0 = 0;
}
+ error = driver->probe(intf, id);
+ if (error)
+ goto err;
+
+ intf->condition = USB_INTERFACE_BOUND;
+ usb_autosuspend_device(udev);
return error;
-err:
- mark_quiesced(intf);
+ err:
intf->needs_remote_wakeup = 0;
intf->condition = USB_INTERFACE_UNBOUND;
usb_cancel_queued_reset(intf);
+
+ /* Unbound interfaces are always runtime-PM-disabled and -suspended */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
usb_autosuspend_device(udev);
return error;
}
@@ -377,9 +384,17 @@ static int usb_unbind_interface(struct device *dev)
usb_set_intfdata(intf, NULL);
intf->condition = USB_INTERFACE_UNBOUND;
- mark_quiesced(intf);
intf->needs_remote_wakeup = 0;
+ /* Unbound interfaces are always runtime-PM-disabled and -suspended */
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+
+ /* Undo any residual usb_autopm_get_interface_* calls */
+ for (r = atomic_read(&intf->pm_usage_cnt); r > 0; --r)
+ usb_autopm_put_interface_no_suspend(intf);
+ atomic_set(&intf->pm_usage_cnt, 0);
+
if (!error)
usb_autosuspend_device(udev);
@@ -410,7 +425,6 @@ int usb_driver_claim_interface(struct usb_driver *driver,
struct usb_interface *iface, void *priv)
{
struct device *dev = &iface->dev;
- struct usb_device *udev = interface_to_usbdev(iface);
int retval = 0;
if (dev->driver)
@@ -420,11 +434,16 @@ int usb_driver_claim_interface(struct usb_driver *driver,
usb_set_intfdata(iface, priv);
iface->needs_binding = 0;
- usb_pm_lock(udev);
iface->condition = USB_INTERFACE_BOUND;
- mark_active(iface);
- atomic_set(&iface->pm_usage_cnt, !driver->supports_autosuspend);
- usb_pm_unlock(udev);
+
+ /* Bound interfaces are initially active. They are
+ * runtime-PM-enabled only if the driver has autosuspend support.
+ * They are sensitive to their children's power states.
+ */
+ pm_runtime_set_active(dev);
+ pm_suspend_ignore_children(dev, false);
+ if (driver->supports_autosuspend)
+ pm_runtime_enable(dev);
/* if interface was already added, bind now; else let
* the future device_add() bind it, bypassing probe()
@@ -691,9 +710,6 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct usb_device *usb_dev;
- /* driver is often null here; dev_dbg() would oops */
- pr_debug("usb %s: uevent\n", dev_name(dev));
-
if (is_usb_device(dev)) {
usb_dev = to_usb_device(dev);
} else if (is_usb_interface(dev)) {
@@ -705,6 +721,7 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env)
}
if (usb_dev->devnum < 0) {
+ /* driver is often null here; dev_dbg() would oops */
pr_debug("usb %s: already deleted?\n", dev_name(dev));
return -ENODEV;
}
@@ -983,7 +1000,6 @@ static void do_unbind_rebind(struct usb_device *udev, int action)
}
}
-/* Caller has locked udev's pm_mutex */
static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
{
struct usb_device_driver *udriver;
@@ -1007,7 +1023,6 @@ static int usb_suspend_device(struct usb_device *udev, pm_message_t msg)
return status;
}
-/* Caller has locked udev's pm_mutex */
static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
{
struct usb_device_driver *udriver;
@@ -1041,27 +1056,20 @@ static int usb_resume_device(struct usb_device *udev, pm_message_t msg)
return status;
}
-/* Caller has locked intf's usb_device's pm mutex */
static int usb_suspend_interface(struct usb_device *udev,
struct usb_interface *intf, pm_message_t msg)
{
struct usb_driver *driver;
int status = 0;
- /* with no hardware, USB interfaces only use FREEZE and ON states */
- if (udev->state == USB_STATE_NOTATTACHED || !is_active(intf))
- goto done;
-
- /* This can happen; see usb_driver_release_interface() */
- if (intf->condition == USB_INTERFACE_UNBOUND)
+ if (udev->state == USB_STATE_NOTATTACHED ||
+ intf->condition == USB_INTERFACE_UNBOUND)
goto done;
driver = to_usb_driver(intf->dev.driver);
if (driver->suspend) {
status = driver->suspend(intf, msg);
- if (status == 0)
- mark_quiesced(intf);
- else if (!(msg.event & PM_EVENT_AUTO))
+ if (status && !(msg.event & PM_EVENT_AUTO))
dev_err(&intf->dev, "%s error %d\n",
"suspend", status);
} else {
@@ -1069,7 +1077,6 @@ static int usb_suspend_interface(struct usb_device *udev,
intf->needs_binding = 1;
dev_warn(&intf->dev, "no %s for driver %s?\n",
"suspend", driver->name);
- mark_quiesced(intf);
}
done:
@@ -1077,14 +1084,13 @@ static int usb_suspend_interface(struct usb_device *udev,
return status;
}
-/* Caller has locked intf's usb_device's pm_mutex */
static int usb_resume_interface(struct usb_device *udev,
struct usb_interface *intf, pm_message_t msg, int reset_resume)
{
struct usb_driver *driver;
int status = 0;
- if (udev->state == USB_STATE_NOTATTACHED || is_active(intf))
+ if (udev->state == USB_STATE_NOTATTACHED)
goto done;
/* Don't let autoresume interfere with unbinding */
@@ -1135,90 +1141,11 @@ static int usb_resume_interface(struct usb_device *udev,
done:
dev_vdbg(&intf->dev, "%s: status %d\n", __func__, status);
- if (status == 0 && intf->condition == USB_INTERFACE_BOUND)
- mark_active(intf);
/* Later we will unbind the driver and/or reprobe, if necessary */
return status;
}
-#ifdef CONFIG_USB_SUSPEND
-
-/* Internal routine to check whether we may autosuspend a device. */
-static int autosuspend_check(struct usb_device *udev, int reschedule)
-{
- int i;
- struct usb_interface *intf;
- unsigned long suspend_time, j;
-
- /* For autosuspend, fail fast if anything is in use or autosuspend
- * is disabled. Also fail if any interfaces require remote wakeup
- * but it isn't available.
- */
- if (udev->pm_usage_cnt > 0)
- return -EBUSY;
- if (udev->autosuspend_delay < 0 || udev->autosuspend_disabled)
- return -EPERM;
-
- suspend_time = udev->last_busy + udev->autosuspend_delay;
- if (udev->actconfig) {
- for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
- intf = udev->actconfig->interface[i];
- if (!is_active(intf))
- continue;
- if (atomic_read(&intf->pm_usage_cnt) > 0)
- return -EBUSY;
- if (intf->needs_remote_wakeup &&
- !udev->do_remote_wakeup) {
- dev_dbg(&udev->dev, "remote wakeup needed "
- "for autosuspend\n");
- return -EOPNOTSUPP;
- }
-
- /* Don't allow autosuspend if the device will need
- * a reset-resume and any of its interface drivers
- * doesn't include support.
- */
- if (udev->quirks & USB_QUIRK_RESET_RESUME) {
- struct usb_driver *driver;
-
- driver = to_usb_driver(intf->dev.driver);
- if (!driver->reset_resume ||
- intf->needs_remote_wakeup)
- return -EOPNOTSUPP;
- }
- }
- }
-
- /* If everything is okay but the device hasn't been idle for long
- * enough, queue a delayed autosuspend request. If the device
- * _has_ been idle for long enough and the reschedule flag is set,
- * likewise queue a delayed (1 second) autosuspend request.
- */
- j = jiffies;
- if (time_before(j, suspend_time))
- reschedule = 1;
- else
- suspend_time = j + HZ;
- if (reschedule) {
- if (!timer_pending(&udev->autosuspend.timer)) {
- queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
- round_jiffies_up_relative(suspend_time - j));
- }
- return -EAGAIN;
- }
- return 0;
-}
-
-#else
-
-static inline int autosuspend_check(struct usb_device *udev, int reschedule)
-{
- return 0;
-}
-
-#endif /* CONFIG_USB_SUSPEND */
-
/**
* usb_suspend_both - suspend a USB device and its interfaces
* @udev: the usb_device to suspend
@@ -1230,27 +1157,12 @@ static inline int autosuspend_check(struct usb_device *udev, int reschedule)
* all the interfaces which were suspended are resumed so that they remain
* in the same state as the device.
*
- * If an autosuspend is in progress the routine checks first to make sure
- * that neither the device itself or any of its active interfaces is in use
- * (pm_usage_cnt is greater than 0). If they are, the autosuspend fails.
- *
- * If the suspend succeeds, the routine recursively queues an autosuspend
- * request for @udev's parent device, thereby propagating the change up
- * the device tree. If all of the parent's children are now suspended,
- * the parent will autosuspend in turn.
- *
- * The suspend method calls are subject to mutual exclusion under control
- * of @udev's pm_mutex. Many of these calls are also under the protection
- * of @udev's device lock (including all requests originating outside the
- * USB subsystem), but autosuspend requests generated by a child device or
- * interface driver may not be. Usbcore will insure that the method calls
- * do not arrive during bind, unbind, or reset operations. However, drivers
- * must be prepared to handle suspend calls arriving at unpredictable times.
- * The only way to block such calls is to do an autoresume (preventing
- * autosuspends) while holding @udev's device lock (preventing outside
- * suspends).
- *
- * The caller must hold @udev->pm_mutex.
+ * Autosuspend requests originating from a child device or an interface
+ * driver may be made without the protection of @udev's device lock, but
+ * all other suspend calls will hold the lock. Usbcore will insure that
+ * method calls do not arrive during bind, unbind, or reset operations.
+ * However drivers must be prepared to handle suspend calls arriving at
+ * unpredictable times.
*
* This routine can run only in process context.
*/
@@ -1259,20 +1171,11 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
int status = 0;
int i = 0;
struct usb_interface *intf;
- struct usb_device *parent = udev->parent;
if (udev->state == USB_STATE_NOTATTACHED ||
udev->state == USB_STATE_SUSPENDED)
goto done;
- udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
-
- if (msg.event & PM_EVENT_AUTO) {
- status = autosuspend_check(udev, 0);
- if (status < 0)
- goto done;
- }
-
/* Suspend all the interfaces and then udev itself */
if (udev->actconfig) {
for (; i < udev->actconfig->desc.bNumInterfaces; i++) {
@@ -1287,35 +1190,21 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
/* If the suspend failed, resume interfaces that did get suspended */
if (status != 0) {
- pm_message_t msg2;
-
- msg2.event = msg.event ^ (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
+ msg.event ^= (PM_EVENT_SUSPEND | PM_EVENT_RESUME);
while (--i >= 0) {
intf = udev->actconfig->interface[i];
- usb_resume_interface(udev, intf, msg2, 0);
+ usb_resume_interface(udev, intf, msg, 0);
}
- /* Try another autosuspend when the interfaces aren't busy */
- if (msg.event & PM_EVENT_AUTO)
- autosuspend_check(udev, status == -EBUSY);
-
- /* If the suspend succeeded then prevent any more URB submissions,
- * flush any outstanding URBs, and propagate the suspend up the tree.
+ /* If the suspend succeeded then prevent any more URB submissions
+ * and flush any outstanding URBs.
*/
} else {
- cancel_delayed_work(&udev->autosuspend);
udev->can_submit = 0;
for (i = 0; i < 16; ++i) {
usb_hcd_flush_endpoint(udev, udev->ep_out[i]);
usb_hcd_flush_endpoint(udev, udev->ep_in[i]);
}
-
- /* If this is just a FREEZE or a PRETHAW, udev might
- * not really be suspended. Only true suspends get
- * propagated up the device tree.
- */
- if (parent && udev->state == USB_STATE_SUSPENDED)
- usb_autosuspend_device(parent);
}
done:
@@ -1332,23 +1221,12 @@ static int usb_suspend_both(struct usb_device *udev, pm_message_t msg)
* the resume method for @udev and then calls the resume methods for all
* the interface drivers in @udev.
*
- * Before starting the resume, the routine calls itself recursively for
- * the parent device of @udev, thereby propagating the change up the device
- * tree and assuring that @udev will be able to resume. If the parent is
- * unable to resume successfully, the routine fails.
- *
- * The resume method calls are subject to mutual exclusion under control
- * of @udev's pm_mutex. Many of these calls are also under the protection
- * of @udev's device lock (including all requests originating outside the
- * USB subsystem), but autoresume requests generated by a child device or
- * interface driver may not be. Usbcore will insure that the method calls
- * do not arrive during bind, unbind, or reset operations. However, drivers
- * must be prepared to handle resume calls arriving at unpredictable times.
- * The only way to block such calls is to do an autoresume (preventing
- * other autoresumes) while holding @udev's device lock (preventing outside
- * resumes).
- *
- * The caller must hold @udev->pm_mutex.
+ * Autoresume requests originating from a child device or an interface
+ * driver may be made without the protection of @udev's device lock, but
+ * all other resume calls will hold the lock. Usbcore will insure that
+ * method calls do not arrive during bind, unbind, or reset operations.
+ * However drivers must be prepared to handle resume calls arriving at
+ * unpredictable times.
*
* This routine can run only in process context.
*/
@@ -1357,48 +1235,18 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
int status = 0;
int i;
struct usb_interface *intf;
- struct usb_device *parent = udev->parent;
- cancel_delayed_work(&udev->autosuspend);
if (udev->state == USB_STATE_NOTATTACHED) {
status = -ENODEV;
goto done;
}
udev->can_submit = 1;
- /* Propagate the resume up the tree, if necessary */
- if (udev->state == USB_STATE_SUSPENDED) {
- if (parent) {
- status = usb_autoresume_device(parent);
- if (status == 0) {
- status = usb_resume_device(udev, msg);
- if (status || udev->state ==
- USB_STATE_NOTATTACHED) {
- usb_autosuspend_device(parent);
-
- /* It's possible usb_resume_device()
- * failed after the port was
- * unsuspended, causing udev to be
- * logically disconnected. We don't
- * want usb_disconnect() to autosuspend
- * the parent again, so tell it that
- * udev disconnected while still
- * suspended. */
- if (udev->state ==
- USB_STATE_NOTATTACHED)
- udev->discon_suspended = 1;
- }
- }
- } else {
-
- /* We can't progagate beyond the USB subsystem,
- * so if a root hub's controller is suspended
- * then we're stuck. */
- status = usb_resume_device(udev, msg);
- }
- } else if (udev->reset_resume)
+ /* Resume the device */
+ if (udev->state == USB_STATE_SUSPENDED || udev->reset_resume)
status = usb_resume_device(udev, msg);
+ /* Resume the interfaces */
if (status == 0 && udev->actconfig) {
for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
intf = udev->actconfig->interface[i];
@@ -1414,55 +1262,94 @@ static int usb_resume_both(struct usb_device *udev, pm_message_t msg)
return status;
}
-#ifdef CONFIG_USB_SUSPEND
+/* The device lock is held by the PM core */
+int usb_suspend(struct device *dev, pm_message_t msg)
+{
+ struct usb_device *udev = to_usb_device(dev);
-/* Internal routine to adjust a device's usage counter and change
- * its autosuspend state.
- */
-static int usb_autopm_do_device(struct usb_device *udev, int inc_usage_cnt)
+ do_unbind_rebind(udev, DO_UNBIND);
+ udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ return usb_suspend_both(udev, msg);
+}
+
+/* The device lock is held by the PM core */
+int usb_resume(struct device *dev, pm_message_t msg)
{
- int status = 0;
+ struct usb_device *udev = to_usb_device(dev);
+ int status;
- usb_pm_lock(udev);
- udev->pm_usage_cnt += inc_usage_cnt;
- WARN_ON(udev->pm_usage_cnt < 0);
- if (inc_usage_cnt)
- udev->last_busy = jiffies;
- if (inc_usage_cnt >= 0 && udev->pm_usage_cnt > 0) {
- if (udev->state == USB_STATE_SUSPENDED)
- status = usb_resume_both(udev, PMSG_AUTO_RESUME);
- if (status != 0)
- udev->pm_usage_cnt -= inc_usage_cnt;
- else if (inc_usage_cnt)
+ /* For PM complete calls, all we do is rebind interfaces */
+ if (msg.event == PM_EVENT_ON) {
+ if (udev->state != USB_STATE_NOTATTACHED)
+ do_unbind_rebind(udev, DO_REBIND);
+ status = 0;
+
+ /* For all other calls, take the device back to full power and
+ * tell the PM core in case it was autosuspended previously.
+ */
+ } else {
+ status = usb_resume_both(udev, msg);
+ if (status == 0) {
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
udev->last_busy = jiffies;
- } else if (inc_usage_cnt <= 0 && udev->pm_usage_cnt <= 0) {
- status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+ }
}
- usb_pm_unlock(udev);
+
+ /* Avoid PM error messages for devices disconnected while suspended
+ * as we'll display regular disconnect messages just a bit later.
+ */
+ if (status == -ENODEV)
+ status = 0;
return status;
}
-/* usb_autosuspend_work - callback routine to autosuspend a USB device */
-void usb_autosuspend_work(struct work_struct *work)
-{
- struct usb_device *udev =
- container_of(work, struct usb_device, autosuspend.work);
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_SUSPEND
- usb_autopm_do_device(udev, 0);
+/**
+ * usb_enable_autosuspend - allow a USB device to be autosuspended
+ * @udev: the USB device which may be autosuspended
+ *
+ * This routine allows @udev to be autosuspended. An autosuspend won't
+ * take place until the autosuspend_delay has elapsed and all the other
+ * necessary conditions are satisfied.
+ *
+ * The caller must hold @udev's device lock.
+ */
+int usb_enable_autosuspend(struct usb_device *udev)
+{
+ if (udev->autosuspend_disabled) {
+ udev->autosuspend_disabled = 0;
+ usb_autosuspend_device(udev);
+ }
+ return 0;
}
+EXPORT_SYMBOL_GPL(usb_enable_autosuspend);
-/* usb_autoresume_work - callback routine to autoresume a USB device */
-void usb_autoresume_work(struct work_struct *work)
+/**
+ * usb_disable_autosuspend - prevent a USB device from being autosuspended
+ * @udev: the USB device which may not be autosuspended
+ *
+ * This routine prevents @udev from being autosuspended and wakes it up
+ * if it is already autosuspended.
+ *
+ * The caller must hold @udev's device lock.
+ */
+int usb_disable_autosuspend(struct usb_device *udev)
{
- struct usb_device *udev =
- container_of(work, struct usb_device, autoresume);
+ int rc = 0;
- /* Wake it up, let the drivers do their thing, and then put it
- * back to sleep.
- */
- if (usb_autopm_do_device(udev, 1) == 0)
- usb_autopm_do_device(udev, -1);
+ if (!udev->autosuspend_disabled) {
+ rc = usb_autoresume_device(udev);
+ if (rc == 0)
+ udev->autosuspend_disabled = 1;
+ }
+ return rc;
}
+EXPORT_SYMBOL_GPL(usb_disable_autosuspend);
/**
* usb_autosuspend_device - delayed autosuspend of a USB device and its interfaces
@@ -1472,15 +1359,11 @@ void usb_autoresume_work(struct work_struct *work)
* @udev and wants to allow it to autosuspend. Examples would be when
* @udev's device file in usbfs is closed or after a configuration change.
*
- * @udev's usage counter is decremented. If it or any of the usage counters
- * for an active interface is greater than 0, no autosuspend request will be
- * queued. (If an interface driver does not support autosuspend then its
- * usage counter is permanently positive.) Furthermore, if an interface
- * driver requires remote-wakeup capability during autosuspend but remote
- * wakeup is disabled, the autosuspend will fail.
+ * @udev's usage counter is decremented; if it drops to 0 and all the
+ * interfaces are inactive then a delayed autosuspend will be attempted.
+ * The attempt may fail (see autosuspend_check()).
*
- * Often the caller will hold @udev's device lock, but this is not
- * necessary.
+ * The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
@@ -1488,9 +1371,11 @@ void usb_autosuspend_device(struct usb_device *udev)
{
int status;
- status = usb_autopm_do_device(udev, -1);
- dev_vdbg(&udev->dev, "%s: cnt %d\n",
- __func__, udev->pm_usage_cnt);
+ udev->last_busy = jiffies;
+ status = pm_runtime_put_sync(&udev->dev);
+ dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&udev->dev.power.usage_count),
+ status);
}
/**
@@ -1500,17 +1385,22 @@ void usb_autosuspend_device(struct usb_device *udev)
* This routine should be called when a core subsystem thinks @udev may
* be ready to autosuspend.
*
- * @udev's usage counter left unchanged. If it or any of the usage counters
- * for an active interface is greater than 0, or autosuspend is not allowed
- * for any other reason, no autosuspend request will be queued.
+ * @udev's usage counter left unchanged. If it is 0 and all the interfaces
+ * are inactive then an autosuspend will be attempted. The attempt may
+ * fail or be delayed.
+ *
+ * The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
void usb_try_autosuspend_device(struct usb_device *udev)
{
- usb_autopm_do_device(udev, 0);
- dev_vdbg(&udev->dev, "%s: cnt %d\n",
- __func__, udev->pm_usage_cnt);
+ int status;
+
+ status = pm_runtime_idle(&udev->dev);
+ dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&udev->dev.power.usage_count),
+ status);
}
/**
@@ -1519,16 +1409,15 @@ void usb_try_autosuspend_device(struct usb_device *udev)
*
* This routine should be called when a core subsystem wants to use @udev
* and needs to guarantee that it is not suspended. No autosuspend will
- * occur until usb_autosuspend_device is called. (Note that this will not
- * prevent suspend events originating in the PM core.) Examples would be
- * when @udev's device file in usbfs is opened or when a remote-wakeup
+ * occur until usb_autosuspend_device() is called. (Note that this will
+ * not prevent suspend events originating in the PM core.) Examples would
+ * be when @udev's device file in usbfs is opened or when a remote-wakeup
* request is received.
*
* @udev's usage counter is incremented to prevent subsequent autosuspends.
* However if the autoresume fails then the usage counter is re-decremented.
*
- * Often the caller will hold @udev's device lock, but this is not
- * necessary (and attempting it might cause deadlock).
+ * The caller must hold @udev's device lock.
*
* This routine can run only in process context.
*/
@@ -1536,42 +1425,14 @@ int usb_autoresume_device(struct usb_device *udev)
{
int status;
- status = usb_autopm_do_device(udev, 1);
- dev_vdbg(&udev->dev, "%s: status %d cnt %d\n",
- __func__, status, udev->pm_usage_cnt);
- return status;
-}
-
-/* Internal routine to adjust an interface's usage counter and change
- * its device's autosuspend state.
- */
-static int usb_autopm_do_interface(struct usb_interface *intf,
- int inc_usage_cnt)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
- int status = 0;
-
- usb_pm_lock(udev);
- if (intf->condition == USB_INTERFACE_UNBOUND)
- status = -ENODEV;
- else {
- atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
- udev->last_busy = jiffies;
- if (inc_usage_cnt >= 0 &&
- atomic_read(&intf->pm_usage_cnt) > 0) {
- if (udev->state == USB_STATE_SUSPENDED)
- status = usb_resume_both(udev,
- PMSG_AUTO_RESUME);
- if (status != 0)
- atomic_sub(inc_usage_cnt, &intf->pm_usage_cnt);
- else
- udev->last_busy = jiffies;
- } else if (inc_usage_cnt <= 0 &&
- atomic_read(&intf->pm_usage_cnt) <= 0) {
- status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
- }
- }
- usb_pm_unlock(udev);
+ status = pm_runtime_get_sync(&udev->dev);
+ if (status < 0)
+ pm_runtime_put_sync(&udev->dev);
+ dev_vdbg(&udev->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&udev->dev.power.usage_count),
+ status);
+ if (status > 0)
+ status = 0;
return status;
}
@@ -1585,34 +1446,25 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
* closed.
*
* The routine decrements @intf's usage counter. When the counter reaches
- * 0, a delayed autosuspend request for @intf's device is queued. When
- * the delay expires, if @intf->pm_usage_cnt is still <= 0 along with all
- * the other usage counters for the sibling interfaces and @intf's
- * usb_device, the device and all its interfaces will be autosuspended.
- *
- * Note that @intf->pm_usage_cnt is owned by the interface driver. The
- * core will not change its value other than the increment and decrement
- * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
- * may use this simple counter-oriented discipline or may set the value
- * any way it likes.
+ * 0, a delayed autosuspend request for @intf's device is attempted. The
+ * attempt may fail (see autosuspend_check()).
*
* If the driver has set @intf->needs_remote_wakeup then autosuspend will
* take place only if the device's remote-wakeup facility is enabled.
*
- * Suspend method calls queued by this routine can arrive at any time
- * while @intf is resumed and its usage counter is equal to 0. They are
- * not protected by the usb_device's lock but only by its pm_mutex.
- * Drivers must provide their own synchronization.
- *
* This routine can run only in process context.
*/
void usb_autopm_put_interface(struct usb_interface *intf)
{
- int status;
+ struct usb_device *udev = interface_to_usbdev(intf);
+ int status;
- status = usb_autopm_do_interface(intf, -1);
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ udev->last_busy = jiffies;
+ atomic_dec(&intf->pm_usage_cnt);
+ status = pm_runtime_put_sync(&intf->dev);
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
@@ -1620,11 +1472,11 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
* usb_autopm_put_interface_async - decrement a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be decremented
*
- * This routine does essentially the same thing as
- * usb_autopm_put_interface(): it decrements @intf's usage counter and
- * queues a delayed autosuspend request if the counter is <= 0. The
- * difference is that it does not acquire the device's pm_mutex;
- * callers must handle all synchronization issues themselves.
+ * This routine does much the same thing as usb_autopm_put_interface():
+ * It decrements @intf's usage counter and schedules a delayed
+ * autosuspend request if the counter is <= 0. The difference is that it
+ * does not perform any synchronization; callers should hold a private
+ * lock and handle all synchronization issues themselves.
*
* Typically a driver would call this routine during an URB's completion
* handler, if no more URBs were pending.
@@ -1634,28 +1486,58 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
void usb_autopm_put_interface_async(struct usb_interface *intf)
{
struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long last_busy;
int status = 0;
- if (intf->condition == USB_INTERFACE_UNBOUND) {
- status = -ENODEV;
- } else {
- udev->last_busy = jiffies;
- atomic_dec(&intf->pm_usage_cnt);
- if (udev->autosuspend_disabled || udev->autosuspend_delay < 0)
- status = -EPERM;
- else if (atomic_read(&intf->pm_usage_cnt) <= 0 &&
- !timer_pending(&udev->autosuspend.timer)) {
- queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
+ last_busy = udev->last_busy;
+ udev->last_busy = jiffies;
+ atomic_dec(&intf->pm_usage_cnt);
+ pm_runtime_put_noidle(&intf->dev);
+
+ if (!udev->autosuspend_disabled) {
+ /* Optimization: Don't schedule a delayed autosuspend if
+ * the timer is already running and the expiration time
+ * wouldn't change.
+ *
+ * We have to use the interface's timer. Attempts to
+ * schedule a suspend for the device would fail because
+ * the interface is still active.
+ */
+ if (intf->dev.power.timer_expires == 0 ||
+ round_jiffies_up(last_busy) !=
+ round_jiffies_up(jiffies)) {
+ status = pm_schedule_suspend(&intf->dev,
+ jiffies_to_msecs(
round_jiffies_up_relative(
- udev->autosuspend_delay));
+ udev->autosuspend_delay)));
}
}
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
/**
+ * usb_autopm_put_interface_no_suspend - decrement a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be decremented
+ *
+ * This routine decrements @intf's usage counter but does not carry out an
+ * autosuspend.
+ *
+ * This routine can run in atomic context.
+ */
+void usb_autopm_put_interface_no_suspend(struct usb_interface *intf)
+{
+ struct usb_device *udev = interface_to_usbdev(intf);
+
+ udev->last_busy = jiffies;
+ atomic_dec(&intf->pm_usage_cnt);
+ pm_runtime_put_noidle(&intf->dev);
+}
+EXPORT_SYMBOL_GPL(usb_autopm_put_interface_no_suspend);
+
+/**
* usb_autopm_get_interface - increment a USB interface's PM-usage counter
* @intf: the usb_interface whose counter should be incremented
*
@@ -1667,25 +1549,8 @@ EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
* or @intf is unbound. A typical example would be a character-device
* driver when its device file is opened.
*
- *
- * The routine increments @intf's usage counter. (However if the
- * autoresume fails then the counter is re-decremented.) So long as the
- * counter is greater than 0, autosuspend will not be allowed for @intf
- * or its usb_device. When the driver is finished using @intf it should
- * call usb_autopm_put_interface() to decrement the usage counter and
- * queue a delayed autosuspend request (if the counter is <= 0).
- *
- *
- * Note that @intf->pm_usage_cnt is owned by the interface driver. The
- * core will not change its value other than the increment and decrement
- * in usb_autopm_get_interface and usb_autopm_put_interface. The driver
- * may use this simple counter-oriented discipline or may set the value
- * any way it likes.
- *
- * Resume method calls generated by this routine can arrive at any time
- * while @intf is suspended. They are not protected by the usb_device's
- * lock but only by its pm_mutex. Drivers must provide their own
- * synchronization.
+ * @intf's usage counter is incremented to prevent subsequent autosuspends.
+ * However if the autoresume fails then the counter is re-decremented.
*
* This routine can run only in process context.
*/
@@ -1693,9 +1558,16 @@ int usb_autopm_get_interface(struct usb_interface *intf)
{
int status;
- status = usb_autopm_do_interface(intf, 1);
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ status = pm_runtime_get_sync(&intf->dev);
+ if (status < 0)
+ pm_runtime_put_sync(&intf->dev);
+ else
+ atomic_inc(&intf->pm_usage_cnt);
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
+ if (status > 0)
+ status = 0;
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
@@ -1705,149 +1577,207 @@ EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
* @intf: the usb_interface whose counter should be incremented
*
* This routine does much the same thing as
- * usb_autopm_get_interface(): it increments @intf's usage counter and
- * queues an autoresume request if the result is > 0. The differences
- * are that it does not acquire the device's pm_mutex (callers must
- * handle all synchronization issues themselves), and it does not
- * autoresume the device directly (it only queues a request). After a
- * successful call, the device will generally not yet be resumed.
+ * usb_autopm_get_interface(): It increments @intf's usage counter and
+ * queues an autoresume request if the device is suspended. The
+ * differences are that it does not perform any synchronization (callers
+ * should hold a private lock and handle all synchronization issues
+ * themselves), and it does not autoresume the device directly (it only
+ * queues a request). After a successful call, the device may not yet be
+ * resumed.
*
* This routine can run in atomic context.
*/
int usb_autopm_get_interface_async(struct usb_interface *intf)
{
- struct usb_device *udev = interface_to_usbdev(intf);
- int status = 0;
+ int status = 0;
+ enum rpm_status s;
- if (intf->condition == USB_INTERFACE_UNBOUND)
- status = -ENODEV;
- else {
+ /* Don't request a resume unless the interface is already suspending
+ * or suspended. Doing so would force a running suspend timer to be
+ * cancelled.
+ */
+ pm_runtime_get_noresume(&intf->dev);
+ s = ACCESS_ONCE(intf->dev.power.runtime_status);
+ if (s == RPM_SUSPENDING || s == RPM_SUSPENDED)
+ status = pm_request_resume(&intf->dev);
+
+ if (status < 0 && status != -EINPROGRESS)
+ pm_runtime_put_noidle(&intf->dev);
+ else
atomic_inc(&intf->pm_usage_cnt);
- if (atomic_read(&intf->pm_usage_cnt) > 0 &&
- udev->state == USB_STATE_SUSPENDED)
- queue_work(ksuspend_usb_wq, &udev->autoresume);
- }
- dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
- __func__, status, atomic_read(&intf->pm_usage_cnt));
+ dev_vdbg(&intf->dev, "%s: cnt %d -> %d\n",
+ __func__, atomic_read(&intf->dev.power.usage_count),
+ status);
+ if (status > 0)
+ status = 0;
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
-#else
-
-void usb_autosuspend_work(struct work_struct *work)
-{}
-
-void usb_autoresume_work(struct work_struct *work)
-{}
-
-#endif /* CONFIG_USB_SUSPEND */
-
/**
- * usb_external_suspend_device - external suspend of a USB device and its interfaces
- * @udev: the usb_device to suspend
- * @msg: Power Management message describing this state transition
+ * usb_autopm_get_interface_no_resume - increment a USB interface's PM-usage counter
+ * @intf: the usb_interface whose counter should be incremented
*
- * This routine handles external suspend requests: ones not generated
- * internally by a USB driver (autosuspend) but rather coming from the user
- * (via sysfs) or the PM core (system sleep). The suspend will be carried
- * out regardless of @udev's usage counter or those of its interfaces,
- * and regardless of whether or not remote wakeup is enabled. Of course,
- * interface drivers still have the option of failing the suspend (if
- * there are unsuspended children, for example).
+ * This routine increments @intf's usage counter but does not carry out an
+ * autoresume.
*
- * The caller must hold @udev's device lock.
+ * This routine can run in atomic context.
*/
-int usb_external_suspend_device(struct usb_device *udev, pm_message_t msg)
+void usb_autopm_get_interface_no_resume(struct usb_interface *intf)
{
- int status;
+ struct usb_device *udev = interface_to_usbdev(intf);
- do_unbind_rebind(udev, DO_UNBIND);
- usb_pm_lock(udev);
- status = usb_suspend_both(udev, msg);
- usb_pm_unlock(udev);
- return status;
+ udev->last_busy = jiffies;
+ atomic_inc(&intf->pm_usage_cnt);
+ pm_runtime_get_noresume(&intf->dev);
}
+EXPORT_SYMBOL_GPL(usb_autopm_get_interface_no_resume);
-/**
- * usb_external_resume_device - external resume of a USB device and its interfaces
- * @udev: the usb_device to resume
- * @msg: Power Management message describing this state transition
- *
- * This routine handles external resume requests: ones not generated
- * internally by a USB driver (autoresume) but rather coming from the user
- * (via sysfs), the PM core (system resume), or the device itself (remote
- * wakeup). @udev's usage counter is unaffected.
- *
- * The caller must hold @udev's device lock.
- */
-int usb_external_resume_device(struct usb_device *udev, pm_message_t msg)
+/* Internal routine to check whether we may autosuspend a device. */
+static int autosuspend_check(struct usb_device *udev)
{
- int status;
+ int i;
+ struct usb_interface *intf;
+ unsigned long suspend_time, j;
- usb_pm_lock(udev);
- status = usb_resume_both(udev, msg);
- udev->last_busy = jiffies;
- usb_pm_unlock(udev);
- if (status == 0)
- do_unbind_rebind(udev, DO_REBIND);
+ /* Fail if autosuspend is disabled, or any interfaces are in use, or
+ * any interface drivers require remote wakeup but it isn't available.
+ */
+ udev->do_remote_wakeup = device_may_wakeup(&udev->dev);
+ if (udev->actconfig) {
+ for (i = 0; i < udev->actconfig->desc.bNumInterfaces; i++) {
+ intf = udev->actconfig->interface[i];
- /* Now that the device is awake, we can start trying to autosuspend
- * it again. */
- if (status == 0)
- usb_try_autosuspend_device(udev);
- return status;
+ /* We don't need to check interfaces that are
+ * disabled for runtime PM. Either they are unbound
+ * or else their drivers don't support autosuspend
+ * and so they are permanently active.
+ */
+ if (intf->dev.power.disable_depth)
+ continue;
+ if (atomic_read(&intf->dev.power.usage_count) > 0)
+ return -EBUSY;
+ if (intf->needs_remote_wakeup &&
+ !udev->do_remote_wakeup) {
+ dev_dbg(&udev->dev, "remote wakeup needed "
+ "for autosuspend\n");
+ return -EOPNOTSUPP;
+ }
+
+ /* Don't allow autosuspend if the device will need
+ * a reset-resume and any of its interface drivers
+ * doesn't include support or needs remote wakeup.
+ */
+ if (udev->quirks & USB_QUIRK_RESET_RESUME) {
+ struct usb_driver *driver;
+
+ driver = to_usb_driver(intf->dev.driver);
+ if (!driver->reset_resume ||
+ intf->needs_remote_wakeup)
+ return -EOPNOTSUPP;
+ }
+ }
+ }
+
+ /* If everything is okay but the device hasn't been idle for long
+ * enough, queue a delayed autosuspend request.
+ */
+ j = ACCESS_ONCE(jiffies);
+ suspend_time = udev->last_busy + udev->autosuspend_delay;
+ if (time_before(j, suspend_time)) {
+ pm_schedule_suspend(&udev->dev, jiffies_to_msecs(
+ round_jiffies_up_relative(suspend_time - j)));
+ return -EAGAIN;
+ }
+ return 0;
}
-int usb_suspend(struct device *dev, pm_message_t msg)
+static int usb_runtime_suspend(struct device *dev)
{
- struct usb_device *udev;
-
- udev = to_usb_device(dev);
+ int status = 0;
- /* If udev is already suspended, we can skip this suspend and
- * we should also skip the upcoming system resume. High-speed
- * root hubs are an exception; they need to resume whenever the
- * system wakes up in order for USB-PERSIST port handover to work
- * properly.
+ /* A USB device can be suspended if it passes the various autosuspend
+ * checks. Runtime suspend for a USB device means suspending all the
+ * interfaces and then the device itself.
*/
- if (udev->state == USB_STATE_SUSPENDED) {
- if (udev->parent || udev->speed != USB_SPEED_HIGH)
- udev->skip_sys_resume = 1;
- return 0;
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
+
+ if (autosuspend_check(udev) != 0)
+ return -EAGAIN;
+
+ status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
+
+ /* If an interface fails the suspend, adjust the last_busy
+ * time so that we don't get another suspend attempt right
+ * away.
+ */
+ if (status) {
+ udev->last_busy = jiffies +
+ (udev->autosuspend_delay == 0 ?
+ HZ/2 : 0);
+ }
+
+ /* Prevent the parent from suspending immediately after */
+ else if (udev->parent) {
+ udev->parent->last_busy = jiffies;
+ }
}
- udev->skip_sys_resume = 0;
- return usb_external_suspend_device(udev, msg);
+ /* Runtime suspend for a USB interface doesn't mean anything. */
+ return status;
}
-int usb_resume(struct device *dev, pm_message_t msg)
+static int usb_runtime_resume(struct device *dev)
{
- struct usb_device *udev;
- int status;
+ /* Runtime resume for a USB device means resuming both the device
+ * and all its interfaces.
+ */
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
+ int status;
- udev = to_usb_device(dev);
+ status = usb_resume_both(udev, PMSG_AUTO_RESUME);
+ udev->last_busy = jiffies;
+ return status;
+ }
- /* If udev->skip_sys_resume is set then udev was already suspended
- * when the system sleep started, so we don't want to resume it
- * during this system wakeup.
- */
- if (udev->skip_sys_resume)
- return 0;
- status = usb_external_resume_device(udev, msg);
+ /* Runtime resume for a USB interface doesn't mean anything. */
+ return 0;
+}
- /* Avoid PM error messages for devices disconnected while suspended
- * as we'll display regular disconnect messages just a bit later.
+static int usb_runtime_idle(struct device *dev)
+{
+ /* An idle USB device can be suspended if it passes the various
+ * autosuspend checks. An idle interface can be suspended at
+ * any time.
*/
- if (status == -ENODEV)
- return 0;
- return status;
+ if (is_usb_device(dev)) {
+ struct usb_device *udev = to_usb_device(dev);
+
+ if (autosuspend_check(udev) != 0)
+ return 0;
+ }
+
+ pm_runtime_suspend(dev);
+ return 0;
}
-#endif /* CONFIG_PM */
+static struct dev_pm_ops usb_bus_pm_ops = {
+ .runtime_suspend = usb_runtime_suspend,
+ .runtime_resume = usb_runtime_resume,
+ .runtime_idle = usb_runtime_idle,
+};
+
+#else
+
+#define usb_bus_pm_ops (*(struct dev_pm_ops *) NULL)
+
+#endif /* CONFIG_USB_SUSPEND */
struct bus_type usb_bus_type = {
.name = "usb",
.match = usb_device_match,
.uevent = usb_uevent,
+ .pm = &usb_bus_pm_ops,
};
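The bus now routes runtime-PM requests through dev_pm_ops instead of the old private workqueue. A hedged sketch of the same wiring for a hypothetical bus (the callbacks here are stubs, not USB's):

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int sketch_runtime_suspend(struct device *dev) { return 0; }
static int sketch_runtime_resume(struct device *dev)  { return 0; }

static int sketch_runtime_idle(struct device *dev)
{
	pm_runtime_suspend(dev);	/* no extra policy: suspend when idle */
	return 0;
}

static const struct dev_pm_ops sketch_bus_pm_ops = {
	.runtime_suspend = sketch_runtime_suspend,
	.runtime_resume  = sketch_runtime_resume,
	.runtime_idle    = sketch_runtime_idle,
};

struct bus_type sketch_bus_type = {
	.name	= "sketch",
	.pm	= &sketch_bus_pm_ops,
};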
diff --git a/drivers/usb/core/file.c b/drivers/usb/core/file.c
index bfc6c2eea64..c3536f151f0 100644
--- a/drivers/usb/core/file.c
+++ b/drivers/usb/core/file.c
@@ -34,7 +34,6 @@ static int usb_open(struct inode * inode, struct file * file)
int err = -ENODEV;
const struct file_operations *old_fops, *new_fops = NULL;
- lock_kernel();
down_read(&minor_rwsem);
c = usb_minors[minor];
@@ -53,7 +52,6 @@ static int usb_open(struct inode * inode, struct file * file)
fops_put(old_fops);
done:
up_read(&minor_rwsem);
- unlock_kernel();
return err;
}
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 80995ef0868..2f8cedda800 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -39,6 +39,7 @@
#include <linux/platform_device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
+#include <linux/pm_runtime.h>
#include <linux/usb.h>
@@ -141,7 +142,7 @@ static const u8 usb3_rh_dev_descriptor[18] = {
0x09, /* __u8 bMaxPacketSize0; 2^9 = 512 Bytes */
0x6b, 0x1d, /* __le16 idVendor; Linux Foundation */
- 0x02, 0x00, /* __le16 idProduct; device 0x0002 */
+ 0x03, 0x00, /* __le16 idProduct; device 0x0003 */
KERNEL_VER, KERNEL_REL, /* __le16 bcdDevice */
0x03, /* __u8 iManufacturer; */
@@ -1670,11 +1671,16 @@ int usb_hcd_alloc_bandwidth(struct usb_device *udev,
}
}
for (i = 0; i < num_intfs; ++i) {
+ struct usb_host_interface *first_alt;
+ int iface_num;
+
+ first_alt = &new_config->intf_cache[i]->altsetting[0];
+ iface_num = first_alt->desc.bInterfaceNumber;
/* Set up endpoints for alternate interface setting 0 */
- alt = usb_find_alt_setting(new_config, i, 0);
+ alt = usb_find_alt_setting(new_config, iface_num, 0);
if (!alt)
/* No alt setting 0? Pick the first setting. */
- alt = &new_config->intf_cache[i]->altsetting[0];
+ alt = first_alt;
for (j = 0; j < alt->desc.bNumEndpoints; j++) {
ret = hcd->driver->add_endpoint(hcd, udev, &alt->endpoint[j]);
@@ -1853,6 +1859,10 @@ int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg)
return status;
}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_SUSPEND
+
/* Workqueue routine for root-hub remote wakeup */
static void hcd_resume_work(struct work_struct *work)
{
@@ -1860,8 +1870,7 @@ static void hcd_resume_work(struct work_struct *work)
struct usb_device *udev = hcd->self.root_hub;
usb_lock_device(udev);
- usb_mark_last_busy(udev);
- usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
+ usb_remote_wakeup(udev);
usb_unlock_device(udev);
}
@@ -1880,12 +1889,12 @@ void usb_hcd_resume_root_hub (struct usb_hcd *hcd)
spin_lock_irqsave (&hcd_root_hub_lock, flags);
if (hcd->rh_registered)
- queue_work(ksuspend_usb_wq, &hcd->wakeup_work);
+ queue_work(pm_wq, &hcd->wakeup_work);
spin_unlock_irqrestore (&hcd_root_hub_lock, flags);
}
EXPORT_SYMBOL_GPL(usb_hcd_resume_root_hub);
-#endif
+#endif /* CONFIG_USB_SUSPEND */
/*-------------------------------------------------------------------------*/
@@ -2030,7 +2039,7 @@ struct usb_hcd *usb_create_hcd (const struct hc_driver *driver,
init_timer(&hcd->rh_timer);
hcd->rh_timer.function = rh_timer_func;
hcd->rh_timer.data = (unsigned long) hcd;
-#ifdef CONFIG_PM
+#ifdef CONFIG_USB_SUSPEND
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
mutex_init(&hcd->bandwidth_mutex);
@@ -2230,7 +2239,7 @@ void usb_remove_hcd(struct usb_hcd *hcd)
hcd->rh_registered = 0;
spin_unlock_irq (&hcd_root_hub_lock);
-#ifdef CONFIG_PM
+#ifdef CONFIG_USB_SUSPEND
cancel_work_sync(&hcd->wakeup_work);
#endif
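The usb_hcd_alloc_bandwidth() hunk above stops assuming that the interface array index equals bInterfaceNumber. An illustrative sketch of the corrected lookup (assuming a valid usb_host_config; not taken verbatim from the patch):

#include <linux/usb.h>

static struct usb_host_interface *sketch_pick_alt0(
		struct usb_host_config *config, int i)
{
	struct usb_host_interface *first_alt =
			&config->intf_cache[i]->altsetting[0];
	int iface_num = first_alt->desc.bInterfaceNumber;
	struct usb_host_interface *alt;

	/* Look up by interface number, not by array position */
	alt = usb_find_alt_setting(config, iface_num, 0);
	return alt ? alt : first_alt;	/* no altsetting 0: take the first */
}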
diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
index bbe2b924aae..a3cdb09734a 100644
--- a/drivers/usb/core/hcd.h
+++ b/drivers/usb/core/hcd.h
@@ -80,7 +80,7 @@ struct usb_hcd {
struct timer_list rh_timer; /* drives root-hub polling */
struct urb *status_urb; /* the current status urb */
-#ifdef CONFIG_PM
+#ifdef CONFIG_USB_SUSPEND
struct work_struct wakeup_work; /* for remote wakeup */
#endif
@@ -248,7 +248,7 @@ struct hc_driver {
/* xHCI specific functions */
/* Called by usb_alloc_dev to alloc HC device structures */
int (*alloc_dev)(struct usb_hcd *, struct usb_device *);
- /* Called by usb_release_dev to free HC device structures */
+ /* Called by usb_disconnect to free HC device structures */
void (*free_dev)(struct usb_hcd *, struct usb_device *);
/* Bandwidth computation functions */
@@ -286,6 +286,7 @@ struct hc_driver {
*/
int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
+ int (*reset_device)(struct usb_hcd *, struct usb_device *);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);
@@ -463,16 +464,20 @@ extern int usb_find_interface_driver(struct usb_device *dev,
#define usb_endpoint_out(ep_dir) (!((ep_dir) & USB_DIR_IN))
#ifdef CONFIG_PM
-extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
extern void usb_root_hub_lost_power(struct usb_device *rhdev);
extern int hcd_bus_suspend(struct usb_device *rhdev, pm_message_t msg);
extern int hcd_bus_resume(struct usb_device *rhdev, pm_message_t msg);
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_USB_SUSPEND
+extern void usb_hcd_resume_root_hub(struct usb_hcd *hcd);
#else
static inline void usb_hcd_resume_root_hub(struct usb_hcd *hcd)
{
return;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_USB_SUSPEND */
+
/*
* USB device fs stuff
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 20ecb4cec8d..0940ccd6f4f 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -22,6 +22,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
+#include <linux/pm_runtime.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -71,7 +72,6 @@ struct usb_hub {
unsigned mA_per_port; /* current for each child */
- unsigned init_done:1;
unsigned limited_power:1;
unsigned quiescing:1;
unsigned disconnected:1;
@@ -820,7 +820,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
}
init3:
hub->quiescing = 0;
- hub->init_done = 1;
status = usb_submit_urb(hub->urb, GFP_NOIO);
if (status < 0)
@@ -861,11 +860,6 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
int i;
cancel_delayed_work_sync(&hub->init_work);
- if (!hub->init_done) {
- hub->init_done = 1;
- usb_autopm_put_interface_no_suspend(
- to_usb_interface(hub->intfdev));
- }
/* khubd and related activity won't re-trigger */
hub->quiescing = 1;
@@ -1224,6 +1218,9 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
desc = intf->cur_altsetting;
hdev = interface_to_usbdev(intf);
+ /* Hubs have proper suspend/resume support */
+ usb_enable_autosuspend(hdev);
+
if (hdev->level == MAX_TOPO_LEVEL) {
dev_err(&intf->dev,
"Unsupported bus topology: hub nested too deep\n");
@@ -1402,10 +1399,8 @@ static void recursively_mark_NOTATTACHED(struct usb_device *udev)
if (udev->children[i])
recursively_mark_NOTATTACHED(udev->children[i]);
}
- if (udev->state == USB_STATE_SUSPENDED) {
- udev->discon_suspended = 1;
+ if (udev->state == USB_STATE_SUSPENDED)
udev->active_duration -= jiffies;
- }
udev->state = USB_STATE_NOTATTACHED;
}
@@ -1448,11 +1443,11 @@ void usb_set_device_state(struct usb_device *udev,
|| new_state == USB_STATE_SUSPENDED)
; /* No change to wakeup settings */
else if (new_state == USB_STATE_CONFIGURED)
- device_init_wakeup(&udev->dev,
+ device_set_wakeup_capable(&udev->dev,
(udev->actconfig->desc.bmAttributes
& USB_CONFIG_ATT_WAKEUP));
else
- device_init_wakeup(&udev->dev, 0);
+ device_set_wakeup_capable(&udev->dev, 0);
}
if (udev->state == USB_STATE_SUSPENDED &&
new_state != USB_STATE_SUSPENDED)
@@ -1529,31 +1524,15 @@ static void update_address(struct usb_device *udev, int devnum)
udev->devnum = devnum;
}
-#ifdef CONFIG_USB_SUSPEND
-
-static void usb_stop_pm(struct usb_device *udev)
+static void hub_free_dev(struct usb_device *udev)
{
- /* Synchronize with the ksuspend thread to prevent any more
- * autosuspend requests from being submitted, and decrement
- * the parent's count of unsuspended children.
- */
- usb_pm_lock(udev);
- if (udev->parent && !udev->discon_suspended)
- usb_autosuspend_device(udev->parent);
- usb_pm_unlock(udev);
+ struct usb_hcd *hcd = bus_to_hcd(udev->bus);
- /* Stop any autosuspend or autoresume requests already submitted */
- cancel_delayed_work_sync(&udev->autosuspend);
- cancel_work_sync(&udev->autoresume);
+ /* Root hubs aren't real devices, so don't free HCD resources */
+ if (hcd->driver->free_dev && udev->parent)
+ hcd->driver->free_dev(hcd, udev);
}
-#else
-
-static inline void usb_stop_pm(struct usb_device *udev)
-{ }
-
-#endif
-
/**
* usb_disconnect - disconnect a device (usbcore-internal)
* @pdev: pointer to device being disconnected
@@ -1622,7 +1601,7 @@ void usb_disconnect(struct usb_device **pdev)
*pdev = NULL;
spin_unlock_irq(&device_state_lock);
- usb_stop_pm(udev);
+ hub_free_dev(udev);
put_device(&udev->dev);
}
@@ -1799,9 +1778,18 @@ int usb_new_device(struct usb_device *udev)
{
int err;
- /* Increment the parent's count of unsuspended children */
- if (udev->parent)
- usb_autoresume_device(udev->parent);
+ if (udev->parent) {
+		/* Initialize non-root-hub device wakeup to disabled;
+		 * device (un)configuration controls wakeup capable;
+		 * sysfs power/wakeup controls wakeup enabled/disabled.
+		 */
+ device_init_wakeup(&udev->dev, 0);
+ device_set_wakeup_enable(&udev->dev, 1);
+ }
+
+ /* Tell the runtime-PM framework the device is active */
+ pm_runtime_set_active(&udev->dev);
+ pm_runtime_enable(&udev->dev);
usb_detect_quirks(udev);
err = usb_enumerate_device(udev); /* Read descriptors */
@@ -1833,7 +1821,8 @@ int usb_new_device(struct usb_device *udev)
fail:
usb_set_device_state(udev, USB_STATE_NOTATTACHED);
- usb_stop_pm(udev);
+ pm_runtime_disable(&udev->dev);
+ pm_runtime_set_suspended(&udev->dev);
return err;
}
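usb_new_device() now tells the runtime-PM core the device is active, and undoes that on failure. The bring-up/tear-down pairing, as a sketch on a hypothetical device:

#include <linux/pm_runtime.h>

static void sketch_device_added(struct device *dev)
{
	pm_runtime_set_active(dev);	/* status becomes RPM_ACTIVE */
	pm_runtime_enable(dev);		/* runtime callbacks may run now */
}

static void sketch_device_add_failed(struct device *dev)
{
	pm_runtime_disable(dev);	/* stop callbacks first */
	pm_runtime_set_suspended(dev);	/* then mark it suspended again */
}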
@@ -1982,7 +1971,7 @@ static int hub_port_wait_reset(struct usb_hub *hub, int port1,
if (!(portstatus & USB_PORT_STAT_RESET) &&
(portstatus & USB_PORT_STAT_ENABLE)) {
if (hub_is_wusb(hub))
- udev->speed = USB_SPEED_VARIABLE;
+ udev->speed = USB_SPEED_WIRELESS;
else if (portstatus & USB_PORT_STAT_HIGH_SPEED)
udev->speed = USB_SPEED_HIGH;
else if (portstatus & USB_PORT_STAT_LOW_SPEED)
@@ -2008,7 +1997,9 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
struct usb_device *udev, unsigned int delay)
{
int i, status;
+ struct usb_hcd *hcd;
+ hcd = bus_to_hcd(udev->bus);
/* Block EHCI CF initialization during the port reset.
* Some companion controllers don't like it when they mix.
*/
@@ -2036,6 +2027,14 @@ static int hub_port_reset(struct usb_hub *hub, int port1,
/* TRSTRCY = 10 ms; plus some extra */
msleep(10 + 40);
update_address(udev, 0);
+ if (hcd->driver->reset_device) {
+ status = hcd->driver->reset_device(hcd, udev);
+ if (status < 0) {
+ dev_err(&udev->dev, "Cannot reset "
+ "HCD device state\n");
+ break;
+ }
+ }
/* FALL THROUGH */
case -ENOTCONN:
case -ENODEV:
@@ -2381,14 +2380,17 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
}
/* caller has locked udev */
-static int remote_wakeup(struct usb_device *udev)
+int usb_remote_wakeup(struct usb_device *udev)
{
int status = 0;
if (udev->state == USB_STATE_SUSPENDED) {
dev_dbg(&udev->dev, "usb %sresume\n", "wakeup-");
- usb_mark_last_busy(udev);
- status = usb_external_resume_device(udev, PMSG_REMOTE_RESUME);
+ status = usb_autoresume_device(udev);
+ if (status == 0) {
+ /* Let the drivers do their thing, then... */
+ usb_autosuspend_device(udev);
+ }
}
return status;
}
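usb_remote_wakeup() resumes the device, lets interface drivers run, and then drops its temporary reference so autosuspend can kick back in. The same get/put discipline from an interface driver's side, as a hypothetical sketch:

#include <linux/usb.h>

static int sketch_poke_device(struct usb_interface *intf)
{
	int status = usb_autopm_get_interface(intf);	/* resumes if needed */

	if (status == 0) {
		/* ... perform I/O while the usage count holds it awake ... */
		usb_autopm_put_interface(intf);	/* may re-arm autosuspend */
	}
	return status;
}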
@@ -2425,11 +2427,6 @@ int usb_port_resume(struct usb_device *udev, pm_message_t msg)
return status;
}
-static inline int remote_wakeup(struct usb_device *udev)
-{
- return 0;
-}
-
#endif
static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
@@ -2496,11 +2493,6 @@ EXPORT_SYMBOL_GPL(usb_root_hub_lost_power);
#else /* CONFIG_PM */
-static inline int remote_wakeup(struct usb_device *udev)
-{
- return 0;
-}
-
#define hub_suspend NULL
#define hub_resume NULL
#define hub_reset_resume NULL
@@ -2645,14 +2637,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
mutex_lock(&usb_address0_mutex);
- if ((hcd->driver->flags & HCD_USB3) && udev->config) {
- /* FIXME this will need special handling by the xHCI driver. */
- dev_dbg(&udev->dev,
- "xHCI reset of configured device "
- "not supported yet.\n");
- retval = -EINVAL;
- goto fail;
- } else if (!udev->config && oldspeed == USB_SPEED_SUPER) {
+ if (!udev->config && oldspeed == USB_SPEED_SUPER) {
/* Don't reset USB 3.0 devices during an initial setup */
usb_set_device_state(udev, USB_STATE_DEFAULT);
} else {
@@ -2678,7 +2663,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
*/
switch (udev->speed) {
case USB_SPEED_SUPER:
- case USB_SPEED_VARIABLE: /* fixed at 512 */
+ case USB_SPEED_WIRELESS: /* fixed at 512 */
udev->ep0.desc.wMaxPacketSize = cpu_to_le16(512);
break;
case USB_SPEED_HIGH: /* fixed at 64 */
@@ -2706,7 +2691,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
case USB_SPEED_SUPER:
speed = "super";
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
speed = "variable";
type = "Wireless ";
break;
@@ -3006,7 +2991,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
/* For a suspended device, treat this as a
* remote wakeup event.
*/
- status = remote_wakeup(udev);
+ status = usb_remote_wakeup(udev);
#endif
} else {
@@ -3192,6 +3177,7 @@ loop_disable:
loop:
usb_ep0_reinit(udev);
release_address(udev);
+ hub_free_dev(udev);
usb_put_dev(udev);
if ((status == -ENOTCONN) || (status == -ENOTSUPP))
break;
@@ -3259,7 +3245,7 @@ static void hub_events(void)
* disconnected while waiting for the lock to succeed. */
usb_lock_device(hdev);
if (unlikely(hub->disconnected))
- goto loop2;
+ goto loop_disconnected;
/* If the hub has died, clean up after it */
if (hdev->state == USB_STATE_NOTATTACHED) {
@@ -3352,7 +3338,7 @@ static void hub_events(void)
msleep(10);
usb_lock_device(udev);
- ret = remote_wakeup(hdev->
+ ret = usb_remote_wakeup(hdev->
children[i-1]);
usb_unlock_device(udev);
if (ret < 0)
@@ -3419,7 +3405,7 @@ static void hub_events(void)
* kick_khubd() and allow autosuspend.
*/
usb_autopm_put_interface(intf);
- loop2:
+ loop_disconnected:
usb_unlock_device(hdev);
kref_put(&hub->kref, hub_release);
@@ -3446,7 +3432,7 @@ static int hub_thread(void *__unused)
return 0;
}
-static struct usb_device_id hub_id_table [] = {
+static const struct usb_device_id hub_id_table[] = {
{ .match_flags = USB_DEVICE_ID_MATCH_DEV_CLASS,
.bDeviceClass = USB_CLASS_HUB},
{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index df73574a9cc..cd220277c6c 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1316,7 +1316,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate)
alt = usb_altnum_to_altsetting(iface, alternate);
if (!alt) {
- dev_warn(&dev->dev, "selecting invalid altsetting %d",
+ dev_warn(&dev->dev, "selecting invalid altsetting %d\n",
alternate);
return -EINVAL;
}
@@ -1471,7 +1471,7 @@ int usb_reset_configuration(struct usb_device *dev)
/* If not, reinstate the old alternate settings */
if (retval < 0) {
reset_old_alts:
- for (; i >= 0; i--) {
+ for (i--; i >= 0; i--) {
struct usb_interface *intf = config->interface[i];
struct usb_host_interface *alt;
@@ -1843,7 +1843,6 @@ free_interfaces:
intf->dev.dma_mask = dev->dev.dma_mask;
INIT_WORK(&intf->reset_ws, __usb_queue_reset_device);
device_initialize(&intf->dev);
- mark_quiesced(intf);
dev_set_name(&intf->dev, "%d-%s:%d.%d",
dev->bus->busnum, dev->devpath,
configuration, alt->desc.bInterfaceNumber);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index ab93918d920..f073c5cb4e7 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -103,10 +103,19 @@ void usb_detect_quirks(struct usb_device *udev)
dev_dbg(&udev->dev, "USB quirks for this device: %x\n",
udev->quirks);
- /* By default, disable autosuspend for all non-hubs */
#ifdef CONFIG_USB_SUSPEND
- if (udev->descriptor.bDeviceClass != USB_CLASS_HUB)
- udev->autosuspend_disabled = 1;
+
+ /* By default, disable autosuspend for all devices. The hub driver
+ * will enable it for hubs.
+ */
+ usb_disable_autosuspend(udev);
+
+ /* Autosuspend can also be disabled if the initial autosuspend_delay
+ * is negative.
+ */
+ if (udev->autosuspend_delay < 0)
+ usb_autoresume_device(udev);
+
#endif
/* For the present, all devices default to USB-PERSIST enabled */
@@ -120,6 +129,7 @@ void usb_detect_quirks(struct usb_device *udev)
* for all devices. It will affect things like hub resets
* and EMF-related port disables.
*/
- udev->persist_enabled = 1;
+ if (!(udev->quirks & USB_QUIRK_RESET_MORPHS))
+ udev->persist_enabled = 1;
#endif /* CONFIG_PM */
}
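With autosuspend disabled by default in usb_detect_quirks(), a driver that knows its hardware suspends cleanly has to opt back in explicitly. A hypothetical probe routine doing so (sketch only):

#include <linux/usb.h>

static int sketch_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	/* This hardware is known to handle suspend/resume correctly */
	usb_enable_autosuspend(udev);
	return 0;
}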
diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
index 5f3908f6e2d..43c002e3a9a 100644
--- a/drivers/usb/core/sysfs.c
+++ b/drivers/usb/core/sysfs.c
@@ -115,7 +115,7 @@ show_speed(struct device *dev, struct device_attribute *attr, char *buf)
case USB_SPEED_HIGH:
speed = "480";
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
speed = "480";
break;
case USB_SPEED_SUPER:
@@ -191,6 +191,36 @@ show_quirks(struct device *dev, struct device_attribute *attr, char *buf)
static DEVICE_ATTR(quirks, S_IRUGO, show_quirks, NULL);
static ssize_t
+show_avoid_reset_quirk(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct usb_device *udev;
+
+ udev = to_usb_device(dev);
+ return sprintf(buf, "%d\n", !!(udev->quirks & USB_QUIRK_RESET_MORPHS));
+}
+
+static ssize_t
+set_avoid_reset_quirk(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct usb_device *udev = to_usb_device(dev);
+ int config;
+
+ if (sscanf(buf, "%d", &config) != 1 || config < 0 || config > 1)
+ return -EINVAL;
+ usb_lock_device(udev);
+ if (config)
+ udev->quirks |= USB_QUIRK_RESET_MORPHS;
+ else
+ udev->quirks &= ~USB_QUIRK_RESET_MORPHS;
+ usb_unlock_device(udev);
+ return count;
+}
+
+static DEVICE_ATTR(avoid_reset_quirk, S_IRUGO | S_IWUSR,
+ show_avoid_reset_quirk, set_avoid_reset_quirk);
+
+static ssize_t
show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
{
struct usb_device *udev;
@@ -226,9 +256,10 @@ set_persist(struct device *dev, struct device_attribute *attr,
if (sscanf(buf, "%d", &value) != 1)
return -EINVAL;
- usb_pm_lock(udev);
+
+ usb_lock_device(udev);
udev->persist_enabled = !!value;
- usb_pm_unlock(udev);
+ usb_unlock_device(udev);
return count;
}
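With pm_mutex gone, the sysfs store routines serialize against PM activity with the plain device lock. The pattern, reduced to a hypothetical boolean attribute:

#include <linux/device.h>
#include <linux/usb.h>

static ssize_t sketch_store_flag(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct usb_device *udev = to_usb_device(dev);
	int value;

	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;

	usb_lock_device(udev);		/* replaces the old usb_pm_lock() */
	udev->persist_enabled = !!value;
	usb_unlock_device(udev);
	return count;
}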
@@ -315,20 +346,34 @@ set_autosuspend(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_device *udev = to_usb_device(dev);
- int value;
+ int value, old_delay;
+ int rc;
if (sscanf(buf, "%d", &value) != 1 || value >= INT_MAX/HZ ||
value <= - INT_MAX/HZ)
return -EINVAL;
value *= HZ;
+ usb_lock_device(udev);
+ old_delay = udev->autosuspend_delay;
udev->autosuspend_delay = value;
- if (value >= 0)
- usb_try_autosuspend_device(udev);
- else {
- if (usb_autoresume_device(udev) == 0)
+
+ if (old_delay < 0) { /* Autosuspend wasn't allowed */
+ if (value >= 0)
usb_autosuspend_device(udev);
+ } else { /* Autosuspend was allowed */
+ if (value < 0) {
+ rc = usb_autoresume_device(udev);
+ if (rc < 0) {
+ count = rc;
+ udev->autosuspend_delay = old_delay;
+ }
+ } else {
+ usb_try_autosuspend_device(udev);
+ }
}
+
+ usb_unlock_device(udev);
return count;
}
@@ -356,34 +401,25 @@ set_level(struct device *dev, struct device_attribute *attr,
struct usb_device *udev = to_usb_device(dev);
int len = count;
char *cp;
- int rc = 0;
- int old_autosuspend_disabled;
+ int rc;
cp = memchr(buf, '\n', count);
if (cp)
len = cp - buf;
usb_lock_device(udev);
- old_autosuspend_disabled = udev->autosuspend_disabled;
- /* Setting the flags without calling usb_pm_lock is a subject to
- * races, but who cares...
- */
if (len == sizeof on_string - 1 &&
- strncmp(buf, on_string, len) == 0) {
- udev->autosuspend_disabled = 1;
- rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
+ strncmp(buf, on_string, len) == 0)
+ rc = usb_disable_autosuspend(udev);
- } else if (len == sizeof auto_string - 1 &&
- strncmp(buf, auto_string, len) == 0) {
- udev->autosuspend_disabled = 0;
- rc = usb_external_resume_device(udev, PMSG_USER_RESUME);
+ else if (len == sizeof auto_string - 1 &&
+ strncmp(buf, auto_string, len) == 0)
+ rc = usb_enable_autosuspend(udev);
- } else
+ else
rc = -EINVAL;
- if (rc)
- udev->autosuspend_disabled = old_autosuspend_disabled;
usb_unlock_device(udev);
return (rc < 0 ? rc : count);
}
@@ -558,6 +594,7 @@ static struct attribute *dev_attrs[] = {
&dev_attr_version.attr,
&dev_attr_maxchild.attr,
&dev_attr_quirks.attr,
+ &dev_attr_avoid_reset_quirk.attr,
&dev_attr_authorized.attr,
&dev_attr_remove.attr,
NULL,
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index e7cae133469..27080561a1c 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -387,6 +387,13 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
{
unsigned int orig_flags = urb->transfer_flags;
unsigned int allowed;
+ static int pipetypes[4] = {
+ PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
+ };
+
+ /* Check that the pipe's type matches the endpoint's type */
+ if (usb_pipetype(urb->pipe) != pipetypes[xfertype])
+ return -EPIPE; /* The most suitable error code :-) */
/* enforce simple/standard policy */
allowed = (URB_NO_TRANSFER_DMA_MAP | URB_NO_SETUP_DMA_MAP |
@@ -430,7 +437,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
case USB_ENDPOINT_XFER_INT:
/* too small? */
switch (dev->speed) {
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
if (urb->interval < 6)
return -EINVAL;
break;
@@ -446,7 +453,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
if (urb->interval > (1 << 15))
return -EINVAL;
max = 1 << 15;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
if (urb->interval > 16)
return -EINVAL;
break;
@@ -473,7 +480,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags)
default:
return -EINVAL;
}
- if (dev->speed != USB_SPEED_VARIABLE) {
+ if (dev->speed != USB_SPEED_WIRELESS) {
/* Round down to a power of 2, no more than max */
urb->interval = min(max, 1 << ilog2(urb->interval));
}
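The new check near the top of usb_submit_urb() rejects URBs whose pipe type disagrees with the endpoint's transfer type. A hypothetical driver fragment showing the kind of submission that passes it, a bulk pipe on a bulk endpoint (sketch, not from the patch):

#include <linux/usb.h>
#include <linux/slab.h>

static int sketch_queue_bulk_out(struct usb_device *udev, unsigned int epnum,
		void *buf, int len, usb_complete_t done, void *ctx)
{
	struct urb *urb = usb_alloc_urb(0, GFP_KERNEL);
	int ret;

	if (!urb)
		return -ENOMEM;
	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, epnum),
			buf, len, done, ctx);
	ret = usb_submit_urb(urb, GFP_KERNEL);	/* -EPIPE on a type mismatch */
	if (ret)
		usb_free_urb(urb);
	return ret;
}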
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index 0daff0d968b..1297e9b16a5 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -49,9 +49,6 @@ const char *usbcore_name = "usbcore";
static int nousb; /* Disable USB when built into kernel image */
-/* Workqueue for autosuspend and for remote wakeup of root hubs */
-struct workqueue_struct *ksuspend_usb_wq;
-
#ifdef CONFIG_USB_SUSPEND
static int usb_autosuspend_delay = 2; /* Default delay value,
* in seconds */
@@ -228,9 +225,6 @@ static void usb_release_dev(struct device *dev)
hcd = bus_to_hcd(udev->bus);
usb_destroy_configuration(udev);
- /* Root hubs aren't real devices, so don't free HCD resources */
- if (hcd->driver->free_dev && udev->parent)
- hcd->driver->free_dev(hcd, udev);
usb_put_hcd(hcd);
kfree(udev->product);
kfree(udev->manufacturer);
@@ -264,23 +258,6 @@ static int usb_dev_uevent(struct device *dev, struct kobj_uevent_env *env)
#ifdef CONFIG_PM
-static int ksuspend_usb_init(void)
-{
- /* This workqueue is supposed to be both freezable and
- * singlethreaded. Its job doesn't justify running on more
- * than one CPU.
- */
- ksuspend_usb_wq = create_freezeable_workqueue("ksuspend_usbd");
- if (!ksuspend_usb_wq)
- return -ENOMEM;
- return 0;
-}
-
-static void ksuspend_usb_cleanup(void)
-{
- destroy_workqueue(ksuspend_usb_wq);
-}
-
/* USB device Power-Management thunks.
* There's no need to distinguish here between quiescing a USB device
* and powering it down; the generic_suspend() routine takes care of
@@ -296,7 +273,7 @@ static int usb_dev_prepare(struct device *dev)
static void usb_dev_complete(struct device *dev)
{
/* Currently used only for rebinding interfaces */
- usb_resume(dev, PMSG_RESUME); /* Message event is meaningless */
+ usb_resume(dev, PMSG_ON); /* FIXME: change to PMSG_COMPLETE */
}
static int usb_dev_suspend(struct device *dev)
@@ -342,9 +319,7 @@ static const struct dev_pm_ops usb_device_pm_ops = {
#else
-#define ksuspend_usb_init() 0
-#define ksuspend_usb_cleanup() do {} while (0)
-#define usb_device_pm_ops (*(struct dev_pm_ops *)0)
+#define usb_device_pm_ops (*(struct dev_pm_ops *) NULL)
#endif /* CONFIG_PM */
@@ -472,9 +447,6 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
INIT_LIST_HEAD(&dev->filelist);
#ifdef CONFIG_PM
- mutex_init(&dev->pm_mutex);
- INIT_DELAYED_WORK(&dev->autosuspend, usb_autosuspend_work);
- INIT_WORK(&dev->autoresume, usb_autoresume_work);
dev->autosuspend_delay = usb_autosuspend_delay * HZ;
dev->connect_time = jiffies;
dev->active_duration = -jiffies;
@@ -1117,9 +1089,6 @@ static int __init usb_init(void)
if (retval)
goto out;
- retval = ksuspend_usb_init();
- if (retval)
- goto out;
retval = bus_register(&usb_bus_type);
if (retval)
goto bus_register_failed;
@@ -1159,7 +1128,7 @@ major_init_failed:
bus_notifier_failed:
bus_unregister(&usb_bus_type);
bus_register_failed:
- ksuspend_usb_cleanup();
+ usb_debugfs_cleanup();
out:
return retval;
}
@@ -1181,7 +1150,6 @@ static void __exit usb_exit(void)
usb_hub_cleanup();
bus_unregister_notifier(&usb_bus_type, &usb_bus_nb);
bus_unregister(&usb_bus_type);
- ksuspend_usb_cleanup();
usb_debugfs_cleanup();
}
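With the private ksuspend_usbd workqueue gone, deferred PM work rides on the PM core's shared pm_wq (see the queue_work() change in hcd.c above). A minimal sketch with a hypothetical work item:

#include <linux/workqueue.h>
#include <linux/pm_runtime.h>	/* declares pm_wq */

static void sketch_wakeup_fn(struct work_struct *work)
{
	/* remote-wakeup handling runs here in process context */
}

static DECLARE_WORK(sketch_wakeup_work, sketch_wakeup_fn);

static void sketch_kick_wakeup(void)
{
	queue_work(pm_wq, &sketch_wakeup_work);
}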
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 4c36c7f512a..cd882203ad3 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -55,24 +55,8 @@ extern void usb_major_cleanup(void);
extern int usb_suspend(struct device *dev, pm_message_t msg);
extern int usb_resume(struct device *dev, pm_message_t msg);
-extern void usb_autosuspend_work(struct work_struct *work);
-extern void usb_autoresume_work(struct work_struct *work);
extern int usb_port_suspend(struct usb_device *dev, pm_message_t msg);
extern int usb_port_resume(struct usb_device *dev, pm_message_t msg);
-extern int usb_external_suspend_device(struct usb_device *udev,
- pm_message_t msg);
-extern int usb_external_resume_device(struct usb_device *udev,
- pm_message_t msg);
-
-static inline void usb_pm_lock(struct usb_device *udev)
-{
- mutex_lock_nested(&udev->pm_mutex, udev->level);
-}
-
-static inline void usb_pm_unlock(struct usb_device *udev)
-{
- mutex_unlock(&udev->pm_mutex);
-}
#else
@@ -86,9 +70,6 @@ static inline int usb_port_resume(struct usb_device *udev, pm_message_t msg)
return 0;
}
-static inline void usb_pm_lock(struct usb_device *udev) {}
-static inline void usb_pm_unlock(struct usb_device *udev) {}
-
#endif
#ifdef CONFIG_USB_SUSPEND
@@ -96,6 +77,7 @@ static inline void usb_pm_unlock(struct usb_device *udev) {}
extern void usb_autosuspend_device(struct usb_device *udev);
extern void usb_try_autosuspend_device(struct usb_device *udev);
extern int usb_autoresume_device(struct usb_device *udev);
+extern int usb_remote_wakeup(struct usb_device *dev);
#else
@@ -106,9 +88,13 @@ static inline int usb_autoresume_device(struct usb_device *udev)
return 0;
}
+static inline int usb_remote_wakeup(struct usb_device *udev)
+{
+ return 0;
+}
+
#endif
-extern struct workqueue_struct *ksuspend_usb_wq;
extern struct bus_type usb_bus_type;
extern struct device_type usb_device_type;
extern struct device_type usb_if_device_type;
@@ -138,23 +124,6 @@ static inline int is_usb_device_driver(struct device_driver *drv)
for_devices;
}
-/* Interfaces and their "power state" are owned by usbcore */
-
-static inline void mark_active(struct usb_interface *f)
-{
- f->is_active = 1;
-}
-
-static inline void mark_quiesced(struct usb_interface *f)
-{
- f->is_active = 0;
-}
-
-static inline int is_active(const struct usb_interface *f)
-{
- return f->is_active;
-}
-
/* for labeling diagnostics */
extern const char *usbcore_name;
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 2958a1271b2..6e98a369784 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -66,8 +66,6 @@ static struct ehci_dev ehci_dev;
#define USB_DEBUG_DEVNUM 127
-#define DBGP_DATA_TOGGLE 0x8800
-
#ifdef DBGP_DEBUG
#define dbgp_printk printk
static void dbgp_ehci_status(char *str)
@@ -88,11 +86,6 @@ static inline void dbgp_ehci_status(char *str) { }
static inline void dbgp_printk(const char *fmt, ...) { }
#endif
-static inline u32 dbgp_pid_update(u32 x, u32 tok)
-{
- return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
-}
-
static inline u32 dbgp_len_update(u32 x, u32 len)
{
return (x & ~0x0f) | (len & 0x0f);
@@ -136,6 +129,19 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
#define DBGP_MAX_PACKET 8
#define DBGP_TIMEOUT (250 * 1000)
+#define DBGP_LOOPS 1000
+
+static inline u32 dbgp_pid_write_update(u32 x, u32 tok)
+{
+ static int data0 = USB_PID_DATA1;
+ data0 ^= USB_PID_DATA_TOGGLE;
+ return (x & 0xffff0000) | (data0 << 8) | (tok & 0xff);
+}
+
+static inline u32 dbgp_pid_read_update(u32 x, u32 tok)
+{
+ return (x & 0xffff0000) | (USB_PID_DATA0 << 8) | (tok & 0xff);
+}
static int dbgp_wait_until_complete(void)
{
@@ -180,7 +186,7 @@ static int dbgp_wait_until_done(unsigned ctrl)
{
u32 pids, lpid;
int ret;
- int loop = 3;
+ int loop = DBGP_LOOPS;
retry:
writel(ctrl | DBGP_GO, &ehci_debug->control);
@@ -197,6 +203,8 @@ retry:
*/
if (ret == -DBGP_TIMEOUT && !dbgp_not_safe)
dbgp_not_safe = 1;
+ if (ret == -DBGP_ERR_BAD && --loop > 0)
+ goto retry;
return ret;
}
@@ -245,12 +253,20 @@ static inline void dbgp_get_data(void *buf, int size)
bytes[i] = (hi >> (8*(i - 4))) & 0xff;
}
-static int dbgp_out(u32 addr, const char *bytes, int size)
+static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
+ const char *bytes, int size)
{
+ int ret;
+ u32 addr;
u32 pids, ctrl;
+ if (size > DBGP_MAX_PACKET)
+ return -1;
+
+ addr = DBGP_EPADDR(devnum, endpoint);
+
pids = readl(&ehci_debug->pids);
- pids = dbgp_pid_update(pids, USB_PID_OUT);
+ pids = dbgp_pid_write_update(pids, USB_PID_OUT);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
@@ -260,34 +276,7 @@ static int dbgp_out(u32 addr, const char *bytes, int size)
dbgp_set_data(bytes, size);
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
- return dbgp_wait_until_done(ctrl);
-}
-
-static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
- const char *bytes, int size)
-{
- int ret;
- int loops = 5;
- u32 addr;
- if (size > DBGP_MAX_PACKET)
- return -1;
-
- addr = DBGP_EPADDR(devnum, endpoint);
-try_again:
- if (loops--) {
- ret = dbgp_out(addr, bytes, size);
- if (ret == -DBGP_ERR_BAD) {
- int try_loops = 3;
- do {
- /* Emit a dummy packet to re-sync communication
- * with the debug device */
- if (dbgp_out(addr, "12345678", 8) >= 0) {
- udelay(2);
- goto try_again;
- }
- } while (try_loops--);
- }
- }
+ ret = dbgp_wait_until_done(ctrl);
return ret;
}
@@ -304,7 +293,7 @@ static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
addr = DBGP_EPADDR(devnum, endpoint);
pids = readl(&ehci_debug->pids);
- pids = dbgp_pid_update(pids, USB_PID_IN);
+ pids = dbgp_pid_read_update(pids, USB_PID_IN);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
@@ -362,7 +351,6 @@ static int dbgp_control_msg(unsigned devnum, int requesttype,
return dbgp_bulk_read(devnum, 0, data, size);
}
-
/* Find a PCI capability */
static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
{
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index ee411206c69..7460cd797f4 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -812,6 +812,16 @@ config USB_CDC_COMPOSITE
Say "y" to link the driver statically, or "m" to build a
dynamically linked module.
+config USB_G_NOKIA
+ tristate "Nokia composite gadget"
+ depends on PHONET
+ help
+	  The Nokia composite gadget provides support for ACM, OBEX
+	  and Phonet in a single composite gadget driver.
+
+ It's only really useful for N900 hardware. If you're building
+ a kernel for N900, say Y or M here. If unsure, say N.
+
config USB_G_MULTI
tristate "Multifunction Composite Gadget (EXPERIMENTAL)"
depends on BLOCK && NET
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 2e2c047262b..43b51da8d72 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -43,6 +43,7 @@ g_mass_storage-objs := mass_storage.o
g_printer-objs := printer.o
g_cdc-objs := cdc2.o
g_multi-objs := multi.o
+g_nokia-objs := nokia.o
obj-$(CONFIG_USB_ZERO) += g_zero.o
obj-$(CONFIG_USB_AUDIO) += g_audio.o
@@ -55,4 +56,5 @@ obj-$(CONFIG_USB_G_PRINTER) += g_printer.o
obj-$(CONFIG_USB_MIDI_GADGET) += g_midi.o
obj-$(CONFIG_USB_CDC_COMPOSITE) += g_cdc.o
obj-$(CONFIG_USB_G_MULTI) += g_multi.o
+obj-$(CONFIG_USB_G_NOKIA) += g_nokia.o
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 043e04db2a0..12ac9cd32a0 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1656,9 +1656,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
if (!res)
return -ENXIO;
- if (!request_mem_region(res->start,
- res->end - res->start + 1,
- driver_name)) {
+ if (!request_mem_region(res->start, resource_size(res), driver_name)) {
DBG("someone's using UDC memory\n");
return -EBUSY;
}
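The at91_udc changes replace the open-coded "end - start + 1" with resource_size(). The usual request/map/cleanup pairing built on it, as a sketch:

#include <linux/ioport.h>
#include <linux/io.h>

static void __iomem *sketch_map_resource(struct resource *res,
		const char *name)
{
	void __iomem *base;

	if (!request_mem_region(res->start, resource_size(res), name))
		return NULL;		/* someone else owns the region */
	base = ioremap(res->start, resource_size(res));
	if (!base)
		release_mem_region(res->start, resource_size(res));
	return base;
}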
@@ -1699,7 +1697,7 @@ static int __init at91udc_probe(struct platform_device *pdev)
udc->ep[3].maxpacket = 64;
}
- udc->udp_baseaddr = ioremap(res->start, res->end - res->start + 1);
+ udc->udp_baseaddr = ioremap(res->start, resource_size(res));
if (!udc->udp_baseaddr) {
retval = -ENOMEM;
goto fail0a;
@@ -1781,7 +1779,7 @@ fail0a:
if (cpu_is_at91rm9200())
gpio_free(udc->board.pullup_pin);
fail0:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
DBG("%s probe failed, %d\n", driver_name, retval);
return retval;
}
@@ -1813,7 +1811,7 @@ static int __exit at91udc_remove(struct platform_device *pdev)
gpio_free(udc->board.pullup_pin);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
clk_put(udc->iclk);
clk_put(udc->fclk);
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index 4e970cf0e29..f79bdfe4bed 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -320,7 +320,7 @@ static inline void usba_cleanup_debugfs(struct usba_udc *udc)
static int vbus_is_present(struct usba_udc *udc)
{
if (gpio_is_valid(udc->vbus_pin))
- return gpio_get_value(udc->vbus_pin);
+ return gpio_get_value(udc->vbus_pin) ^ udc->vbus_pin_inverted;
/* No Vbus detection: Assume always present */
return 1;
@@ -1763,7 +1763,7 @@ static irqreturn_t usba_vbus_irq(int irq, void *devid)
if (!udc->driver)
goto out;
- vbus = gpio_get_value(udc->vbus_pin);
+ vbus = vbus_is_present(udc);
if (vbus != udc->vbus_prev) {
if (vbus) {
toggle_bias(1);
@@ -1914,14 +1914,14 @@ static int __init usba_udc_probe(struct platform_device *pdev)
udc->vbus_pin = -ENODEV;
ret = -ENOMEM;
- udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ udc->regs = ioremap(regs->start, resource_size(regs));
if (!udc->regs) {
dev_err(&pdev->dev, "Unable to map I/O memory, aborting.\n");
goto err_map_regs;
}
dev_info(&pdev->dev, "MMIO registers at 0x%08lx mapped at %p\n",
(unsigned long)regs->start, udc->regs);
- udc->fifo = ioremap(fifo->start, fifo->end - fifo->start + 1);
+ udc->fifo = ioremap(fifo->start, resource_size(fifo));
if (!udc->fifo) {
dev_err(&pdev->dev, "Unable to map FIFO, aborting.\n");
goto err_map_fifo;
@@ -2000,6 +2000,7 @@ static int __init usba_udc_probe(struct platform_device *pdev)
if (gpio_is_valid(pdata->vbus_pin)) {
if (!gpio_request(pdata->vbus_pin, "atmel_usba_udc")) {
udc->vbus_pin = pdata->vbus_pin;
+ udc->vbus_pin_inverted = pdata->vbus_pin_inverted;
ret = request_irq(gpio_to_irq(udc->vbus_pin),
usba_vbus_irq, 0,
diff --git a/drivers/usb/gadget/atmel_usba_udc.h b/drivers/usb/gadget/atmel_usba_udc.h
index f7baea307f0..88a2e07a11a 100644
--- a/drivers/usb/gadget/atmel_usba_udc.h
+++ b/drivers/usb/gadget/atmel_usba_udc.h
@@ -323,6 +323,7 @@ struct usba_udc {
struct platform_device *pdev;
int irq;
int vbus_pin;
+ int vbus_pin_inverted;
struct clk *pclk;
struct clk *hclk;
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index cd0914ec898..65a5f94cbc0 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -265,16 +265,24 @@ struct usb_ep * __init usb_ep_autoconfig (
return ep;
}
- } else if (gadget_is_sh (gadget) && USB_ENDPOINT_XFER_INT == type) {
- /* single buffering is enough; maybe 8 byte fifo is too */
- ep = find_ep (gadget, "ep3in-bulk");
- if (ep && ep_matches (gadget, ep, desc))
- return ep;
-
- } else if (gadget_is_mq11xx (gadget) && USB_ENDPOINT_XFER_INT == type) {
- ep = find_ep (gadget, "ep1-bulk");
+#ifdef CONFIG_BLACKFIN
+ } else if (gadget_is_musbhsfc(gadget) || gadget_is_musbhdrc(gadget)) {
+ if ((USB_ENDPOINT_XFER_BULK == type) ||
+ (USB_ENDPOINT_XFER_ISOC == type)) {
+ if (USB_DIR_IN & desc->bEndpointAddress)
+ ep = find_ep (gadget, "ep5in");
+ else
+ ep = find_ep (gadget, "ep6out");
+ } else if (USB_ENDPOINT_XFER_INT == type) {
+ if (USB_DIR_IN & desc->bEndpointAddress)
+ ep = find_ep(gadget, "ep1in");
+ else
+ ep = find_ep(gadget, "ep2out");
+ } else
+ ep = NULL;
if (ep && ep_matches (gadget, ep, desc))
return ep;
+#endif
}
/* Second, look at endpoints until an unclaimed one looks usable */
diff --git a/drivers/usb/gadget/ether.c b/drivers/usb/gadget/ether.c
index 141372b6e7a..400f80372d9 100644
--- a/drivers/usb/gadget/ether.c
+++ b/drivers/usb/gadget/ether.c
@@ -259,7 +259,7 @@ static struct usb_configuration rndis_config_driver = {
/*-------------------------------------------------------------------------*/
-#ifdef USB_ETH_EEM
+#ifdef CONFIG_USB_ETH_EEM
static int use_eem = 1;
#else
static int use_eem;
diff --git a/drivers/usb/gadget/f_acm.c b/drivers/usb/gadget/f_acm.c
index d10353d46b8..e49c7325dce 100644
--- a/drivers/usb/gadget/f_acm.c
+++ b/drivers/usb/gadget/f_acm.c
@@ -702,14 +702,6 @@ acm_unbind(struct usb_configuration *c, struct usb_function *f)
/* Some controllers can't support CDC ACM ... */
static inline bool can_support_cdc(struct usb_configuration *c)
{
- /* SH3 doesn't support multiple interfaces */
- if (gadget_is_sh(c->cdev->gadget))
- return false;
-
- /* sa1100 doesn't have a third interrupt endpoint */
- if (gadget_is_sa1100(c->cdev->gadget))
- return false;
-
/* everything else is *probably* fine ... */
return true;
}
diff --git a/drivers/usb/gadget/f_ecm.c b/drivers/usb/gadget/f_ecm.c
index ecf5bdd0ae0..2fff530efc1 100644
--- a/drivers/usb/gadget/f_ecm.c
+++ b/drivers/usb/gadget/f_ecm.c
@@ -497,12 +497,9 @@ static int ecm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
struct net_device *net;
/* Enable zlps by default for ECM conformance;
- * override for musb_hdrc (avoids txdma ovhead)
- * and sa1100 (can't).
+ * override for musb_hdrc (avoids txdma ovhead).
*/
- ecm->port.is_zlp_ok = !(
- gadget_is_sa1100(cdev->gadget)
- || gadget_is_musbhdrc(cdev->gadget)
+ ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget)
);
ecm->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate ecm\n");
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index a37640eba43..5a3cdd08f1d 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -368,7 +368,7 @@ struct fsg_common {
struct task_struct *thread_task;
/* Callback function to call when thread exits. */
- void (*thread_exits)(struct fsg_common *common);
+ int (*thread_exits)(struct fsg_common *common);
/* Gadget's private data. */
void *private_data;
@@ -392,8 +392,12 @@ struct fsg_config {
const char *lun_name_format;
const char *thread_name;
- /* Callback function to call when thread exits. */
- void (*thread_exits)(struct fsg_common *common);
+	/* Callback function to call when thread exits.  If no
+	 * callback is set or it returns a value lower than zero, MSF
+	 * will force-eject all LUNs it operates on (including those
+	 * marked as non-removable or with the prevent_medium_removal
+	 * flag set). */
+ int (*thread_exits)(struct fsg_common *common);
/* Gadget's private data. */
void *private_data;
@@ -614,7 +618,12 @@ static int fsg_setup(struct usb_function *f,
return -EDOM;
VDBG(fsg, "get max LUN\n");
*(u8 *) req->buf = fsg->common->nluns - 1;
- return 1;
+
+ /* Respond with data/status */
+ req->length = min((u16)1, w_length);
+ fsg->common->ep0req_name =
+ ctrl->bRequestType & USB_DIR_IN ? "ep0-in" : "ep0-out";
+ return ep0_queue(fsg->common);
}
VDBG(fsg,
@@ -1041,7 +1050,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
unsigned long rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
- VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+ VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}
static int do_verify(struct fsg_common *common)
@@ -2524,14 +2533,6 @@ static void handle_exception(struct fsg_common *common)
case FSG_STATE_CONFIG_CHANGE:
rc = do_set_config(common, new_config);
- if (common->ep0_req_tag != exception_req_tag)
- break;
- if (rc != 0) { /* STALL on errors */
- DBG(common, "ep0 set halt\n");
- usb_ep_set_halt(common->ep0);
- } else { /* Complete the status stage */
- ep0_queue(common);
- }
break;
case FSG_STATE_EXIT:
@@ -2615,8 +2616,20 @@ static int fsg_main_thread(void *common_)
common->thread_task = NULL;
spin_unlock_irq(&common->lock);
- if (common->thread_exits)
- common->thread_exits(common);
+ if (!common->thread_exits || common->thread_exits(common) < 0) {
+ struct fsg_lun *curlun = common->luns;
+ unsigned i = common->nluns;
+
+ down_write(&common->filesem);
+ for (; i--; ++curlun) {
+ if (!fsg_lun_is_open(curlun))
+ continue;
+
+ fsg_lun_close(curlun);
+ curlun->unit_attention_data = SS_MEDIUM_NOT_PRESENT;
+ }
+ up_write(&common->filesem);
+ }
/* Let the unbind and cleanup routines know the thread has exited */
complete_and_exit(&common->thread_notifier, 0);
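The fallback above force-ejects every LUN when the thread_exits callback is missing or returns a negative value, matching the contract documented in struct fsg_config. A hypothetical callback that keeps its LUNs mounted would therefore look like this (sketch):

static int sketch_thread_exits(struct fsg_common *common)
{
	/* gadget-specific teardown goes here */
	return 0;	/* non-negative: MSF leaves the LUNs alone */
}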
@@ -2763,10 +2776,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
if (cfg->release != 0xffff) {
i = cfg->release;
} else {
- /* The sa1100 controller is not supported */
- i = gadget_is_sa1100(gadget)
- ? -1
- : usb_gadget_controller_number(gadget);
+ i = usb_gadget_controller_number(gadget);
if (i >= 0) {
i = 0x0300 + i;
} else {
@@ -2791,8 +2801,7 @@ static struct fsg_common *fsg_common_init(struct fsg_common *common,
* disable stalls.
*/
common->can_stall = cfg->can_stall &&
- !(gadget_is_sh(common->gadget) ||
- gadget_is_at91(common->gadget));
+ !(gadget_is_at91(common->gadget));
spin_lock_init(&common->lock);
@@ -2852,7 +2861,6 @@ error_release:
/* Call fsg_common_release() directly, ref might be not
* initialised */
fsg_common_release(&common->ref);
- complete(&common->thread_notifier);
return ERR_PTR(rc);
}
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 95dae4c1ea4..a30e60c7f12 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -769,10 +769,6 @@ rndis_unbind(struct usb_configuration *c, struct usb_function *f)
/* Some controllers can't support RNDIS ... */
static inline bool can_support_rndis(struct usb_configuration *c)
{
- /* only two endpoints on sa1100 */
- if (gadget_is_sa1100(c->cdev->gadget))
- return false;
-
/* everything else is *presumably* fine */
return true;
}
diff --git a/drivers/usb/gadget/file_storage.c b/drivers/usb/gadget/file_storage.c
index 29dfb0277ff..b49d86e3e45 100644
--- a/drivers/usb/gadget/file_storage.c
+++ b/drivers/usb/gadget/file_storage.c
@@ -1448,7 +1448,7 @@ static void invalidate_sub(struct fsg_lun *curlun)
unsigned long rc;
rc = invalidate_mapping_pages(inode->i_mapping, 0, -1);
- VLDBG(curlun, "invalidate_inode_pages -> %ld\n", rc);
+ VLDBG(curlun, "invalidate_mapping_pages -> %ld\n", rc);
}
static int do_verify(struct fsg_dev *fsg)
@@ -3208,15 +3208,11 @@ static int __init check_parameters(struct fsg_dev *fsg)
* halt bulk endpoints correctly. If one of them is present,
* disable stalls.
*/
- if (gadget_is_sh(fsg->gadget) || gadget_is_at91(fsg->gadget))
+ if (gadget_is_at91(fsg->gadget))
mod_data.can_stall = 0;
if (mod_data.release == 0xffff) { // Parameter wasn't set
- /* The sa1100 controller is not supported */
- if (gadget_is_sa1100(fsg->gadget))
- gcnum = -1;
- else
- gcnum = usb_gadget_controller_number(fsg->gadget);
+ gcnum = usb_gadget_controller_number(fsg->gadget);
if (gcnum >= 0)
mod_data.release = 0x0300 + gcnum;
else {
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 7881f12413c..3537d51073b 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -2749,7 +2749,7 @@ static int __devexit qe_udc_remove(struct of_device *ofdev)
}
/*-------------------------------------------------------------------------*/
-static struct of_device_id __devinitdata qe_udc_match[] = {
+static const struct of_device_id qe_udc_match[] __devinitconst = {
{
.compatible = "fsl,mpc8323-qe-usb",
.data = (void *)PORT_QE,
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index f2d270b202f..1edbc12fff1 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -45,46 +45,18 @@
#define gadget_is_goku(g) 0
#endif
-/* SH3 UDC -- not yet ported 2.4 --> 2.6 */
-#ifdef CONFIG_USB_GADGET_SUPERH
-#define gadget_is_sh(g) !strcmp("sh_udc", (g)->name)
-#else
-#define gadget_is_sh(g) 0
-#endif
-
-/* not yet stable on 2.6 (would help "original Zaurus") */
-#ifdef CONFIG_USB_GADGET_SA1100
-#define gadget_is_sa1100(g) !strcmp("sa1100_udc", (g)->name)
-#else
-#define gadget_is_sa1100(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_LH7A40X
#define gadget_is_lh7a40x(g) !strcmp("lh7a40x_udc", (g)->name)
#else
#define gadget_is_lh7a40x(g) 0
#endif
-/* handhelds.org tree (?) */
-#ifdef CONFIG_USB_GADGET_MQ11XX
-#define gadget_is_mq11xx(g) !strcmp("mq11xx_udc", (g)->name)
-#else
-#define gadget_is_mq11xx(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_OMAP
#define gadget_is_omap(g) !strcmp("omap_udc", (g)->name)
#else
#define gadget_is_omap(g) 0
#endif
-/* not yet ported 2.4 --> 2.6 */
-#ifdef CONFIG_USB_GADGET_N9604
-#define gadget_is_n9604(g) !strcmp("n9604_udc", (g)->name)
-#else
-#define gadget_is_n9604(g) 0
-#endif
-
/* various unstable versions available */
#ifdef CONFIG_USB_GADGET_PXA27X
#define gadget_is_pxa27x(g) !strcmp("pxa27x_udc", (g)->name)
@@ -122,14 +94,6 @@
#define gadget_is_fsl_usb2(g) 0
#endif
-/* Mentor high speed function controller */
-/* from Montavista kernel (?) */
-#ifdef CONFIG_USB_GADGET_MUSBHSFC
-#define gadget_is_musbhsfc(g) !strcmp("musbhsfc_udc", (g)->name)
-#else
-#define gadget_is_musbhsfc(g) 0
-#endif
-
/* Mentor high speed "dual role" controller, in peripheral role */
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
#define gadget_is_musbhdrc(g) !strcmp("musb_hdrc", (g)->name)
@@ -143,13 +107,6 @@
#define gadget_is_langwell(g) 0
#endif
-/* from Montavista kernel (?) */
-#ifdef CONFIG_USB_GADGET_MPC8272
-#define gadget_is_mpc8272(g) !strcmp("mpc8272_udc", (g)->name)
-#else
-#define gadget_is_mpc8272(g) 0
-#endif
-
#ifdef CONFIG_USB_GADGET_M66592
#define gadget_is_m66592(g) !strcmp("m66592_udc", (g)->name)
#else
@@ -203,20 +160,12 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x02;
else if (gadget_is_pxa(gadget))
return 0x03;
- else if (gadget_is_sh(gadget))
- return 0x04;
- else if (gadget_is_sa1100(gadget))
- return 0x05;
else if (gadget_is_goku(gadget))
return 0x06;
- else if (gadget_is_mq11xx(gadget))
- return 0x07;
else if (gadget_is_omap(gadget))
return 0x08;
else if (gadget_is_lh7a40x(gadget))
return 0x09;
- else if (gadget_is_n9604(gadget))
- return 0x10;
else if (gadget_is_pxa27x(gadget))
return 0x11;
else if (gadget_is_s3c2410(gadget))
@@ -225,12 +174,8 @@ static inline int usb_gadget_controller_number(struct usb_gadget *gadget)
return 0x13;
else if (gadget_is_imx(gadget))
return 0x14;
- else if (gadget_is_musbhsfc(gadget))
- return 0x15;
else if (gadget_is_musbhdrc(gadget))
return 0x16;
- else if (gadget_is_mpc8272(gadget))
- return 0x17;
else if (gadget_is_atmel_usba(gadget))
return 0x18;
else if (gadget_is_fsl_usb2(gadget))
@@ -265,10 +210,6 @@ static inline bool gadget_supports_altsettings(struct usb_gadget *gadget)
if (gadget_is_pxa27x(gadget))
return false;
- /* SH3 hardware just doesn't do altsettings */
- if (gadget_is_sh(gadget))
- return false;
-
/* Everything else is *presumably* fine ... */
return true;
}
diff --git a/drivers/usb/gadget/gmidi.c b/drivers/usb/gadget/gmidi.c
index 5f6a2e0a935..04f6224b7e0 100644
--- a/drivers/usb/gadget/gmidi.c
+++ b/drivers/usb/gadget/gmidi.c
@@ -618,11 +618,6 @@ gmidi_set_config(struct gmidi_device *dev, unsigned number, gfp_t gfp_flags)
}
#endif
- if (gadget_is_sa1100(gadget) && dev->config) {
- /* tx fifo is full, but we can't clear it...*/
- ERROR(dev, "can't change configurations\n");
- return -ESPIPE;
- }
gmidi_reset_config(dev);
switch (number) {
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 112bb40a427..e8edc640381 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1859,7 +1859,7 @@ done:
/*-------------------------------------------------------------------------*/
-static struct pci_device_id pci_ids [] = { {
+static const struct pci_device_id pci_ids[] = { {
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
.vendor = 0x102f, /* Toshiba */
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index bf0f6520c6d..de8a8380350 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -194,7 +194,7 @@ enum ep_state {
};
struct ep_data {
- struct semaphore lock;
+ struct mutex lock;
enum ep_state state;
atomic_t count;
struct dev_data *dev;
@@ -298,10 +298,10 @@ get_ready_ep (unsigned f_flags, struct ep_data *epdata)
int val;
if (f_flags & O_NONBLOCK) {
- if (down_trylock (&epdata->lock) != 0)
+ if (!mutex_trylock(&epdata->lock))
goto nonblock;
if (epdata->state != STATE_EP_ENABLED) {
- up (&epdata->lock);
+ mutex_unlock(&epdata->lock);
nonblock:
val = -EAGAIN;
} else
@@ -309,7 +309,8 @@ nonblock:
return val;
}
- if ((val = down_interruptible (&epdata->lock)) < 0)
+ val = mutex_lock_interruptible(&epdata->lock);
+ if (val < 0)
return val;
switch (epdata->state) {
@@ -323,7 +324,7 @@ nonblock:
// FALLTHROUGH
case STATE_EP_UNBOUND: /* clean disconnect */
val = -ENODEV;
- up (&epdata->lock);
+ mutex_unlock(&epdata->lock);
}
return val;
}
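This file converts ep_data's semaphore to a mutex; the blocking and non-blocking acquisition paths map onto mutex_lock_interruptible() and mutex_trylock(). The shape of that conversion on a hypothetical structure:

#include <linux/mutex.h>
#include <linux/errno.h>

struct sketch_ep {
	struct mutex lock;
	int enabled;
};

static int sketch_claim(struct sketch_ep *ep, int nonblock)
{
	if (nonblock) {
		if (!mutex_trylock(&ep->lock))
			return -EAGAIN;
	} else {
		int rc = mutex_lock_interruptible(&ep->lock);

		if (rc)
			return rc;	/* -EINTR if a signal arrived */
	}
	if (!ep->enabled) {
		mutex_unlock(&ep->lock);
		return -ENODEV;
	}
	return 0;	/* caller drops ep->lock with mutex_unlock() */
}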
@@ -393,7 +394,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
if (likely (data->ep != NULL))
usb_ep_set_halt (data->ep);
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return -EBADMSG;
}
@@ -411,7 +412,7 @@ ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr)
value = -EFAULT;
free1:
- up (&data->lock);
+ mutex_unlock(&data->lock);
kfree (kbuf);
return value;
}
@@ -436,7 +437,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
if (likely (data->ep != NULL))
usb_ep_set_halt (data->ep);
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return -EBADMSG;
}
@@ -455,7 +456,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
VDEBUG (data->dev, "%s write %zu IN, status %d\n",
data->name, len, (int) value);
free1:
- up (&data->lock);
+ mutex_unlock(&data->lock);
kfree (kbuf);
return value;
}
@@ -466,7 +467,8 @@ ep_release (struct inode *inode, struct file *fd)
struct ep_data *data = fd->private_data;
int value;
- if ((value = down_interruptible(&data->lock)) < 0)
+ value = mutex_lock_interruptible(&data->lock);
+ if (value < 0)
return value;
/* clean up if this can be reopened */
@@ -476,7 +478,7 @@ ep_release (struct inode *inode, struct file *fd)
data->hs_desc.bDescriptorType = 0;
usb_ep_disable(data->ep);
}
- up (&data->lock);
+ mutex_unlock(&data->lock);
put_ep (data);
return 0;
}
@@ -507,7 +509,7 @@ static long ep_ioctl(struct file *fd, unsigned code, unsigned long value)
} else
status = -ENODEV;
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return status;
}
@@ -673,7 +675,7 @@ fail:
value = -ENODEV;
spin_unlock_irq(&epdata->dev->lock);
- up(&epdata->lock);
+ mutex_unlock(&epdata->lock);
if (unlikely(value)) {
kfree(priv);
@@ -765,7 +767,8 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
u32 tag;
int value, length = len;
- if ((value = down_interruptible (&data->lock)) < 0)
+ value = mutex_lock_interruptible(&data->lock);
+ if (value < 0)
return value;
if (data->state != STATE_EP_READY) {
@@ -854,7 +857,7 @@ fail:
data->desc.bDescriptorType = 0;
data->hs_desc.bDescriptorType = 0;
}
- up (&data->lock);
+ mutex_unlock(&data->lock);
return value;
fail0:
value = -EINVAL;
@@ -870,7 +873,7 @@ ep_open (struct inode *inode, struct file *fd)
struct ep_data *data = inode->i_private;
int value = -EBUSY;
- if (down_interruptible (&data->lock) != 0)
+ if (mutex_lock_interruptible(&data->lock) != 0)
return -EINTR;
spin_lock_irq (&data->dev->lock);
if (data->dev->state == STATE_DEV_UNBOUND)
@@ -885,7 +888,7 @@ ep_open (struct inode *inode, struct file *fd)
DBG (data->dev, "%s state %d\n",
data->name, data->state);
spin_unlock_irq (&data->dev->lock);
- up (&data->lock);
+ mutex_unlock(&data->lock);
return value;
}
@@ -1631,7 +1634,7 @@ static int activate_ep_files (struct dev_data *dev)
if (!data)
goto enomem0;
data->state = STATE_EP_DISABLED;
- init_MUTEX (&data->lock);
+ mutex_init(&data->lock);
init_waitqueue_head (&data->wait);
strncpy (data->name, ep->name, sizeof (data->name) - 1);
diff --git a/drivers/usb/gadget/mass_storage.c b/drivers/usb/gadget/mass_storage.c
index 19619fbf20a..705cc1f7632 100644
--- a/drivers/usb/gadget/mass_storage.c
+++ b/drivers/usb/gadget/mass_storage.c
@@ -135,6 +135,12 @@ FSG_MODULE_PARAMETERS(/* no prefix */, mod_data);
static unsigned long msg_registered = 0;
static void msg_cleanup(void);
+static int msg_thread_exits(struct fsg_common *common)
+{
+ msg_cleanup();
+ return 0;
+}
+
static int __init msg_do_config(struct usb_configuration *c)
{
struct fsg_common *common;
@@ -147,7 +153,7 @@ static int __init msg_do_config(struct usb_configuration *c)
}
fsg_config_from_params(&config, &mod_data);
- config.thread_exits = (void(*)(struct fsg_common*))&msg_cleanup;
+ config.thread_exits = msg_thread_exits;
common = fsg_common_init(0, c->cdev, &config);
if (IS_ERR(common))
return PTR_ERR(common);
diff --git a/drivers/usb/gadget/nokia.c b/drivers/usb/gadget/nokia.c
new file mode 100644
index 00000000000..7d6b66a8572
--- /dev/null
+++ b/drivers/usb/gadget/nokia.c
@@ -0,0 +1,259 @@
+/*
+ * nokia.c -- Nokia Composite Gadget Driver
+ *
+ * Copyright (C) 2008-2010 Nokia Corporation
+ * Contact: Felipe Balbi <felipe.balbi@nokia.com>
+ *
+ * This gadget driver borrows from serial.c which is:
+ *
+ * Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
+ * Copyright (C) 2008 by David Brownell
+ * Copyright (C) 2008 by Nokia Corporation
+ *
+ * This software is distributed under the terms of the GNU General
+ * Public License ("GPL") as published by the Free Software Foundation,
+ * version 2 of that License.
+ */
+
+#include <linux/kernel.h>
+#include <linux/utsname.h>
+#include <linux/device.h>
+
+#include "u_serial.h"
+#include "u_ether.h"
+#include "u_phonet.h"
+#include "gadget_chips.h"
+
+/* Defines */
+
+#define NOKIA_VERSION_NUM 0x0211
+#define NOKIA_LONG_NAME "N900 (PC-Suite Mode)"
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Kbuild is not very cooperative with respect to linking separately
+ * compiled library objects into one module. So for now we won't use
+ * separate compilation ... ensuring init/exit sections work to shrink
+ * the runtime footprint, and giving us at least some parts of what
+ * a "gcc --combine ... part1.c part2.c part3.c ... " build would.
+ */
+#include "composite.c"
+#include "usbstring.c"
+#include "config.c"
+#include "epautoconf.c"
+
+#include "u_serial.c"
+#include "f_acm.c"
+#include "f_ecm.c"
+#include "f_obex.c"
+#include "f_serial.c"
+#include "f_phonet.c"
+#include "u_ether.c"
+
+/*-------------------------------------------------------------------------*/
+
+#define NOKIA_VENDOR_ID 0x0421 /* Nokia */
+#define NOKIA_PRODUCT_ID 0x01c8 /* Nokia Gadget */
+
+/* string IDs are assigned dynamically */
+
+#define STRING_MANUFACTURER_IDX 0
+#define STRING_PRODUCT_IDX 1
+#define STRING_DESCRIPTION_IDX 2
+
+static char manufacturer_nokia[] = "Nokia";
+static const char product_nokia[] = NOKIA_LONG_NAME;
+static const char description_nokia[] = "PC-Suite Configuration";
+
+static struct usb_string strings_dev[] = {
+ [STRING_MANUFACTURER_IDX].s = manufacturer_nokia,
+ [STRING_PRODUCT_IDX].s = NOKIA_LONG_NAME,
+ [STRING_DESCRIPTION_IDX].s = description_nokia,
+ { } /* end of list */
+};
+
+static struct usb_gadget_strings stringtab_dev = {
+ .language = 0x0409, /* en-us */
+ .strings = strings_dev,
+};
+
+static struct usb_gadget_strings *dev_strings[] = {
+ &stringtab_dev,
+ NULL,
+};
+
+static struct usb_device_descriptor device_desc = {
+ .bLength = USB_DT_DEVICE_SIZE,
+ .bDescriptorType = USB_DT_DEVICE,
+ .bcdUSB = __constant_cpu_to_le16(0x0200),
+ .bDeviceClass = USB_CLASS_COMM,
+ .idVendor = __constant_cpu_to_le16(NOKIA_VENDOR_ID),
+ .idProduct = __constant_cpu_to_le16(NOKIA_PRODUCT_ID),
+ /* .iManufacturer = DYNAMIC */
+ /* .iProduct = DYNAMIC */
+ .bNumConfigurations = 1,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* Module */
+MODULE_DESCRIPTION("Nokia composite gadget driver for N900");
+MODULE_AUTHOR("Felipe Balbi");
+MODULE_LICENSE("GPL");
+
+/*-------------------------------------------------------------------------*/
+
+static u8 hostaddr[ETH_ALEN];
+
+static int __init nokia_bind_config(struct usb_configuration *c)
+{
+ int status = 0;
+
+ status = phonet_bind_config(c);
+ if (status)
+ printk(KERN_DEBUG "could not bind phonet config\n");
+
+ status = obex_bind_config(c, 0);
+ if (status)
+ printk(KERN_DEBUG "could not bind obex config %d\n", 0);
+
+ status = obex_bind_config(c, 1);
+ if (status)
+ printk(KERN_DEBUG "could not bind obex config %d\n", 0);
+
+ status = acm_bind_config(c, 2);
+ if (status)
+ printk(KERN_DEBUG "could not bind acm config\n");
+
+ status = ecm_bind_config(c, hostaddr);
+ if (status)
+ printk(KERN_DEBUG "could not bind ecm config\n");
+
+ return status;
+}
+
+static struct usb_configuration nokia_config_500ma_driver = {
+ .label = "Bus Powered",
+ .bind = nokia_bind_config,
+ .bConfigurationValue = 1,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_ONE,
+ .bMaxPower = 250, /* 500mA */
+};
+
+static struct usb_configuration nokia_config_100ma_driver = {
+ .label = "Self Powered",
+ .bind = nokia_bind_config,
+ .bConfigurationValue = 2,
+ /* .iConfiguration = DYNAMIC */
+ .bmAttributes = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
+ .bMaxPower = 50, /* 100 mA */
+};
+
+static int __init nokia_bind(struct usb_composite_dev *cdev)
+{
+ int gcnum;
+ struct usb_gadget *gadget = cdev->gadget;
+ int status;
+
+ status = gphonet_setup(cdev->gadget);
+ if (status < 0)
+ goto err_phonet;
+
+ status = gserial_setup(cdev->gadget, 3);
+ if (status < 0)
+ goto err_serial;
+
+ status = gether_setup(cdev->gadget, hostaddr);
+ if (status < 0)
+ goto err_ether;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto err_usb;
+ strings_dev[STRING_MANUFACTURER_IDX].id = status;
+
+ device_desc.iManufacturer = status;
+
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto err_usb;
+ strings_dev[STRING_PRODUCT_IDX].id = status;
+
+ device_desc.iProduct = status;
+
+ /* config description */
+ status = usb_string_id(cdev);
+ if (status < 0)
+ goto err_usb;
+ strings_dev[STRING_DESCRIPTION_IDX].id = status;
+
+ nokia_config_500ma_driver.iConfiguration = status;
+ nokia_config_100ma_driver.iConfiguration = status;
+
+ /* set up other descriptors */
+ gcnum = usb_gadget_controller_number(gadget);
+ if (gcnum >= 0)
+ device_desc.bcdDevice = cpu_to_le16(NOKIA_VERSION_NUM);
+ else {
+ /* this should only work with hw that supports altsettings
+ * and several endpoints, anything else, panic.
+ */
+ pr_err("nokia_bind: controller '%s' not recognized\n",
+ gadget->name);
+ goto err_usb;
+ }
+
+ /* finally register the configuration */
+ status = usb_add_config(cdev, &nokia_config_500ma_driver);
+ if (status < 0)
+ goto err_usb;
+
+ status = usb_add_config(cdev, &nokia_config_100ma_driver);
+ if (status < 0)
+ goto err_usb;
+
+ dev_info(&gadget->dev, "%s\n", NOKIA_LONG_NAME);
+
+ return 0;
+
+err_usb:
+ gether_cleanup();
+err_ether:
+ gserial_cleanup();
+err_serial:
+ gphonet_cleanup();
+err_phonet:
+ return status;
+}
+
+static int __exit nokia_unbind(struct usb_composite_dev *cdev)
+{
+ gphonet_cleanup();
+ gserial_cleanup();
+ gether_cleanup();
+
+ return 0;
+}
+
+static struct usb_composite_driver nokia_driver = {
+ .name = "g_nokia",
+ .dev = &device_desc,
+ .strings = dev_strings,
+ .bind = nokia_bind,
+ .unbind = __exit_p(nokia_unbind),
+};
+
+static int __init nokia_init(void)
+{
+ return usb_composite_register(&nokia_driver);
+}
+module_init(nokia_init);
+
+static void __exit nokia_cleanup(void)
+{
+ usb_composite_unregister(&nokia_driver);
+}
+module_exit(nokia_cleanup);
+
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 2d867fd2241..6b8bf8c781c 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -949,12 +949,6 @@ printer_set_config(struct printer_dev *dev, unsigned number)
int result = 0;
struct usb_gadget *gadget = dev->gadget;
- if (gadget_is_sa1100(gadget) && dev->config) {
- /* tx fifo is full, but we can't clear it...*/
- INFO(dev, "can't change configurations\n");
- return -ESPIPE;
- }
-
switch (number) {
case DEV_CONFIG_VALUE:
result = 0;
@@ -1033,12 +1027,6 @@ set_interface(struct printer_dev *dev, unsigned number)
{
int result = 0;
- if (gadget_is_sa1100(dev->gadget) && dev->interface < 0) {
- /* tx fifo is full, but we can't clear it...*/
- INFO(dev, "can't change interfaces\n");
- return -ESPIPE;
- }
-
/* Free the current interface */
switch (dev->interface) {
case PRINTER_INTERFACE:
@@ -1392,12 +1380,6 @@ printer_bind(struct usb_gadget *gadget)
goto fail;
}
- if (gadget_is_sa1100(gadget)) {
- /* hardware can't write zero length packets. */
- ERROR(dev, "SA1100 controller is unsupport by this driver\n");
- goto fail;
- }
-
gcnum = usb_gadget_controller_number(gadget);
if (gcnum >= 0) {
device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum);
diff --git a/drivers/usb/gadget/pxa27x_udc.c b/drivers/usb/gadget/pxa27x_udc.c
index adda1208a1e..05b892c3d68 100644
--- a/drivers/usb/gadget/pxa27x_udc.c
+++ b/drivers/usb/gadget/pxa27x_udc.c
@@ -742,13 +742,17 @@ static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req)
* @ep: pxa physical endpoint
* @req: pxa request
* @status: usb request status sent to gadget API
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held if flags not NULL, else ep->lock released
*
* Retire a pxa27x usb request. Endpoint must be locked.
*/
-static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
+static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status,
+ unsigned long *pflags)
{
+ unsigned long flags;
+
ep_del_request(ep, req);
if (likely(req->req.status == -EINPROGRESS))
req->req.status = status;
@@ -760,38 +764,48 @@ static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status)
&req->req, status,
req->req.actual, req->req.length);
+ if (pflags)
+ spin_unlock_irqrestore(&ep->lock, *pflags);
+ local_irq_save(flags);
req->req.complete(&req->udc_usb_ep->usb_ep, &req->req);
+ local_irq_restore(flags);
+ if (pflags)
+ spin_lock_irqsave(&ep->lock, *pflags);
}
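[Editor's note] The req_done() change above drops the endpoint spinlock around the gadget completion callback, because req.complete() may re-enter the driver (for example by queueing another request). A minimal sketch of that pattern, assuming a hypothetical per-endpoint lock; the names below are illustrative and not part of the patch (the real code additionally disables local interrupts around the callback):

#include <linux/spinlock.h>

struct sketch_ep {
        spinlock_t lock;
};

/* pflags is NULL when the caller does not already hold ep->lock */
static void sketch_complete(struct sketch_ep *ep,
                            void (*complete)(void *ctx), void *ctx,
                            unsigned long *pflags)
{
        if (pflags)
                spin_unlock_irqrestore(&ep->lock, *pflags);
        complete(ctx);                  /* may call back into the driver */
        if (pflags)
                spin_lock_irqsave(&ep->lock, *pflags);
}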
/**
* ep_end_out_req - Ends endpoint OUT request
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends endpoint OUT request (completes usb request).
*/
-static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
inc_ep_stats_reqs(ep, !USB_DIR_IN);
- req_done(ep, req, 0);
+ req_done(ep, req, 0, pflags);
}
/**
* ep0_end_out_req - Ends control endpoint OUT request (ends data stage)
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends control endpoint OUT request (completes usb request), and puts
* control endpoint into idle state
*/
-static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
set_ep0state(ep->dev, OUT_STATUS_STAGE);
- ep_end_out_req(ep, req);
+ ep_end_out_req(ep, req, pflags);
ep0_idle(ep->dev);
}
@@ -799,31 +813,35 @@ static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req)
* ep_end_in_req - Ends endpoint IN request
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends endpoint IN request (completes usb request).
*/
-static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
inc_ep_stats_reqs(ep, USB_DIR_IN);
- req_done(ep, req, 0);
+ req_done(ep, req, 0, pflags);
}
/**
* ep0_end_in_req - Ends control endpoint IN request (ends data stage)
* @ep: physical endpoint
* @req: pxa request
+ * @pflags: flags of previous spin_lock_irqsave() or NULL if no lock held
*
- * Context: ep->lock held
+ * Context: ep->lock held or released (see req_done())
*
* Ends control endpoint IN request (completes usb request), and puts
* control endpoint into status state
*/
-static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
+static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req,
+ unsigned long *pflags)
{
set_ep0state(ep->dev, IN_STATUS_STAGE);
- ep_end_in_req(ep, req);
+ ep_end_in_req(ep, req, pflags);
}
/**
@@ -831,19 +849,22 @@ static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req)
* @ep: pxa endpoint
* @status: usb request status
*
- * Context: ep->lock held
+ * Context: ep->lock released
*
* Dequeues all requests on an endpoint. As a side effect, interrupts will be
* disabled on that endpoint (because no more requests).
*/
static void nuke(struct pxa_ep *ep, int status)
{
- struct pxa27x_request *req;
+ struct pxa27x_request *req;
+ unsigned long flags;
+ spin_lock_irqsave(&ep->lock, flags);
while (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next, struct pxa27x_request, queue);
- req_done(ep, req, status);
+ req_done(ep, req, status, &flags);
}
+ spin_unlock_irqrestore(&ep->lock, flags);
}
/**
@@ -1123,6 +1144,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
int rc = 0;
int is_first_req;
unsigned length;
+ int recursion_detected;
req = container_of(_req, struct pxa27x_request, req);
udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep);
@@ -1152,6 +1174,7 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
return -EMSGSIZE;
spin_lock_irqsave(&ep->lock, flags);
+ recursion_detected = ep->in_handle_ep;
is_first_req = list_empty(&ep->queue);
ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n",
@@ -1161,12 +1184,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
if (!ep->enabled) {
_req->status = -ESHUTDOWN;
rc = -ESHUTDOWN;
- goto out;
+ goto out_locked;
}
if (req->in_use) {
ep_err(ep, "refusing to queue req %p (already queued)\n", req);
- goto out;
+ goto out_locked;
}
length = _req->length;
@@ -1174,12 +1197,13 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
_req->actual = 0;
ep_add_request(ep, req);
+ spin_unlock_irqrestore(&ep->lock, flags);
if (is_ep0(ep)) {
switch (dev->ep0state) {
case WAIT_ACK_SET_CONF_INTERF:
if (length == 0) {
- ep_end_in_req(ep, req);
+ ep_end_in_req(ep, req, NULL);
} else {
ep_err(ep, "got a request of %d bytes while"
"in state WAIT_ACK_SET_CONF_INTERF\n",
@@ -1192,12 +1216,12 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
case IN_DATA_STAGE:
if (!ep_is_full(ep))
if (write_ep0_fifo(ep, req))
- ep0_end_in_req(ep, req);
+ ep0_end_in_req(ep, req, NULL);
break;
case OUT_DATA_STAGE:
if ((length == 0) || !epout_has_pkt(ep))
if (read_ep0_fifo(ep, req))
- ep0_end_out_req(ep, req);
+ ep0_end_out_req(ep, req, NULL);
break;
default:
ep_err(ep, "odd state %s to send me a request\n",
@@ -1207,12 +1231,15 @@ static int pxa_ep_queue(struct usb_ep *_ep, struct usb_request *_req,
break;
}
} else {
- handle_ep(ep);
+ if (!recursion_detected)
+ handle_ep(ep);
}
out:
- spin_unlock_irqrestore(&ep->lock, flags);
return rc;
+out_locked:
+ spin_unlock_irqrestore(&ep->lock, flags);
+ goto out;
}
/**
@@ -1242,13 +1269,14 @@ static int pxa_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
/* make sure it's actually queued on this endpoint */
list_for_each_entry(req, &ep->queue, queue) {
if (&req->req == _req) {
- req_done(ep, req, -ECONNRESET);
rc = 0;
break;
}
}
spin_unlock_irqrestore(&ep->lock, flags);
+ if (!rc)
+ req_done(ep, req, -ECONNRESET, NULL);
return rc;
}
@@ -1445,7 +1473,6 @@ static int pxa_ep_disable(struct usb_ep *_ep)
{
struct pxa_ep *ep;
struct udc_usb_ep *udc_usb_ep;
- unsigned long flags;
if (!_ep)
return -EINVAL;
@@ -1455,10 +1482,8 @@ static int pxa_ep_disable(struct usb_ep *_ep)
if (!ep || is_ep0(ep) || !list_empty(&ep->queue))
return -EINVAL;
- spin_lock_irqsave(&ep->lock, flags);
ep->enabled = 0;
nuke(ep, -ESHUTDOWN);
- spin_unlock_irqrestore(&ep->lock, flags);
pxa_ep_fifo_flush(_ep);
udc_usb_ep->pxa_ep = NULL;
@@ -1907,8 +1932,10 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
} u;
int i;
int have_extrabytes = 0;
+ unsigned long flags;
nuke(ep, -EPROTO);
+ spin_lock_irqsave(&ep->lock, flags);
/*
* In the PXA320 manual, in the section about Back-to-Back setup
@@ -1947,10 +1974,13 @@ static void handle_ep0_ctrl_req(struct pxa_udc *udc,
/* Tell UDC to enter Data Stage */
ep_write_UDCCSR(ep, UDCCSR0_SA | UDCCSR0_OPC);
+ spin_unlock_irqrestore(&ep->lock, flags);
i = udc->driver->setup(&udc->gadget, &u.r);
+ spin_lock_irqsave(&ep->lock, flags);
if (i < 0)
goto stall;
out:
+ spin_unlock_irqrestore(&ep->lock, flags);
return;
stall:
ep_dbg(ep, "protocol STALL, udccsr0=%03x err %d\n",
@@ -2055,13 +2085,13 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
if (req && !ep_is_full(ep))
completed = write_ep0_fifo(ep, req);
if (completed)
- ep0_end_in_req(ep, req);
+ ep0_end_in_req(ep, req, NULL);
break;
case OUT_DATA_STAGE: /* SET_DESCRIPTOR */
if (epout_has_pkt(ep) && req)
completed = read_ep0_fifo(ep, req);
if (completed)
- ep0_end_out_req(ep, req);
+ ep0_end_out_req(ep, req, NULL);
break;
case STALL:
ep_write_UDCCSR(ep, UDCCSR0_FST);
@@ -2091,7 +2121,7 @@ static void handle_ep0(struct pxa_udc *udc, int fifo_irq, int opc_irq)
* Tries to transfer all pending request data into the endpoint and/or
* transfer all pending data in the endpoint into usb requests.
*
- * Is always called when in_interrupt() or with ep->lock held.
+ * Is always called when in_interrupt() and with ep->lock released.
*/
static void handle_ep(struct pxa_ep *ep)
{
@@ -2100,10 +2130,17 @@ static void handle_ep(struct pxa_ep *ep)
u32 udccsr;
int is_in = ep->dir_in;
int loop = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ep->lock, flags);
+ if (ep->in_handle_ep)
+ goto recursion_detected;
+ ep->in_handle_ep = 1;
do {
completed = 0;
udccsr = udc_ep_readl(ep, UDCCSR);
+
if (likely(!list_empty(&ep->queue)))
req = list_entry(ep->queue.next,
struct pxa27x_request, queue);
@@ -2122,15 +2159,22 @@ static void handle_ep(struct pxa_ep *ep)
if (unlikely(is_in)) {
if (likely(!ep_is_full(ep)))
completed = write_fifo(ep, req);
- if (completed)
- ep_end_in_req(ep, req);
} else {
if (likely(epout_has_pkt(ep)))
completed = read_fifo(ep, req);
- if (completed)
- ep_end_out_req(ep, req);
+ }
+
+ if (completed) {
+ if (is_in)
+ ep_end_in_req(ep, req, &flags);
+ else
+ ep_end_out_req(ep, req, &flags);
}
} while (completed);
+
+ ep->in_handle_ep = 0;
+recursion_detected:
+ spin_unlock_irqrestore(&ep->lock, flags);
}
/**
@@ -2218,9 +2262,13 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
continue;
udc_writel(udc, UDCISR0, UDCISR_INT(i, UDCISR_INT_MASK));
- ep = &udc->pxa_ep[i];
- ep->stats.irqs++;
- handle_ep(ep);
+
+ WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+ if (i < ARRAY_SIZE(udc->pxa_ep)) {
+ ep = &udc->pxa_ep[i];
+ ep->stats.irqs++;
+ handle_ep(ep);
+ }
}
for (i = 16; udcisr1 != 0 && i < 24; udcisr1 >>= 2, i++) {
@@ -2228,9 +2276,12 @@ static void irq_handle_data(int irq, struct pxa_udc *udc)
if (!(udcisr1 & UDCISR_INT_MASK))
continue;
- ep = &udc->pxa_ep[i];
- ep->stats.irqs++;
- handle_ep(ep);
+ WARN_ON(i >= ARRAY_SIZE(udc->pxa_ep));
+ if (i < ARRAY_SIZE(udc->pxa_ep)) {
+ ep = &udc->pxa_ep[i];
+ ep->stats.irqs++;
+ handle_ep(ep);
+ }
}
}
@@ -2439,7 +2490,7 @@ static int __init pxa_udc_probe(struct platform_device *pdev)
}
retval = -ENOMEM;
- udc->regs = ioremap(regs->start, regs->end - regs->start + 1);
+ udc->regs = ioremap(regs->start, resource_size(regs));
if (!udc->regs) {
dev_err(&pdev->dev, "Unable to map UDC I/O memory\n");
goto err_map;
diff --git a/drivers/usb/gadget/pxa27x_udc.h b/drivers/usb/gadget/pxa27x_udc.h
index e25225e2658..ff61e4866e8 100644
--- a/drivers/usb/gadget/pxa27x_udc.h
+++ b/drivers/usb/gadget/pxa27x_udc.h
@@ -318,6 +318,11 @@ struct udc_usb_ep {
* @queue: requests queue
* @lock: lock to pxa_ep data (queues and stats)
* @enabled: true when endpoint enabled (not stopped by gadget layer)
+ * @in_handle_ep: set while handle_ep() is running on this endpoint
+ * Prevents deadlocks or infinite recursion of the form:
+ * irq->handle_ep()->req_done()->req.complete()->pxa_ep_queue()->handle_ep()
+ * or
+ * pxa_ep_queue()->handle_ep()->req_done()->req.complete()->pxa_ep_queue()
* @idx: endpoint index (1 => epA, 2 => epB, ..., 24 => epX)
* @name: endpoint name (for trace/debug purpose)
* @dir_in: 1 if IN endpoint, 0 if OUT endpoint
@@ -346,6 +351,7 @@ struct pxa_ep {
spinlock_t lock; /* Protects this structure */
/* (queues, stats) */
unsigned enabled:1;
+ unsigned in_handle_ep:1;
unsigned idx:5;
char *name;
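[Editor's note] A minimal sketch of the recursion guard that the new in_handle_ep bit provides, matching the handle_ep() hunk in pxa27x_udc.c above; the struct and function names here are illustrative only:

#include <linux/spinlock.h>

struct sketch_ep {
        spinlock_t lock;
        unsigned in_handle_ep:1;
};

static void sketch_handle_ep(struct sketch_ep *ep)
{
        unsigned long flags;

        spin_lock_irqsave(&ep->lock, flags);
        if (ep->in_handle_ep)           /* already running further up the call chain */
                goto out;
        ep->in_handle_ep = 1;
        /* ... process the FIFO and complete requests (may drop the lock) ... */
        ep->in_handle_ep = 0;
out:
        spin_unlock_irqrestore(&ep->lock, flags);
}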
diff --git a/drivers/usb/gadget/s3c-hsotg.c b/drivers/usb/gadget/s3c-hsotg.c
index 5fc80a10415..7e5bf593d38 100644
--- a/drivers/usb/gadget/s3c-hsotg.c
+++ b/drivers/usb/gadget/s3c-hsotg.c
@@ -317,7 +317,8 @@ static void s3c_hsotg_init_fifo(struct s3c_hsotg *hsotg)
*
* Allocate a new USB request structure appropriate for the specified endpoint
*/
-struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep, gfp_t flags)
+static struct usb_request *s3c_hsotg_ep_alloc_request(struct usb_ep *ep,
+ gfp_t flags)
{
struct s3c_hsotg_req *req;
@@ -373,7 +374,7 @@ static void s3c_hsotg_unmap_dma(struct s3c_hsotg *hsotg,
req->dma = DMA_ADDR_INVALID;
hs_req->mapped = 0;
} else {
- dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+ dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
}
}
@@ -755,7 +756,7 @@ static int s3c_hsotg_map_dma(struct s3c_hsotg *hsotg,
hs_req->mapped = 1;
req->dma = dma;
} else {
- dma_sync_single(hsotg->dev, req->dma, req->length, dir);
+ dma_sync_single_for_cpu(hsotg->dev, req->dma, req->length, dir);
hs_req->mapped = 0;
}
@@ -1460,7 +1461,7 @@ static u32 s3c_hsotg_read_frameno(struct s3c_hsotg *hsotg)
* as the actual data should be sent to the memory directly and we turn
* on the completion interrupts to get notifications of transfer completion.
*/
-void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
+static void s3c_hsotg_handle_rx(struct s3c_hsotg *hsotg)
{
u32 grxstsr = readl(hsotg->regs + S3C_GRXSTSP);
u32 epnum, status, size;
@@ -3094,7 +3095,7 @@ static void s3c_hsotg_gate(struct platform_device *pdev, bool on)
local_irq_restore(flags);
}
-struct s3c_hsotg_plat s3c_hsotg_default_pdata;
+static struct s3c_hsotg_plat s3c_hsotg_default_pdata;
static int __devinit s3c_hsotg_probe(struct platform_device *pdev)
{
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 2fc02bd9584..84ca195c2d1 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -746,6 +746,10 @@ static const struct net_device_ops eth_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
};
+static struct device_type gadget_type = {
+ .name = "gadget",
+};
+
/**
* gether_setup - initialize one ethernet-over-usb link
* @g: gadget to associated with these links
@@ -808,6 +812,7 @@ int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &gadget_type);
status = register_netdev(net);
if (status < 0) {
diff --git a/drivers/usb/gadget/u_ether.h b/drivers/usb/gadget/u_ether.h
index fd55f450bc0..3c8c0c9f9d7 100644
--- a/drivers/usb/gadget/u_ether.h
+++ b/drivers/usb/gadget/u_ether.h
@@ -93,13 +93,6 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
if (!gadget_supports_altsettings(gadget))
return false;
- /* SA1100 can do ECM, *without* status endpoint ... but we'll
- * only use it in non-ECM mode for backwards compatibility
- * (and since we currently require a status endpoint)
- */
- if (gadget_is_sa1100(gadget))
- return false;
-
/* Everything else is *presumably* fine ... but this is a bit
* chancy, so be **CERTAIN** there are no hardware issues with
* your controller. Add it above if it can't handle CDC.
diff --git a/drivers/usb/gadget/zero.c b/drivers/usb/gadget/zero.c
index 2d772401b7a..fac81ee193d 100644
--- a/drivers/usb/gadget/zero.c
+++ b/drivers/usb/gadget/zero.c
@@ -297,12 +297,10 @@ static int __init zero_bind(struct usb_composite_dev *cdev)
*/
if (loopdefault) {
loopback_add(cdev, autoresume != 0);
- if (!gadget_is_sh(gadget))
- sourcesink_add(cdev, autoresume != 0);
+ sourcesink_add(cdev, autoresume != 0);
} else {
sourcesink_add(cdev, autoresume != 0);
- if (!gadget_is_sh(gadget))
- loopback_add(cdev, autoresume != 0);
+ loopback_add(cdev, autoresume != 0);
}
gcnum = usb_gadget_controller_number(gadget);
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 2678a1624fc..8d3df0397de 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -399,3 +399,14 @@ config USB_HWA_HCD
To compile this driver a module, choose M here: the module
will be called "hwa-hc".
+
+config USB_IMX21_HCD
+ tristate "iMX21 HCD support"
+ depends on USB && ARM && MACH_MX21
+ help
+ This driver enables support for the on-chip USB host in the
+ iMX21 processor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called "imx21-hcd".
+
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index f58b2494c44..4e0c67f1f51 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -32,3 +32,5 @@ obj-$(CONFIG_USB_U132_HCD) += u132-hcd.o
obj-$(CONFIG_USB_R8A66597_HCD) += r8a66597-hcd.o
obj-$(CONFIG_USB_ISP1760_HCD) += isp1760.o
obj-$(CONFIG_USB_HWA_HCD) += hwa-hc.o
+obj-$(CONFIG_USB_IMX21_HCD) += imx21-hcd.o
+
diff --git a/drivers/usb/host/ehci-atmel.c b/drivers/usb/host/ehci-atmel.c
index 87c1b7c34c0..51bd0edf544 100644
--- a/drivers/usb/host/ehci-atmel.c
+++ b/drivers/usb/host/ehci-atmel.c
@@ -149,7 +149,7 @@ static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
goto fail_request_resource;
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
diff --git a/drivers/usb/host/ehci-au1xxx.c b/drivers/usb/host/ehci-au1xxx.c
index dbfb482a94e..e3a74e75e82 100644
--- a/drivers/usb/host/ehci-au1xxx.c
+++ b/drivers/usb/host/ehci-au1xxx.c
@@ -121,6 +121,7 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
+ struct resource *res;
int ret;
if (usb_disabled())
@@ -144,8 +145,9 @@ static int ehci_hcd_au1xxx_drv_probe(struct platform_device *pdev)
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
pr_debug("request_mem_region failed");
diff --git a/drivers/usb/host/ehci-fsl.c b/drivers/usb/host/ehci-fsl.c
index 991174937db..0e26aa13f15 100644
--- a/drivers/usb/host/ehci-fsl.c
+++ b/drivers/usb/host/ehci-fsl.c
@@ -1,5 +1,6 @@
/*
- * Copyright (c) 2005 MontaVista Software
+ * Copyright 2005-2009 MontaVista Software, Inc.
+ * Copyright 2008 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -17,17 +18,20 @@
*
* Ported to 834x by Randy Vinson <rvinson@mvista.com> using code provided
* by Hunter Wu.
+ * Power Management support by Dave Liu <daveliu@freescale.com>,
+ * Jerry Huang <Chang-Ming.Huang@freescale.com> and
+ * Anton Vorontsov <avorontsov@ru.mvista.com>.
*/
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/fsl_devices.h>
#include "ehci-fsl.h"
-/* FIXME: Power Management is un-ported so temporarily disable it */
-#undef CONFIG_PM
-
-
/* configure so an HC device and id are always provided */
/* always called with process context; sleeping is OK */
@@ -40,8 +44,8 @@
* Allocates basic resources for this USB host controller.
*
*/
-int usb_hcd_fsl_probe(const struct hc_driver *driver,
- struct platform_device *pdev)
+static int usb_hcd_fsl_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata;
struct usb_hcd *hcd;
@@ -147,7 +151,8 @@ int usb_hcd_fsl_probe(const struct hc_driver *driver,
* Reverses the effect of usb_hcd_fsl_probe().
*
*/
-void usb_hcd_fsl_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+static void usb_hcd_fsl_remove(struct usb_hcd *hcd,
+ struct platform_device *pdev)
{
usb_remove_hcd(hcd);
iounmap(hcd->regs);
@@ -284,10 +289,81 @@ static int ehci_fsl_setup(struct usb_hcd *hcd)
return retval;
}
+struct ehci_fsl {
+ struct ehci_hcd ehci;
+
+#ifdef CONFIG_PM
+ /* Saved USB PHY settings, need to restore after deep sleep. */
+ u32 usb_ctrl;
+#endif
+};
+
+#ifdef CONFIG_PM
+
+static struct ehci_fsl *hcd_to_ehci_fsl(struct usb_hcd *hcd)
+{
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+
+ return container_of(ehci, struct ehci_fsl, ehci);
+}
+
+static int ehci_fsl_drv_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ void __iomem *non_ehci = hcd->regs;
+
+ if (!fsl_deep_sleep())
+ return 0;
+
+ ehci_fsl->usb_ctrl = in_be32(non_ehci + FSL_SOC_USB_CTRL);
+ return 0;
+}
+
+static int ehci_fsl_drv_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct ehci_fsl *ehci_fsl = hcd_to_ehci_fsl(hcd);
+ struct ehci_hcd *ehci = hcd_to_ehci(hcd);
+ void __iomem *non_ehci = hcd->regs;
+
+ if (!fsl_deep_sleep())
+ return 0;
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+
+ /* Restore USB PHY settings and enable the controller. */
+ out_be32(non_ehci + FSL_SOC_USB_CTRL, ehci_fsl->usb_ctrl);
+
+ ehci_reset(ehci);
+ ehci_fsl_reinit(ehci);
+
+ return 0;
+}
+
+static int ehci_fsl_drv_restore(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+
+ usb_root_hub_lost_power(hcd->self.root_hub);
+ return 0;
+}
+
+static struct dev_pm_ops ehci_fsl_pm_ops = {
+ .suspend = ehci_fsl_drv_suspend,
+ .resume = ehci_fsl_drv_resume,
+ .restore = ehci_fsl_drv_restore,
+};
+
+#define EHCI_FSL_PM_OPS (&ehci_fsl_pm_ops)
+#else
+#define EHCI_FSL_PM_OPS NULL
+#endif /* CONFIG_PM */
+
static const struct hc_driver ehci_fsl_hc_driver = {
.description = hcd_name,
.product_desc = "Freescale On-Chip EHCI Host Controller",
- .hcd_priv_size = sizeof(struct ehci_hcd),
+ .hcd_priv_size = sizeof(struct ehci_fsl),
/*
* generic hardware linkage
@@ -354,6 +430,7 @@ static struct platform_driver ehci_fsl_driver = {
.remove = ehci_fsl_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.driver = {
- .name = "fsl-ehci",
+ .name = "fsl-ehci",
+ .pm = EHCI_FSL_PM_OPS,
},
};
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 1ec3857f22e..d8d6d3461d3 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -1118,7 +1118,7 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ehci_hcd_au1xxx_driver
#endif
-#ifdef CONFIG_ARCH_OMAP34XX
+#ifdef CONFIG_ARCH_OMAP3
#include "ehci-omap.c"
#define PLATFORM_DRIVER ehci_hcd_omap_driver
#endif
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index 35c56f40bdb..23cd917088b 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -162,6 +162,17 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
goto err_ioremap;
}
+ /* call platform specific init function */
+ if (pdata->init) {
+ ret = pdata->init(pdev);
+ if (ret) {
+ dev_err(dev, "platform init failed\n");
+ goto err_init;
+ }
+ /* platforms need some time to settle changed IO settings */
+ mdelay(10);
+ }
+
/* enable clocks */
priv->usbclk = clk_get(dev, "usb");
if (IS_ERR(priv->usbclk)) {
@@ -192,18 +203,6 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
if (ret < 0)
goto err_init;
- /* call platform specific init function */
- if (pdata->init) {
- ret = pdata->init(pdev);
- if (ret) {
- dev_err(dev, "platform init failed\n");
- goto err_init;
- }
- }
-
- /* most platforms need some time to settle changed IO settings */
- mdelay(10);
-
/* Initialize the transceiver */
if (pdata->otg) {
pdata->otg->io_priv = hcd->regs + ULPI_VIEWPORT_OFFSET;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index 74d07f4e8b7..f0282d6bb7a 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -26,10 +26,9 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
- * TODO (last updated Feb 23rd, 2009):
+ * TODO (last updated Feb 12, 2010):
* - add kernel-doc
* - enable AUTOIDLE
- * - move DPLL5 programming to clock fw
* - add suspend/resume
* - move workarounds to board-files
*/
@@ -37,6 +36,7 @@
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
#include <plat/usb.h>
/*
@@ -178,6 +178,11 @@ struct ehci_hcd_omap {
void __iomem *uhh_base;
void __iomem *tll_base;
void __iomem *ehci_base;
+
+ /* Regulators for USB PHYs.
+ * Each PHY can have a separate regulator.
+ */
+ struct regulator *regulator[OMAP3_HS_USB_PORTS];
};
/*-------------------------------------------------------------------------*/
@@ -546,6 +551,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
int irq = platform_get_irq(pdev, 0);
int ret = -ENODEV;
+ int i;
+ char supply[7];
if (!pdata) {
dev_dbg(&pdev->dev, "missing platform_data\n");
@@ -613,6 +620,21 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
goto err_tll_ioremap;
}
+ /* get ehci regulator and enable */
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (omap->port_mode[i] != EHCI_HCD_OMAP_MODE_PHY) {
+ omap->regulator[i] = NULL;
+ continue;
+ }
+ snprintf(supply, sizeof(supply), "hsusb%d", i);
+ omap->regulator[i] = regulator_get(omap->dev, supply);
+ if (IS_ERR(omap->regulator[i]))
+ dev_dbg(&pdev->dev,
+ "failed to get ehci port%d regulator\n", i);
+ else
+ regulator_enable(omap->regulator[i]);
+ }
+
ret = omap_start_ehc(omap, hcd);
if (ret) {
dev_dbg(&pdev->dev, "failed to start ehci\n");
@@ -622,13 +644,12 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
omap->ehci->regs = hcd->regs
+ HC_LENGTH(readl(&omap->ehci->caps->hc_capbase));
+ dbg_hcs_params(omap->ehci, "reset");
+ dbg_hcc_params(omap->ehci, "reset");
+
/* cache this readonly data; minimize chip reads */
omap->ehci->hcs_params = readl(&omap->ehci->caps->hcs_params);
- /* SET 1 micro-frame Interrupt interval */
- writel(readl(&omap->ehci->regs->command) | (1 << 16),
- &omap->ehci->regs->command);
-
ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
if (ret) {
dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
@@ -641,6 +662,12 @@ err_add_hcd:
omap_stop_ehc(omap, hcd);
err_start:
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (omap->regulator[i]) {
+ regulator_disable(omap->regulator[i]);
+ regulator_put(omap->regulator[i]);
+ }
+ }
iounmap(omap->tll_base);
err_tll_ioremap:
@@ -674,13 +701,21 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
{
struct ehci_hcd_omap *omap = platform_get_drvdata(pdev);
struct usb_hcd *hcd = ehci_to_hcd(omap->ehci);
+ int i;
usb_remove_hcd(hcd);
omap_stop_ehc(omap, hcd);
iounmap(hcd->regs);
+ for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
+ if (omap->regulator[i]) {
+ regulator_disable(omap->regulator[i]);
+ regulator_put(omap->regulator[i]);
+ }
+ }
iounmap(omap->tll_base);
iounmap(omap->uhh_base);
usb_put_hcd(hcd);
+ kfree(omap);
return 0;
}
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 1d283e1b2b8..0f87dc72820 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -222,14 +222,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
goto err1;
}
- if (!request_mem_region(res->start, res->end - res->start + 1,
+ if (!request_mem_region(res->start, resource_size(res),
ehci_orion_hc_driver.description)) {
dev_dbg(&pdev->dev, "controller already in use\n");
err = -EBUSY;
goto err1;
}
- regs = ioremap(res->start, res->end - res->start + 1);
+ regs = ioremap(res->start, resource_size(res));
if (regs == NULL) {
dev_dbg(&pdev->dev, "error mapping memory\n");
err = -EFAULT;
@@ -244,7 +244,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
}
hcd->rsrc_start = res->start;
- hcd->rsrc_len = res->end - res->start + 1;
+ hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
ehci = hcd_to_ehci(hcd);
@@ -287,7 +287,7 @@ err4:
err3:
iounmap(regs);
err2:
- release_mem_region(res->start, res->end - res->start + 1);
+ release_mem_region(res->start, resource_size(res));
err1:
dev_err(&pdev->dev, "init %s fail, %d\n",
dev_name(&pdev->dev), err);
diff --git a/drivers/usb/host/ehci-ppc-of.c b/drivers/usb/host/ehci-ppc-of.c
index 36f96da129f..8df33b8a634 100644
--- a/drivers/usb/host/ehci-ppc-of.c
+++ b/drivers/usb/host/ehci-ppc-of.c
@@ -134,21 +134,21 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
hcd->rsrc_len = res.end - res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
rv = -EBUSY;
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
rv = -ENOMEM;
goto err_ioremap;
}
@@ -161,9 +161,9 @@ ehci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
ehci->ohci_hcctrl_reg = ioremap(res.start +
OHCI_HCCTRL_OFFSET, OHCI_HCCTRL_LEN);
else
- pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ pr_debug("%s: no ohci offset in fdt\n", __FILE__);
if (!ehci->ohci_hcctrl_reg) {
- pr_debug(__FILE__ ": ioremap for ohci hcctrl failed\n");
+ pr_debug("%s: ioremap for ohci hcctrl failed\n", __FILE__);
} else {
ehci->has_amcc_usb23 = 1;
}
@@ -241,7 +241,7 @@ static int ehci_hcd_ppc_of_remove(struct of_device *op)
else
release_mem_region(res.start, 0x4);
else
- pr_debug(__FILE__ ": no ohci offset in fdt\n");
+ pr_debug("%s: no ohci offset in fdt\n", __FILE__);
of_node_put(np);
}
@@ -264,7 +264,7 @@ static int ehci_hcd_ppc_of_shutdown(struct of_device *op)
}
-static struct of_device_id ehci_hcd_ppc_of_match[] = {
+static const struct of_device_id ehci_hcd_ppc_of_match[] = {
{
.compatible = "usb-ehci",
},
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 1e391e624c8..39340ae00ac 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -510,6 +510,8 @@ static int disable_periodic (struct ehci_hcd *ehci)
ehci_writel(ehci, cmd, &ehci->regs->command);
/* posted write ... */
+ free_cached_itd_list(ehci);
+
ehci->next_uframe = -1;
return 0;
}
@@ -2322,9 +2324,13 @@ restart:
* No need to check for activity unless the
* frame is current.
*/
- if (frame == clock_frame && live &&
- (q.sitd->hw_results &
- SITD_ACTIVE(ehci))) {
+ if (((frame == clock_frame) ||
+ (((frame + 1) % ehci->periodic_size)
+ == clock_frame))
+ && live
+ && (q.sitd->hw_results &
+ SITD_ACTIVE(ehci))) {
+
incomplete = true;
q_p = &q.sitd->sitd_next;
hw_p = &q.sitd->hw_next;
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index a5861531ad3..f603bb2c0a8 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -177,21 +177,21 @@ ehci_hcd_xilinx_of_probe(struct of_device *op, const struct of_device_id *match)
hcd->rsrc_len = res.end - res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
rv = -EBUSY;
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
rv = -ENOMEM;
goto err_ioremap;
}
@@ -281,7 +281,7 @@ static int ehci_hcd_xilinx_of_shutdown(struct of_device *op)
}
-static struct of_device_id ehci_hcd_xilinx_of_match[] = {
+static const struct of_device_id ehci_hcd_xilinx_of_match[] = {
{.compatible = "xlnx,xps-usb-host-1.00.a",},
{},
};
diff --git a/drivers/usb/host/fhci-hcd.c b/drivers/usb/host/fhci-hcd.c
index 78e7c3cfcb7..5dcfb3de994 100644
--- a/drivers/usb/host/fhci-hcd.c
+++ b/drivers/usb/host/fhci-hcd.c
@@ -433,7 +433,7 @@ static int fhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
return -ENOMEM;
/* allocate the private part of the URB */
- urb_priv->tds = kzalloc(size * sizeof(struct td), mem_flags);
+ urb_priv->tds = kcalloc(size, sizeof(*urb_priv->tds), mem_flags);
if (!urb_priv->tds) {
kfree(urb_priv);
return -ENOMEM;
@@ -805,7 +805,7 @@ static int __devexit of_fhci_remove(struct of_device *ofdev)
return fhci_remove(&ofdev->dev);
}
-static struct of_device_id of_fhci_match[] = {
+static const struct of_device_id of_fhci_match[] = {
{ .compatible = "fsl,mpc8323-qe-usb", },
{},
};
diff --git a/drivers/usb/host/imx21-dbg.c b/drivers/usb/host/imx21-dbg.c
new file mode 100644
index 00000000000..512f647448c
--- /dev/null
+++ b/drivers/usb/host/imx21-dbg.c
@@ -0,0 +1,527 @@
+/*
+ * Copyright (c) 2009 by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/* this file is part of imx21-hcd.c */
+
+#ifndef DEBUG
+
+static inline void create_debug_files(struct imx21 *imx21) { }
+static inline void remove_debug_files(struct imx21 *imx21) { }
+static inline void debug_urb_submitted(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_completed(struct imx21 *imx21, struct urb *urb,
+ int status) {}
+static inline void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb) {}
+static inline void debug_urb_queued_for_etd(struct imx21 *imx21,
+ struct urb *urb) {}
+static inline void debug_urb_queued_for_dmem(struct imx21 *imx21,
+ struct urb *urb) {}
+static inline void debug_etd_allocated(struct imx21 *imx21) {}
+static inline void debug_etd_freed(struct imx21 *imx21) {}
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size) {}
+static inline void debug_dmem_freed(struct imx21 *imx21, int size) {}
+static inline void debug_isoc_submitted(struct imx21 *imx21,
+ int frame, struct td *td) {}
+static inline void debug_isoc_completed(struct imx21 *imx21,
+ int frame, struct td *td, int cc, int len) {}
+
+#else
+
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static const char *dir_labels[] = {
+ "TD 0",
+ "OUT",
+ "IN",
+ "TD 1"
+};
+
+static const char *speed_labels[] = {
+ "Full",
+ "Low"
+};
+
+static const char *format_labels[] = {
+ "Control",
+ "ISO",
+ "Bulk",
+ "Interrupt"
+};
+
+static inline struct debug_stats *stats_for_urb(struct imx21 *imx21,
+ struct urb *urb)
+{
+ return usb_pipeisoc(urb->pipe) ?
+ &imx21->isoc_stats : &imx21->nonisoc_stats;
+}
+
+static void debug_urb_submitted(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->submitted++;
+}
+
+static void debug_urb_completed(struct imx21 *imx21, struct urb *urb, int st)
+{
+ if (st)
+ stats_for_urb(imx21, urb)->completed_failed++;
+ else
+ stats_for_urb(imx21, urb)->completed_ok++;
+}
+
+static void debug_urb_unlinked(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->unlinked++;
+}
+
+static void debug_urb_queued_for_etd(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->queue_etd++;
+}
+
+static void debug_urb_queued_for_dmem(struct imx21 *imx21, struct urb *urb)
+{
+ stats_for_urb(imx21, urb)->queue_dmem++;
+}
+
+static inline void debug_etd_allocated(struct imx21 *imx21)
+{
+ imx21->etd_usage.maximum = max(
+ ++(imx21->etd_usage.value),
+ imx21->etd_usage.maximum);
+}
+
+static inline void debug_etd_freed(struct imx21 *imx21)
+{
+ imx21->etd_usage.value--;
+}
+
+static inline void debug_dmem_allocated(struct imx21 *imx21, int size)
+{
+ imx21->dmem_usage.value += size;
+ imx21->dmem_usage.maximum = max(
+ imx21->dmem_usage.value,
+ imx21->dmem_usage.maximum);
+}
+
+static inline void debug_dmem_freed(struct imx21 *imx21, int size)
+{
+ imx21->dmem_usage.value -= size;
+}
+
+
+static void debug_isoc_submitted(struct imx21 *imx21,
+ int frame, struct td *td)
+{
+ struct debug_isoc_trace *trace = &imx21->isoc_trace[
+ imx21->isoc_trace_index++];
+
+ imx21->isoc_trace_index %= ARRAY_SIZE(imx21->isoc_trace);
+ trace->schedule_frame = td->frame;
+ trace->submit_frame = frame;
+ trace->request_len = td->len;
+ trace->td = td;
+}
+
+static inline void debug_isoc_completed(struct imx21 *imx21,
+ int frame, struct td *td, int cc, int len)
+{
+ struct debug_isoc_trace *trace, *trace_failed;
+ int i;
+ int found = 0;
+
+ trace = imx21->isoc_trace;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++) {
+ if (trace->td == td) {
+ trace->done_frame = frame;
+ trace->done_len = len;
+ trace->cc = cc;
+ trace->td = NULL;
+ found = 1;
+ break;
+ }
+ }
+
+ if (found && cc) {
+ trace_failed = &imx21->isoc_trace_failed[
+ imx21->isoc_trace_index_failed++];
+
+ imx21->isoc_trace_index_failed %= ARRAY_SIZE(
+ imx21->isoc_trace_failed);
+ *trace_failed = *trace;
+ }
+}
+
+
+static char *format_ep(struct usb_host_endpoint *ep, char *buf, int bufsize)
+{
+ if (ep)
+ snprintf(buf, bufsize, "ep_%02x (type:%02X kaddr:%p)",
+ ep->desc.bEndpointAddress,
+ usb_endpoint_type(&ep->desc),
+ ep);
+ else
+ snprintf(buf, bufsize, "none");
+ return buf;
+}
+
+static char *format_etd_dword0(u32 value, char *buf, int bufsize)
+{
+ snprintf(buf, bufsize,
+ "addr=%d ep=%d dir=%s speed=%s format=%s halted=%d",
+ value & 0x7F,
+ (value >> DW0_ENDPNT) & 0x0F,
+ dir_labels[(value >> DW0_DIRECT) & 0x03],
+ speed_labels[(value >> DW0_SPEED) & 0x01],
+ format_labels[(value >> DW0_FORMAT) & 0x03],
+ (value >> DW0_HALTED) & 0x01);
+ return buf;
+}
+
+static int debug_status_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ int etds_allocated = 0;
+ int etds_sw_busy = 0;
+ int etds_hw_busy = 0;
+ int dmem_blocks = 0;
+ int queued_for_etd = 0;
+ int queued_for_dmem = 0;
+ unsigned int dmem_bytes = 0;
+ int i;
+ struct etd_priv *etd;
+ u32 etd_enable_mask;
+ unsigned long flags;
+ struct imx21_dmem_area *dmem;
+ struct ep_priv *ep_priv;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ etd_enable_mask = readl(imx21->regs + USBH_ETDENSET);
+ for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+ if (etd->alloc)
+ etds_allocated++;
+ if (etd->urb)
+ etds_sw_busy++;
+ if (etd_enable_mask & (1<<i))
+ etds_hw_busy++;
+ }
+
+ list_for_each_entry(dmem, &imx21->dmem_list, list) {
+ dmem_bytes += dmem->size;
+ dmem_blocks++;
+ }
+
+ list_for_each_entry(ep_priv, &imx21->queue_for_etd, queue)
+ queued_for_etd++;
+
+ list_for_each_entry(etd, &imx21->queue_for_dmem, queue)
+ queued_for_dmem++;
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ seq_printf(s,
+ "Frame: %d\n"
+ "ETDs allocated: %d/%d (max=%d)\n"
+ "ETDs in use sw: %d\n"
+ "ETDs in use hw: %d\n"
+ "DMEM alocated: %d/%d (max=%d)\n"
+ "DMEM blocks: %d\n"
+ "Queued waiting for ETD: %d\n"
+ "Queued waiting for DMEM: %d\n",
+ readl(imx21->regs + USBH_FRMNUB) & 0xFFFF,
+ etds_allocated, USB_NUM_ETD, imx21->etd_usage.maximum,
+ etds_sw_busy,
+ etds_hw_busy,
+ dmem_bytes, DMEM_SIZE, imx21->dmem_usage.maximum,
+ dmem_blocks,
+ queued_for_etd,
+ queued_for_dmem);
+
+ return 0;
+}
+
+static int debug_dmem_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct imx21_dmem_area *dmem;
+ unsigned long flags;
+ char ep_text[40];
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ list_for_each_entry(dmem, &imx21->dmem_list, list)
+ seq_printf(s,
+ "%04X: size=0x%X "
+ "ep=%s\n",
+ dmem->offset, dmem->size,
+ format_ep(dmem->ep, ep_text, sizeof(ep_text)));
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static int debug_etd_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct etd_priv *etd;
+ char buf[60];
+ u32 dword;
+ int i, j;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ for (i = 0, etd = imx21->etd; i < USB_NUM_ETD; i++, etd++) {
+ int state = -1;
+ struct urb_priv *urb_priv;
+ if (etd->urb) {
+ urb_priv = etd->urb->hcpriv;
+ if (urb_priv)
+ state = urb_priv->state;
+ }
+
+ seq_printf(s,
+ "etd_num: %d\n"
+ "ep: %s\n"
+ "alloc: %d\n"
+ "len: %d\n"
+ "busy sw: %d\n"
+ "busy hw: %d\n"
+ "urb state: %d\n"
+ "current urb: %p\n",
+
+ i,
+ format_ep(etd->ep, buf, sizeof(buf)),
+ etd->alloc,
+ etd->len,
+ etd->urb != NULL,
+ (readl(imx21->regs + USBH_ETDENSET) & (1 << i)) > 0,
+ state,
+ etd->urb);
+
+ for (j = 0; j < 4; j++) {
+ dword = etd_readl(imx21, i, j);
+ switch (j) {
+ case 0:
+ format_etd_dword0(dword, buf, sizeof(buf));
+ break;
+ case 2:
+ snprintf(buf, sizeof(buf),
+ "cc=0X%02X", dword >> DW2_COMPCODE);
+ break;
+ default:
+ *buf = 0;
+ break;
+ }
+ seq_printf(s,
+ "dword %d: submitted=%08X cur=%08X [%s]\n",
+ j,
+ etd->submitted_dwords[j],
+ dword,
+ buf);
+ }
+ seq_printf(s, "\n");
+ }
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void debug_statistics_show_one(struct seq_file *s,
+ const char *name, struct debug_stats *stats)
+{
+ seq_printf(s, "%s:\n"
+ "submitted URBs: %lu\n"
+ "completed OK: %lu\n"
+ "completed failed: %lu\n"
+ "unlinked: %lu\n"
+ "queued for ETD: %lu\n"
+ "queued for DMEM: %lu\n\n",
+ name,
+ stats->submitted,
+ stats->completed_ok,
+ stats->completed_failed,
+ stats->unlinked,
+ stats->queue_etd,
+ stats->queue_dmem);
+}
+
+static int debug_statistics_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ debug_statistics_show_one(s, "nonisoc", &imx21->nonisoc_stats);
+ debug_statistics_show_one(s, "isoc", &imx21->isoc_stats);
+ seq_printf(s, "unblock kludge triggers: %lu\n", imx21->debug_unblocks);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void debug_isoc_show_one(struct seq_file *s,
+ const char *name, int index, struct debug_isoc_trace *trace)
+{
+ seq_printf(s, "%s %d:\n"
+ "cc=0X%02X\n"
+ "scheduled frame %d (%d)\n"
+ "submittted frame %d (%d)\n"
+ "completed frame %d (%d)\n"
+ "requested length=%d\n"
+ "completed length=%d\n\n",
+ name, index,
+ trace->cc,
+ trace->schedule_frame, trace->schedule_frame & 0xFFFF,
+ trace->submit_frame, trace->submit_frame & 0xFFFF,
+ trace->done_frame, trace->done_frame & 0xFFFF,
+ trace->request_len,
+ trace->done_len);
+}
+
+static int debug_isoc_show(struct seq_file *s, void *v)
+{
+ struct imx21 *imx21 = s->private;
+ struct debug_isoc_trace *trace;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ trace = imx21->isoc_trace_failed;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace_failed); i++, trace++)
+ debug_isoc_show_one(s, "isoc failed", i, trace);
+
+ trace = imx21->isoc_trace;
+ for (i = 0; i < ARRAY_SIZE(imx21->isoc_trace); i++, trace++)
+ debug_isoc_show_one(s, "isoc", i, trace);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static int debug_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_status_show, inode->i_private);
+}
+
+static int debug_dmem_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_dmem_show, inode->i_private);
+}
+
+static int debug_etd_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_etd_show, inode->i_private);
+}
+
+static int debug_statistics_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_statistics_show, inode->i_private);
+}
+
+static int debug_isoc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, debug_isoc_show, inode->i_private);
+}
+
+static const struct file_operations debug_status_fops = {
+ .open = debug_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_dmem_fops = {
+ .open = debug_dmem_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_etd_fops = {
+ .open = debug_etd_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_statistics_fops = {
+ .open = debug_statistics_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct file_operations debug_isoc_fops = {
+ .open = debug_isoc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void create_debug_files(struct imx21 *imx21)
+{
+ imx21->debug_root = debugfs_create_dir(dev_name(imx21->dev), NULL);
+ if (!imx21->debug_root)
+ goto failed_create_rootdir;
+
+ if (!debugfs_create_file("status", S_IRUGO,
+ imx21->debug_root, imx21, &debug_status_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("dmem", S_IRUGO,
+ imx21->debug_root, imx21, &debug_dmem_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("etd", S_IRUGO,
+ imx21->debug_root, imx21, &debug_etd_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("statistics", S_IRUGO,
+ imx21->debug_root, imx21, &debug_statistics_fops))
+ goto failed_create;
+
+ if (!debugfs_create_file("isoc", S_IRUGO,
+ imx21->debug_root, imx21, &debug_isoc_fops))
+ goto failed_create;
+
+ return;
+
+failed_create:
+ debugfs_remove_recursive(imx21->debug_root);
+
+failed_create_rootdir:
+ imx21->debug_root = NULL;
+}
+
+
+static void remove_debug_files(struct imx21 *imx21)
+{
+ if (imx21->debug_root) {
+ debugfs_remove_recursive(imx21->debug_root);
+ imx21->debug_root = NULL;
+ }
+}
+
+#endif
+
diff --git a/drivers/usb/host/imx21-hcd.c b/drivers/usb/host/imx21-hcd.c
new file mode 100644
index 00000000000..213e270e1c2
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.c
@@ -0,0 +1,1789 @@
+/*
+ * USB Host Controller Driver for IMX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+
+ /*
+ * The i.MX21 USB hardware contains
+ * * 32 transfer descriptors (called ETDs)
+ * * 4Kb of Data memory
+ *
+ * The data memory is shared between the host and function controllers
+ * (but this driver only supports the host controller)
+ *
+ * So setting up a transfer involves:
+ * * Allocating a ETD
+ * * Fill in ETD with appropriate information
+ * * Allocating data memory (and putting the offset in the ETD)
+ * * Activate the ETD
+ * * Get interrupt when done.
+ *
+ * An ETD is assigned to each active endpoint.
+ *
+ * Low resource (ETD and Data memory) situations are handled differently for
+ * isochronous and non-isochronous transactions:
+ *
+ * Non ISOC transfers are queued if either ETDs or Data memory are unavailable
+ *
+ * ISOC transfers use 2 ETDs per endpoint to achieve double buffering.
+ * They allocate both ETDs and Data memory during URB submission
+ * (and fail if unavailable).
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/platform_device.h>
+#include <linux/usb.h>
+
+#include "../core/hcd.h"
+#include "imx21-hcd.h"
+
+#ifdef DEBUG
+#define DEBUG_LOG_FRAME(imx21, etd, event) \
+ (etd)->event##_frame = readl((imx21)->regs + USBH_FRMNUB)
+#else
+#define DEBUG_LOG_FRAME(imx21, etd, event) do { } while (0)
+#endif
+
+static const char hcd_name[] = "imx21-hcd";
+
+static inline struct imx21 *hcd_to_imx21(struct usb_hcd *hcd)
+{
+ return (struct imx21 *)hcd->hcd_priv;
+}
+
+
+/* =========================================== */
+/* Hardware access helpers */
+/* =========================================== */
+
+static inline void set_register_bits(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+ writel(readl(reg) | mask, reg);
+}
+
+static inline void clear_register_bits(struct imx21 *imx21,
+ u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+ writel(readl(reg) & ~mask, reg);
+}
+
+static inline void clear_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+
+ if (readl(reg) & mask)
+ writel(mask, reg);
+}
+
+static inline void set_toggle_bit(struct imx21 *imx21, u32 offset, u32 mask)
+{
+ void __iomem *reg = imx21->regs + offset;
+
+ if (!(readl(reg) & mask))
+ writel(mask, reg);
+}
+
+static void etd_writel(struct imx21 *imx21, int etd_num, int dword, u32 value)
+{
+ writel(value, imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
+static u32 etd_readl(struct imx21 *imx21, int etd_num, int dword)
+{
+ return readl(imx21->regs + USB_ETD_DWORD(etd_num, dword));
+}
+
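+/* frame numbers wrap at 16 bits; comparisons must allow for this */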
+static inline int wrap_frame(int counter)
+{
+ return counter & 0xFFFF;
+}
+
+static inline int frame_after(int frame, int after)
+{
+ /* handle wrapping like jiffies time_after */
+ return (s16)((s16)after - (s16)frame) < 0;
+}
+
+static int imx21_hc_get_frame(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+
+ return wrap_frame(readl(imx21->regs + USBH_FRMNUB));
+}
+
+
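+/* debugfs support; included directly so it can use the static helpers above */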
+#include "imx21-dbg.c"
+
+/* =========================================== */
+/* ETD management */
+/* =========================================== */
+
+static int alloc_etd(struct imx21 *imx21)
+{
+ int i;
+ struct etd_priv *etd = imx21->etd;
+
+ for (i = 0; i < USB_NUM_ETD; i++, etd++) {
+ if (etd->alloc == 0) {
+ memset(etd, 0, sizeof(imx21->etd[0]));
+ etd->alloc = 1;
+ debug_etd_allocated(imx21);
+ return i;
+ }
+ }
+ return -1;
+}
+
+static void disactivate_etd(struct imx21 *imx21, int num)
+{
+ int etd_mask = (1 << num);
+ struct etd_priv *etd = &imx21->etd[num];
+
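+ /* disable the ETD and its DMA channel, and clear any pending done status */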
+ writel(etd_mask, imx21->regs + USBH_ETDENCLR);
+ clear_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+ writel(etd_mask, imx21->regs + USB_ETDDMACHANLCLR);
+ clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+
+ etd->active_count = 0;
+
+ DEBUG_LOG_FRAME(imx21, etd, disactivated);
+}
+
+static void reset_etd(struct imx21 *imx21, int num)
+{
+ struct etd_priv *etd = imx21->etd + num;
+ int i;
+
+ disactivate_etd(imx21, num);
+
+ for (i = 0; i < 4; i++)
+ etd_writel(imx21, num, i, 0);
+ etd->urb = NULL;
+ etd->ep = NULL;
+ etd->td = NULL;
+}
+
+static void free_etd(struct imx21 *imx21, int num)
+{
+ if (num < 0)
+ return;
+
+ if (num >= USB_NUM_ETD) {
+ dev_err(imx21->dev, "BAD etd=%d!\n", num);
+ return;
+ }
+ if (imx21->etd[num].alloc == 0) {
+ dev_err(imx21->dev, "ETD %d already free!\n", num);
+ return;
+ }
+
+ debug_etd_freed(imx21);
+ reset_etd(imx21, num);
+ memset(&imx21->etd[num], 0, sizeof(imx21->etd[0]));
+}
+
+
+static void setup_etd_dword0(struct imx21 *imx21,
+ int etd_num, struct urb *urb, u8 dir, u16 maxpacket)
+{
+ etd_writel(imx21, etd_num, 0,
+ ((u32) usb_pipedevice(urb->pipe)) << DW0_ADDRESS |
+ ((u32) usb_pipeendpoint(urb->pipe) << DW0_ENDPNT) |
+ ((u32) dir << DW0_DIRECT) |
+ ((u32) ((urb->dev->speed == USB_SPEED_LOW) ?
+ 1 : 0) << DW0_SPEED) |
+ ((u32) fmt_urb_to_etd[usb_pipetype(urb->pipe)] << DW0_FORMAT) |
+ ((u32) maxpacket << DW0_MAXPKTSIZ));
+}
+
+static void activate_etd(struct imx21 *imx21,
+ int etd_num, dma_addr_t dma, u8 dir)
+{
+ u32 etd_mask = 1 << etd_num;
+ struct etd_priv *etd = &imx21->etd[etd_num];
+
+ clear_toggle_bit(imx21, USBH_ETDDONESTAT, etd_mask);
+ set_register_bits(imx21, USBH_ETDDONEEN, etd_mask);
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+
+ if (dma) {
+ set_register_bits(imx21, USB_ETDDMACHANLCLR, etd_mask);
+ clear_toggle_bit(imx21, USBH_XBUFSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YBUFSTAT, etd_mask);
+ writel(dma, imx21->regs + USB_ETDSMSA(etd_num));
+ set_register_bits(imx21, USB_ETDDMAEN, etd_mask);
+ } else {
+ if (dir != TD_DIR_IN) {
+ /* need to set for ZLP */
+ set_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ set_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+ }
+ }
+
+ DEBUG_LOG_FRAME(imx21, etd, activated);
+
+#ifdef DEBUG
+ if (!etd->active_count) {
+ int i;
+ etd->activated_frame = readl(imx21->regs + USBH_FRMNUB);
+ etd->disactivated_frame = -1;
+ etd->last_int_frame = -1;
+ etd->last_req_frame = -1;
+
+ for (i = 0; i < 4; i++)
+ etd->submitted_dwords[i] = etd_readl(imx21, etd_num, i);
+ }
+#endif
+
+ etd->active_count = 1;
+ writel(etd_mask, imx21->regs + USBH_ETDENSET);
+}
+
+/* =========================================== */
+/* Data memory management */
+/* =========================================== */
+
+static int alloc_dmem(struct imx21 *imx21, unsigned int size,
+ struct usb_host_endpoint *ep)
+{
+ unsigned int offset = 0;
+ struct imx21_dmem_area *area;
+ struct imx21_dmem_area *tmp;
+
+ size += (~size + 1) & 0x3; /* Round to 4 byte multiple */
+
+ if (size > DMEM_SIZE) {
+ dev_err(imx21->dev, "size=%d > DMEM_SIZE(%d)\n",
+ size, DMEM_SIZE);
+ return -EINVAL;
+ }
+
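+ /* first-fit search: dmem_list is kept sorted by offset, so take the first gap large enough */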
+ list_for_each_entry(tmp, &imx21->dmem_list, list) {
+ if ((size + offset) < offset)
+ goto fail;
+ if ((size + offset) <= tmp->offset)
+ break;
+ offset = tmp->size + tmp->offset;
+ if ((offset + size) > DMEM_SIZE)
+ goto fail;
+ }
+
+ area = kmalloc(sizeof(struct imx21_dmem_area), GFP_ATOMIC);
+ if (area == NULL)
+ return -ENOMEM;
+
+ area->ep = ep;
+ area->offset = offset;
+ area->size = size;
+ list_add_tail(&area->list, &tmp->list);
+ debug_dmem_allocated(imx21, size);
+ return offset;
+
+fail:
+ return -ENOMEM;
+}
+
+/* Memory now available for a queued ETD - activate it */
+static void activate_queued_etd(struct imx21 *imx21,
+ struct etd_priv *etd, u32 dmem_offset)
+{
+ struct urb_priv *urb_priv = etd->urb->hcpriv;
+ int etd_num = etd - &imx21->etd[0];
+ u32 maxpacket = etd_readl(imx21, etd_num, 1) >> DW1_YBUFSRTAD;
+ u8 dir = (etd_readl(imx21, etd_num, 2) >> DW2_DIRPID) & 0x03;
+
+ dev_dbg(imx21->dev, "activating queued ETD %d now DMEM available\n",
+ etd_num);
+ etd_writel(imx21, etd_num, 1,
+ ((dmem_offset + maxpacket) << DW1_YBUFSRTAD) | dmem_offset);
+
+ urb_priv->active = 1;
+ activate_etd(imx21, etd_num, etd->dma_handle, dir);
+}
+
+static void free_dmem(struct imx21 *imx21, int offset)
+{
+ struct imx21_dmem_area *area;
+ struct etd_priv *etd, *tmp;
+ int found = 0;
+
+ list_for_each_entry(area, &imx21->dmem_list, list) {
+ if (area->offset == offset) {
+ debug_dmem_freed(imx21, area->size);
+ list_del(&area->list);
+ kfree(area);
+ found = 1;
+ break;
+ }
+ }
+
+ if (!found) {
+ dev_err(imx21->dev,
+ "Trying to free unallocated DMEM %d\n", offset);
+ return;
+ }
+
+ /* Try again to allocate memory for anything we've queued */
+ list_for_each_entry_safe(etd, tmp, &imx21->queue_for_dmem, queue) {
+ offset = alloc_dmem(imx21, etd->dmem_size, etd->ep);
+ if (offset >= 0) {
+ list_del(&etd->queue);
+ activate_queued_etd(imx21, etd, (u32)offset);
+ }
+ }
+}
+
+static void free_epdmem(struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+ struct imx21_dmem_area *area, *tmp;
+
+ list_for_each_entry_safe(area, tmp, &imx21->dmem_list, list) {
+ if (area->ep == ep) {
+ dev_err(imx21->dev,
+ "Active DMEM %d for disabled ep=%p\n",
+ area->offset, ep);
+ list_del(&area->list);
+ kfree(area);
+ }
+ }
+}
+
+
+/* =========================================== */
+/* Endpoint handling */
+/* =========================================== */
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb);
+
+/* Endpoint now idle - release its ETD(s) or assign to queued request */
+static void ep_idle(struct imx21 *imx21, struct ep_priv *ep_priv)
+{
+ int etd_num;
+ int i;
+
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ etd_num = ep_priv->etd[i];
+ if (etd_num < 0)
+ continue;
+
+ ep_priv->etd[i] = -1;
+ if (list_empty(&imx21->queue_for_etd)) {
+ free_etd(imx21, etd_num);
+ continue;
+ }
+
+ dev_dbg(imx21->dev,
+ "assigning idle etd %d for queued request\n", etd_num);
+ ep_priv = list_first_entry(&imx21->queue_for_etd,
+ struct ep_priv, queue);
+ list_del(&ep_priv->queue);
+ reset_etd(imx21, etd_num);
+ ep_priv->waiting_etd = 0;
+ ep_priv->etd[i] = etd_num;
+
+ if (list_empty(&ep_priv->ep->urb_list)) {
+ dev_err(imx21->dev, "No urb for queued ep!\n");
+ continue;
+ }
+ schedule_nonisoc_etd(imx21, list_first_entry(
+ &ep_priv->ep->urb_list, struct urb, urb_list));
+ }
+}
+
+static void urb_done(struct usb_hcd *hcd, struct urb *urb, int status)
+__releases(imx21->lock)
+__acquires(imx21->lock)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct ep_priv *ep_priv = urb->ep->hcpriv;
+ struct urb_priv *urb_priv = urb->hcpriv;
+
+ debug_urb_completed(imx21, urb, status);
+ dev_vdbg(imx21->dev, "urb %p done %d\n", urb, status);
+
+ kfree(urb_priv->isoc_td);
+ kfree(urb->hcpriv);
+ urb->hcpriv = NULL;
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
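+ /* drop the lock around giveback: the completion handler may re-enter the HCD */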
+ spin_unlock(&imx21->lock);
+ usb_hcd_giveback_urb(hcd, urb, status);
+ spin_lock(&imx21->lock);
+ if (list_empty(&ep_priv->ep->urb_list))
+ ep_idle(imx21, ep_priv);
+}
+
+/* =========================================== */
+/* ISOC Handling ... */
+/* =========================================== */
+
+static void schedule_isoc_etds(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct ep_priv *ep_priv = ep->hcpriv;
+ struct etd_priv *etd;
+ struct urb_priv *urb_priv;
+ struct td *td;
+ int etd_num;
+ int i;
+ int cur_frame;
+ u8 dir;
+
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+too_late:
+ if (list_empty(&ep_priv->td_list))
+ break;
+
+ etd_num = ep_priv->etd[i];
+ if (etd_num < 0)
+ break;
+
+ etd = &imx21->etd[etd_num];
+ if (etd->urb)
+ continue;
+
+ td = list_entry(ep_priv->td_list.next, struct td, list);
+ list_del(&td->list);
+ urb_priv = td->urb->hcpriv;
+
+ cur_frame = imx21_hc_get_frame(hcd);
+ if (frame_after(cur_frame, td->frame)) {
+ dev_dbg(imx21->dev, "isoc too late frame %d > %d\n",
+ cur_frame, td->frame);
+ urb_priv->isoc_status = -EXDEV;
+ td->urb->iso_frame_desc[
+ td->isoc_index].actual_length = 0;
+ td->urb->iso_frame_desc[td->isoc_index].status = -EXDEV;
+ if (--urb_priv->isoc_remaining == 0)
+ urb_done(hcd, td->urb, urb_priv->isoc_status);
+ goto too_late;
+ }
+
+ urb_priv->active = 1;
+ etd->td = td;
+ etd->ep = td->ep;
+ etd->urb = td->urb;
+ etd->len = td->len;
+
+ debug_isoc_submitted(imx21, cur_frame, td);
+
+ dir = usb_pipeout(td->urb->pipe) ? TD_DIR_OUT : TD_DIR_IN;
+ setup_etd_dword0(imx21, etd_num, td->urb, dir, etd->dmem_size);
+ etd_writel(imx21, etd_num, 1, etd->dmem_offset);
+ etd_writel(imx21, etd_num, 2,
+ (TD_NOTACCESSED << DW2_COMPCODE) |
+ ((td->frame & 0xFFFF) << DW2_STARTFRM));
+ etd_writel(imx21, etd_num, 3,
+ (TD_NOTACCESSED << DW3_COMPCODE0) |
+ (td->len << DW3_PKTLEN0));
+
+ activate_etd(imx21, etd_num, td->data, dir);
+ }
+}
+
+static void isoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int etd_mask = 1 << etd_num;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct etd_priv *etd = imx21->etd + etd_num;
+ struct td *td = etd->td;
+ struct usb_host_endpoint *ep = etd->ep;
+ int isoc_index = td->isoc_index;
+ unsigned int pipe = urb->pipe;
+ int dir_in = usb_pipein(pipe);
+ int cc;
+ int bytes_xfrd;
+
+ disactivate_etd(imx21, etd_num);
+
+ cc = (etd_readl(imx21, etd_num, 3) >> DW3_COMPCODE0) & 0xf;
+ bytes_xfrd = etd_readl(imx21, etd_num, 3) & 0x3ff;
+
+ /* Input doesn't always fill the buffer; don't generate an error
+ * when this happens.
+ */
+ if (dir_in && (cc == TD_DATAUNDERRUN))
+ cc = TD_CC_NOERROR;
+
+ if (cc == TD_NOTACCESSED)
+ bytes_xfrd = 0;
+
+ debug_isoc_completed(imx21,
+ imx21_hc_get_frame(hcd), td, cc, bytes_xfrd);
+ if (cc) {
+ urb_priv->isoc_status = -EXDEV;
+ dev_dbg(imx21->dev,
+ "bad iso cc=0x%X frame=%d sched frame=%d "
+ "cnt=%d len=%d urb=%p etd=%d index=%d\n",
+ cc, imx21_hc_get_frame(hcd), td->frame,
+ bytes_xfrd, td->len, urb, etd_num, isoc_index);
+ }
+
+ if (dir_in)
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+
+ urb->actual_length += bytes_xfrd;
+ urb->iso_frame_desc[isoc_index].actual_length = bytes_xfrd;
+ urb->iso_frame_desc[isoc_index].status = cc_to_error[cc];
+
+ etd->td = NULL;
+ etd->urb = NULL;
+ etd->ep = NULL;
+
+ if (--urb_priv->isoc_remaining == 0)
+ urb_done(hcd, urb, urb_priv->isoc_status);
+
+ schedule_isoc_etds(hcd, ep);
+}
+
+static struct ep_priv *alloc_isoc_ep(
+ struct imx21 *imx21, struct usb_host_endpoint *ep)
+{
+ struct ep_priv *ep_priv;
+ int i;
+
+ ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+ if (ep_priv == NULL)
+ return NULL;
+
+ /* Allocate the ETDs */
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ ep_priv->etd[i] = alloc_etd(imx21);
+ if (ep_priv->etd[i] < 0) {
+ int j;
+ dev_err(imx21->dev, "isoc: Couldn't allocate etd\n");
+ for (j = 0; j < i; j++)
+ free_etd(imx21, ep_priv->etd[j]);
+ goto alloc_etd_failed;
+ }
+ imx21->etd[ep_priv->etd[i]].ep = ep;
+ }
+
+ INIT_LIST_HEAD(&ep_priv->td_list);
+ ep_priv->ep = ep;
+ ep->hcpriv = ep_priv;
+ return ep_priv;
+
+alloc_etd_failed:
+ kfree(ep_priv);
+ return NULL;
+}
+
+static int imx21_hc_urb_enqueue_isoc(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct urb_priv *urb_priv;
+ unsigned long flags;
+ struct ep_priv *ep_priv;
+ struct td *td = NULL;
+ int i;
+ int ret;
+ int cur_frame;
+ u16 maxpacket;
+
+ urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+ if (urb_priv == NULL)
+ return -ENOMEM;
+
+ urb_priv->isoc_td = kzalloc(
+ sizeof(struct td) * urb->number_of_packets, mem_flags);
+ if (urb_priv->isoc_td == NULL) {
+ ret = -ENOMEM;
+ goto alloc_td_failed;
+ }
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ if (ep->hcpriv == NULL) {
+ ep_priv = alloc_isoc_ep(imx21, ep);
+ if (ep_priv == NULL) {
+ ret = -ENOMEM;
+ goto alloc_ep_failed;
+ }
+ } else {
+ ep_priv = ep->hcpriv;
+ }
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto link_failed;
+
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->error_count = 0;
+ urb->hcpriv = urb_priv;
+ urb_priv->ep = ep;
+
+ /* allocate data memory for largest packets if not already done */
+ maxpacket = usb_maxpacket(urb->dev, urb->pipe, usb_pipeout(urb->pipe));
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ struct etd_priv *etd = &imx21->etd[ep_priv->etd[i]];
+
+ if (etd->dmem_size > 0 && etd->dmem_size < maxpacket) {
+ /* not sure if this can really occur.... */
+ dev_err(imx21->dev, "increasing isoc buffer %d->%d\n",
+ etd->dmem_size, maxpacket);
+ ret = -EMSGSIZE;
+ goto alloc_dmem_failed;
+ }
+
+ if (etd->dmem_size == 0) {
+ etd->dmem_offset = alloc_dmem(imx21, maxpacket, ep);
+ if (etd->dmem_offset < 0) {
+ dev_dbg(imx21->dev, "failed alloc isoc dmem\n");
+ ret = -EAGAIN;
+ goto alloc_dmem_failed;
+ }
+ etd->dmem_size = maxpacket;
+ }
+ }
+
+ /* calculate frame */
+ cur_frame = imx21_hc_get_frame(hcd);
+ if (urb->transfer_flags & URB_ISO_ASAP) {
+ if (list_empty(&ep_priv->td_list))
+ urb->start_frame = cur_frame + 5;
+ else
+ urb->start_frame = list_entry(
+ ep_priv->td_list.prev,
+ struct td, list)->frame + urb->interval;
+ }
+ urb->start_frame = wrap_frame(urb->start_frame);
+ if (frame_after(cur_frame, urb->start_frame)) {
+ dev_dbg(imx21->dev,
+ "enqueue: adjusting iso start %d (cur=%d) asap=%d\n",
+ urb->start_frame, cur_frame,
+ (urb->transfer_flags & URB_ISO_ASAP) != 0);
+ urb->start_frame = wrap_frame(cur_frame + 1);
+ }
+
+ /* set up transfers */
+ td = urb_priv->isoc_td;
+ for (i = 0; i < urb->number_of_packets; i++, td++) {
+ td->ep = ep;
+ td->urb = urb;
+ td->len = urb->iso_frame_desc[i].length;
+ td->isoc_index = i;
+ td->frame = wrap_frame(urb->start_frame + urb->interval * i);
+ td->data = urb->transfer_dma + urb->iso_frame_desc[i].offset;
+ list_add_tail(&td->list, &ep_priv->td_list);
+ }
+
+ urb_priv->isoc_remaining = urb->number_of_packets;
+ dev_vdbg(imx21->dev, "setup %d packets for iso frame %d->%d\n",
+ urb->number_of_packets, urb->start_frame, td->frame);
+
+ debug_urb_submitted(imx21, urb);
+ schedule_isoc_etds(hcd, ep);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+alloc_dmem_failed:
+ usb_hcd_unlink_urb_from_ep(hcd, urb);
+
+link_failed:
+alloc_ep_failed:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ kfree(urb_priv->isoc_td);
+
+alloc_td_failed:
+ kfree(urb_priv);
+ return ret;
+}
+
+static void dequeue_isoc_urb(struct imx21 *imx21,
+ struct urb *urb, struct ep_priv *ep_priv)
+{
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct td *td, *tmp;
+ int i;
+
+ if (urb_priv->active) {
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ int etd_num = ep_priv->etd[i];
+ if (etd_num != -1 && imx21->etd[etd_num].urb == urb) {
+ struct etd_priv *etd = imx21->etd + etd_num;
+
+ reset_etd(imx21, etd_num);
+ if (etd->dmem_size)
+ free_dmem(imx21, etd->dmem_offset);
+ etd->dmem_size = 0;
+ }
+ }
+ }
+
+ list_for_each_entry_safe(td, tmp, &ep_priv->td_list, list) {
+ if (td->urb == urb) {
+ dev_vdbg(imx21->dev, "removing td %p\n", td);
+ list_del(&td->list);
+ }
+ }
+}
+
+/* =========================================== */
+/* NON ISOC Handling ... */
+/* =========================================== */
+
+static void schedule_nonisoc_etd(struct imx21 *imx21, struct urb *urb)
+{
+ unsigned int pipe = urb->pipe;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ struct ep_priv *ep_priv = urb_priv->ep->hcpriv;
+ int state = urb_priv->state;
+ int etd_num = ep_priv->etd[0];
+ struct etd_priv *etd;
+ int dmem_offset;
+ u32 count;
+ u16 etd_buf_size;
+ u16 maxpacket;
+ u8 dir;
+ u8 bufround;
+ u8 datatoggle;
+ u8 interval = 0;
+ u8 relpolpos = 0;
+
+ if (etd_num < 0) {
+ dev_err(imx21->dev, "No valid ETD\n");
+ return;
+ }
+ if (readl(imx21->regs + USBH_ETDENSET) & (1 << etd_num))
+ dev_err(imx21->dev, "submitting to active ETD %d\n", etd_num);
+
+ etd = &imx21->etd[etd_num];
+ maxpacket = usb_maxpacket(urb->dev, pipe, usb_pipeout(pipe));
+ if (!maxpacket)
+ maxpacket = 8;
+
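+ /* control transfers step through SETUP, DATA and ACK states, reusing the same ETD */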
+ if (usb_pipecontrol(pipe) && (state != US_CTRL_DATA)) {
+ if (state == US_CTRL_SETUP) {
+ dir = TD_DIR_SETUP;
+ etd->dma_handle = urb->setup_dma;
+ bufround = 0;
+ count = 8;
+ datatoggle = TD_TOGGLE_DATA0;
+ } else { /* US_CTRL_ACK */
+ dir = usb_pipeout(pipe) ? TD_DIR_IN : TD_DIR_OUT;
+ etd->dma_handle = urb->transfer_dma;
+ bufround = 0;
+ count = 0;
+ datatoggle = TD_TOGGLE_DATA1;
+ }
+ } else {
+ dir = usb_pipeout(pipe) ? TD_DIR_OUT : TD_DIR_IN;
+ bufround = (dir == TD_DIR_IN) ? 1 : 0;
+ etd->dma_handle = urb->transfer_dma;
+ if (usb_pipebulk(pipe) && (state == US_BULK0))
+ count = 0;
+ else
+ count = urb->transfer_buffer_length;
+
+ if (usb_pipecontrol(pipe)) {
+ datatoggle = TD_TOGGLE_DATA1;
+ } else {
+ if (usb_gettoggle(
+ urb->dev,
+ usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe)))
+ datatoggle = TD_TOGGLE_DATA1;
+ else
+ datatoggle = TD_TOGGLE_DATA0;
+ }
+ }
+
+ etd->urb = urb;
+ etd->ep = urb_priv->ep;
+ etd->len = count;
+
+ if (usb_pipeint(pipe)) {
+ interval = urb->interval;
+ relpolpos = (readl(imx21->regs + USBH_FRMNUB) + 1) & 0xff;
+ }
+
+ /* Write ETD to device memory */
+ setup_etd_dword0(imx21, etd_num, urb, dir, maxpacket);
+
+ etd_writel(imx21, etd_num, 2,
+ (u32) interval << DW2_POLINTERV |
+ ((u32) relpolpos << DW2_RELPOLPOS) |
+ ((u32) dir << DW2_DIRPID) |
+ ((u32) bufround << DW2_BUFROUND) |
+ ((u32) datatoggle << DW2_DATATOG) |
+ ((u32) TD_NOTACCESSED << DW2_COMPCODE));
+
+ /* DMA will always transfer buffer size even if TOBYCNT in DWORD3
+ is smaller. Make sure we don't overrun the buffer!
+ */
+ if (count && count < maxpacket)
+ etd_buf_size = count;
+ else
+ etd_buf_size = maxpacket;
+
+ etd_writel(imx21, etd_num, 3,
+ ((u32) (etd_buf_size - 1) << DW3_BUFSIZE) | (u32) count);
+
+ if (!count)
+ etd->dma_handle = 0;
+
+ /* allocate x and y buffer space at once */
+ etd->dmem_size = (count > maxpacket) ? maxpacket * 2 : maxpacket;
+ dmem_offset = alloc_dmem(imx21, etd->dmem_size, urb_priv->ep);
+ if (dmem_offset < 0) {
+ /* Setup everything we can in HW and update when we get DMEM */
+ etd_writel(imx21, etd_num, 1, (u32)maxpacket << 16);
+
+ dev_dbg(imx21->dev, "Queuing etd %d for DMEM\n", etd_num);
+ debug_urb_queued_for_dmem(imx21, urb);
+ list_add_tail(&etd->queue, &imx21->queue_for_dmem);
+ return;
+ }
+
+ etd_writel(imx21, etd_num, 1,
+ (((u32) dmem_offset + (u32) maxpacket) << DW1_YBUFSRTAD) |
+ (u32) dmem_offset);
+
+ urb_priv->active = 1;
+
+ /* enable the ETD to kick off transfer */
+ dev_vdbg(imx21->dev, "Activating etd %d for %d bytes %s\n",
+ etd_num, count, dir != TD_DIR_IN ? "out" : "in");
+ activate_etd(imx21, etd_num, etd->dma_handle, dir);
+
+}
+
+static void nonisoc_etd_done(struct usb_hcd *hcd, struct urb *urb, int etd_num)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct etd_priv *etd = &imx21->etd[etd_num];
+ u32 etd_mask = 1 << etd_num;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int dir;
+ u16 xbufaddr;
+ int cc;
+ u32 bytes_xfrd;
+ int etd_done;
+
+ disactivate_etd(imx21, etd_num);
+
+ dir = (etd_readl(imx21, etd_num, 0) >> DW0_DIRECT) & 0x3;
+ xbufaddr = etd_readl(imx21, etd_num, 1) & 0xffff;
+ cc = (etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE) & 0xf;
+ bytes_xfrd = etd->len - (etd_readl(imx21, etd_num, 3) & 0x1fffff);
+
+ /* save toggle carry */
+ usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe),
+ usb_pipeout(urb->pipe),
+ (etd_readl(imx21, etd_num, 0) >> DW0_TOGCRY) & 0x1);
+
+ if (dir == TD_DIR_IN) {
+ clear_toggle_bit(imx21, USBH_XFILLSTAT, etd_mask);
+ clear_toggle_bit(imx21, USBH_YFILLSTAT, etd_mask);
+ }
+ free_dmem(imx21, xbufaddr);
+
+ urb->error_count = 0;
+ if (!(urb->transfer_flags & URB_SHORT_NOT_OK)
+ && (cc == TD_DATAUNDERRUN))
+ cc = TD_CC_NOERROR;
+
+ if (cc != 0)
+ dev_vdbg(imx21->dev, "cc is 0x%x\n", cc);
+
+ etd_done = (cc_to_error[cc] != 0); /* stop if error */
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ switch (urb_priv->state) {
+ case US_CTRL_SETUP:
+ if (urb->transfer_buffer_length > 0)
+ urb_priv->state = US_CTRL_DATA;
+ else
+ urb_priv->state = US_CTRL_ACK;
+ break;
+ case US_CTRL_DATA:
+ urb->actual_length += bytes_xfrd;
+ urb_priv->state = US_CTRL_ACK;
+ break;
+ case US_CTRL_ACK:
+ etd_done = 1;
+ break;
+ default:
+ dev_err(imx21->dev,
+ "Invalid pipe state %d\n", urb_priv->state);
+ etd_done = 1;
+ break;
+ }
+ break;
+
+ case PIPE_BULK:
+ urb->actual_length += bytes_xfrd;
+ if ((urb_priv->state == US_BULK)
+ && (urb->transfer_flags & URB_ZERO_PACKET)
+ && urb->transfer_buffer_length > 0
+ && ((urb->transfer_buffer_length %
+ usb_maxpacket(urb->dev, urb->pipe,
+ usb_pipeout(urb->pipe))) == 0)) {
+ /* need a 0-packet */
+ urb_priv->state = US_BULK0;
+ } else {
+ etd_done = 1;
+ }
+ break;
+
+ case PIPE_INTERRUPT:
+ urb->actual_length += bytes_xfrd;
+ etd_done = 1;
+ break;
+ }
+
+ if (!etd_done) {
+ dev_vdbg(imx21->dev, "next state=%d\n", urb_priv->state);
+ schedule_nonisoc_etd(imx21, urb);
+ } else {
+ struct usb_host_endpoint *ep = urb->ep;
+
+ urb_done(hcd, urb, cc_to_error[cc]);
+ etd->urb = NULL;
+
+ if (!list_empty(&ep->urb_list)) {
+ urb = list_first_entry(&ep->urb_list,
+ struct urb, urb_list);
+ dev_vdbg(imx21->dev, "next URB %p\n", urb);
+ schedule_nonisoc_etd(imx21, urb);
+ }
+ }
+}
+
+static struct ep_priv *alloc_ep(void)
+{
+ int i;
+ struct ep_priv *ep_priv;
+
+ ep_priv = kzalloc(sizeof(struct ep_priv), GFP_ATOMIC);
+ if (!ep_priv)
+ return NULL;
+
+ for (i = 0; i < NUM_ISO_ETDS; ++i)
+ ep_priv->etd[i] = -1;
+
+ return ep_priv;
+}
+
+static int imx21_hc_urb_enqueue(struct usb_hcd *hcd,
+ struct urb *urb, gfp_t mem_flags)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct usb_host_endpoint *ep = urb->ep;
+ struct urb_priv *urb_priv;
+ struct ep_priv *ep_priv;
+ struct etd_priv *etd;
+ int ret;
+ unsigned long flags;
+ int new_ep = 0;
+
+ dev_vdbg(imx21->dev,
+ "enqueue urb=%p ep=%p len=%d "
+ "buffer=%p dma=%08X setupBuf=%p setupDma=%08X\n",
+ urb, ep,
+ urb->transfer_buffer_length,
+ urb->transfer_buffer, urb->transfer_dma,
+ urb->setup_packet, urb->setup_dma);
+
+ if (usb_pipeisoc(urb->pipe))
+ return imx21_hc_urb_enqueue_isoc(hcd, ep, urb, mem_flags);
+
+ urb_priv = kzalloc(sizeof(struct urb_priv), mem_flags);
+ if (!urb_priv)
+ return -ENOMEM;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ ep_priv = ep->hcpriv;
+ if (ep_priv == NULL) {
+ ep_priv = alloc_ep();
+ if (!ep_priv) {
+ ret = -ENOMEM;
+ goto failed_alloc_ep;
+ }
+ ep->hcpriv = ep_priv;
+ ep_priv->ep = ep;
+ new_ep = 1;
+ }
+
+ ret = usb_hcd_link_urb_to_ep(hcd, urb);
+ if (ret)
+ goto failed_link;
+
+ urb->status = -EINPROGRESS;
+ urb->actual_length = 0;
+ urb->error_count = 0;
+ urb->hcpriv = urb_priv;
+ urb_priv->ep = ep;
+
+ switch (usb_pipetype(urb->pipe)) {
+ case PIPE_CONTROL:
+ urb_priv->state = US_CTRL_SETUP;
+ break;
+ case PIPE_BULK:
+ urb_priv->state = US_BULK;
+ break;
+ }
+
+ debug_urb_submitted(imx21, urb);
+ if (ep_priv->etd[0] < 0) {
+ if (ep_priv->waiting_etd) {
+ dev_dbg(imx21->dev,
+ "no ETD available already queued %p\n",
+ ep_priv);
+ debug_urb_queued_for_etd(imx21, urb);
+ goto out;
+ }
+ ep_priv->etd[0] = alloc_etd(imx21);
+ if (ep_priv->etd[0] < 0) {
+ dev_dbg(imx21->dev,
+ "no ETD available queueing %p\n", ep_priv);
+ debug_urb_queued_for_etd(imx21, urb);
+ list_add_tail(&ep_priv->queue, &imx21->queue_for_etd);
+ ep_priv->waiting_etd = 1;
+ goto out;
+ }
+ }
+
+ /* Schedule if no URB already active for this endpoint */
+ etd = &imx21->etd[ep_priv->etd[0]];
+ if (etd->urb == NULL) {
+ DEBUG_LOG_FRAME(imx21, etd, last_req);
+ schedule_nonisoc_etd(imx21, urb);
+ }
+
+out:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+failed_link:
+failed_alloc_ep:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ kfree(urb_priv);
+ return ret;
+}
+
+static int imx21_hc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
+ int status)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ struct usb_host_endpoint *ep;
+ struct ep_priv *ep_priv;
+ struct urb_priv *urb_priv = urb->hcpriv;
+ int ret = -EINVAL;
+
+ dev_vdbg(imx21->dev, "dequeue urb=%p iso=%d status=%d\n",
+ urb, usb_pipeisoc(urb->pipe), status);
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ ret = usb_hcd_check_unlink_urb(hcd, urb, status);
+ if (ret)
+ goto fail;
+ ep = urb_priv->ep;
+ ep_priv = ep->hcpriv;
+
+ debug_urb_unlinked(imx21, urb);
+
+ if (usb_pipeisoc(urb->pipe)) {
+ dequeue_isoc_urb(imx21, urb, ep_priv);
+ schedule_isoc_etds(hcd, ep);
+ } else if (urb_priv->active) {
+ int etd_num = ep_priv->etd[0];
+ if (etd_num != -1) {
+ disactivate_etd(imx21, etd_num);
+ free_dmem(imx21, etd_readl(imx21, etd_num, 1) & 0xffff);
+ imx21->etd[etd_num].urb = NULL;
+ }
+ }
+
+ urb_done(hcd, urb, status);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+
+fail:
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return ret;
+}
+
+/* =========================================== */
+/* Interrupt dispatch */
+/* =========================================== */
+
+static void process_etds(struct usb_hcd *hcd, struct imx21 *imx21, int sof)
+{
+ int etd_num;
+ int enable_sof_int = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ for (etd_num = 0; etd_num < USB_NUM_ETD; etd_num++) {
+ u32 etd_mask = 1 << etd_num;
+ u32 enabled = readl(imx21->regs + USBH_ETDENSET) & etd_mask;
+ u32 done = readl(imx21->regs + USBH_ETDDONESTAT) & etd_mask;
+ struct etd_priv *etd = &imx21->etd[etd_num];
+
+
+ if (done) {
+ DEBUG_LOG_FRAME(imx21, etd, last_int);
+ } else {
+/*
+ * Kludge warning!
+ *
+ * When multiple transfers are using the bus we sometimes get into a state
+ * where the transfer has completed (the CC field of the ETD is != 0x0F),
+ * the ETD has self disabled but the ETDDONESTAT flag is not set
+ * (and hence no interrupt occurs).
+ * This causes the transfer in question to hang.
+ * The kludge below checks for this condition at each SOF and processes any
+ * blocked ETDs (after an arbitrary 10 frame wait)
+ *
+ * With a single active transfer the usbtest test suite will run for days
+ * without the kludge.
+ * With other bus activity (eg mass storage) even just test1 will hang without
+ * the kludge.
+ */
+ u32 dword0;
+ int cc;
+
+ if (etd->active_count && !enabled) /* suspicious... */
+ enable_sof_int = 1;
+
+ if (!sof || enabled || !etd->active_count)
+ continue;
+
+ cc = etd_readl(imx21, etd_num, 2) >> DW2_COMPCODE;
+ if (cc == TD_NOTACCESSED)
+ continue;
+
+ if (++etd->active_count < 10)
+ continue;
+
+ dword0 = etd_readl(imx21, etd_num, 0);
+ dev_dbg(imx21->dev,
+ "unblock ETD %d dev=0x%X ep=0x%X cc=0x%02X!\n",
+ etd_num, dword0 & 0x7F,
+ (dword0 >> DW0_ENDPNT) & 0x0F,
+ cc);
+
+#ifdef DEBUG
+ dev_dbg(imx21->dev,
+ "frame: act=%d disact=%d"
+ " int=%d req=%d cur=%d\n",
+ etd->activated_frame,
+ etd->disactivated_frame,
+ etd->last_int_frame,
+ etd->last_req_frame,
+ readl(imx21->regs + USBH_FRMNUB));
+ imx21->debug_unblocks++;
+#endif
+ etd->active_count = 0;
+/* End of kludge */
+ }
+
+ if (etd->ep == NULL || etd->urb == NULL) {
+ dev_dbg(imx21->dev,
+ "Interrupt for unexpected etd %d"
+ " ep=%p urb=%p\n",
+ etd_num, etd->ep, etd->urb);
+ disactivate_etd(imx21, etd_num);
+ continue;
+ }
+
+ if (usb_pipeisoc(etd->urb->pipe))
+ isoc_etd_done(hcd, etd->urb, etd_num);
+ else
+ nonisoc_etd_done(hcd, etd->urb, etd_num);
+ }
+
+ /* only enable SOF interrupt if it may be needed for the kludge */
+ if (enable_sof_int)
+ set_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+ else
+ clear_register_bits(imx21, USBH_SYSIEN, USBH_SYSIEN_SOFINT);
+
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+static irqreturn_t imx21_irq(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ u32 ints = readl(imx21->regs + USBH_SYSISR);
+
+ if (ints & USBH_SYSIEN_HERRINT)
+ dev_dbg(imx21->dev, "Scheduling error\n");
+
+ if (ints & USBH_SYSIEN_SORINT)
+ dev_dbg(imx21->dev, "Scheduling overrun\n");
+
+ if (ints & (USBH_SYSISR_DONEINT | USBH_SYSISR_SOFINT))
+ process_etds(hcd, imx21, ints & USBH_SYSISR_SOFINT);
+
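+ /* acknowledge the interrupts we have handled */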
+ writel(ints, imx21->regs + USBH_SYSISR);
+ return IRQ_HANDLED;
+}
+
+static void imx21_hc_endpoint_disable(struct usb_hcd *hcd,
+ struct usb_host_endpoint *ep)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ struct ep_priv *ep_priv;
+ int i;
+
+ if (ep == NULL)
+ return;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+ ep_priv = ep->hcpriv;
+ dev_vdbg(imx21->dev, "disable ep=%p, ep->hcpriv=%p\n", ep, ep_priv);
+
+ if (!list_empty(&ep->urb_list))
+ dev_dbg(imx21->dev, "ep's URB list is not empty\n");
+
+ if (ep_priv != NULL) {
+ for (i = 0; i < NUM_ISO_ETDS; i++) {
+ if (ep_priv->etd[i] > -1)
+ dev_dbg(imx21->dev, "free etd %d for disable\n",
+ ep_priv->etd[i]);
+
+ free_etd(imx21, ep_priv->etd[i]);
+ }
+ kfree(ep_priv);
+ ep->hcpriv = NULL;
+ }
+
+ for (i = 0; i < USB_NUM_ETD; i++) {
+ if (imx21->etd[i].alloc && imx21->etd[i].ep == ep) {
+ dev_err(imx21->dev,
+ "Active etd %d for disabled ep=%p!\n", i, ep);
+ free_etd(imx21, i);
+ }
+ }
+ free_epdmem(imx21, ep);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Hub handling */
+/* =========================================== */
+
+static int get_hub_descriptor(struct usb_hcd *hcd,
+ struct usb_hub_descriptor *desc)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ desc->bDescriptorType = 0x29; /* HUB descriptor */
+ desc->bHubContrCurrent = 0;
+
+ desc->bNbrPorts = readl(imx21->regs + USBH_ROOTHUBA)
+ & USBH_ROOTHUBA_NDNSTMPRT_MASK;
+ desc->bDescLength = 9;
+ desc->bPwrOn2PwrGood = 0;
+ desc->wHubCharacteristics = (__force __u16) cpu_to_le16(
+ 0x0002 | /* No power switching */
+ 0x0010 | /* No over current protection */
+ 0);
+
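+ /* DeviceRemovable and PortPwrCtrlMask bitmaps */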
+ desc->bitmap[0] = 1 << 1;
+ desc->bitmap[1] = ~0;
+ return 0;
+}
+
+static int imx21_hc_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int ports;
+ int changed = 0;
+ int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+ ports = readl(imx21->regs + USBH_ROOTHUBA)
+ & USBH_ROOTHUBA_NDNSTMPRT_MASK;
+ if (ports > 7) {
+ dev_err(imx21->dev, "ports %d > 7\n", ports);
+ ports = 7;
+ }
+ for (i = 0; i < ports; i++) {
+ if (readl(imx21->regs + USBH_PORTSTAT(i)) &
+ (USBH_PORTSTAT_CONNECTSC |
+ USBH_PORTSTAT_PRTENBLSC |
+ USBH_PORTSTAT_PRTSTATSC |
+ USBH_PORTSTAT_OVRCURIC |
+ USBH_PORTSTAT_PRTRSTSC)) {
+
+ changed = 1;
+ buf[0] |= 1 << (i + 1);
+ }
+ }
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ if (changed)
+ dev_info(imx21->dev, "Hub status changed\n");
+ return changed;
+}
+
+static int imx21_hc_hub_control(struct usb_hcd *hcd,
+ u16 typeReq,
+ u16 wValue, u16 wIndex, char *buf, u16 wLength)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ int rc = 0;
+ u32 status_write = 0;
+
+ switch (typeReq) {
+ case ClearHubFeature:
+ dev_dbg(imx21->dev, "ClearHubFeature\n");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ dev_dbg(imx21->dev, " OVER_CURRENT\n");
+ break;
+ case C_HUB_LOCAL_POWER:
+ dev_dbg(imx21->dev, " LOCAL_POWER\n");
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+ break;
+
+ case ClearPortFeature:
+ dev_dbg(imx21->dev, "ClearPortFeature\n");
+ switch (wValue) {
+ case USB_PORT_FEAT_ENABLE:
+ dev_dbg(imx21->dev, " ENABLE\n");
+ status_write = USBH_PORTSTAT_CURCONST;
+ break;
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(imx21->dev, " SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTOVRCURI;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(imx21->dev, " POWER\n");
+ status_write = USBH_PORTSTAT_LSDEVCON;
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ dev_dbg(imx21->dev, " C_ENABLE\n");
+ status_write = USBH_PORTSTAT_PRTENBLSC;
+ break;
+ case USB_PORT_FEAT_C_SUSPEND:
+ dev_dbg(imx21->dev, " C_SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTSTATSC;
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ dev_dbg(imx21->dev, " C_CONNECTION\n");
+ status_write = USBH_PORTSTAT_CONNECTSC;
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(imx21->dev, " C_OVER_CURRENT\n");
+ status_write = USBH_PORTSTAT_OVRCURIC;
+ break;
+ case USB_PORT_FEAT_C_RESET:
+ dev_dbg(imx21->dev, " C_RESET\n");
+ status_write = USBH_PORTSTAT_PRTRSTSC;
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ break;
+
+ case GetHubDescriptor:
+ dev_dbg(imx21->dev, "GetHubDescriptor\n");
+ rc = get_hub_descriptor(hcd, (void *)buf);
+ break;
+
+ case GetHubStatus:
+ dev_dbg(imx21->dev, "GetHubStatus\n");
+ *(__le32 *) buf = 0;
+ break;
+
+ case GetPortStatus:
+ dev_dbg(imx21->dev, "GetPortStatus: port: %d, 0x%x\n",
+ wIndex, USBH_PORTSTAT(wIndex - 1));
+ *(__le32 *) buf = readl(imx21->regs +
+ USBH_PORTSTAT(wIndex - 1));
+ break;
+
+ case SetHubFeature:
+ dev_dbg(imx21->dev, "SetHubFeature\n");
+ switch (wValue) {
+ case C_HUB_OVER_CURRENT:
+ dev_dbg(imx21->dev, " OVER_CURRENT\n");
+ break;
+
+ case C_HUB_LOCAL_POWER:
+ dev_dbg(imx21->dev, " LOCAL_POWER\n");
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ break;
+
+ case SetPortFeature:
+ dev_dbg(imx21->dev, "SetPortFeature\n");
+ switch (wValue) {
+ case USB_PORT_FEAT_SUSPEND:
+ dev_dbg(imx21->dev, " SUSPEND\n");
+ status_write = USBH_PORTSTAT_PRTSUSPST;
+ break;
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(imx21->dev, " POWER\n");
+ status_write = USBH_PORTSTAT_PRTPWRST;
+ break;
+ case USB_PORT_FEAT_RESET:
+ dev_dbg(imx21->dev, " RESET\n");
+ status_write = USBH_PORTSTAT_PRTRSTST;
+ break;
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+ break;
+
+ default:
+ dev_dbg(imx21->dev, " unknown\n");
+ rc = -EINVAL;
+ break;
+ }
+
+ if (status_write)
+ writel(status_write, imx21->regs + USBH_PORTSTAT(wIndex - 1));
+ return rc;
+}
+
+/* =========================================== */
+/* Host controller management */
+/* =========================================== */
+
+static int imx21_hc_reset(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long timeout;
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ /* Reset the host controller modules */
+ writel(USBOTG_RST_RSTCTRL | USBOTG_RST_RSTRH |
+ USBOTG_RST_RSTHSIE | USBOTG_RST_RSTHC,
+ imx21->regs + USBOTG_RST_CTRL);
+
+ /* Wait for reset to finish */
+ timeout = jiffies + HZ;
+ while (readl(imx21->regs + USBOTG_RST_CTRL) != 0) {
+ if (time_after(jiffies, timeout)) {
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ dev_err(imx21->dev, "timeout waiting for reset\n");
+ return -ETIMEDOUT;
+ }
+ spin_unlock_irq(&imx21->lock);
+ schedule_timeout(1);
+ spin_lock_irq(&imx21->lock);
+ }
+ spin_unlock_irqrestore(&imx21->lock, flags);
+ return 0;
+}
+
+static int __devinit imx21_hc_start(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+ int i, j;
+ u32 hw_mode = USBOTG_HWMODE_CRECFG_HOST;
+ u32 usb_control = 0;
+
+ hw_mode |= ((imx21->pdata->host_xcvr << USBOTG_HWMODE_HOSTXCVR_SHIFT) &
+ USBOTG_HWMODE_HOSTXCVR_MASK);
+ hw_mode |= ((imx21->pdata->otg_xcvr << USBOTG_HWMODE_OTGXCVR_SHIFT) &
+ USBOTG_HWMODE_OTGXCVR_MASK);
+
+ if (imx21->pdata->host1_txenoe)
+ usb_control |= USBCTRL_HOST1_TXEN_OE;
+
+ if (!imx21->pdata->host1_xcverless)
+ usb_control |= USBCTRL_HOST1_BYP_TLL;
+
+ if (imx21->pdata->otg_ext_xcvr)
+ usb_control |= USBCTRL_OTC_RCV_RXDP;
+
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ writel((USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN),
+ imx21->regs + USBOTG_CLK_CTRL);
+ writel(hw_mode, imx21->regs + USBOTG_HWMODE);
+ writel(usb_control, imx21->regs + USBCTRL);
+ writel(USB_MISCCONTROL_SKPRTRY | USB_MISCCONTROL_ARBMODE,
+ imx21->regs + USB_MISCCONTROL);
+
+ /* Clear the ETDs */
+ for (i = 0; i < USB_NUM_ETD; i++)
+ for (j = 0; j < 4; j++)
+ etd_writel(imx21, i, j, 0);
+
+ /* Take the HC out of reset */
+ writel(USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL | USBH_HOST_CTRL_CTLBLKSR_1,
+ imx21->regs + USBH_HOST_CTRL);
+
+ /* Enable ports */
+ if (imx21->pdata->enable_otg_host)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(0));
+
+ if (imx21->pdata->enable_host1)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(1));
+
+ if (imx21->pdata->enable_host2)
+ writel(USBH_PORTSTAT_PRTPWRST | USBH_PORTSTAT_PRTENABST,
+ imx21->regs + USBH_PORTSTAT(2));
+
+
+ hcd->state = HC_STATE_RUNNING;
+
+ /* Enable host controller interrupts */
+ set_register_bits(imx21, USBH_SYSIEN,
+ USBH_SYSIEN_HERRINT |
+ USBH_SYSIEN_DONEINT | USBH_SYSIEN_SORINT);
+ set_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+
+ spin_unlock_irqrestore(&imx21->lock, flags);
+
+ return 0;
+}
+
+static void imx21_hc_stop(struct usb_hcd *hcd)
+{
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ unsigned long flags;
+
+ spin_lock_irqsave(&imx21->lock, flags);
+
+ writel(0, imx21->regs + USBH_SYSIEN);
+ clear_register_bits(imx21, USBOTG_CINT_STEN, USBOTG_HCINT);
+ clear_register_bits(imx21, USBOTG_CLK_CTRL,
+ USBOTG_CLK_CTRL_HST | USBOTG_CLK_CTRL_MAIN);
+ spin_unlock_irqrestore(&imx21->lock, flags);
+}
+
+/* =========================================== */
+/* Driver glue */
+/* =========================================== */
+
+static struct hc_driver imx21_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "IMX21 USB Host Controller",
+ .hcd_priv_size = sizeof(struct imx21),
+
+ .flags = HCD_USB11,
+ .irq = imx21_irq,
+
+ .reset = imx21_hc_reset,
+ .start = imx21_hc_start,
+ .stop = imx21_hc_stop,
+
+ /* I/O requests */
+ .urb_enqueue = imx21_hc_urb_enqueue,
+ .urb_dequeue = imx21_hc_urb_dequeue,
+ .endpoint_disable = imx21_hc_endpoint_disable,
+
+ /* scheduling support */
+ .get_frame_number = imx21_hc_get_frame,
+
+ /* Root hub support */
+ .hub_status_data = imx21_hc_hub_status_data,
+ .hub_control = imx21_hc_hub_control,
+
+};
+
+static struct mx21_usbh_platform_data default_pdata = {
+ .host_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+ .otg_xcvr = MX21_USBXCVR_TXDIF_RXDIF,
+ .enable_host1 = 1,
+ .enable_host2 = 1,
+ .enable_otg_host = 1,
+
+};
+
+static int imx21_remove(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(pdev);
+ struct imx21 *imx21 = hcd_to_imx21(hcd);
+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ remove_debug_files(imx21);
+ usb_remove_hcd(hcd);
+
+ if (res != NULL) {
+ clk_disable(imx21->clk);
+ clk_put(imx21->clk);
+ iounmap(imx21->regs);
+ release_mem_region(res->start, resource_size(res));
+ }
+
+ usb_put_hcd(hcd);
+ return 0;
+}
+
+
+static int imx21_probe(struct platform_device *pdev)
+{
+ struct usb_hcd *hcd;
+ struct imx21 *imx21;
+ struct resource *res;
+ int ret;
+ int irq;
+
+ printk(KERN_INFO "%s\n", imx21_hc_driver.product_desc);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENXIO;
+
+ hcd = usb_create_hcd(&imx21_hc_driver,
+ &pdev->dev, dev_name(&pdev->dev));
+ if (hcd == NULL) {
+ dev_err(&pdev->dev, "Cannot create hcd (%s)\n",
+ dev_name(&pdev->dev));
+ return -ENOMEM;
+ }
+
+ imx21 = hcd_to_imx21(hcd);
+ imx21->dev = &pdev->dev;
+ imx21->pdata = pdev->dev.platform_data;
+ if (!imx21->pdata)
+ imx21->pdata = &default_pdata;
+
+ spin_lock_init(&imx21->lock);
+ INIT_LIST_HEAD(&imx21->dmem_list);
+ INIT_LIST_HEAD(&imx21->queue_for_etd);
+ INIT_LIST_HEAD(&imx21->queue_for_dmem);
+ create_debug_files(imx21);
+
+ res = request_mem_region(res->start, resource_size(res), hcd_name);
+ if (!res) {
+ ret = -EBUSY;
+ goto failed_request_mem;
+ }
+
+ imx21->regs = ioremap(res->start, resource_size(res));
+ if (imx21->regs == NULL) {
+ dev_err(imx21->dev, "Cannot map registers\n");
+ ret = -ENOMEM;
+ goto failed_ioremap;
+ }
+
+ /* Enable clock source */
+ imx21->clk = clk_get(imx21->dev, NULL);
+ if (IS_ERR(imx21->clk)) {
+ dev_err(imx21->dev, "no clock found\n");
+ ret = PTR_ERR(imx21->clk);
+ goto failed_clock_get;
+ }
+
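+ /* the USB host module requires a 48 MHz clock */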
+ ret = clk_set_rate(imx21->clk, clk_round_rate(imx21->clk, 48000000));
+ if (ret)
+ goto failed_clock_set;
+ ret = clk_enable(imx21->clk);
+ if (ret)
+ goto failed_clock_enable;
+
+ dev_info(imx21->dev, "Hardware HC revision: 0x%02X\n",
+ (readl(imx21->regs + USBOTG_HWMODE) >> 16) & 0xFF);
+
+ ret = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ if (ret != 0) {
+ dev_err(imx21->dev, "usb_add_hcd() returned %d\n", ret);
+ goto failed_add_hcd;
+ }
+
+ return 0;
+
+failed_add_hcd:
+ clk_disable(imx21->clk);
+failed_clock_enable:
+failed_clock_set:
+ clk_put(imx21->clk);
+failed_clock_get:
+ iounmap(imx21->regs);
+failed_ioremap:
+ release_mem_region(res->start, resource_size(res));
+failed_request_mem:
+ remove_debug_files(imx21);
+ usb_put_hcd(hcd);
+ return ret;
+}
+
+static struct platform_driver imx21_hcd_driver = {
+ .driver = {
+ .name = (char *)hcd_name,
+ },
+ .probe = imx21_probe,
+ .remove = imx21_remove,
+ .suspend = NULL,
+ .resume = NULL,
+};
+
+static int __init imx21_hcd_init(void)
+{
+ return platform_driver_register(&imx21_hcd_driver);
+}
+
+static void __exit imx21_hcd_cleanup(void)
+{
+ platform_driver_unregister(&imx21_hcd_driver);
+}
+
+module_init(imx21_hcd_init);
+module_exit(imx21_hcd_cleanup);
+
+MODULE_DESCRIPTION("i.MX21 USB Host controller");
+MODULE_AUTHOR("Martin Fuzzey");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:imx21-hcd");
diff --git a/drivers/usb/host/imx21-hcd.h b/drivers/usb/host/imx21-hcd.h
new file mode 100644
index 00000000000..1b0d913780a
--- /dev/null
+++ b/drivers/usb/host/imx21-hcd.h
@@ -0,0 +1,436 @@
+/*
+ * Macros and prototypes for i.MX21
+ *
+ * Copyright (C) 2006 Loping Dog Embedded Systems
+ * Copyright (C) 2009 Martin Fuzzey
+ * Originally written by Jay Monkman <jtm@lopingdog.com>
+ * Ported to 2.6.30, debugged and enhanced by Martin Fuzzey
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __LINUX_IMX21_HCD_H__
+#define __LINUX_IMX21_HCD_H__
+
+#include <mach/mx21-usbhost.h>
+
+#define NUM_ISO_ETDS 2
+#define USB_NUM_ETD 32
+#define DMEM_SIZE 4096
+
+/* Register definitions */
+#define USBOTG_HWMODE 0x00
+#define USBOTG_HWMODE_ANASDBEN (1 << 14)
+#define USBOTG_HWMODE_OTGXCVR_SHIFT 6
+#define USBOTG_HWMODE_OTGXCVR_MASK (3 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RD (0 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RD (2 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TD_RS (1 << 6)
+#define USBOTG_HWMODE_OTGXCVR_TS_RS (3 << 6)
+#define USBOTG_HWMODE_HOSTXCVR_SHIFT 4
+#define USBOTG_HWMODE_HOSTXCVR_MASK (3 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RD (0 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RD (2 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TD_RS (1 << 4)
+#define USBOTG_HWMODE_HOSTXCVR_TS_RS (3 << 4)
+#define USBOTG_HWMODE_CRECFG_MASK (3 << 0)
+#define USBOTG_HWMODE_CRECFG_HOST (1 << 0)
+#define USBOTG_HWMODE_CRECFG_FUNC (2 << 0)
+#define USBOTG_HWMODE_CRECFG_HNP (3 << 0)
+
+#define USBOTG_CINT_STAT 0x04
+#define USBOTG_CINT_STEN 0x08
+#define USBOTG_ASHNPINT (1 << 5)
+#define USBOTG_ASFCINT (1 << 4)
+#define USBOTG_ASHCINT (1 << 3)
+#define USBOTG_SHNPINT (1 << 2)
+#define USBOTG_FCINT (1 << 1)
+#define USBOTG_HCINT (1 << 0)
+
+#define USBOTG_CLK_CTRL 0x0c
+#define USBOTG_CLK_CTRL_FUNC (1 << 2)
+#define USBOTG_CLK_CTRL_HST (1 << 1)
+#define USBOTG_CLK_CTRL_MAIN (1 << 0)
+
+#define USBOTG_RST_CTRL 0x10
+#define USBOTG_RST_RSTI2C (1 << 15)
+#define USBOTG_RST_RSTCTRL (1 << 5)
+#define USBOTG_RST_RSTFC (1 << 4)
+#define USBOTG_RST_RSTFSKE (1 << 3)
+#define USBOTG_RST_RSTRH (1 << 2)
+#define USBOTG_RST_RSTHSIE (1 << 1)
+#define USBOTG_RST_RSTHC (1 << 0)
+
+#define USBOTG_FRM_INTVL 0x14
+#define USBOTG_FRM_REMAIN 0x18
+#define USBOTG_HNP_CSR 0x1c
+#define USBOTG_HNP_ISR 0x2c
+#define USBOTG_HNP_IEN 0x30
+
+#define USBOTG_I2C_TXCVR_REG(x) (0x100 + (x))
+#define USBOTG_I2C_XCVR_DEVAD 0x118
+#define USBOTG_I2C_SEQ_OP_REG 0x119
+#define USBOTG_I2C_SEQ_RD_STARTAD 0x11a
+#define USBOTG_I2C_OP_CTRL_REG 0x11b
+#define USBOTG_I2C_SCLK_TO_SCK_HPER 0x11e
+#define USBOTG_I2C_MASTER_INT_REG 0x11f
+
+#define USBH_HOST_CTRL 0x80
+#define USBH_HOST_CTRL_HCRESET (1 << 31)
+#define USBH_HOST_CTRL_SCHDOVR(x) ((x) << 16)
+#define USBH_HOST_CTRL_RMTWUEN (1 << 4)
+#define USBH_HOST_CTRL_HCUSBSTE_RESET (0 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_RESUME (1 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_OPERATIONAL (2 << 2)
+#define USBH_HOST_CTRL_HCUSBSTE_SUSPEND (3 << 2)
+#define USBH_HOST_CTRL_CTLBLKSR_1 (0 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_2 (1 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_3 (2 << 0)
+#define USBH_HOST_CTRL_CTLBLKSR_4 (3 << 0)
+
+#define USBH_SYSISR 0x88
+#define USBH_SYSISR_PSCINT (1 << 6)
+#define USBH_SYSISR_FMOFINT (1 << 5)
+#define USBH_SYSISR_HERRINT (1 << 4)
+#define USBH_SYSISR_RESDETINT (1 << 3)
+#define USBH_SYSISR_SOFINT (1 << 2)
+#define USBH_SYSISR_DONEINT (1 << 1)
+#define USBH_SYSISR_SORINT (1 << 0)
+
+#define USBH_SYSIEN 0x8c
+#define USBH_SYSIEN_PSCINT (1 << 6)
+#define USBH_SYSIEN_FMOFINT (1 << 5)
+#define USBH_SYSIEN_HERRINT (1 << 4)
+#define USBH_SYSIEN_RESDETINT (1 << 3)
+#define USBH_SYSIEN_SOFINT (1 << 2)
+#define USBH_SYSIEN_DONEINT (1 << 1)
+#define USBH_SYSIEN_SORINT (1 << 0)
+
+#define USBH_XBUFSTAT 0x98
+#define USBH_YBUFSTAT 0x9c
+#define USBH_XYINTEN 0xa0
+#define USBH_XFILLSTAT 0xa8
+#define USBH_YFILLSTAT 0xac
+#define USBH_ETDENSET 0xc0
+#define USBH_ETDENCLR 0xc4
+#define USBH_IMMEDINT 0xcc
+#define USBH_ETDDONESTAT 0xd0
+#define USBH_ETDDONEEN 0xd4
+#define USBH_FRMNUB 0xe0
+#define USBH_LSTHRESH 0xe4
+
+#define USBH_ROOTHUBA 0xe8
+#define USBH_ROOTHUBA_PWRTOGOOD_MASK (0xff)
+#define USBH_ROOTHUBA_PWRTOGOOD_SHIFT (24)
+#define USBH_ROOTHUBA_NOOVRCURP (1 << 12)
+#define USBH_ROOTHUBA_OVRCURPM (1 << 11)
+#define USBH_ROOTHUBA_DEVTYPE (1 << 10)
+#define USBH_ROOTHUBA_PWRSWTMD (1 << 9)
+#define USBH_ROOTHUBA_NOPWRSWT (1 << 8)
+#define USBH_ROOTHUBA_NDNSTMPRT_MASK (0xff)
+
+#define USBH_ROOTHUBB 0xec
+#define USBH_ROOTHUBB_PRTPWRCM(x) (1 << ((x) + 16))
+#define USBH_ROOTHUBB_DEVREMOVE(x) (1 << (x))
+
+#define USBH_ROOTSTAT 0xf0
+#define USBH_ROOTSTAT_CLRRMTWUE (1 << 31)
+#define USBH_ROOTSTAT_OVRCURCHG (1 << 17)
+#define USBH_ROOTSTAT_DEVCONWUE (1 << 15)
+#define USBH_ROOTSTAT_OVRCURI (1 << 1)
+#define USBH_ROOTSTAT_LOCPWRS (1 << 0)
+
+#define USBH_PORTSTAT(x) (0xf4 + ((x) * 4))
+#define USBH_PORTSTAT_PRTRSTSC (1 << 20)
+#define USBH_PORTSTAT_OVRCURIC (1 << 19)
+#define USBH_PORTSTAT_PRTSTATSC (1 << 18)
+#define USBH_PORTSTAT_PRTENBLSC (1 << 17)
+#define USBH_PORTSTAT_CONNECTSC (1 << 16)
+#define USBH_PORTSTAT_LSDEVCON (1 << 9)
+#define USBH_PORTSTAT_PRTPWRST (1 << 8)
+#define USBH_PORTSTAT_PRTRSTST (1 << 4)
+#define USBH_PORTSTAT_PRTOVRCURI (1 << 3)
+#define USBH_PORTSTAT_PRTSUSPST (1 << 2)
+#define USBH_PORTSTAT_PRTENABST (1 << 1)
+#define USBH_PORTSTAT_CURCONST (1 << 0)
+
+#define USB_DMAREV 0x800
+#define USB_DMAINTSTAT 0x804
+#define USB_DMAINTSTAT_EPERR (1 << 1)
+#define USB_DMAINTSTAT_ETDERR (1 << 0)
+
+#define USB_DMAINTEN 0x808
+#define USB_DMAINTEN_EPERRINTEN (1 << 1)
+#define USB_DMAINTEN_ETDERRINTEN (1 << 0)
+
+#define USB_ETDDMAERSTAT 0x80c
+#define USB_EPDMAERSTAT 0x810
+#define USB_ETDDMAEN 0x820
+#define USB_EPDMAEN 0x824
+#define USB_ETDDMAXTEN 0x828
+#define USB_EPDMAXTEN 0x82c
+#define USB_ETDDMAENXYT 0x830
+#define USB_EPDMAENXYT 0x834
+#define USB_ETDDMABST4EN 0x838
+#define USB_EPDMABST4EN 0x83c
+
+#define USB_MISCCONTROL 0x840
+#define USB_MISCCONTROL_ISOPREVFRM (1 << 3)
+#define USB_MISCCONTROL_SKPRTRY (1 << 2)
+#define USB_MISCCONTROL_ARBMODE (1 << 1)
+#define USB_MISCCONTROL_FILTCC (1 << 0)
+
+#define USB_ETDDMACHANLCLR 0x848
+#define USB_EPDMACHANLCLR 0x84c
+#define USB_ETDSMSA(x) (0x900 + ((x) * 4))
+#define USB_EPSMSA(x) (0x980 + ((x) * 4))
+#define USB_ETDDMABUFPTR(x) (0xa00 + ((x) * 4))
+#define USB_EPDMABUFPTR(x) (0xa80 + ((x) * 4))
+
+#define USB_ETD_DWORD(x, w) (0x200 + ((x) * 16) + ((w) * 4))
+#define DW0_ADDRESS 0
+#define DW0_ENDPNT 7
+#define DW0_DIRECT 11
+#define DW0_SPEED 13
+#define DW0_FORMAT 14
+#define DW0_MAXPKTSIZ 16
+#define DW0_HALTED 27
+#define DW0_TOGCRY 28
+#define DW0_SNDNAK 30
+
+#define DW1_XBUFSRTAD 0
+#define DW1_YBUFSRTAD 16
+
+#define DW2_RTRYDELAY 0
+#define DW2_POLINTERV 0
+#define DW2_STARTFRM 0
+#define DW2_RELPOLPOS 8
+#define DW2_DIRPID 16
+#define DW2_BUFROUND 18
+#define DW2_DELAYINT 19
+#define DW2_DATATOG 22
+#define DW2_ERRORCNT 24
+#define DW2_COMPCODE 28
+
+#define DW3_TOTBYECNT 0
+#define DW3_PKTLEN0 0
+#define DW3_COMPCODE0 12
+#define DW3_PKTLEN1 16
+#define DW3_BUFSIZE 21
+#define DW3_COMPCODE1 28
+
+#define USBCTRL 0x600
+#define USBCTRL_I2C_WU_INT_STAT (1 << 27)
+#define USBCTRL_OTG_WU_INT_STAT (1 << 26)
+#define USBCTRL_HOST_WU_INT_STAT (1 << 25)
+#define USBCTRL_FNT_WU_INT_STAT (1 << 24)
+#define USBCTRL_I2C_WU_INT_EN (1 << 19)
+#define USBCTRL_OTG_WU_INT_EN (1 << 18)
+#define USBCTRL_HOST_WU_INT_EN (1 << 17)
+#define USBCTRL_FNT_WU_INT_EN (1 << 16)
+#define USBCTRL_OTC_RCV_RXDP (1 << 13)
+#define USBCTRL_HOST1_BYP_TLL (1 << 12)
+#define USBCTRL_OTG_BYP_VAL(x) ((x) << 10)
+#define USBCTRL_HOST1_BYP_VAL(x) ((x) << 8)
+#define USBCTRL_OTG_PWR_MASK (1 << 6)
+#define USBCTRL_HOST1_PWR_MASK (1 << 5)
+#define USBCTRL_HOST2_PWR_MASK (1 << 4)
+#define USBCTRL_USB_BYP (1 << 2)
+#define USBCTRL_HOST1_TXEN_OE (1 << 1)
+
+
+/* Values in TD blocks */
+#define TD_DIR_SETUP 0
+#define TD_DIR_OUT 1
+#define TD_DIR_IN 2
+#define TD_FORMAT_CONTROL 0
+#define TD_FORMAT_ISO 1
+#define TD_FORMAT_BULK 2
+#define TD_FORMAT_INT 3
+#define TD_TOGGLE_CARRY 0
+#define TD_TOGGLE_DATA0 2
+#define TD_TOGGLE_DATA1 3
+
+/* control transfer states */
+#define US_CTRL_SETUP 2
+#define US_CTRL_DATA 1
+#define US_CTRL_ACK 0
+
+/* bulk transfer main state and 0-length packet */
+#define US_BULK 1
+#define US_BULK0 0
+
+/*ETD format description*/
+#define IMX_FMT_CTRL 0x0
+#define IMX_FMT_ISO 0x1
+#define IMX_FMT_BULK 0x2
+#define IMX_FMT_INT 0x3
+
+static char fmt_urb_to_etd[4] = {
+/*PIPE_ISOCHRONOUS*/ IMX_FMT_ISO,
+/*PIPE_INTERRUPT*/ IMX_FMT_INT,
+/*PIPE_CONTROL*/ IMX_FMT_CTRL,
+/*PIPE_BULK*/ IMX_FMT_BULK
+};
+
+/* condition (error) CC codes and mapping (OHCI like) */
+
+#define TD_CC_NOERROR 0x00
+#define TD_CC_CRC 0x01
+#define TD_CC_BITSTUFFING 0x02
+#define TD_CC_DATATOGGLEM 0x03
+#define TD_CC_STALL 0x04
+#define TD_DEVNOTRESP 0x05
+#define TD_PIDCHECKFAIL 0x06
+/*#define TD_UNEXPECTEDPID 0x07 - reserved, not active on MX2*/
+#define TD_DATAOVERRUN 0x08
+#define TD_DATAUNDERRUN 0x09
+#define TD_BUFFEROVERRUN 0x0C
+#define TD_BUFFERUNDERRUN 0x0D
+#define TD_SCHEDULEOVERRUN 0x0E
+#define TD_NOTACCESSED 0x0F
+
+static const int cc_to_error[16] = {
+ /* No Error */ 0,
+ /* CRC Error */ -EILSEQ,
+ /* Bit Stuff */ -EPROTO,
+ /* Data Togg */ -EILSEQ,
+ /* Stall */ -EPIPE,
+ /* DevNotResp */ -ETIMEDOUT,
+ /* PIDCheck */ -EPROTO,
+ /* UnExpPID */ -EPROTO,
+ /* DataOver */ -EOVERFLOW,
+ /* DataUnder */ -EREMOTEIO,
+ /* (for hw) */ -EIO,
+ /* (for hw) */ -EIO,
+ /* BufferOver */ -ECOMM,
+ /* BuffUnder */ -ENOSR,
+ /* (for HCD) */ -ENOSPC,
+ /* (for HCD) */ -EALREADY
+};
+
+/* HCD data associated with a usb core URB */
+struct urb_priv {
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+ int active;
+ int state;
+ struct td *isoc_td;
+ int isoc_remaining;
+ int isoc_status;
+};
+
+/* HCD data associated with a usb core endpoint */
+struct ep_priv {
+ struct usb_host_endpoint *ep;
+ struct list_head td_list;
+ struct list_head queue;
+ int etd[NUM_ISO_ETDS];
+ int waiting_etd;
+};
+
+/* isoc packet */
+struct td {
+ struct list_head list;
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+ dma_addr_t data;
+ unsigned long buf_addr;
+ int len;
+ int frame;
+ int isoc_index;
+};
+
+/* HCD data associated with a hardware ETD */
+struct etd_priv {
+ struct usb_host_endpoint *ep;
+ struct urb *urb;
+ struct td *td;
+ struct list_head queue;
+ dma_addr_t dma_handle;
+ int alloc;
+ int len;
+ int dmem_size;
+ int dmem_offset;
+ int active_count;
+#ifdef DEBUG
+ int activated_frame;
+ int disactivated_frame;
+ int last_int_frame;
+ int last_req_frame;
+ u32 submitted_dwords[4];
+#endif
+};
+
+/* Hardware data memory info */
+struct imx21_dmem_area {
+ struct usb_host_endpoint *ep;
+ unsigned int offset;
+ unsigned int size;
+ struct list_head list;
+};
+
+#ifdef DEBUG
+struct debug_usage_stats {
+ unsigned int value;
+ unsigned int maximum;
+};
+
+struct debug_stats {
+ unsigned long submitted;
+ unsigned long completed_ok;
+ unsigned long completed_failed;
+ unsigned long unlinked;
+ unsigned long queue_etd;
+ unsigned long queue_dmem;
+};
+
+struct debug_isoc_trace {
+ int schedule_frame;
+ int submit_frame;
+ int request_len;
+ int done_frame;
+ int done_len;
+ int cc;
+ struct td *td;
+};
+#endif
+
+/* HCD data structure */
+struct imx21 {
+ spinlock_t lock;
+ struct device *dev;
+ struct mx21_usbh_platform_data *pdata;
+ struct list_head dmem_list;
+ struct list_head queue_for_etd; /* eps queued due to etd shortage */
+ struct list_head queue_for_dmem; /* etds queued due to dmem shortage */
+ struct etd_priv etd[USB_NUM_ETD];
+ struct clk *clk;
+ void __iomem *regs;
+#ifdef DEBUG
+ struct dentry *debug_root;
+ struct debug_stats nonisoc_stats;
+ struct debug_stats isoc_stats;
+ struct debug_usage_stats etd_usage;
+ struct debug_usage_stats dmem_usage;
+ struct debug_isoc_trace isoc_trace[20];
+ struct debug_isoc_trace isoc_trace_failed[20];
+ unsigned long debug_unblocks;
+ int isoc_trace_index;
+ int isoc_trace_index_failed;
+#endif
+};
+
+#endif
diff --git a/drivers/usb/host/isp1362-hcd.c b/drivers/usb/host/isp1362-hcd.c
index 42971657fde..217fb517020 100644
--- a/drivers/usb/host/isp1362-hcd.c
+++ b/drivers/usb/host/isp1362-hcd.c
@@ -1257,7 +1257,7 @@ static int isp1362_urb_enqueue(struct usb_hcd *hcd,
/* avoid all allocations within spinlocks: request or endpoint */
if (!hep->hcpriv) {
- ep = kcalloc(1, sizeof *ep, mem_flags);
+ ep = kzalloc(sizeof *ep, mem_flags);
if (!ep)
return -ENOMEM;
}
@@ -2719,24 +2719,11 @@ static int __init isp1362_probe(struct platform_device *pdev)
}
irq = irq_res->start;
-#ifdef CONFIG_USB_HCD_DMA
- if (pdev->dev.dma_mask) {
- struct resource *dma_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
-
- if (!dma_res) {
- retval = -ENODEV;
- goto err1;
- }
- isp1362_hcd->data_dma = dma_res->start;
- isp1362_hcd->max_dma_size = resource_len(dma_res);
- }
-#else
if (pdev->dev.dma_mask) {
DBG(1, "won't do DMA");
retval = -ENODEV;
goto err1;
}
-#endif
if (!request_mem_region(addr->start, resource_len(addr), hcd_name)) {
retval = -EBUSY;
diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
index 27b8f7cb447..9f01293600b 100644
--- a/drivers/usb/host/isp1760-hcd.c
+++ b/drivers/usb/host/isp1760-hcd.c
@@ -17,7 +17,9 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
+#include <linux/mm.h>
#include <asm/unaligned.h>
+#include <asm/cacheflush.h>
#include "../core/hcd.h"
#include "isp1760-hcd.h"
@@ -904,6 +906,14 @@ __acquires(priv->lock)
status = 0;
}
+ if (usb_pipein(urb->pipe) && usb_pipetype(urb->pipe) != PIPE_CONTROL) {
+ void *ptr;
+ for (ptr = urb->transfer_buffer;
+ ptr < urb->transfer_buffer + urb->transfer_buffer_length;
+ ptr += PAGE_SIZE)
+ flush_dcache_page(virt_to_page(ptr));
+ }
+
/* complete() can reenter this HCD */
usb_hcd_unlink_urb_from_ep(priv_to_hcd(priv), urb);
spin_unlock(&priv->lock);
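The hunk above walks an IN transfer buffer page by page after the PIO copy and flushes each page's kernel-mapping D-cache lines, so CPUs with aliasing caches do not hand stale data to other mappings of the same pages. Restated as a standalone helper, under the assumption that the buffer sits in lowmem (virt_to_page() is only meaningful there); the function name is illustrative:

#include <linux/mm.h>		/* virt_to_page(), PAGE_SIZE */
#include <asm/cacheflush.h>	/* flush_dcache_page() */

static void flush_pio_in_buffer(void *buf, size_t len)
{
	void *ptr;

	for (ptr = buf; ptr < buf + len; ptr += PAGE_SIZE)
		flush_dcache_page(virt_to_page(ptr));
}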
diff --git a/drivers/usb/host/isp1760-if.c b/drivers/usb/host/isp1760-if.c
index 1c9f977a5c9..4293cfd28d6 100644
--- a/drivers/usb/host/isp1760-if.c
+++ b/drivers/usb/host/isp1760-if.c
@@ -109,7 +109,7 @@ static int of_isp1760_remove(struct of_device *dev)
return 0;
}
-static struct of_device_id of_isp1760_match[] = {
+static const struct of_device_id of_isp1760_match[] = {
{
.compatible = "nxp,usb-isp1760",
},
diff --git a/drivers/usb/host/ohci-da8xx.c b/drivers/usb/host/ohci-da8xx.c
new file mode 100644
index 00000000000..4aa08d36d07
--- /dev/null
+++ b/drivers/usb/host/ohci-da8xx.c
@@ -0,0 +1,456 @@
+/*
+ * OHCI HCD (Host Controller Driver) for USB.
+ *
+ * TI DA8xx (OMAP-L1x) Bus Glue
+ *
+ * Derived from: ohci-omap.c and ohci-s3c2410.c
+ * Copyright (C) 2008-2009 MontaVista Software, Inc. <source@mvista.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <mach/da8xx.h>
+#include <mach/usb.h>
+
+#ifndef CONFIG_ARCH_DAVINCI_DA8XX
+#error "This file is DA8xx bus glue. Define CONFIG_ARCH_DAVINCI_DA8XX."
+#endif
+
+#define CFGCHIP2 DA8XX_SYSCFG_VIRT(DA8XX_CFGCHIP2_REG)
+
+static struct clk *usb11_clk;
+static struct clk *usb20_clk;
+
+/* Over-current indicator change bitmask */
+static volatile u16 ocic_mask;
+
+static void ohci_da8xx_clock(int on)
+{
+ u32 cfgchip2;
+
+ cfgchip2 = __raw_readl(CFGCHIP2);
+ if (on) {
+ clk_enable(usb11_clk);
+
+ /*
+ * If USB 1.1 reference clock is sourced from USB 2.0 PHY, we
+ * need to enable the USB 2.0 module clocking, start its PHY,
+ * and not allow it to stop the clock during USB 2.0 suspend.
+ */
+ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX)) {
+ clk_enable(usb20_clk);
+
+ cfgchip2 &= ~(CFGCHIP2_RESET | CFGCHIP2_PHYPWRDN);
+ cfgchip2 |= CFGCHIP2_PHY_PLLON;
+ __raw_writel(cfgchip2, CFGCHIP2);
+
+ pr_info("Waiting for USB PHY clock good...\n");
+ while (!(__raw_readl(CFGCHIP2) & CFGCHIP2_PHYCLKGD))
+ cpu_relax();
+ }
+
+ /* Enable USB 1.1 PHY */
+ cfgchip2 |= CFGCHIP2_USB1SUSPENDM;
+ } else {
+ clk_disable(usb11_clk);
+ if (!(cfgchip2 & CFGCHIP2_USB1PHYCLKMUX))
+ clk_disable(usb20_clk);
+
+ /* Disable USB 1.1 PHY */
+ cfgchip2 &= ~CFGCHIP2_USB1SUSPENDM;
+ }
+ __raw_writel(cfgchip2, CFGCHIP2);
+}
+
+/*
+ * Handle the port over-current indicator change.
+ */
+static void ohci_da8xx_ocic_handler(struct da8xx_ohci_root_hub *hub,
+ unsigned port)
+{
+ ocic_mask |= 1 << port;
+
+ /* Once over-current is detected, the port needs to be powered down */
+ if (hub->get_oci(port) > 0)
+ hub->set_power(port, 0);
+}
+
+static int ohci_da8xx_init(struct usb_hcd *hcd)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int result;
+ u32 rh_a;
+
+ dev_dbg(dev, "starting USB controller\n");
+
+ ohci_da8xx_clock(1);
+
+ /*
+ * The DA8xx has only one port wired out to the pins, but the HC root hub
+ * register A reports two ports, so we have to override it...
+ */
+ ohci->num_ports = 1;
+
+ result = ohci_init(ohci);
+ if (result < 0)
+ return result;
+
+ /*
+ * Since we're providing a board-specific root hub port power control
+ * and over-current reporting, we have to override the HC root hub A
+ * register's default value, so that ohci_hub_control() can return
+ * the correct hub descriptor...
+ */
+ rh_a = ohci_readl(ohci, &ohci->regs->roothub.a);
+ if (hub->set_power) {
+ rh_a &= ~RH_A_NPS;
+ rh_a |= RH_A_PSM;
+ }
+ if (hub->get_oci) {
+ rh_a &= ~RH_A_NOCP;
+ rh_a |= RH_A_OCPM;
+ }
+ rh_a &= ~RH_A_POTPGT;
+ rh_a |= hub->potpgt << 24;
+ ohci_writel(ohci, rh_a, &ohci->regs->roothub.a);
+
+ return result;
+}
+
+static void ohci_da8xx_stop(struct usb_hcd *hcd)
+{
+ ohci_stop(hcd);
+ ohci_da8xx_clock(0);
+}
+
+static int ohci_da8xx_start(struct usb_hcd *hcd)
+{
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+ int result;
+
+ result = ohci_run(ohci);
+ if (result < 0)
+ ohci_da8xx_stop(hcd);
+
+ return result;
+}
+
+/*
+ * Update the status data from the hub with the over-current indicator change.
+ */
+static int ohci_da8xx_hub_status_data(struct usb_hcd *hcd, char *buf)
+{
+ int length = ohci_hub_status_data(hcd, buf);
+
+ /* See if we have OCIC bit set on port 1 */
+ if (ocic_mask & (1 << 1)) {
+ dev_dbg(hcd->self.controller, "over-current indicator change "
+ "on port 1\n");
+
+ if (!length)
+ length = 1;
+
+ buf[0] |= 1 << 1;
+ }
+ return length;
+}
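The buffer filled in here is the root-hub status-change bitmap: bit 0 reports a change on the hub itself and bit N a change on port N, which is why a change on the single DA8xx port sets bit 1 of byte 0. A hedged one-liner expressing that layout for an arbitrary port number (the helper name is assumed for illustration):

static inline void roothub_mark_port_change(char *buf, unsigned int port)
{
	/* bit 0 = hub change, bit N = change on port N (USB 2.0, 11.12.4) */
	buf[port / 8] |= 1 << (port % 8);
}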
+
+/*
+ * Look at the control requests to the root hub and see if we need to override.
+ */
+static int ohci_da8xx_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength)
+{
+ struct device *dev = hcd->self.controller;
+ struct da8xx_ohci_root_hub *hub = dev->platform_data;
+ int temp;
+
+ switch (typeReq) {
+ case GetPortStatus:
+ /* Check the port number */
+ if (wIndex != 1)
+ break;
+
+ dev_dbg(dev, "GetPortStatus(%u)\n", wIndex);
+
+ temp = roothub_portstatus(hcd_to_ohci(hcd), wIndex - 1);
+
+ /* The port power status (PPS) bit defaults to 1 */
+ if (hub->get_power && hub->get_power(wIndex) == 0)
+ temp &= ~RH_PS_PPS;
+
+ /* The port over-current indicator (POCI) bit is always 0 */
+ if (hub->get_oci && hub->get_oci(wIndex) > 0)
+ temp |= RH_PS_POCI;
+
+ /* The over-current indicator change (OCIC) bit is 0 too */
+ if (ocic_mask & (1 << wIndex))
+ temp |= RH_PS_OCIC;
+
+ put_unaligned(cpu_to_le32(temp), (__le32 *)buf);
+ return 0;
+ case SetPortFeature:
+ temp = 1;
+ goto check_port;
+ case ClearPortFeature:
+ temp = 0;
+
+check_port:
+ /* Check the port number */
+ if (wIndex != 1)
+ break;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_POWER:
+ dev_dbg(dev, "%sPortFeature(%u): %s\n",
+ temp ? "Set" : "Clear", wIndex, "POWER");
+
+ if (!hub->set_power)
+ return -EPIPE;
+
+ return hub->set_power(wIndex, temp) ? -EPIPE : 0;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ dev_dbg(dev, "%sPortFeature(%u): %s\n",
+ temp ? "Set" : "Clear", wIndex,
+ "C_OVER_CURRENT");
+
+ if (temp)
+ ocic_mask |= 1 << wIndex;
+ else
+ ocic_mask &= ~(1 << wIndex);
+ return 0;
+ }
+ }
+
+ return ohci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
+}
+
+static const struct hc_driver ohci_da8xx_hc_driver = {
+ .description = hcd_name,
+ .product_desc = "DA8xx OHCI",
+ .hcd_priv_size = sizeof(struct ohci_hcd),
+
+ /*
+ * generic hardware linkage
+ */
+ .irq = ohci_irq,
+ .flags = HCD_USB11 | HCD_MEMORY,
+
+ /*
+ * basic lifecycle operations
+ */
+ .reset = ohci_da8xx_init,
+ .start = ohci_da8xx_start,
+ .stop = ohci_da8xx_stop,
+ .shutdown = ohci_shutdown,
+
+ /*
+ * managing i/o requests and associated device resources
+ */
+ .urb_enqueue = ohci_urb_enqueue,
+ .urb_dequeue = ohci_urb_dequeue,
+ .endpoint_disable = ohci_endpoint_disable,
+
+ /*
+ * scheduling support
+ */
+ .get_frame_number = ohci_get_frame,
+
+ /*
+ * root hub support
+ */
+ .hub_status_data = ohci_da8xx_hub_status_data,
+ .hub_control = ohci_da8xx_hub_control,
+
+#ifdef CONFIG_PM
+ .bus_suspend = ohci_bus_suspend,
+ .bus_resume = ohci_bus_resume,
+#endif
+ .start_port_reset = ohci_start_port_reset,
+};
+
+/*-------------------------------------------------------------------------*/
+
+
+/**
+ * usb_hcd_da8xx_probe - initialize DA8xx-based HCDs
+ * @driver: pointer to the hc_driver structure to bind to this HC
+ * @pdev: USB host controller platform device being probed
+ * Context: !in_interrupt()
+ *
+ * Allocates basic resources for this USB host controller, and
+ * then invokes the start() method for the HCD associated with it
+ * through the hotplug entry's driver_data.
+ */
+static int usb_hcd_da8xx_probe(const struct hc_driver *driver,
+ struct platform_device *pdev)
+{
+ struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+ struct usb_hcd *hcd;
+ struct resource *mem;
+ int error, irq;
+
+ if (hub == NULL)
+ return -ENODEV;
+
+ usb11_clk = clk_get(&pdev->dev, "usb11");
+ if (IS_ERR(usb11_clk))
+ return PTR_ERR(usb11_clk);
+
+ usb20_clk = clk_get(&pdev->dev, "usb20");
+ if (IS_ERR(usb20_clk)) {
+ error = PTR_ERR(usb20_clk);
+ goto err0;
+ }
+
+ hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
+ if (!hcd) {
+ error = -ENOMEM;
+ goto err1;
+ }
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ error = -ENODEV;
+ goto err2;
+ }
+ hcd->rsrc_start = mem->start;
+ hcd->rsrc_len = mem->end - mem->start + 1;
+
+ if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
+ dev_dbg(&pdev->dev, "request_mem_region failed\n");
+ error = -EBUSY;
+ goto err2;
+ }
+
+ hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ error = -ENOMEM;
+ goto err3;
+ }
+
+ ohci_hcd_init(hcd_to_ohci(hcd));
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ error = -ENODEV;
+ goto err4;
+ }
+ error = usb_add_hcd(hcd, irq, IRQF_DISABLED);
+ if (error)
+ goto err4;
+
+ if (hub->ocic_notify) {
+ error = hub->ocic_notify(ohci_da8xx_ocic_handler);
+ if (!error)
+ return 0;
+ }
+
+ usb_remove_hcd(hcd);
+err4:
+ iounmap(hcd->regs);
+err3:
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+err2:
+ usb_put_hcd(hcd);
+err1:
+ clk_put(usb20_clk);
+err0:
+ clk_put(usb11_clk);
+ return error;
+}
+
+/**
+ * usb_hcd_da8xx_remove - shutdown processing for DA8xx-based HCDs
+ * @hcd: USB Host Controller being removed
+ * @pdev: USB host controller platform device being removed
+ * Context: !in_interrupt()
+ *
+ * Reverses the effect of usb_hcd_da8xx_probe(), first invoking
+ * the HCD's stop() method. It is always called from a thread
+ * context, normally "rmmod", "apmd", or something similar.
+ */
+static inline void
+usb_hcd_da8xx_remove(struct usb_hcd *hcd, struct platform_device *pdev)
+{
+ struct da8xx_ohci_root_hub *hub = pdev->dev.platform_data;
+
+ hub->ocic_notify(NULL);
+ usb_remove_hcd(hcd);
+ iounmap(hcd->regs);
+ release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
+ usb_put_hcd(hcd);
+ clk_put(usb20_clk);
+ clk_put(usb11_clk);
+}
+
+static int ohci_hcd_da8xx_drv_probe(struct platform_device *dev)
+{
+ return usb_hcd_da8xx_probe(&ohci_da8xx_hc_driver, dev);
+}
+
+static int ohci_hcd_da8xx_drv_remove(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+
+ usb_hcd_da8xx_remove(hcd, dev);
+ platform_set_drvdata(dev, NULL);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int ohci_da8xx_suspend(struct platform_device *dev, pm_message_t message)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ohci_da8xx_clock(0);
+ hcd->state = HC_STATE_SUSPENDED;
+ dev->dev.power.power_state = PMSG_SUSPEND;
+ return 0;
+}
+
+static int ohci_da8xx_resume(struct platform_device *dev)
+{
+ struct usb_hcd *hcd = platform_get_drvdata(dev);
+ struct ohci_hcd *ohci = hcd_to_ohci(hcd);
+
+ if (time_before(jiffies, ohci->next_statechange))
+ msleep(5);
+ ohci->next_statechange = jiffies;
+
+ ohci_da8xx_clock(1);
+ dev->dev.power.power_state = PMSG_ON;
+ usb_hcd_resume_root_hub(hcd);
+ return 0;
+}
+#endif
+
+/*
+ * Driver definition to register with platform structure.
+ */
+static struct platform_driver ohci_hcd_da8xx_driver = {
+ .probe = ohci_hcd_da8xx_drv_probe,
+ .remove = ohci_hcd_da8xx_drv_remove,
+ .shutdown = usb_hcd_platform_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ohci_da8xx_suspend,
+ .resume = ohci_da8xx_resume,
+#endif
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "ohci",
+ },
+};
diff --git a/drivers/usb/host/ohci-dbg.c b/drivers/usb/host/ohci-dbg.c
index 811f5dfdc58..8ad2441b028 100644
--- a/drivers/usb/host/ohci-dbg.c
+++ b/drivers/usb/host/ohci-dbg.c
@@ -53,13 +53,13 @@ urb_print(struct urb * urb, char * str, int small, int status)
int i, len;
if (usb_pipecontrol (pipe)) {
- printk (KERN_DEBUG __FILE__ ": setup(8):");
+ printk (KERN_DEBUG "%s: setup(8):", __FILE__);
for (i = 0; i < 8 ; i++)
printk (" %02x", ((__u8 *) urb->setup_packet) [i]);
printk ("\n");
}
if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) {
- printk (KERN_DEBUG __FILE__ ": data(%d/%d):",
+ printk (KERN_DEBUG "%s: data(%d/%d):", __FILE__,
urb->actual_length,
urb->transfer_buffer_length);
len = usb_pipeout (pipe)?
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c
index 24eb7478191..afe59be2364 100644
--- a/drivers/usb/host/ohci-hcd.c
+++ b/drivers/usb/host/ohci-hcd.c
@@ -1051,6 +1051,11 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER usb_hcd_pnx4008_driver
#endif
+#ifdef CONFIG_ARCH_DAVINCI_DA8XX
+#include "ohci-da8xx.c"
+#define PLATFORM_DRIVER ohci_hcd_da8xx_driver
+#endif
+
#if defined(CONFIG_CPU_SUBTYPE_SH7720) || \
defined(CONFIG_CPU_SUBTYPE_SH7721) || \
defined(CONFIG_CPU_SUBTYPE_SH7763) || \
diff --git a/drivers/usb/host/ohci-lh7a404.c b/drivers/usb/host/ohci-lh7a404.c
index de42283149c..18d39f0463e 100644
--- a/drivers/usb/host/ohci-lh7a404.c
+++ b/drivers/usb/host/ohci-lh7a404.c
@@ -28,8 +28,8 @@ extern int usb_disabled(void);
static void lh7a404_start_hc(struct platform_device *dev)
{
- printk(KERN_DEBUG __FILE__
- ": starting LH7A404 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: starting LH7A404 OHCI USB Controller\n",
+ __FILE__);
/*
* Now, carefully enable the USB clock, and take
@@ -39,14 +39,13 @@ static void lh7a404_start_hc(struct platform_device *dev)
udelay(1000);
USBH_CMDSTATUS = OHCI_HCR;
- printk(KERN_DEBUG __FILE__
- ": Clock to USB host has been enabled \n");
+ printk(KERN_DEBUG "%s: Clock to USB host has been enabled \n", __FILE__);
}
static void lh7a404_stop_hc(struct platform_device *dev)
{
- printk(KERN_DEBUG __FILE__
- ": stopping LH7A404 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: stopping LH7A404 OHCI USB Controller\n",
+ __FILE__);
CSC_PWRCNT &= ~CSC_PWRCNT_USBH_EN; /* Disable clock */
}
diff --git a/drivers/usb/host/ohci-pnx4008.c b/drivers/usb/host/ohci-pnx4008.c
index 2769326da42..cd74bbdd007 100644
--- a/drivers/usb/host/ohci-pnx4008.c
+++ b/drivers/usb/host/ohci-pnx4008.c
@@ -327,7 +327,7 @@ static int __devinit usb_hcd_pnx4008_probe(struct platform_device *pdev)
}
i2c_adap = i2c_get_adapter(2);
memset(&i2c_info, 0, sizeof(struct i2c_board_info));
- strlcpy(i2c_info.name, "isp1301_pnx", I2C_NAME_SIZE);
+ strlcpy(i2c_info.type, "isp1301_pnx", I2C_NAME_SIZE);
isp1301_i2c_client = i2c_new_probed_device(i2c_adap, &i2c_info,
normal_i2c);
i2c_put_adapter(i2c_adap);
@@ -411,7 +411,7 @@ out3:
out2:
clk_put(usb_clk);
out1:
- i2c_unregister_client(isp1301_i2c_client);
+ i2c_unregister_device(isp1301_i2c_client);
isp1301_i2c_client = NULL;
out_i2c_driver:
i2c_del_driver(&isp1301_driver);
@@ -430,7 +430,7 @@ static int usb_hcd_pnx4008_remove(struct platform_device *pdev)
pnx4008_unset_usb_bits();
clk_disable(usb_clk);
clk_put(usb_clk);
- i2c_unregister_client(isp1301_i2c_client);
+ i2c_unregister_device(isp1301_i2c_client);
isp1301_i2c_client = NULL;
i2c_del_driver(&isp1301_driver);
diff --git a/drivers/usb/host/ohci-ppc-of.c b/drivers/usb/host/ohci-ppc-of.c
index 68a30171029..103263c230c 100644
--- a/drivers/usb/host/ohci-ppc-of.c
+++ b/drivers/usb/host/ohci-ppc-of.c
@@ -114,21 +114,21 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
hcd->rsrc_len = res.end - res.start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- printk(KERN_ERR __FILE__ ": request_mem_region failed\n");
+ printk(KERN_ERR "%s: request_mem_region failed\n", __FILE__);
rv = -EBUSY;
goto err_rmr;
}
irq = irq_of_parse_and_map(dn, 0);
if (irq == NO_IRQ) {
- printk(KERN_ERR __FILE__ ": irq_of_parse_and_map failed\n");
+ printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
rv = -EBUSY;
goto err_irq;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- printk(KERN_ERR __FILE__ ": ioremap failed\n");
+ printk(KERN_ERR "%s: ioremap failed\n", __FILE__);
rv = -ENOMEM;
goto err_ioremap;
}
@@ -169,7 +169,7 @@ ohci_hcd_ppc_of_probe(struct of_device *op, const struct of_device_id *match)
} else
release_mem_region(res.start, 0x4);
} else
- pr_debug(__FILE__ ": cannot get ehci offset from fdt\n");
+ pr_debug("%s: cannot get ehci offset from fdt\n", __FILE__);
}
iounmap(hcd->regs);
@@ -212,7 +212,7 @@ static int ohci_hcd_ppc_of_shutdown(struct of_device *op)
}
-static struct of_device_id ohci_hcd_ppc_of_match[] = {
+static const struct of_device_id ohci_hcd_ppc_of_match[] = {
#ifdef CONFIG_USB_OHCI_HCD_PPC_OF_BE
{
.name = "usb",
diff --git a/drivers/usb/host/ohci-ppc-soc.c b/drivers/usb/host/ohci-ppc-soc.c
index cd3398b675b..89e670e38c1 100644
--- a/drivers/usb/host/ohci-ppc-soc.c
+++ b/drivers/usb/host/ohci-ppc-soc.c
@@ -41,14 +41,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
- pr_debug(__FILE__ ": no irq\n");
+ pr_debug("%s: no irq\n", __FILE__);
return -ENODEV;
}
irq = res->start;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- pr_debug(__FILE__ ": no reg addr\n");
+ pr_debug("%s: no reg addr\n", __FILE__);
return -ENODEV;
}
@@ -59,14 +59,14 @@ static int usb_hcd_ppc_soc_probe(const struct hc_driver *driver,
hcd->rsrc_len = res->end - res->start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
- pr_debug(__FILE__ ": request_mem_region failed\n");
+ pr_debug("%s: request_mem_region failed\n", __FILE__);
retval = -EBUSY;
goto err1;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (!hcd->regs) {
- pr_debug(__FILE__ ": ioremap failed\n");
+ pr_debug("%s: ioremap failed\n", __FILE__);
retval = -ENOMEM;
goto err2;
}
diff --git a/drivers/usb/host/ohci-sa1111.c b/drivers/usb/host/ohci-sa1111.c
index e4bbe8e188e..d8eb3bdafab 100644
--- a/drivers/usb/host/ohci-sa1111.c
+++ b/drivers/usb/host/ohci-sa1111.c
@@ -31,8 +31,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst = 0;
- printk(KERN_DEBUG __FILE__
- ": starting SA-1111 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: starting SA-1111 OHCI USB Controller\n",
+ __FILE__);
#ifdef CONFIG_SA1100_BADGE4
if (machine_is_badge4()) {
@@ -65,8 +65,8 @@ static void sa1111_start_hc(struct sa1111_dev *dev)
static void sa1111_stop_hc(struct sa1111_dev *dev)
{
unsigned int usb_rst;
- printk(KERN_DEBUG __FILE__
- ": stopping SA-1111 OHCI USB Controller\n");
+ printk(KERN_DEBUG "%s: stopping SA-1111 OHCI USB Controller\n",
+ __FILE__);
/*
* Put the USB host controller into reset.
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c
index 5b22a4d1c9e..e11cc3aa4b8 100644
--- a/drivers/usb/host/sl811-hcd.c
+++ b/drivers/usb/host/sl811-hcd.c
@@ -51,6 +51,7 @@
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>
#include "../core/hcd.h"
#include "sl811.h"
@@ -1272,12 +1273,12 @@ sl811h_hub_control(
sl811h_hub_descriptor(sl811, (struct usb_hub_descriptor *) buf);
break;
case GetHubStatus:
- *(__le32 *) buf = cpu_to_le32(0);
+ put_unaligned_le32(0, buf);
break;
case GetPortStatus:
if (wIndex != 1)
goto error;
- *(__le32 *) buf = cpu_to_le32(sl811->port1);
+ put_unaligned_le32(sl811->port1, buf);
#ifndef VERBOSE
if (*(u16*)(buf+2)) /* only if wPortChange is interesting */
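The sl811 change above stops casting the caller-supplied status buffer to __le32 *, because hub control buffers are not guaranteed to be 32-bit aligned; put_unaligned_le32() performs the endian conversion and the store without any alignment assumption. A minimal sketch of the idiom (the function name is assumed for illustration):

#include <asm/unaligned.h>

static void report_port_status(char *buf, u32 portstat)
{
	/* little-endian store that is safe at any buffer alignment */
	put_unaligned_le32(portstat, buf);
}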
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 99cd00fd351..09197067fe6 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -735,6 +735,7 @@ static void uhci_stop(struct usb_hcd *hcd)
uhci_hc_died(uhci);
uhci_scan_schedule(uhci);
spin_unlock_irq(&uhci->lock);
+ synchronize_irq(hcd->irq);
del_timer_sync(&uhci->fsbr_timer);
release_uhci(uhci);
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index 33128d52f21..105fa8b025b 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -406,6 +406,25 @@ static void dbg_rsvd64(struct xhci_hcd *xhci, u64 *ctx, dma_addr_t dma)
}
}
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx)
+{
+ struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
+
+ switch (GET_SLOT_STATE(slot_ctx->dev_state)) {
+ case 0:
+ return "enabled/disabled";
+ case 1:
+ return "default";
+ case 2:
+ return "addressed";
+ case 3:
+ return "configured";
+ default:
+ return "reserved";
+ }
+}
+
void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
{
/* Fields are 32 bits wide, DMA addresses are in bytes */
diff --git a/drivers/usb/host/xhci-ext-caps.h b/drivers/usb/host/xhci-ext-caps.h
index ecc131c3fe3..78c4edac1db 100644
--- a/drivers/usb/host/xhci-ext-caps.h
+++ b/drivers/usb/host/xhci-ext-caps.h
@@ -101,12 +101,15 @@ static inline int xhci_find_next_cap_offset(void __iomem *base, int ext_offset)
next = readl(base + ext_offset);
- if (ext_offset == XHCI_HCC_PARAMS_OFFSET)
+ if (ext_offset == XHCI_HCC_PARAMS_OFFSET) {
/* Find the first extended capability */
next = XHCI_HCC_EXT_CAPS(next);
- else
+ ext_offset = 0;
+ } else {
/* Find the next extended capability */
next = XHCI_EXT_CAPS_NEXT(next);
+ }
+
if (!next)
return 0;
/*
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci-hcd.c
index 5e92c72df64..4cb69e0af83 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci-hcd.c
@@ -1007,7 +1007,7 @@ int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
* for usb_set_interface() and usb_set_configuration() claim).
*/
if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
- udev, ep, GFP_KERNEL) < 0) {
+ udev, ep, GFP_NOIO) < 0) {
dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
__func__, ep->desc.bEndpointAddress);
return -ENOMEM;
@@ -1181,6 +1181,8 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci,
ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
udev->slot_id);
if (ret < 0) {
+ if (command)
+ list_del(&command->cmd_list);
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
@@ -1264,30 +1266,13 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_zero_in_ctx(xhci, virt_dev);
/* Install new rings and free or cache any old rings */
for (i = 1; i < 31; ++i) {
- int rings_cached;
-
if (!virt_dev->eps[i].new_ring)
continue;
/* Only cache or free the old ring if it exists.
* It may not if this is the first add of an endpoint.
*/
if (virt_dev->eps[i].ring) {
- rings_cached = virt_dev->num_rings_cached;
- if (rings_cached < XHCI_MAX_RINGS_CACHED) {
- virt_dev->num_rings_cached++;
- rings_cached = virt_dev->num_rings_cached;
- virt_dev->ring_cache[rings_cached] =
- virt_dev->eps[i].ring;
- xhci_dbg(xhci, "Cached old ring, "
- "%d ring%s cached\n",
- rings_cached,
- (rings_cached > 1) ? "s" : "");
- } else {
- xhci_ring_free(xhci, virt_dev->eps[i].ring);
- xhci_dbg(xhci, "Ring cache full (%d rings), "
- "freeing ring\n",
- virt_dev->num_rings_cached);
- }
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
}
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
@@ -1458,6 +1443,131 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
}
/*
+ * This submits a Reset Device Command, which will set the device state to 0,
+ * set the device address to 0, and disable all the endpoints except the default
+ * control endpoint. The USB core should come back and call
+ * xhci_address_device(), and then re-set up the configuration. If this is
+ * called because of a usb_reset_and_verify_device(), then the old alternate
+ * settings will be re-installed through the normal bandwidth allocation
+ * functions.
+ *
+ * Wait for the Reset Device command to finish. Remove all structures
+ * associated with the endpoints that were disabled. Clear the input device
+ * structure? Cache the rings? Reset the control endpoint 0 max packet size?
+ */
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ int ret, i;
+ unsigned long flags;
+ struct xhci_hcd *xhci;
+ unsigned int slot_id;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_command *reset_device_cmd;
+ int timeleft;
+ int last_freed_endpoint;
+
+ ret = xhci_check_args(hcd, udev, NULL, 0, __func__);
+ if (ret <= 0)
+ return ret;
+ xhci = hcd_to_xhci(hcd);
+ slot_id = udev->slot_id;
+ virt_dev = xhci->devs[slot_id];
+ if (!virt_dev) {
+ xhci_dbg(xhci, "%s called with invalid slot ID %u\n",
+ __func__, slot_id);
+ return -EINVAL;
+ }
+
+ xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
+ /* Allocate the command structure that holds the struct completion.
+ * Assume we're in process context, since the normal device reset
+ * process has to wait for the device anyway. Storage devices are
+ * reset as part of error handling, so use GFP_NOIO instead of
+ * GFP_KERNEL.
+ */
+ reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
+ if (!reset_device_cmd) {
+ xhci_dbg(xhci, "Couldn't allocate command structure.\n");
+ return -ENOMEM;
+ }
+
+ /* Attempt to submit the Reset Device command to the command ring */
+ spin_lock_irqsave(&xhci->lock, flags);
+ reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;
+ list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
+ ret = xhci_queue_reset_device(xhci, slot_id);
+ if (ret) {
+ xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
+ list_del(&reset_device_cmd->cmd_list);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ goto command_cleanup;
+ }
+ xhci_ring_cmd_db(xhci);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+
+ /* Wait for the Reset Device command to finish */
+ timeleft = wait_for_completion_interruptible_timeout(
+ reset_device_cmd->completion,
+ USB_CTRL_SET_TIMEOUT);
+ if (timeleft <= 0) {
+ xhci_warn(xhci, "%s while waiting for reset device command\n",
+ timeleft == 0 ? "Timeout" : "Signal");
+ spin_lock_irqsave(&xhci->lock, flags);
+ /* The timeout might have raced with the event ring handler, so
+ * only delete from the list if the item isn't poisoned.
+ */
+ if (reset_device_cmd->cmd_list.next != LIST_POISON1)
+ list_del(&reset_device_cmd->cmd_list);
+ spin_unlock_irqrestore(&xhci->lock, flags);
+ ret = -ETIME;
+ goto command_cleanup;
+ }
+
+ /* The Reset Device command can't fail, according to the 0.95/0.96 spec,
+ * unless we tried to reset a slot ID that wasn't enabled,
+ * or the device wasn't in the addressed or configured state.
+ */
+ ret = reset_device_cmd->status;
+ switch (ret) {
+ case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
+ case COMP_CTX_STATE: /* 0.96 completion code for same thing */
+ xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
+ slot_id,
+ xhci_get_slot_state(xhci, virt_dev->out_ctx));
+ xhci_info(xhci, "Not freeing device rings.\n");
+ /* Don't treat this as an error. May change my mind later. */
+ ret = 0;
+ goto command_cleanup;
+ case COMP_SUCCESS:
+ xhci_dbg(xhci, "Successful reset device command.\n");
+ break;
+ default:
+ if (xhci_is_vendor_info_code(xhci, ret))
+ break;
+ xhci_warn(xhci, "Unknown completion code %u for "
+ "reset device command.\n", ret);
+ ret = -EINVAL;
+ goto command_cleanup;
+ }
+
+ /* Everything but endpoint 0 is disabled, so free or cache the rings. */
+ last_freed_endpoint = 1;
+ for (i = 1; i < 31; ++i) {
+ if (!virt_dev->eps[i].ring)
+ continue;
+ xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
+ last_freed_endpoint = i;
+ }
+ xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
+ xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
+ ret = 0;
+
+command_cleanup:
+ xhci_free_command(xhci, reset_device_cmd);
+ return ret;
+}
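The comment at the top of xhci_reset_device() describes the expected ordering from the USB core's point of view. As a sketch, the sequence after a usb_reset_and_verify_device() is roughly the following (core-side steps paraphrased from that comment, not new behaviour):

/*
 * hcd->driver->reset_device(hcd, udev);    - this function: slot back to the
 *                                            Default state, only ep0 enabled
 * hcd->driver->address_device(hcd, udev);  - re-address the device
 * add_endpoint()/check_bandwidth()         - re-install the old alternate
 *                                            settings over new rings
 */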
+
+/*
* At this point, the struct usb_device is about to go away, the device has
* disconnected, and all traffic has been stopped and the endpoints have been
* disabled. Free any HC data structures associated with that device.
@@ -1694,7 +1804,7 @@ int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
return -EINVAL;
}
- config_cmd = xhci_alloc_command(xhci, true, mem_flags);
+ config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
return -ENOMEM;
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index eac5b53aa9e..208b805b80e 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -129,6 +129,50 @@ static u32 xhci_port_state_to_neutral(u32 state)
return (state & XHCI_PORT_RO) | (state & XHCI_PORT_RWS);
}
+static void xhci_disable_port(struct xhci_hcd *xhci, u16 wIndex,
+ u32 __iomem *addr, u32 port_status)
+{
+ /* Write 1 to disable the port */
+ xhci_writel(xhci, port_status | PORT_PE, addr);
+ port_status = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "disable port, actual port %d status = 0x%x\n",
+ wIndex, port_status);
+}
+
+static void xhci_clear_port_change_bit(struct xhci_hcd *xhci, u16 wValue,
+ u16 wIndex, u32 __iomem *addr, u32 port_status)
+{
+ char *port_change_bit;
+ u32 status;
+
+ switch (wValue) {
+ case USB_PORT_FEAT_C_RESET:
+ status = PORT_RC;
+ port_change_bit = "reset";
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ status = PORT_CSC;
+ port_change_bit = "connect";
+ break;
+ case USB_PORT_FEAT_C_OVER_CURRENT:
+ status = PORT_OCC;
+ port_change_bit = "over-current";
+ break;
+ case USB_PORT_FEAT_C_ENABLE:
+ status = PORT_PEC;
+ port_change_bit = "enable/disable";
+ break;
+ default:
+ /* Should never happen */
+ return;
+ }
+ /* Change bits are all write 1 to clear */
+ xhci_writel(xhci, port_status | status, addr);
+ port_status = xhci_readl(xhci, addr);
+ xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
+ port_change_bit, wIndex, port_status);
+}
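The helper above relies on the xHCI port change bits being write-1-to-clear: writing the status word back with the desired change bit set clears just that bit, and xhci_port_state_to_neutral() in the caller has already masked out the other W1C bits so they are not cleared by accident. A minimal sketch of the pattern for a single bit, mirroring the caller's code (names taken from the hunk below):

	u32 temp = xhci_readl(xhci, addr);
	temp = xhci_port_state_to_neutral(temp);	/* drop the other W1C bits */
	xhci_writel(xhci, temp | PORT_CSC, addr);	/* clear only connect-change */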
+
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u16 wIndex, char *buf, u16 wLength)
{
@@ -138,7 +182,6 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
u32 temp, status;
int retval = 0;
u32 __iomem *addr;
- char *port_change_bit;
ports = HCS_MAX_PORTS(xhci->hcs_params1);
@@ -229,26 +272,18 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
temp = xhci_port_state_to_neutral(temp);
switch (wValue) {
case USB_PORT_FEAT_C_RESET:
- status = PORT_RC;
- port_change_bit = "reset";
- break;
case USB_PORT_FEAT_C_CONNECTION:
- status = PORT_CSC;
- port_change_bit = "connect";
- break;
case USB_PORT_FEAT_C_OVER_CURRENT:
- status = PORT_OCC;
- port_change_bit = "over-current";
+ case USB_PORT_FEAT_C_ENABLE:
+ xhci_clear_port_change_bit(xhci, wValue, wIndex,
+ addr, temp);
+ break;
+ case USB_PORT_FEAT_ENABLE:
+ xhci_disable_port(xhci, wIndex, addr, temp);
break;
default:
goto error;
}
- /* Change bits are all write 1 to clear */
- xhci_writel(xhci, temp | status, addr);
- temp = xhci_readl(xhci, addr);
- xhci_dbg(xhci, "clear port %s change, actual port %d status = 0x%x\n",
- port_change_bit, wIndex, temp);
- temp = xhci_readl(xhci, addr); /* unblock any posted writes */
break;
default:
error:
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index bffcef7a554..49f7d72f8b1 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -198,6 +198,31 @@ fail:
return 0;
}
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ unsigned int ep_index)
+{
+ int rings_cached;
+
+ rings_cached = virt_dev->num_rings_cached;
+ if (rings_cached < XHCI_MAX_RINGS_CACHED) {
+ virt_dev->num_rings_cached++;
+ rings_cached = virt_dev->num_rings_cached;
+ virt_dev->ring_cache[rings_cached] =
+ virt_dev->eps[ep_index].ring;
+ xhci_dbg(xhci, "Cached old ring, "
+ "%d ring%s cached\n",
+ rings_cached,
+ (rings_cached > 1) ? "s" : "");
+ } else {
+ xhci_ring_free(xhci, virt_dev->eps[ep_index].ring);
+ xhci_dbg(xhci, "Ring cache full (%d rings), "
+ "freeing ring\n",
+ virt_dev->num_rings_cached);
+ }
+ virt_dev->eps[ep_index].ring = NULL;
+}
+
/* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue
* pointers to the beginning of the ring.
*/
@@ -242,6 +267,8 @@ struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci,
void xhci_free_container_ctx(struct xhci_hcd *xhci,
struct xhci_container_ctx *ctx)
{
+ if (!ctx)
+ return;
dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
kfree(ctx);
}
@@ -427,7 +454,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
case USB_SPEED_LOW:
slot_ctx->dev_info |= (u32) SLOT_SPEED_LS;
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
break;
@@ -471,7 +498,7 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
case USB_SPEED_LOW:
ep0_ctx->ep_info2 |= MAX_PACKET(8);
break;
- case USB_SPEED_VARIABLE:
+ case USB_SPEED_WIRELESS:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
break;
@@ -819,7 +846,8 @@ static void scratchpad_free(struct xhci_hcd *xhci)
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
- bool allocate_completion, gfp_t mem_flags)
+ bool allocate_in_ctx, bool allocate_completion,
+ gfp_t mem_flags)
{
struct xhci_command *command;
@@ -827,11 +855,14 @@ struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
if (!command)
return NULL;
- command->in_ctx =
- xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
- if (!command->in_ctx) {
- kfree(command);
- return NULL;
+ if (allocate_in_ctx) {
+ command->in_ctx =
+ xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT,
+ mem_flags);
+ if (!command->in_ctx) {
+ kfree(command);
+ return NULL;
+ }
}
if (allocate_completion) {
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index e097008d6fb..417d37aff8d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -139,6 +139,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
.update_hub_device = xhci_update_hub_device,
+ .reset_device = xhci_reset_device,
/*
* scheduling support
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index ee7bc7ecbc5..6ba841bca4a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -953,6 +953,17 @@ bandwidth_change:
case TRB_TYPE(TRB_RESET_EP):
handle_reset_ep_completion(xhci, event, xhci->cmd_ring->dequeue);
break;
+ case TRB_TYPE(TRB_RESET_DEV):
+ xhci_dbg(xhci, "Completed reset device command.\n");
+ slot_id = TRB_TO_SLOT_ID(
+ xhci->cmd_ring->dequeue->generic.field[3]);
+ virt_dev = xhci->devs[slot_id];
+ if (virt_dev)
+ handle_cmd_in_cmd_wait_list(xhci, virt_dev, event);
+ else
+ xhci_warn(xhci, "Reset device command completion "
+ "for disabled slot %u\n", slot_id);
+ break;
default:
/* Skip over unknown commands on the event ring */
xhci->error_bitmask |= 1 << 6;
@@ -1080,6 +1091,20 @@ static int xhci_requires_manual_halt_cleanup(struct xhci_hcd *xhci,
return 0;
}
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
+{
+ if (trb_comp_code >= 224 && trb_comp_code <= 255) {
+ /* Vendor defined "informational" completion code,
+ * treat as not-an-error.
+ */
+ xhci_dbg(xhci, "Vendor defined info completion code %u\n",
+ trb_comp_code);
+ xhci_dbg(xhci, "Treating code as success.\n");
+ return 1;
+ }
+ return 0;
+}
+
/*
* If this function returns an error condition, it means it got a Transfer
* event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
@@ -1196,13 +1221,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
status = -ENOSR;
break;
default:
- if (trb_comp_code >= 224 && trb_comp_code <= 255) {
- /* Vendor defined "informational" completion code,
- * treat as not-an-error.
- */
- xhci_dbg(xhci, "Vendor defined info completion code %u\n",
- trb_comp_code);
- xhci_dbg(xhci, "Treating code as success.\n");
+ if (xhci_is_vendor_info_code(xhci, trb_comp_code)) {
status = 0;
break;
}
@@ -2181,6 +2200,14 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
false);
}
+/* Queue a reset device command TRB */
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id)
+{
+ return queue_command(xhci, 0, 0, 0,
+ TRB_TYPE(TRB_RESET_DEV) | SLOT_ID_FOR_TRB(slot_id),
+ false);
+}
+
/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed)
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 877813505ef..e5eb09b2f38 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1210,6 +1210,8 @@ void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
+char *xhci_get_slot_state(struct xhci_hcd *xhci,
+ struct xhci_container_ctx *ctx);
/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
@@ -1233,8 +1235,12 @@ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ unsigned int ep_index);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
- bool allocate_completion, gfp_t mem_flags);
+ bool allocate_in_ctx, bool allocate_completion,
+ gfp_t mem_flags);
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command);
@@ -1264,6 +1270,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
+int xhci_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
@@ -1272,6 +1279,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
union xhci_trb *start_trb, union xhci_trb *end_trb,
dma_addr_t suspect_dma);
+int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
void *xhci_setup_one_noop(struct xhci_hcd *xhci);
void xhci_handle_event(struct xhci_hcd *xhci);
@@ -1293,6 +1301,7 @@ int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index);
+int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_td *cur_td, struct xhci_dequeue_state *state);
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index eca355dccf6..e192e8f7c56 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -967,7 +967,7 @@ static const struct file_operations mdc800_device_ops =
-static struct usb_device_id mdc800_table [] = {
+static const struct usb_device_id mdc800_table[] = {
{ USB_DEVICE(MDC800_VENDOR_ID, MDC800_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c
index 459a7287fe0..3a6bcd5fee0 100644
--- a/drivers/usb/image/microtek.c
+++ b/drivers/usb/image/microtek.c
@@ -155,7 +155,7 @@ static int mts_usb_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void mts_usb_disconnect(struct usb_interface *intf);
-static struct usb_device_id mts_usb_ids [];
+static const struct usb_device_id mts_usb_ids[];
static struct usb_driver mts_usb_driver = {
.name = "microtekX6",
@@ -656,7 +656,7 @@ static struct scsi_host_template mts_scsi_host_template = {
/* The entries of microtek_table must correspond, line-by-line to
the entries of mts_supported_products[]. */
-static struct usb_device_id mts_usb_ids [] =
+static const struct usb_device_id mts_usb_ids[] =
{
{ USB_DEVICE(0x4ce, 0x0300) },
{ USB_DEVICE(0x5da, 0x0094) },
diff --git a/drivers/usb/misc/Kconfig b/drivers/usb/misc/Kconfig
index abe3aa67ed0..55660eaf947 100644
--- a/drivers/usb/misc/Kconfig
+++ b/drivers/usb/misc/Kconfig
@@ -87,17 +87,6 @@ config USB_LCD
To compile this driver as a module, choose M here: the
module will be called usblcd.
-config USB_BERRY_CHARGE
- tristate "USB BlackBerry recharge support"
- depends on USB
- help
- Say Y here if you want to connect a BlackBerry device to your
- computer's USB port and have it automatically switch to "recharge"
- mode.
-
- To compile this driver as a module, choose M here: the
- module will be called berry_charge.
-
config USB_LED
tristate "USB LED driver support"
depends on USB
@@ -242,17 +231,3 @@ config USB_ISIGHTFW
driver beforehand. Tools for doing so are available at
http://bersace03.free.fr
-config USB_VST
- tristate "USB VST driver"
- depends on USB
- help
- This driver is intended for Vernier Software Technologies
- bulk usb devices such as their Ocean-Optics spectrometers or
- Labquest.
- It is a bulk channel driver with configurable read and write
- timeouts.
-
- To compile this driver as a module, choose M here: the
- module will be called vstusb.
-
-
diff --git a/drivers/usb/misc/Makefile b/drivers/usb/misc/Makefile
index 0826aab8303..717703e8142 100644
--- a/drivers/usb/misc/Makefile
+++ b/drivers/usb/misc/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_USB_ADUTUX) += adutux.o
obj-$(CONFIG_USB_APPLEDISPLAY) += appledisplay.o
-obj-$(CONFIG_USB_BERRY_CHARGE) += berry_charge.o
obj-$(CONFIG_USB_CYPRESS_CY7C63)+= cypress_cy7c63.o
obj-$(CONFIG_USB_CYTHERM) += cytherm.o
obj-$(CONFIG_USB_EMI26) += emi26.o
@@ -23,7 +22,6 @@ obj-$(CONFIG_USB_TEST) += usbtest.o
obj-$(CONFIG_USB_TRANCEVIBRATOR) += trancevibrator.o
obj-$(CONFIG_USB_USS720) += uss720.o
obj-$(CONFIG_USB_SEVSEG) += usbsevseg.o
-obj-$(CONFIG_USB_VST) += vstusb.o
obj-$(CONFIG_USB_SISUSBVGA) += sisusbvga/
diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c
index 20352654201..d240de097c6 100644
--- a/drivers/usb/misc/adutux.c
+++ b/drivers/usb/misc/adutux.c
@@ -38,7 +38,7 @@ static int debug = 1;
#define dbg(lvl, format, arg...) \
do { \
if (debug >= lvl) \
- printk(KERN_DEBUG __FILE__ " : " format " \n", ## arg); \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
} while (0)
@@ -56,7 +56,7 @@ MODULE_PARM_DESC(debug, "Debug enabled or not");
#define ADU_PRODUCT_ID 0x0064
/* table of devices that work with this driver */
-static struct usb_device_id device_table [] = {
+static const struct usb_device_id device_table[] = {
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID) }, /* ADU100 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+20) }, /* ADU120 */
{ USB_DEVICE(ADU_VENDOR_ID, ADU_PRODUCT_ID+30) }, /* ADU130 */
@@ -132,8 +132,8 @@ static void adu_debug_data(int level, const char *function, int size,
if (debug < level)
return;
- printk(KERN_DEBUG __FILE__": %s - length = %d, data = ",
- function, size);
+ printk(KERN_DEBUG "%s: %s - length = %d, data = ",
+ __FILE__, function, size);
for (i = 0; i < size; ++i)
printk("%.2x ", data[i]);
printk("\n");
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 1eb9e4162cc..4d2952f1fb1 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -57,7 +57,7 @@
.bInterfaceProtocol = 0x00
/* table of devices that work with this driver */
-static struct usb_device_id appledisplay_table [] = {
+static const struct usb_device_id appledisplay_table[] = {
{ APPLEDISPLAY_DEVICE(0x9218) },
{ APPLEDISPLAY_DEVICE(0x9219) },
{ APPLEDISPLAY_DEVICE(0x921c) },
@@ -179,7 +179,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
return pdata->msgdata[1];
}
-static struct backlight_ops appledisplay_bl_data = {
+static const struct backlight_ops appledisplay_bl_data = {
.get_brightness = appledisplay_bl_get_brightness,
.update_status = appledisplay_bl_update_status,
};
@@ -283,6 +283,7 @@ static int appledisplay_probe(struct usb_interface *iface,
&appledisplay_bl_data);
if (IS_ERR(pdata->bd)) {
dev_err(&iface->dev, "Backlight registration failed\n");
+ retval = PTR_ERR(pdata->bd);
goto error;
}
diff --git a/drivers/usb/misc/berry_charge.c b/drivers/usb/misc/berry_charge.c
deleted file mode 100644
index c05a85bc592..00000000000
--- a/drivers/usb/misc/berry_charge.c
+++ /dev/null
@@ -1,183 +0,0 @@
-/*
- * USB BlackBerry charging module
- *
- * Copyright (C) 2007 Greg Kroah-Hartman <gregkh@suse.de>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2.
- *
- * Information on how to switch configs was taken by the bcharge.cc file
- * created by the barry.sf.net project.
- *
- * bcharge.cc has the following copyright:
- * Copyright (C) 2006, Net Direct Inc. (http://www.netdirect.ca/)
- * and is released under the GPLv2.
- *
- *
- */
-
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/usb.h>
-
-#define RIM_VENDOR 0x0fca
-#define BLACKBERRY 0x0001
-#define BLACKBERRY_PEARL_DUAL 0x0004
-#define BLACKBERRY_PEARL 0x0006
-
-static int debug;
-static int pearl_dual_mode = 1;
-
-#ifdef dbg
-#undef dbg
-#endif
-#define dbg(dev, format, arg...) \
- if (debug) \
- dev_printk(KERN_DEBUG , dev , format , ## arg)
-
-static struct usb_device_id id_table [] = {
- { USB_DEVICE(RIM_VENDOR, BLACKBERRY) },
- { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL) },
- { USB_DEVICE(RIM_VENDOR, BLACKBERRY_PEARL_DUAL) },
- { }, /* Terminating entry */
-};
-MODULE_DEVICE_TABLE(usb, id_table);
-
-static int magic_charge(struct usb_device *udev)
-{
- char *dummy_buffer = kzalloc(2, GFP_KERNEL);
- int retval;
-
- if (!dummy_buffer)
- return -ENOMEM;
-
- /* send two magic commands and then set the configuration. The device
- * will then reset itself with the new power usage and should start
- * charging. */
-
- /* Note, with testing, it only seems that the first message is really
- * needed (at least for the 8700c), but to be safe, we emulate what
- * other operating systems seem to be sending to their device. We
- * really need to get some specs for this device to be sure about what
- * is going on here.
- */
- dbg(&udev->dev, "Sending first magic command\n");
- retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0xa5, 0xc0, 0, 1, dummy_buffer, 2, 100);
- if (retval != 2) {
- dev_err(&udev->dev, "First magic command failed: %d.\n",
- retval);
- goto exit;
- }
-
- dbg(&udev->dev, "Sending second magic command\n");
- retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0xa2, 0x40, 0, 1, dummy_buffer, 0, 100);
- if (retval != 0) {
- dev_err(&udev->dev, "Second magic command failed: %d.\n",
- retval);
- goto exit;
- }
-
- dbg(&udev->dev, "Calling set_configuration\n");
- retval = usb_driver_set_configuration(udev, 1);
- if (retval)
- dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
-
-exit:
- kfree(dummy_buffer);
- return retval;
-}
-
-static int magic_dual_mode(struct usb_device *udev)
-{
- char *dummy_buffer = kzalloc(2, GFP_KERNEL);
- int retval;
-
- if (!dummy_buffer)
- return -ENOMEM;
-
- /* send magic command so that the Blackberry Pearl device exposes
- * two interfaces: both the USB mass-storage one and one which can
- * be used for database access. */
- dbg(&udev->dev, "Sending magic pearl command\n");
- retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- 0xa9, 0xc0, 1, 1, dummy_buffer, 2, 100);
- dbg(&udev->dev, "Magic pearl command returned %d\n", retval);
-
- dbg(&udev->dev, "Calling set_configuration\n");
- retval = usb_driver_set_configuration(udev, 1);
- if (retval)
- dev_err(&udev->dev, "Set Configuration failed :%d.\n", retval);
-
- kfree(dummy_buffer);
- return retval;
-}
-
-static int berry_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *udev = interface_to_usbdev(intf);
-
- if (udev->bus_mA < 500) {
- dbg(&udev->dev, "Not enough power to charge available\n");
- return -ENODEV;
- }
-
- dbg(&udev->dev, "Power is set to %dmA\n",
- udev->actconfig->desc.bMaxPower * 2);
-
- /* check the power usage so we don't try to enable something that is
- * already enabled */
- if ((udev->actconfig->desc.bMaxPower * 2) == 500) {
- dbg(&udev->dev, "device is already charging, power is "
- "set to %dmA\n", udev->actconfig->desc.bMaxPower * 2);
- return -ENODEV;
- }
-
- /* turn the power on */
- magic_charge(udev);
-
- if ((le16_to_cpu(udev->descriptor.idProduct) == BLACKBERRY_PEARL) &&
- (pearl_dual_mode))
- magic_dual_mode(udev);
-
- /* we don't really want to bind to the device, userspace programs can
- * handle the syncing just fine, so get outta here. */
- return -ENODEV;
-}
-
-static void berry_disconnect(struct usb_interface *intf)
-{
-}
-
-static struct usb_driver berry_driver = {
- .name = "berry_charge",
- .probe = berry_probe,
- .disconnect = berry_disconnect,
- .id_table = id_table,
-};
-
-static int __init berry_init(void)
-{
- return usb_register(&berry_driver);
-}
-
-static void __exit berry_exit(void)
-{
- usb_deregister(&berry_driver);
-}
-
-module_init(berry_init);
-module_exit(berry_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@suse.de>");
-module_param(debug, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(debug, "Debug enabled or not");
-module_param(pearl_dual_mode, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(pearl_dual_mode, "Change Blackberry Pearl to run in dual mode");
diff --git a/drivers/usb/misc/cypress_cy7c63.c b/drivers/usb/misc/cypress_cy7c63.c
index 5720bfef6a3..1547d8cac5f 100644
--- a/drivers/usb/misc/cypress_cy7c63.c
+++ b/drivers/usb/misc/cypress_cy7c63.c
@@ -56,7 +56,7 @@
/* table of devices that work with this driver */
-static struct usb_device_id cypress_table [] = {
+static const struct usb_device_id cypress_table[] = {
{ USB_DEVICE(CYPRESS_VENDOR_ID, CYPRESS_PRODUCT_ID) },
{ }
};
diff --git a/drivers/usb/misc/cytherm.c b/drivers/usb/misc/cytherm.c
index 4fb3c38b924..b9cbbbda824 100644
--- a/drivers/usb/misc/cytherm.c
+++ b/drivers/usb/misc/cytherm.c
@@ -27,7 +27,7 @@
#define USB_SKEL_VENDOR_ID 0x04b4
#define USB_SKEL_PRODUCT_ID 0x0002
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
{ }
};
diff --git a/drivers/usb/misc/emi26.c b/drivers/usb/misc/emi26.c
index 879a980ca8c..a6521c95f68 100644
--- a/drivers/usb/misc/emi26.c
+++ b/drivers/usb/misc/emi26.c
@@ -245,7 +245,7 @@ wraperr:
return err;
}
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMI26_VENDOR_ID, EMI26_PRODUCT_ID) },
{ USB_DEVICE(EMI26_VENDOR_ID, EMI26B_PRODUCT_ID) },
{ } /* Terminating entry */
diff --git a/drivers/usb/misc/emi62.c b/drivers/usb/misc/emi62.c
index 59860b32853..fc15ad4c313 100644
--- a/drivers/usb/misc/emi62.c
+++ b/drivers/usb/misc/emi62.c
@@ -259,7 +259,7 @@ wraperr:
return err;
}
-static __devinitdata struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] __devinitconst = {
{ USB_DEVICE(EMI62_VENDOR_ID, EMI62_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/misc/ftdi-elan.c b/drivers/usb/misc/ftdi-elan.c
index 9d0675ed0d4..1edb6d36189 100644
--- a/drivers/usb/misc/ftdi-elan.c
+++ b/drivers/usb/misc/ftdi-elan.c
@@ -86,7 +86,7 @@ static struct list_head ftdi_static_list;
#define USB_FTDI_ELAN_VENDOR_ID 0x0403
#define USB_FTDI_ELAN_PRODUCT_ID 0xd6ea
/* table of devices that work with this driver*/
-static struct usb_device_id ftdi_elan_table[] = {
+static const struct usb_device_id ftdi_elan_table[] = {
{USB_DEVICE(USB_FTDI_ELAN_VENDOR_ID, USB_FTDI_ELAN_PRODUCT_ID)},
{ /* Terminating entry */ }
};
@@ -623,9 +623,12 @@ static void ftdi_elan_status_work(struct work_struct *work)
*/
static int ftdi_elan_open(struct inode *inode, struct file *file)
{
- int subminor = iminor(inode);
- struct usb_interface *interface = usb_find_interface(&ftdi_elan_driver,
- subminor);
+ int subminor;
+ struct usb_interface *interface;
+
+ subminor = iminor(inode);
+ interface = usb_find_interface(&ftdi_elan_driver, subminor);
+
if (!interface) {
printk(KERN_ERR "can't find device for minor %d\n", subminor);
return -ENODEV;
diff --git a/drivers/usb/misc/idmouse.c b/drivers/usb/misc/idmouse.c
index 1337a9ce80b..a54c3cb804c 100644
--- a/drivers/usb/misc/idmouse.c
+++ b/drivers/usb/misc/idmouse.c
@@ -48,7 +48,7 @@
#define ID_CHERRY 0x0010
/* device ID table */
-static struct usb_device_id idmouse_table[] = {
+static const struct usb_device_id idmouse_table[] = {
{USB_DEVICE(ID_SIEMENS, ID_IDMOUSE)}, /* Siemens ID Mouse (Professional) */
{USB_DEVICE(ID_SIEMENS, ID_CHERRY )}, /* Cherry FingerTIP ID Board */
{} /* terminating null entry */
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index e75bb87ee92..d3c85236388 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -139,7 +139,7 @@ static int usb_set_report(struct usb_interface *intf, unsigned char type,
/* driver registration */
/*---------------------*/
/* table of devices that work with this driver */
-static struct usb_device_id iowarrior_ids[] = {
+static const struct usb_device_id iowarrior_ids[] = {
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW40)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
@@ -602,10 +602,12 @@ static int iowarrior_open(struct inode *inode, struct file *file)
dbg("%s", __func__);
+ lock_kernel();
subminor = iminor(inode);
interface = usb_find_interface(&iowarrior_driver, subminor);
if (!interface) {
+ unlock_kernel();
err("%s - error, can't find device for minor %d", __func__,
subminor);
return -ENODEV;
@@ -615,6 +617,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&iowarrior_open_disc_lock);
+ unlock_kernel();
return -ENODEV;
}
@@ -641,6 +644,7 @@ static int iowarrior_open(struct inode *inode, struct file *file)
out:
mutex_unlock(&dev->mutex);
+ unlock_kernel();
return retval;
}
diff --git a/drivers/usb/misc/isight_firmware.c b/drivers/usb/misc/isight_firmware.c
index b897f6554ec..06e990adc6c 100644
--- a/drivers/usb/misc/isight_firmware.c
+++ b/drivers/usb/misc/isight_firmware.c
@@ -26,7 +26,7 @@
#include <linux/errno.h>
#include <linux/module.h>
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05ac, 0x8300)},
{},
};
@@ -112,6 +112,8 @@ out:
return ret;
}
+MODULE_FIRMWARE("isight.fw");
+
static void isight_firmware_disconnect(struct usb_interface *intf)
{
}
diff --git a/drivers/usb/misc/ldusb.c b/drivers/usb/misc/ldusb.c
index 90f130126c1..dd41d871004 100644
--- a/drivers/usb/misc/ldusb.c
+++ b/drivers/usb/misc/ldusb.c
@@ -69,7 +69,7 @@
#endif
/* table of devices that work with this driver */
-static struct usb_device_id ld_usb_table [] = {
+static const struct usb_device_id ld_usb_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_CASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_POCKETCASSY) },
{ USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_MOBILECASSY) },
@@ -798,7 +798,7 @@ static int __init ld_usb_init(void)
/* register this driver with the USB subsystem */
retval = usb_register(&ld_usb_driver);
if (retval)
- err("usb_register failed for the "__FILE__" driver. Error number %d\n", retval);
+ err("usb_register failed for the %s driver. Error number %d\n", __FILE__, retval);
return retval;
}
diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c
index faa6d623de7..8547bf9e317 100644
--- a/drivers/usb/misc/legousbtower.c
+++ b/drivers/usb/misc/legousbtower.c
@@ -95,8 +95,11 @@
/* Use our own dbg macro */
#undef dbg
-#define dbg(lvl, format, arg...) do { if (debug >= lvl) printk(KERN_DEBUG __FILE__ ": " format "\n", ## arg); } while (0)
-
+#define dbg(lvl, format, arg...) \
+do { \
+ if (debug >= lvl) \
+ printk(KERN_DEBUG "%s: " format "\n", __FILE__, ##arg); \
+} while (0)
/* Version Information */
#define DRIVER_VERSION "v0.96"
@@ -192,7 +195,7 @@ struct tower_get_version_reply {
/* table of devices that work with this driver */
-static struct usb_device_id tower_table [] = {
+static const struct usb_device_id tower_table[] = {
{ USB_DEVICE(LEGO_USB_TOWER_VENDOR_ID, LEGO_USB_TOWER_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -302,7 +305,7 @@ static inline void lego_usb_tower_debug_data (int level, const char *function, i
if (debug < level)
return;
- printk (KERN_DEBUG __FILE__": %s - length = %d, data = ", function, size);
+ printk (KERN_DEBUG "%s: %s - length = %d, data = ", __FILE__, function, size);
for (i = 0; i < size; ++i) {
printk ("%.2x ", data[i]);
}
@@ -1055,7 +1058,7 @@ static int __init lego_usb_tower_init(void)
/* register this driver with the USB subsystem */
result = usb_register(&tower_driver);
if (result < 0) {
- err("usb_register failed for the "__FILE__" driver. Error number %d", result);
+ err("usb_register failed for the %s driver. Error number %d", __FILE__, result);
retval = -1;
goto exit;
}
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index 32d0199d0c3..a85771b1563 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -78,10 +78,13 @@ static int open_rio(struct inode *inode, struct file *file)
{
struct rio_usb_data *rio = &rio_instance;
+ /* against disconnect() */
+ lock_kernel();
mutex_lock(&(rio->lock));
if (rio->isopen || !rio->present) {
mutex_unlock(&(rio->lock));
+ unlock_kernel();
return -EBUSY;
}
rio->isopen = 1;
@@ -91,6 +94,7 @@ static int open_rio(struct inode *inode, struct file *file)
mutex_unlock(&(rio->lock));
dev_info(&rio->rio_dev->dev, "Rio opened.\n");
+ unlock_kernel();
return 0;
}
@@ -115,7 +119,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
int retries;
int retval=0;
- lock_kernel();
mutex_lock(&(rio->lock));
/* Sanity check to make sure rio is connected, powered, etc */
if (rio->present == 0 || rio->rio_dev == NULL) {
@@ -254,7 +257,6 @@ static long ioctl_rio(struct file *file, unsigned int cmd, unsigned long arg)
err_out:
mutex_unlock(&(rio->lock));
- unlock_kernel();
return retval;
}
@@ -489,6 +491,7 @@ static void disconnect_rio(struct usb_interface *intf)
struct rio_usb_data *rio = usb_get_intfdata (intf);
usb_set_intfdata (intf, NULL);
+ lock_kernel();
if (rio) {
usb_deregister_dev(intf, &usb_rio_class);
@@ -498,6 +501,7 @@ static void disconnect_rio(struct usb_interface *intf)
/* better let it finish - the release will do whats needed */
rio->rio_dev = NULL;
mutex_unlock(&(rio->lock));
+ unlock_kernel();
return;
}
kfree(rio->ibuf);
@@ -508,9 +512,10 @@ static void disconnect_rio(struct usb_interface *intf)
rio->present = 0;
mutex_unlock(&(rio->lock));
}
+ unlock_kernel();
}
-static struct usb_device_id rio_table [] = {
+static const struct usb_device_id rio_table[] = {
{ USB_DEVICE(0x0841, 1) }, /* Rio 500 */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
index 8b37a4b9839..aae95a009bd 100644
--- a/drivers/usb/misc/sisusbvga/sisusb.c
+++ b/drivers/usb/misc/sisusbvga/sisusb.c
@@ -250,7 +250,7 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
sisusb->urbstatus[index] |= SU_URB_BUSY;
/* Submit URB */
- retval = usb_submit_urb(urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
/* If OK, and if timeout > 0, wait for completion */
if ((retval == 0) && timeout) {
@@ -306,7 +306,7 @@ sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
urb->actual_length = 0;
sisusb->completein = 0;
- retval = usb_submit_urb(urb, GFP_ATOMIC);
+ retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval == 0) {
wait_event_timeout(sisusb->wait_q, sisusb->completein, timeout);
if (!sisusb->completein) {
@@ -2416,21 +2416,28 @@ sisusb_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor = iminor(inode);
- if (!(interface = usb_find_interface(&sisusb_driver, subminor)))
+ lock_kernel();
+ if (!(interface = usb_find_interface(&sisusb_driver, subminor))) {
+ unlock_kernel();
return -ENODEV;
+ }
- if (!(sisusb = usb_get_intfdata(interface)))
+ if (!(sisusb = usb_get_intfdata(interface))) {
+ unlock_kernel();
return -ENODEV;
+ }
mutex_lock(&sisusb->lock);
if (!sisusb->present || !sisusb->ready) {
mutex_unlock(&sisusb->lock);
+ unlock_kernel();
return -ENODEV;
}
if (sisusb->isopen) {
mutex_unlock(&sisusb->lock);
+ unlock_kernel();
return -EBUSY;
}
@@ -2439,11 +2446,13 @@ sisusb_open(struct inode *inode, struct file *file)
if (sisusb_init_gfxdevice(sisusb, 0)) {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Failed to initialize device\n");
+ unlock_kernel();
return -EIO;
}
} else {
mutex_unlock(&sisusb->lock);
dev_err(&sisusb->sisusb_dev->dev, "Device not attached to USB 2.0 hub\n");
+ unlock_kernel();
return -EIO;
}
}
@@ -2456,6 +2465,7 @@ sisusb_open(struct inode *inode, struct file *file)
file->private_data = sisusb;
mutex_unlock(&sisusb->lock);
+ unlock_kernel();
return 0;
}
@@ -3238,7 +3248,7 @@ static void sisusb_disconnect(struct usb_interface *intf)
kref_put(&sisusb->kref, sisusb_delete);
}
-static struct usb_device_id sisusb_table [] = {
+static const struct usb_device_id sisusb_table[] = {
{ USB_DEVICE(0x0711, 0x0550) },
{ USB_DEVICE(0x0711, 0x0900) },
{ USB_DEVICE(0x0711, 0x0901) },
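
Note: the sisusb hunks switch usb_submit_urb() to GFP_KERNEL because these bulk paths run in process context and may sleep; GFP_ATOMIC is only needed when submitting from interrupt context or under a spinlock. A small sketch of the rule of thumb (helper name illustrative):

#include <linux/types.h>
#include <linux/usb.h>

/* Pick the allocation flag from the calling context. */
static int example_submit_urb(struct urb *urb, bool in_atomic_context)
{
	gfp_t flags = in_atomic_context ? GFP_ATOMIC : GFP_KERNEL;

	return usb_submit_urb(urb, flags);
}
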
diff --git a/drivers/usb/misc/trancevibrator.c b/drivers/usb/misc/trancevibrator.c
index 2e14102955c..5da28eaee31 100644
--- a/drivers/usb/misc/trancevibrator.c
+++ b/drivers/usb/misc/trancevibrator.c
@@ -33,7 +33,7 @@
#define TRANCEVIBRATOR_VENDOR_ID 0x0b49 /* ASCII Corporation */
#define TRANCEVIBRATOR_PRODUCT_ID 0x064f /* Trance Vibrator */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(TRANCEVIBRATOR_VENDOR_ID, TRANCEVIBRATOR_PRODUCT_ID) },
{ },
};
diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c
index 4fb120357c5..90aede90553 100644
--- a/drivers/usb/misc/usblcd.c
+++ b/drivers/usb/misc/usblcd.c
@@ -30,7 +30,7 @@
#define IOCTL_GET_DRV_VERSION 2
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ .idVendor = 0x10D2, .match_flags = USB_DEVICE_ID_MATCH_VENDOR, },
{ },
};
@@ -74,10 +74,12 @@ static int lcd_open(struct inode *inode, struct file *file)
struct usb_interface *interface;
int subminor, r;
+ lock_kernel();
subminor = iminor(inode);
interface = usb_find_interface(&lcd_driver, subminor);
if (!interface) {
+ unlock_kernel();
err ("USBLCD: %s - error, can't find device for minor %d",
__func__, subminor);
return -ENODEV;
@@ -87,6 +89,7 @@ static int lcd_open(struct inode *inode, struct file *file)
dev = usb_get_intfdata(interface);
if (!dev) {
mutex_unlock(&open_disc_mutex);
+ unlock_kernel();
return -ENODEV;
}
@@ -98,11 +101,13 @@ static int lcd_open(struct inode *inode, struct file *file)
r = usb_autopm_get_interface(interface);
if (r < 0) {
kref_put(&dev->kref, lcd_delete);
+ unlock_kernel();
return r;
}
/* save our object in the file's private structure */
file->private_data = dev;
+ unlock_kernel();
return 0;
}
diff --git a/drivers/usb/misc/usbled.c b/drivers/usb/misc/usbled.c
index 06cb71942dc..63da2c3c838 100644
--- a/drivers/usb/misc/usbled.c
+++ b/drivers/usb/misc/usbled.c
@@ -24,7 +24,7 @@
#define PRODUCT_ID 0x1223
/* table of devices that work with this driver */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
{ },
};
diff --git a/drivers/usb/misc/usbsevseg.c b/drivers/usb/misc/usbsevseg.c
index 3db255537e7..a9555cb901a 100644
--- a/drivers/usb/misc/usbsevseg.c
+++ b/drivers/usb/misc/usbsevseg.c
@@ -27,7 +27,7 @@
#define MAXLEN 6
/* table of devices that work with this driver */
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(VENDOR_ID, PRODUCT_ID) },
{ },
};
diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c
index 3dab0c0b196..a21cce6f740 100644
--- a/drivers/usb/misc/usbtest.c
+++ b/drivers/usb/misc/usbtest.c
@@ -1580,10 +1580,6 @@ usbtest_ioctl (struct usb_interface *intf, unsigned int code, void *buf)
return -ERESTARTSYS;
/* FIXME: What if a system sleep starts while a test is running? */
- if (!intf->is_active) {
- mutex_unlock(&dev->lock);
- return -EHOSTUNREACH;
- }
/* some devices, like ez-usb default devices, need a non-default
* altsetting to have any active endpoints. some tests change
@@ -2101,7 +2097,7 @@ static struct usbtest_info generic_info = {
#endif
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
/*-------------------------------------------------------------*/
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index 9a6c27a0179..f56fed53f2d 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -770,7 +770,7 @@ static void uss720_disconnect(struct usb_interface *intf)
}
/* table of cables that work through this driver */
-static struct usb_device_id uss720_table [] = {
+static const struct usb_device_id uss720_table[] = {
{ USB_DEVICE(0x047e, 0x1001) },
{ USB_DEVICE(0x0557, 0x2001) },
{ USB_DEVICE(0x0729, 0x1284) },
diff --git a/drivers/usb/misc/vstusb.c b/drivers/usb/misc/vstusb.c
deleted file mode 100644
index f26ea8dc157..00000000000
--- a/drivers/usb/misc/vstusb.c
+++ /dev/null
@@ -1,783 +0,0 @@
-/*****************************************************************************
- * File: drivers/usb/misc/vstusb.c
- *
- * Purpose: Support for the bulk USB Vernier Spectrophotometers
- *
- * Author: Johnnie Peters
- * Axian Consulting
- * Beaverton, OR, USA 97005
- *
- * Modified by: EQware Engineering, Inc.
- * Oregon City, OR, USA 97045
- *
- * Copyright: 2007, 2008
- * Vernier Software & Technology
- * Beaverton, OR, USA 97005
- *
- * Web: www.vernier.com
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- *****************************************************************************/
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/uaccess.h>
-#include <linux/usb.h>
-
-#include <linux/usb/vstusb.h>
-
-#define DRIVER_VERSION "VST USB Driver Version 1.5"
-#define DRIVER_DESC "Vernier Software Technology Bulk USB Driver"
-
-#ifdef CONFIG_USB_DYNAMIC_MINORS
- #define VSTUSB_MINOR_BASE 0
-#else
- #define VSTUSB_MINOR_BASE 199
-#endif
-
-#define USB_VENDOR_OCEANOPTICS 0x2457
-#define USB_VENDOR_VERNIER 0x08F7 /* Vernier Software & Technology */
-
-#define USB_PRODUCT_USB2000 0x1002
-#define USB_PRODUCT_ADC1000_FW 0x1003 /* firmware download (renumerates) */
-#define USB_PRODUCT_ADC1000 0x1004
-#define USB_PRODUCT_HR2000_FW 0x1009 /* firmware download (renumerates) */
-#define USB_PRODUCT_HR2000 0x100A
-#define USB_PRODUCT_HR4000_FW 0x1011 /* firmware download (renumerates) */
-#define USB_PRODUCT_HR4000 0x1012
-#define USB_PRODUCT_USB650 0x1014 /* "Red Tide" */
-#define USB_PRODUCT_QE65000 0x1018
-#define USB_PRODUCT_USB4000 0x1022
-#define USB_PRODUCT_USB325 0x1024 /* "Vernier Spectrometer" */
-
-#define USB_PRODUCT_LABPRO 0x0001
-#define USB_PRODUCT_LABQUEST 0x0005
-
-#define VST_MAXBUFFER (64*1024)
-
-static struct usb_device_id id_table[] = {
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB2000)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_HR4000)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB650)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB4000)},
- { USB_DEVICE(USB_VENDOR_OCEANOPTICS, USB_PRODUCT_USB325)},
- { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABQUEST)},
- { USB_DEVICE(USB_VENDOR_VERNIER, USB_PRODUCT_LABPRO)},
- {},
-};
-
-MODULE_DEVICE_TABLE(usb, id_table);
-
-struct vstusb_device {
- struct kref kref;
- struct mutex lock;
- struct usb_device *usb_dev;
- char present;
- char isopen;
- struct usb_anchor submitted;
- int rd_pipe;
- int rd_timeout_ms;
- int wr_pipe;
- int wr_timeout_ms;
-};
-#define to_vst_dev(d) container_of(d, struct vstusb_device, kref)
-
-static struct usb_driver vstusb_driver;
-
-static void vstusb_delete(struct kref *kref)
-{
- struct vstusb_device *vstdev = to_vst_dev(kref);
-
- usb_put_dev(vstdev->usb_dev);
- kfree(vstdev);
-}
-
-static int vstusb_open(struct inode *inode, struct file *file)
-{
- struct vstusb_device *vstdev;
- struct usb_interface *interface;
-
- interface = usb_find_interface(&vstusb_driver, iminor(inode));
-
- if (!interface) {
- printk(KERN_ERR KBUILD_MODNAME
- ": %s - error, can't find device for minor %d\n",
- __func__, iminor(inode));
- return -ENODEV;
- }
-
- vstdev = usb_get_intfdata(interface);
-
- if (!vstdev)
- return -ENODEV;
-
- /* lock this device */
- mutex_lock(&vstdev->lock);
-
- /* can only open one time */
- if ((!vstdev->present) || (vstdev->isopen)) {
- mutex_unlock(&vstdev->lock);
- return -EBUSY;
- }
-
- /* increment our usage count */
- kref_get(&vstdev->kref);
-
- vstdev->isopen = 1;
-
- /* save device in the file's private structure */
- file->private_data = vstdev;
-
- dev_dbg(&vstdev->usb_dev->dev, "%s: opened\n", __func__);
-
- mutex_unlock(&vstdev->lock);
-
- return 0;
-}
-
-static int vstusb_release(struct inode *inode, struct file *file)
-{
- struct vstusb_device *vstdev;
-
- vstdev = file->private_data;
-
- if (vstdev == NULL)
- return -ENODEV;
-
- mutex_lock(&vstdev->lock);
-
- vstdev->isopen = 0;
-
- dev_dbg(&vstdev->usb_dev->dev, "%s: released\n", __func__);
-
- mutex_unlock(&vstdev->lock);
-
- kref_put(&vstdev->kref, vstusb_delete);
-
- return 0;
-}
-
-static void usb_api_blocking_completion(struct urb *urb)
-{
- struct completion *completeit = urb->context;
-
- complete(completeit);
-}
-
-static int vstusb_fill_and_send_urb(struct urb *urb,
- struct usb_device *usb_dev,
- unsigned int pipe, void *data,
- unsigned int len, struct completion *done)
-{
- struct usb_host_endpoint *ep;
- struct usb_host_endpoint **hostep;
- unsigned int pipend;
-
- int status;
-
- hostep = usb_pipein(pipe) ? usb_dev->ep_in : usb_dev->ep_out;
- pipend = usb_pipeendpoint(pipe);
- ep = hostep[pipend];
-
- if (!ep || (len == 0))
- return -EINVAL;
-
- if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK)
- == USB_ENDPOINT_XFER_INT) {
- pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
- usb_fill_int_urb(urb, usb_dev, pipe, data, len,
- (usb_complete_t)usb_api_blocking_completion,
- NULL, ep->desc.bInterval);
- } else
- usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
- (usb_complete_t)usb_api_blocking_completion,
- NULL);
-
- init_completion(done);
- urb->context = done;
- urb->actual_length = 0;
- status = usb_submit_urb(urb, GFP_KERNEL);
-
- return status;
-}
-
-static int vstusb_complete_urb(struct urb *urb, struct completion *done,
- int timeout, int *actual_length)
-{
- unsigned long expire;
- int status;
-
- expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
- if (!wait_for_completion_interruptible_timeout(done, expire)) {
- usb_kill_urb(urb);
- status = urb->status == -ENOENT ? -ETIMEDOUT : urb->status;
-
- dev_dbg(&urb->dev->dev,
- "%s timed out on ep%d%s len=%d/%d, urb status = %d\n",
- current->comm,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- urb->actual_length,
- urb->transfer_buffer_length,
- urb->status);
-
- } else {
- if (signal_pending(current)) {
- /* if really an error */
- if (urb->status && !((urb->status == -ENOENT) ||
- (urb->status == -ECONNRESET) ||
- (urb->status == -ESHUTDOWN))) {
- status = -EINTR;
- usb_kill_urb(urb);
- } else {
- status = 0;
- }
-
- dev_dbg(&urb->dev->dev,
- "%s: signal pending on ep%d%s len=%d/%d,"
- "urb status = %d\n",
- current->comm,
- usb_pipeendpoint(urb->pipe),
- usb_pipein(urb->pipe) ? "in" : "out",
- urb->actual_length,
- urb->transfer_buffer_length,
- urb->status);
-
- } else {
- status = urb->status;
- }
- }
-
- if (actual_length)
- *actual_length = urb->actual_length;
-
- return status;
-}
-
-static ssize_t vstusb_read(struct file *file, char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct vstusb_device *vstdev;
- int cnt = -1;
- void *buf;
- int retval = 0;
-
- struct urb *urb;
- struct usb_device *dev;
- unsigned int pipe;
- int timeout;
-
- DECLARE_COMPLETION_ONSTACK(done);
-
- vstdev = file->private_data;
-
- if (vstdev == NULL)
- return -ENODEV;
-
- /* verify that we actually want to read some data */
- if ((count == 0) || (count > VST_MAXBUFFER))
- return -EINVAL;
-
- /* lock this object */
- if (mutex_lock_interruptible(&vstdev->lock))
- return -ERESTARTSYS;
-
- /* anyone home */
- if (!vstdev->present) {
- mutex_unlock(&vstdev->lock);
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: device not present\n", __func__);
- return -ENODEV;
- }
-
- /* pull out the necessary data */
- dev = vstdev->usb_dev;
- pipe = usb_rcvbulkpipe(dev, vstdev->rd_pipe);
- timeout = vstdev->rd_timeout_ms;
-
- buf = kmalloc(count, GFP_KERNEL);
- if (buf == NULL) {
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- kfree(buf);
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- if (copy_to_user(buffer, buf, cnt)) {
- dev_err(&dev->dev, "%s: can't copy_to_user\n", __func__);
- retval = -EFAULT;
- } else {
- retval = cnt;
- dev_dbg(&dev->dev, "%s: read %d bytes from pipe %d\n",
- __func__, cnt, pipe);
- }
-
-exit:
- usb_free_urb(urb);
- kfree(buf);
- return retval;
-}
-
-static ssize_t vstusb_write(struct file *file, const char __user *buffer,
- size_t count, loff_t *ppos)
-{
- struct vstusb_device *vstdev;
- int cnt = -1;
- void *buf;
- int retval = 0;
-
- struct urb *urb;
- struct usb_device *dev;
- unsigned int pipe;
- int timeout;
-
- DECLARE_COMPLETION_ONSTACK(done);
-
- vstdev = file->private_data;
-
- if (vstdev == NULL)
- return -ENODEV;
-
- /* verify that we actually have some data to write */
- if ((count == 0) || (count > VST_MAXBUFFER))
- return retval;
-
- /* lock this object */
- if (mutex_lock_interruptible(&vstdev->lock))
- return -ERESTARTSYS;
-
- /* anyone home */
- if (!vstdev->present) {
- mutex_unlock(&vstdev->lock);
- printk(KERN_ERR KBUILD_MODNAME
- ": %s: device not present\n", __func__);
- return -ENODEV;
- }
-
- /* pull out the necessary data */
- dev = vstdev->usb_dev;
- pipe = usb_sndbulkpipe(dev, vstdev->wr_pipe);
- timeout = vstdev->wr_timeout_ms;
-
- buf = kmalloc(count, GFP_KERNEL);
- if (buf == NULL) {
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- kfree(buf);
- mutex_unlock(&vstdev->lock);
- return -ENOMEM;
- }
-
- if (copy_from_user(buf, buffer, count)) {
- mutex_unlock(&vstdev->lock);
- dev_err(&dev->dev, "%s: can't copy_from_user\n", __func__);
- retval = -EFAULT;
- goto exit;
- }
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buf, count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev, "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- goto exit;
- } else {
- retval = cnt;
- dev_dbg(&dev->dev, "%s: sent %d bytes to pipe %d\n",
- __func__, cnt, pipe);
- }
-
-exit:
- usb_free_urb(urb);
- kfree(buf);
- return retval;
-}
-
-static long vstusb_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
-{
- int retval = 0;
- int cnt = -1;
- void __user *data = (void __user *)arg;
- struct vstusb_args usb_data;
-
- struct vstusb_device *vstdev;
- void *buffer = NULL; /* must be initialized. buffer is
- * referenced on exit but not all
- * ioctls allocate it */
-
- struct urb *urb = NULL; /* must be initialized. urb is
- * referenced on exit but not all
- * ioctls allocate it */
- struct usb_device *dev;
- unsigned int pipe;
- int timeout;
-
- DECLARE_COMPLETION_ONSTACK(done);
-
- vstdev = file->private_data;
-
- if (_IOC_TYPE(cmd) != VST_IOC_MAGIC) {
- dev_warn(&vstdev->usb_dev->dev,
- "%s: ioctl command %x, bad ioctl magic %x, "
- "expected %x\n", __func__, cmd,
- _IOC_TYPE(cmd), VST_IOC_MAGIC);
- return -EINVAL;
- }
-
- if (vstdev == NULL)
- return -ENODEV;
-
- if (copy_from_user(&usb_data, data, sizeof(struct vstusb_args))) {
- dev_err(&vstdev->usb_dev->dev, "%s: can't copy_from_user\n",
- __func__);
- return -EFAULT;
- }
-
- /* lock this object */
- if (mutex_lock_interruptible(&vstdev->lock)) {
- retval = -ERESTARTSYS;
- goto exit;
- }
-
- /* anyone home */
- if (!vstdev->present) {
- mutex_unlock(&vstdev->lock);
- dev_err(&vstdev->usb_dev->dev, "%s: device not present\n",
- __func__);
- retval = -ENODEV;
- goto exit;
- }
-
- /* pull out the necessary data */
- dev = vstdev->usb_dev;
-
- switch (cmd) {
-
- case IOCTL_VSTUSB_CONFIG_RW:
-
- vstdev->rd_pipe = usb_data.rd_pipe;
- vstdev->rd_timeout_ms = usb_data.rd_timeout_ms;
- vstdev->wr_pipe = usb_data.wr_pipe;
- vstdev->wr_timeout_ms = usb_data.wr_timeout_ms;
-
- mutex_unlock(&vstdev->lock);
-
- dev_dbg(&dev->dev, "%s: setting pipes/timeouts, "
- "rdpipe = %d, rdtimeout = %d, "
- "wrpipe = %d, wrtimeout = %d\n", __func__,
- vstdev->rd_pipe, vstdev->rd_timeout_ms,
- vstdev->wr_pipe, vstdev->wr_timeout_ms);
- break;
-
- case IOCTL_VSTUSB_SEND_PIPE:
-
- if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
- mutex_unlock(&vstdev->lock);
- retval = -EINVAL;
- goto exit;
- }
-
- buffer = kmalloc(usb_data.count, GFP_KERNEL);
- if (buffer == NULL) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- timeout = usb_data.timeout_ms;
-
- pipe = usb_sndbulkpipe(dev, usb_data.pipe);
-
- if (copy_from_user(buffer, usb_data.buffer, usb_data.count)) {
- dev_err(&dev->dev, "%s: can't copy_from_user\n",
- __func__);
- mutex_unlock(&vstdev->lock);
- retval = -EFAULT;
- goto exit;
- }
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
- usb_data.count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev,
- "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- }
-
- break;
- case IOCTL_VSTUSB_RECV_PIPE:
-
- if ((usb_data.count == 0) || (usb_data.count > VST_MAXBUFFER)) {
- mutex_unlock(&vstdev->lock);
- retval = -EINVAL;
- goto exit;
- }
-
- buffer = kmalloc(usb_data.count, GFP_KERNEL);
- if (buffer == NULL) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- urb = usb_alloc_urb(0, GFP_KERNEL);
- if (!urb) {
- mutex_unlock(&vstdev->lock);
- retval = -ENOMEM;
- goto exit;
- }
-
- timeout = usb_data.timeout_ms;
-
- pipe = usb_rcvbulkpipe(dev, usb_data.pipe);
-
- usb_anchor_urb(urb, &vstdev->submitted);
- retval = vstusb_fill_and_send_urb(urb, dev, pipe, buffer,
- usb_data.count, &done);
- mutex_unlock(&vstdev->lock);
- if (retval) {
- usb_unanchor_urb(urb);
- dev_err(&dev->dev,
- "%s: error %d filling and sending urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- retval = vstusb_complete_urb(urb, &done, timeout, &cnt);
- if (retval) {
- dev_err(&dev->dev, "%s: error %d completing urb %d\n",
- __func__, retval, pipe);
- goto exit;
- }
-
- if (copy_to_user(usb_data.buffer, buffer, cnt)) {
- dev_err(&dev->dev, "%s: can't copy_to_user\n",
- __func__);
- retval = -EFAULT;
- goto exit;
- }
-
- usb_data.count = cnt;
- if (copy_to_user(data, &usb_data, sizeof(struct vstusb_args))) {
- dev_err(&dev->dev, "%s: can't copy_to_user\n",
- __func__);
- retval = -EFAULT;
- } else {
- dev_dbg(&dev->dev, "%s: recv %zd bytes from pipe %d\n",
- __func__, usb_data.count, usb_data.pipe);
- }
-
- break;
-
- default:
- mutex_unlock(&vstdev->lock);
- dev_warn(&dev->dev, "ioctl_vstusb: invalid ioctl cmd %x\n",
- cmd);
- return -EINVAL;
- break;
- }
-exit:
- usb_free_urb(urb);
- kfree(buffer);
- return retval;
-}
-
-static const struct file_operations vstusb_fops = {
- .owner = THIS_MODULE,
- .read = vstusb_read,
- .write = vstusb_write,
- .unlocked_ioctl = vstusb_ioctl,
- .compat_ioctl = vstusb_ioctl,
- .open = vstusb_open,
- .release = vstusb_release,
-};
-
-static struct usb_class_driver usb_vstusb_class = {
- .name = "usb/vstusb%d",
- .fops = &vstusb_fops,
- .minor_base = VSTUSB_MINOR_BASE,
-};
-
-static int vstusb_probe(struct usb_interface *intf,
- const struct usb_device_id *id)
-{
- struct usb_device *dev = interface_to_usbdev(intf);
- struct vstusb_device *vstdev;
- int i;
- int retval = 0;
-
- /* allocate memory for our device state and intialize it */
-
- vstdev = kzalloc(sizeof(*vstdev), GFP_KERNEL);
- if (vstdev == NULL)
- return -ENOMEM;
-
- /* must do usb_get_dev() prior to kref_init() since the kref_put()
- * release function will do a usb_put_dev() */
- usb_get_dev(dev);
- kref_init(&vstdev->kref);
- mutex_init(&vstdev->lock);
-
- i = dev->descriptor.bcdDevice;
-
- dev_dbg(&intf->dev, "Version %1d%1d.%1d%1d found at address %d\n",
- (i & 0xF000) >> 12, (i & 0xF00) >> 8,
- (i & 0xF0) >> 4, (i & 0xF), dev->devnum);
-
- vstdev->present = 1;
- vstdev->isopen = 0;
- vstdev->usb_dev = dev;
- init_usb_anchor(&vstdev->submitted);
-
- usb_set_intfdata(intf, vstdev);
- retval = usb_register_dev(intf, &usb_vstusb_class);
- if (retval) {
- dev_err(&intf->dev,
- "%s: Not able to get a minor for this device.\n",
- __func__);
- usb_set_intfdata(intf, NULL);
- kref_put(&vstdev->kref, vstusb_delete);
- return retval;
- }
-
- /* let the user know what node this device is now attached to */
- dev_info(&intf->dev,
- "VST USB Device #%d now attached to major %d minor %d\n",
- (intf->minor - VSTUSB_MINOR_BASE), USB_MAJOR, intf->minor);
-
- dev_info(&intf->dev, "%s, %s\n", DRIVER_DESC, DRIVER_VERSION);
-
- return retval;
-}
-
-static void vstusb_disconnect(struct usb_interface *intf)
-{
- struct vstusb_device *vstdev = usb_get_intfdata(intf);
-
- usb_deregister_dev(intf, &usb_vstusb_class);
- usb_set_intfdata(intf, NULL);
-
- if (vstdev) {
-
- mutex_lock(&vstdev->lock);
- vstdev->present = 0;
-
- usb_kill_anchored_urbs(&vstdev->submitted);
-
- mutex_unlock(&vstdev->lock);
-
- kref_put(&vstdev->kref, vstusb_delete);
- }
-
-}
-
-static int vstusb_suspend(struct usb_interface *intf, pm_message_t message)
-{
- struct vstusb_device *vstdev = usb_get_intfdata(intf);
- int time;
- if (!vstdev)
- return 0;
-
- mutex_lock(&vstdev->lock);
- time = usb_wait_anchor_empty_timeout(&vstdev->submitted, 1000);
- if (!time)
- usb_kill_anchored_urbs(&vstdev->submitted);
- mutex_unlock(&vstdev->lock);
-
- return 0;
-}
-
-static int vstusb_resume(struct usb_interface *intf)
-{
- return 0;
-}
-
-static struct usb_driver vstusb_driver = {
- .name = "vstusb",
- .probe = vstusb_probe,
- .disconnect = vstusb_disconnect,
- .suspend = vstusb_suspend,
- .resume = vstusb_resume,
- .id_table = id_table,
-};
-
-static int __init vstusb_init(void)
-{
- int rc;
-
- rc = usb_register(&vstusb_driver);
- if (rc)
- printk(KERN_ERR "%s: failed to register (%d)", __func__, rc);
-
- return rc;
-}
-
-static void __exit vstusb_exit(void)
-{
- usb_deregister(&vstusb_driver);
-}
-
-module_init(vstusb_init);
-module_exit(vstusb_exit);
-
-MODULE_AUTHOR("Dennis O'Brien/Stephen Ware");
-MODULE_DESCRIPTION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index 385ec052016..6dd44bc1f5f 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -460,8 +460,8 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
char ev_type, int status)
{
const struct usb_endpoint_descriptor *epd = &urb->ep->desc;
- unsigned long flags;
struct timeval ts;
+ unsigned long flags;
unsigned int urb_length;
unsigned int offset;
unsigned int length;
@@ -600,10 +600,13 @@ static void mon_bin_complete(void *data, struct urb *urb, int status)
static void mon_bin_error(void *data, struct urb *urb, int error)
{
struct mon_reader_bin *rp = data;
+ struct timeval ts;
unsigned long flags;
unsigned int offset;
struct mon_bin_hdr *ep;
+ do_gettimeofday(&ts);
+
spin_lock_irqsave(&rp->b_lock, flags);
offset = mon_buff_area_alloc(rp, PKT_SIZE);
@@ -623,6 +626,8 @@ static void mon_bin_error(void *data, struct urb *urb, int error)
ep->devnum = urb->dev->devnum;
ep->busnum = urb->dev->bus->busnum;
ep->id = (unsigned long) urb;
+ ep->ts_sec = ts.tv_sec;
+ ep->ts_usec = ts.tv_usec;
ep->status = error;
ep->flag_setup = '-';
diff --git a/drivers/usb/mon/mon_text.c b/drivers/usb/mon/mon_text.c
index 047568ff223..31c11888ec6 100644
--- a/drivers/usb/mon/mon_text.c
+++ b/drivers/usb/mon/mon_text.c
@@ -180,7 +180,7 @@ static inline unsigned int mon_get_timestamp(void)
unsigned int stamp;
do_gettimeofday(&tval);
- stamp = tval.tv_sec & 0xFFFF; /* 2^32 = 4294967296. Limit to 4096s. */
+ stamp = tval.tv_sec & 0xFFF; /* 2^32 = 4294967296. Limit to 4096s. */
stamp = stamp * 1000000 + tval.tv_usec;
return stamp;
}
@@ -273,12 +273,12 @@ static void mon_text_error(void *data, struct urb *urb, int error)
ep->type = 'E';
ep->id = (unsigned long) urb;
- ep->busnum = 0;
+ ep->busnum = urb->dev->bus->busnum;
ep->devnum = urb->dev->devnum;
ep->epnum = usb_endpoint_num(&urb->ep->desc);
ep->xfertype = usb_endpoint_type(&urb->ep->desc);
ep->is_in = usb_urb_dir_in(urb);
- ep->tstamp = 0;
+ ep->tstamp = mon_get_timestamp();
ep->length = 0;
ep->status = error;
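
Note: the 0xFFF mask is what actually keeps the packed timestamp inside 32 bits: tv_sec & 0xFFF is at most 4095, and 4095 * 1,000,000 + 999,999 = 4,095,999,999 < 2^32 = 4,294,967,296, whereas the old 0xFFFF mask allowed up to 65,535 seconds and 65,535 * 1,000,000 already overflows an unsigned int.
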
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index d9db8649802..b4c783c284b 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -37,7 +37,7 @@ config USB_MUSB_SOC
depends on USB_MUSB_HDRC
default y if ARCH_DAVINCI
default y if ARCH_OMAP2430
- default y if ARCH_OMAP34XX
+ default y if ARCH_OMAP3
default y if (BF54x && !BF544)
default y if (BF52x && !BF522 && !BF523)
@@ -48,7 +48,7 @@ comment "OMAP 243x high speed USB support"
depends on USB_MUSB_HDRC && ARCH_OMAP2430
comment "OMAP 343x high speed USB support"
- depends on USB_MUSB_HDRC && ARCH_OMAP34XX
+ depends on USB_MUSB_HDRC && ARCH_OMAP3
comment "Blackfin high speed USB Support"
depends on USB_MUSB_HDRC && ((BF54x && !BF544) || (BF52x && !BF522 && !BF523))
@@ -153,7 +153,7 @@ config MUSB_PIO_ONLY
config USB_INVENTRA_DMA
bool
depends on USB_MUSB_HDRC && !MUSB_PIO_ONLY
- default ARCH_OMAP2430 || ARCH_OMAP34XX || BLACKFIN
+ default ARCH_OMAP2430 || ARCH_OMAP3 || BLACKFIN
help
Enable DMA transfers using Mentor's engine.
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index ad26e656966..bcee1339d4f 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -30,7 +30,6 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
void __iomem *fifo = hw_ep->fifo;
void __iomem *epio = hw_ep->regs;
u8 epnum = hw_ep->epnum;
- u16 dma_reg = 0;
prefetch((u8 *)src);
@@ -42,15 +41,17 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
dump_fifo_data(src, len);
if (!ANOMALY_05000380 && epnum != 0) {
- flush_dcache_range((unsigned int)src,
- (unsigned int)(src + len));
+ u16 dma_reg;
+
+ flush_dcache_range((unsigned long)src,
+ (unsigned long)(src + len));
/* Setup DMA address register */
- dma_reg = (u16) ((u32) src & 0xFFFF);
+ dma_reg = (u32)src;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
SSYNC();
- dma_reg = (u16) (((u32) src >> 16) & 0xFFFF);
+ dma_reg = (u32)src >> 16;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
SSYNC();
@@ -79,12 +80,9 @@ void musb_write_fifo(struct musb_hw_ep *hw_ep, u16 len, const u8 *src)
SSYNC();
if (unlikely((unsigned long)src & 0x01))
- outsw_8((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
+ outsw_8((unsigned long)fifo, src, (len + 1) >> 1);
else
- outsw((unsigned long)fifo, src,
- len & 0x01 ? (len >> 1) + 1 : len >> 1);
-
+ outsw((unsigned long)fifo, src, (len + 1) >> 1);
}
}
/*
@@ -94,19 +92,19 @@ void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
void __iomem *fifo = hw_ep->fifo;
u8 epnum = hw_ep->epnum;
- u16 dma_reg = 0;
if (ANOMALY_05000467 && epnum != 0) {
+ u16 dma_reg;
- invalidate_dcache_range((unsigned int)dst,
- (unsigned int)(dst + len));
+ invalidate_dcache_range((unsigned long)dst,
+ (unsigned long)(dst + len));
/* Setup DMA address register */
- dma_reg = (u16) ((u32) dst & 0xFFFF);
+ dma_reg = (u32)dst;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_LOW), dma_reg);
SSYNC();
- dma_reg = (u16) (((u32) dst >> 16) & 0xFFFF);
+ dma_reg = (u32)dst >> 16;
bfin_write16(USB_DMA_REG(epnum, USB_DMAx_ADDR_HIGH), dma_reg);
SSYNC();
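
Note: (len + 1) >> 1 is the byte count rounded up to 16-bit words and is equivalent to the ternary it replaces: for even len it equals len >> 1, for odd len it equals (len >> 1) + 1. Likewise, assigning (u32)src to a u16 keeps only the low 16 address bits and (u32)src >> 16 yields the high half, so the explicit & 0xFFFF masks were redundant.
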
diff --git a/drivers/usb/musb/cppi_dma.c b/drivers/usb/musb/cppi_dma.c
index a44a450c860..3c69a76ec39 100644
--- a/drivers/usb/musb/cppi_dma.c
+++ b/drivers/usb/musb/cppi_dma.c
@@ -1191,8 +1191,13 @@ irqreturn_t cppi_interrupt(int irq, void *dev_id)
bd = tx_ch->head;
+ /*
+	 * If head is NULL, this could mean an abort interrupt
+	 * that needs to be acknowledged.
+ */
if (NULL == bd) {
DBG(1, "null BD\n");
+ tx_ram->tx_complete = 0;
continue;
}
@@ -1412,15 +1417,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
if (cppi_ch->transmit) {
struct cppi_tx_stateram __iomem *tx_ram;
- int enabled;
-
- /* mask interrupts raised to signal teardown complete. */
- enabled = musb_readl(tibase, DAVINCI_TXCPPI_INTENAB_REG)
- & (1 << cppi_ch->index);
- if (enabled)
- musb_writel(tibase, DAVINCI_TXCPPI_INTCLR_REG,
- (1 << cppi_ch->index));
-
/* REVISIT put timeouts on these controller handshakes */
cppi_dump_tx(6, cppi_ch, " (teardown)");
@@ -1435,7 +1431,6 @@ static int cppi_channel_abort(struct dma_channel *channel)
do {
value = musb_readl(&tx_ram->tx_complete, 0);
} while (0xFFFFFFFC != value);
- musb_writel(&tx_ram->tx_complete, 0, 0xFFFFFFFC);
/* FIXME clean up the transfer state ... here?
* the completion routine should get called with
@@ -1448,23 +1443,15 @@ static int cppi_channel_abort(struct dma_channel *channel)
musb_writew(regs, MUSB_TXCSR, value);
musb_writew(regs, MUSB_TXCSR, value);
- /* While we scrub the TX state RAM, ensure that we clean
- * up any interrupt that's currently asserted:
+ /*
* 1. Write to completion Ptr value 0x1(bit 0 set)
* (write back mode)
- * 2. Write to completion Ptr value 0x0(bit 0 cleared)
- * (compare mode)
- * Value written is compared(for bits 31:2) and when
- * equal, interrupt is deasserted.
+ * 2. Wait for abort interrupt and then put the channel in
+ * compare mode by writing 1 to the tx_complete register.
*/
cppi_reset_tx(tx_ram, 1);
- musb_writel(&tx_ram->tx_complete, 0, 0);
-
- /* re-enable interrupt */
- if (enabled)
- musb_writel(tibase, DAVINCI_TXCPPI_INTENAB_REG,
- (1 << cppi_ch->index));
-
+ cppi_ch->head = 0;
+ musb_writel(&tx_ram->tx_complete, 0, 1);
cppi_dump_tx(5, cppi_ch, " (done teardown)");
/* REVISIT tx side _should_ clean up the same way
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 5eb9318cff7..b4bbf8f2c23 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -557,6 +557,69 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
handled = IRQ_HANDLED;
}
+
+ if (int_usb & MUSB_INTR_SUSPEND) {
+ DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
+ otg_state_string(musb), devctl, power);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv->state) {
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_A_PERIPHERAL:
+ /* We also come here if the cable is removed, since
+ * this silicon doesn't report ID-no-longer-grounded.
+ *
+ * We depend on T(a_wait_bcon) to shut us down, and
+ * hope users don't do anything dicey during this
+ * undesired detour through A_WAIT_BCON.
+ */
+ musb_hnp_stop(musb);
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ musb_root_disconnect(musb);
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon
+ ? : OTG_TIME_A_WAIT_BCON));
+
+ break;
+#endif
+ case OTG_STATE_B_IDLE:
+ if (!musb->is_active)
+ break;
+ case OTG_STATE_B_PERIPHERAL:
+ musb_g_suspend(musb);
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv->gadget->b_hnp_enable;
+ if (musb->is_active) {
+#ifdef CONFIG_USB_MUSB_OTG
+ musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
+ DBG(1, "HNP: Setting timer for b_ase0_brst\n");
+ mod_timer(&musb->otg_timer, jiffies
+ + msecs_to_jiffies(
+ OTG_TIME_B_ASE0_BRST));
+#endif
+ }
+ break;
+ case OTG_STATE_A_WAIT_BCON:
+ if (musb->a_wait_bcon != 0)
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+ case OTG_STATE_A_HOST:
+ musb->xceiv->state = OTG_STATE_A_SUSPEND;
+ musb->is_active = is_otg_enabled(musb)
+ && musb->xceiv->host->b_hnp_enable;
+ break;
+ case OTG_STATE_B_HOST:
+ /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
+ DBG(1, "REVISIT: SUSPEND as B_HOST\n");
+ break;
+ default:
+ /* "should not happen" */
+ musb->is_active = 0;
+ break;
+ }
+ }
+
if (int_usb & MUSB_INTR_CONNECT) {
struct usb_hcd *hcd = musb_to_hcd(musb);
@@ -625,10 +688,61 @@ b_host:
}
#endif /* CONFIG_USB_MUSB_HDRC_HCD */
+ if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
+ DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
+ otg_state_string(musb),
+ MUSB_MODE(musb), devctl);
+ handled = IRQ_HANDLED;
+
+ switch (musb->xceiv->state) {
+#ifdef CONFIG_USB_MUSB_HDRC_HCD
+ case OTG_STATE_A_HOST:
+ case OTG_STATE_A_SUSPEND:
+ usb_hcd_resume_root_hub(musb_to_hcd(musb));
+ musb_root_disconnect(musb);
+ if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
+ musb_platform_try_idle(musb, jiffies
+ + msecs_to_jiffies(musb->a_wait_bcon));
+ break;
+#endif /* HOST */
+#ifdef CONFIG_USB_MUSB_OTG
+ case OTG_STATE_B_HOST:
+ /* REVISIT this behaves for "real disconnect"
+			 * cases; make sure the other transitions
+			 * from B_HOST act right too. The B_HOST code
+ * in hnp_stop() is currently not used...
+ */
+ musb_root_disconnect(musb);
+ musb_to_hcd(musb)->self.is_b_host = 0;
+ musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
+ MUSB_DEV_MODE(musb);
+ musb_g_disconnect(musb);
+ break;
+ case OTG_STATE_A_PERIPHERAL:
+ musb_hnp_stop(musb);
+ musb_root_disconnect(musb);
+ /* FALLTHROUGH */
+ case OTG_STATE_B_WAIT_ACON:
+ /* FALLTHROUGH */
+#endif /* OTG */
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ case OTG_STATE_B_PERIPHERAL:
+ case OTG_STATE_B_IDLE:
+ musb_g_disconnect(musb);
+ break;
+#endif /* GADGET */
+ default:
+ WARNING("unhandled DISCONNECT transition (%s)\n",
+ otg_state_string(musb));
+ break;
+ }
+ }
+
/* mentor saves a bit: bus reset and babble share the same irq.
* only host sees babble; only peripheral sees bus reset.
*/
if (int_usb & MUSB_INTR_RESET) {
+ handled = IRQ_HANDLED;
if (is_host_capable() && (devctl & MUSB_DEVCTL_HM) != 0) {
/*
* Looks like non-HS BABBLE can be ignored, but
@@ -641,7 +755,7 @@ b_host:
DBG(1, "BABBLE devctl: %02x\n", devctl);
else {
ERR("Stopping host session -- babble\n");
- musb_writeb(mbase, MUSB_DEVCTL, 0);
+ musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
}
} else if (is_peripheral_capable()) {
DBG(1, "BUS RESET as %s\n", otg_state_string(musb));
@@ -686,29 +800,7 @@ b_host:
otg_state_string(musb));
}
}
-
- handled = IRQ_HANDLED;
}
- schedule_work(&musb->irq_work);
-
- return handled;
-}
-
-/*
- * Interrupt Service Routine to record USB "global" interrupts.
- * Since these do not happen often and signify things of
- * paramount importance, it seems OK to check them individually;
- * the order of the tests is specified in the manual
- *
- * @param musb instance pointer
- * @param int_usb register contents
- * @param devctl
- * @param power
- */
-static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
- u8 devctl, u8 power)
-{
- irqreturn_t handled = IRQ_NONE;
#if 0
/* REVISIT ... this would be for multiplexing periodic endpoints, or
@@ -755,117 +847,7 @@ static irqreturn_t musb_stage2_irq(struct musb *musb, u8 int_usb,
}
#endif
- if ((int_usb & MUSB_INTR_DISCONNECT) && !musb->ignore_disconnect) {
- DBG(1, "DISCONNECT (%s) as %s, devctl %02x\n",
- otg_state_string(musb),
- MUSB_MODE(musb), devctl);
- handled = IRQ_HANDLED;
-
- switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_MUSB_HDRC_HCD
- case OTG_STATE_A_HOST:
- case OTG_STATE_A_SUSPEND:
- usb_hcd_resume_root_hub(musb_to_hcd(musb));
- musb_root_disconnect(musb);
- if (musb->a_wait_bcon != 0 && is_otg_enabled(musb))
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon));
- break;
-#endif /* HOST */
-#ifdef CONFIG_USB_MUSB_OTG
- case OTG_STATE_B_HOST:
- /* REVISIT this behaves for "real disconnect"
- * cases; make sure the other transitions from
- * from B_HOST act right too. The B_HOST code
- * in hnp_stop() is currently not used...
- */
- musb_root_disconnect(musb);
- musb_to_hcd(musb)->self.is_b_host = 0;
- musb->xceiv->state = OTG_STATE_B_PERIPHERAL;
- MUSB_DEV_MODE(musb);
- musb_g_disconnect(musb);
- break;
- case OTG_STATE_A_PERIPHERAL:
- musb_hnp_stop(musb);
- musb_root_disconnect(musb);
- /* FALLTHROUGH */
- case OTG_STATE_B_WAIT_ACON:
- /* FALLTHROUGH */
-#endif /* OTG */
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- case OTG_STATE_B_PERIPHERAL:
- case OTG_STATE_B_IDLE:
- musb_g_disconnect(musb);
- break;
-#endif /* GADGET */
- default:
- WARNING("unhandled DISCONNECT transition (%s)\n",
- otg_state_string(musb));
- break;
- }
-
- schedule_work(&musb->irq_work);
- }
-
- if (int_usb & MUSB_INTR_SUSPEND) {
- DBG(1, "SUSPEND (%s) devctl %02x power %02x\n",
- otg_state_string(musb), devctl, power);
- handled = IRQ_HANDLED;
-
- switch (musb->xceiv->state) {
-#ifdef CONFIG_USB_MUSB_OTG
- case OTG_STATE_A_PERIPHERAL:
- /* We also come here if the cable is removed, since
- * this silicon doesn't report ID-no-longer-grounded.
- *
- * We depend on T(a_wait_bcon) to shut us down, and
- * hope users don't do anything dicey during this
- * undesired detour through A_WAIT_BCON.
- */
- musb_hnp_stop(musb);
- usb_hcd_resume_root_hub(musb_to_hcd(musb));
- musb_root_disconnect(musb);
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon
- ? : OTG_TIME_A_WAIT_BCON));
- break;
-#endif
- case OTG_STATE_B_PERIPHERAL:
- musb_g_suspend(musb);
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv->gadget->b_hnp_enable;
- if (musb->is_active) {
-#ifdef CONFIG_USB_MUSB_OTG
- musb->xceiv->state = OTG_STATE_B_WAIT_ACON;
- DBG(1, "HNP: Setting timer for b_ase0_brst\n");
- mod_timer(&musb->otg_timer, jiffies
- + msecs_to_jiffies(
- OTG_TIME_B_ASE0_BRST));
-#endif
- }
- break;
- case OTG_STATE_A_WAIT_BCON:
- if (musb->a_wait_bcon != 0)
- musb_platform_try_idle(musb, jiffies
- + msecs_to_jiffies(musb->a_wait_bcon));
- break;
- case OTG_STATE_A_HOST:
- musb->xceiv->state = OTG_STATE_A_SUSPEND;
- musb->is_active = is_otg_enabled(musb)
- && musb->xceiv->host->b_hnp_enable;
- break;
- case OTG_STATE_B_HOST:
- /* Transition to B_PERIPHERAL, see 6.8.2.6 p 44 */
- DBG(1, "REVISIT: SUSPEND as B_HOST\n");
- break;
- default:
- /* "should not happen" */
- musb->is_active = 0;
- break;
- }
- schedule_work(&musb->irq_work);
- }
-
+ schedule_work(&musb->irq_work);
return handled;
}
@@ -1000,7 +982,7 @@ static void musb_shutdown(struct platform_device *pdev)
* more than selecting one of a bunch of predefined configurations.
*/
#if defined(CONFIG_USB_TUSB6010) || \
- defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
static ushort __initdata fifo_mode = 4;
#else
static ushort __initdata fifo_mode = 2;
@@ -1095,6 +1077,36 @@ static struct fifo_cfg __initdata mode_4_cfg[] = {
{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
};
+/* mode 5 - fits in 8KB */
+static struct fifo_cfg __initdata mode_5_cfg[] = {
+{ .hw_ep_num = 1, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 1, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 2, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 3, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 4, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_TX, .maxpacket = 512, },
+{ .hw_ep_num = 5, .style = FIFO_RX, .maxpacket = 512, },
+{ .hw_ep_num = 6, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 6, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 7, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 8, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 9, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 10, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 11, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_TX, .maxpacket = 32, },
+{ .hw_ep_num = 12, .style = FIFO_RX, .maxpacket = 32, },
+{ .hw_ep_num = 13, .style = FIFO_RXTX, .maxpacket = 512, },
+{ .hw_ep_num = 14, .style = FIFO_RXTX, .maxpacket = 1024, },
+{ .hw_ep_num = 15, .style = FIFO_RXTX, .maxpacket = 1024, },
+};
/*
* configure a fifo; for non-shared endpoints, this may be called
@@ -1210,6 +1222,10 @@ static int __init ep_config_from_table(struct musb *musb)
cfg = mode_4_cfg;
n = ARRAY_SIZE(mode_4_cfg);
break;
+ case 5:
+ cfg = mode_5_cfg;
+ n = ARRAY_SIZE(mode_5_cfg);
+ break;
}
printk(KERN_DEBUG "%s: setup fifo_mode %d\n",
@@ -1314,9 +1330,6 @@ enum { MUSB_CONTROLLER_MHDRC, MUSB_CONTROLLER_HDRC, };
*/
static int __init musb_core_init(u16 musb_type, struct musb *musb)
{
-#ifdef MUSB_AHB_ID
- u32 data;
-#endif
u8 reg;
char *type;
char aInfo[90], aRevision[32], aDate[12];
@@ -1328,23 +1341,17 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
reg = musb_read_configdata(mbase);
strcpy(aInfo, (reg & MUSB_CONFIGDATA_UTMIDW) ? "UTMI-16" : "UTMI-8");
- if (reg & MUSB_CONFIGDATA_DYNFIFO)
+ if (reg & MUSB_CONFIGDATA_DYNFIFO) {
strcat(aInfo, ", dyn FIFOs");
+ musb->dyn_fifo = true;
+ }
if (reg & MUSB_CONFIGDATA_MPRXE) {
strcat(aInfo, ", bulk combine");
-#ifdef C_MP_RX
musb->bulk_combine = true;
-#else
- strcat(aInfo, " (X)"); /* no driver support */
-#endif
}
if (reg & MUSB_CONFIGDATA_MPTXE) {
strcat(aInfo, ", bulk split");
-#ifdef C_MP_TX
musb->bulk_split = true;
-#else
- strcat(aInfo, " (X)"); /* no driver support */
-#endif
}
if (reg & MUSB_CONFIGDATA_HBRXE) {
strcat(aInfo, ", HB-ISO Rx");
@@ -1360,20 +1367,7 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
printk(KERN_DEBUG "%s: ConfigData=0x%02x (%s)\n",
musb_driver_name, reg, aInfo);
-#ifdef MUSB_AHB_ID
- data = musb_readl(mbase, 0x404);
- sprintf(aDate, "%04d-%02x-%02x", (data & 0xffff),
- (data >> 16) & 0xff, (data >> 24) & 0xff);
- /* FIXME ID2 and ID3 are unused */
- data = musb_readl(mbase, 0x408);
- printk(KERN_DEBUG "ID2=%lx\n", (long unsigned)data);
- data = musb_readl(mbase, 0x40c);
- printk(KERN_DEBUG "ID3=%lx\n", (long unsigned)data);
- reg = musb_readb(mbase, 0x400);
- musb_type = ('M' == reg) ? MUSB_CONTROLLER_MHDRC : MUSB_CONTROLLER_HDRC;
-#else
aDate[0] = 0;
-#endif
if (MUSB_CONTROLLER_MHDRC == musb_type) {
musb->is_multipoint = 1;
type = "M";
@@ -1404,21 +1398,10 @@ static int __init musb_core_init(u16 musb_type, struct musb *musb)
musb->nr_endpoints = 1;
musb->epmask = 1;
- if (reg & MUSB_CONFIGDATA_DYNFIFO) {
- if (musb->config->dyn_fifo)
- status = ep_config_from_table(musb);
- else {
- ERR("reconfigure software for Dynamic FIFOs\n");
- status = -ENODEV;
- }
- } else {
- if (!musb->config->dyn_fifo)
- status = ep_config_from_hw(musb);
- else {
- ERR("reconfigure software for static FIFOs\n");
- return -ENODEV;
- }
- }
+ if (musb->dyn_fifo)
+ status = ep_config_from_table(musb);
+ else
+ status = ep_config_from_hw(musb);
if (status < 0)
return status;
@@ -1587,11 +1570,6 @@ irqreturn_t musb_interrupt(struct musb *musb)
ep_num++;
}
- /* finish handling "global" interrupts after handling fifos */
- if (musb->int_usb)
- retval |= musb_stage2_irq(musb,
- musb->int_usb, devctl, power);
-
return retval;
}
@@ -1696,7 +1674,7 @@ musb_vbus_store(struct device *dev, struct device_attribute *attr,
unsigned long val;
if (sscanf(buf, "%lu", &val) < 1) {
- printk(KERN_ERR "Invalid VBUS timeout ms value\n");
+ dev_err(dev, "Invalid VBUS timeout ms value\n");
return -EINVAL;
}
@@ -1746,7 +1724,7 @@ musb_srp_store(struct device *dev, struct device_attribute *attr,
if (sscanf(buf, "%hu", &srp) != 1
|| (srp != 1)) {
- printk(KERN_ERR "SRP: Value must be 1\n");
+ dev_err(dev, "SRP: Value must be 1\n");
return -EINVAL;
}
@@ -1759,6 +1737,19 @@ static DEVICE_ATTR(srp, 0644, NULL, musb_srp_store);
#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
+static struct attribute *musb_attributes[] = {
+ &dev_attr_mode.attr,
+ &dev_attr_vbus.attr,
+#ifdef CONFIG_USB_GADGET_MUSB_HDRC
+ &dev_attr_srp.attr,
+#endif
+ NULL
+};
+
+static const struct attribute_group musb_attr_group = {
+ .attrs = musb_attributes,
+};
+
#endif /* sysfs */
/* Only used to provide driver mode change events */
@@ -1833,11 +1824,7 @@ static void musb_free(struct musb *musb)
*/
#ifdef CONFIG_SYSFS
- device_remove_file(musb->controller, &dev_attr_mode);
- device_remove_file(musb->controller, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- device_remove_file(musb->controller, &dev_attr_srp);
-#endif
+ sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
#endif
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
@@ -2017,22 +2004,10 @@ bad_config:
musb->irq_wake = 0;
}
- pr_info("%s: USB %s mode controller at %p using %s, IRQ %d\n",
- musb_driver_name,
- ({char *s;
- switch (musb->board_mode) {
- case MUSB_HOST: s = "Host"; break;
- case MUSB_PERIPHERAL: s = "Peripheral"; break;
- default: s = "OTG"; break;
- }; s; }),
- ctrl,
- (is_dma_capable() && musb->dma_controller)
- ? "DMA" : "PIO",
- musb->nIrq);
-
/* host side needs more setup */
if (is_host_enabled(musb)) {
struct usb_hcd *hcd = musb_to_hcd(musb);
+ u8 busctl;
otg_set_host(musb->xceiv, &hcd->self);
@@ -2040,6 +2015,13 @@ bad_config:
hcd->self.otg_port = 1;
musb->xceiv->host = &hcd->self;
hcd->power_budget = 2 * (plat->power ? : 250);
+
+ /* program PHY to use external vBus if required */
+ if (plat->extvbus) {
+ busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
+ busctl |= MUSB_ULPI_USE_EXTVBUS;
+ musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
+ }
}
/* For the host-only role, we can activate right away.
@@ -2079,26 +2061,26 @@ bad_config:
}
#ifdef CONFIG_SYSFS
- status = device_create_file(dev, &dev_attr_mode);
- status = device_create_file(dev, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- status = device_create_file(dev, &dev_attr_srp);
-#endif /* CONFIG_USB_GADGET_MUSB_HDRC */
- status = 0;
+ status = sysfs_create_group(&musb->controller->kobj, &musb_attr_group);
#endif
if (status)
goto fail2;
+ dev_info(dev, "USB %s mode controller at %p using %s, IRQ %d\n",
+ ({char *s;
+ switch (musb->board_mode) {
+ case MUSB_HOST: s = "Host"; break;
+ case MUSB_PERIPHERAL: s = "Peripheral"; break;
+ default: s = "OTG"; break;
+ }; s; }),
+ ctrl,
+ (is_dma_capable() && musb->dma_controller)
+ ? "DMA" : "PIO",
+ musb->nIrq);
+
return 0;
fail2:
-#ifdef CONFIG_SYSFS
- device_remove_file(musb->controller, &dev_attr_mode);
- device_remove_file(musb->controller, &dev_attr_vbus);
-#ifdef CONFIG_USB_GADGET_MUSB_HDRC
- device_remove_file(musb->controller, &dev_attr_srp);
-#endif
-#endif
musb_platform_exit(musb);
fail:
dev_err(musb->controller,
@@ -2127,6 +2109,7 @@ static int __init musb_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int irq = platform_get_irq(pdev, 0);
+ int status;
struct resource *iomem;
void __iomem *base;
@@ -2134,7 +2117,7 @@ static int __init musb_probe(struct platform_device *pdev)
if (!iomem || irq == 0)
return -ENODEV;
- base = ioremap(iomem->start, iomem->end - iomem->start + 1);
+ base = ioremap(iomem->start, resource_size(iomem));
if (!base) {
dev_err(dev, "ioremap failed\n");
return -ENOMEM;
@@ -2144,7 +2127,12 @@ static int __init musb_probe(struct platform_device *pdev)
/* clobbered by use_dma=n */
orig_dma_mask = dev->dma_mask;
#endif
- return musb_init_controller(dev, irq, base);
+
+ status = musb_init_controller(dev, irq, base);
+ if (status < 0)
+ iounmap(base);
+
+ return status;
}
static int __exit musb_remove(struct platform_device *pdev)
@@ -2173,6 +2161,148 @@ static int __exit musb_remove(struct platform_device *pdev)
#ifdef CONFIG_PM
+static struct musb_context_registers musb_context;
+
+void musb_save_context(struct musb *musb)
+{
+ int i;
+ void __iomem *musb_base = musb->mregs;
+
+ if (is_host_enabled(musb)) {
+ musb_context.frame = musb_readw(musb_base, MUSB_FRAME);
+ musb_context.testmode = musb_readb(musb_base, MUSB_TESTMODE);
+ }
+ musb_context.power = musb_readb(musb_base, MUSB_POWER);
+ musb_context.intrtxe = musb_readw(musb_base, MUSB_INTRTXE);
+ musb_context.intrrxe = musb_readw(musb_base, MUSB_INTRRXE);
+ musb_context.intrusbe = musb_readb(musb_base, MUSB_INTRUSBE);
+ musb_context.index = musb_readb(musb_base, MUSB_INDEX);
+ musb_context.devctl = musb_readb(musb_base, MUSB_DEVCTL);
+
+ for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb_context.index_regs[i].txmaxp =
+ musb_readw(musb_base, 0x10 + MUSB_TXMAXP);
+ musb_context.index_regs[i].txcsr =
+ musb_readw(musb_base, 0x10 + MUSB_TXCSR);
+ musb_context.index_regs[i].rxmaxp =
+ musb_readw(musb_base, 0x10 + MUSB_RXMAXP);
+ musb_context.index_regs[i].rxcsr =
+ musb_readw(musb_base, 0x10 + MUSB_RXCSR);
+
+ if (musb->dyn_fifo) {
+ musb_context.index_regs[i].txfifoadd =
+ musb_read_txfifoadd(musb_base);
+ musb_context.index_regs[i].rxfifoadd =
+ musb_read_rxfifoadd(musb_base);
+ musb_context.index_regs[i].txfifosz =
+ musb_read_txfifosz(musb_base);
+ musb_context.index_regs[i].rxfifosz =
+ musb_read_rxfifosz(musb_base);
+ }
+ if (is_host_enabled(musb)) {
+ musb_context.index_regs[i].txtype =
+ musb_readb(musb_base, 0x10 + MUSB_TXTYPE);
+ musb_context.index_regs[i].txinterval =
+ musb_readb(musb_base, 0x10 + MUSB_TXINTERVAL);
+ musb_context.index_regs[i].rxtype =
+ musb_readb(musb_base, 0x10 + MUSB_RXTYPE);
+ musb_context.index_regs[i].rxinterval =
+ musb_readb(musb_base, 0x10 + MUSB_RXINTERVAL);
+
+ musb_context.index_regs[i].txfunaddr =
+ musb_read_txfunaddr(musb_base, i);
+ musb_context.index_regs[i].txhubaddr =
+ musb_read_txhubaddr(musb_base, i);
+ musb_context.index_regs[i].txhubport =
+ musb_read_txhubport(musb_base, i);
+
+ musb_context.index_regs[i].rxfunaddr =
+ musb_read_rxfunaddr(musb_base, i);
+ musb_context.index_regs[i].rxhubaddr =
+ musb_read_rxhubaddr(musb_base, i);
+ musb_context.index_regs[i].rxhubport =
+ musb_read_rxhubport(musb_base, i);
+ }
+ }
+
+ musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
+
+ musb_platform_save_context(musb, &musb_context);
+}
+
+void musb_restore_context(struct musb *musb)
+{
+ int i;
+ void __iomem *musb_base = musb->mregs;
+ void __iomem *ep_target_regs;
+
+ musb_platform_restore_context(musb, &musb_context);
+
+ if (is_host_enabled(musb)) {
+ musb_writew(musb_base, MUSB_FRAME, musb_context.frame);
+ musb_writeb(musb_base, MUSB_TESTMODE, musb_context.testmode);
+ }
+ musb_writeb(musb_base, MUSB_POWER, musb_context.power);
+ musb_writew(musb_base, MUSB_INTRTXE, musb_context.intrtxe);
+ musb_writew(musb_base, MUSB_INTRRXE, musb_context.intrrxe);
+ musb_writeb(musb_base, MUSB_INTRUSBE, musb_context.intrusbe);
+ musb_writeb(musb_base, MUSB_DEVCTL, musb_context.devctl);
+
+ for (i = 0; i < MUSB_C_NUM_EPS; ++i) {
+ musb_writeb(musb_base, MUSB_INDEX, i);
+ musb_writew(musb_base, 0x10 + MUSB_TXMAXP,
+ musb_context.index_regs[i].txmaxp);
+ musb_writew(musb_base, 0x10 + MUSB_TXCSR,
+ musb_context.index_regs[i].txcsr);
+ musb_writew(musb_base, 0x10 + MUSB_RXMAXP,
+ musb_context.index_regs[i].rxmaxp);
+ musb_writew(musb_base, 0x10 + MUSB_RXCSR,
+ musb_context.index_regs[i].rxcsr);
+
+ if (musb->dyn_fifo) {
+ musb_write_txfifosz(musb_base,
+ musb_context.index_regs[i].txfifosz);
+ musb_write_rxfifosz(musb_base,
+ musb_context.index_regs[i].rxfifosz);
+ musb_write_txfifoadd(musb_base,
+ musb_context.index_regs[i].txfifoadd);
+ musb_write_rxfifoadd(musb_base,
+ musb_context.index_regs[i].rxfifoadd);
+ }
+
+ if (is_host_enabled(musb)) {
+ musb_writeb(musb_base, 0x10 + MUSB_TXTYPE,
+ musb_context.index_regs[i].txtype);
+ musb_writeb(musb_base, 0x10 + MUSB_TXINTERVAL,
+ musb_context.index_regs[i].txinterval);
+ musb_writeb(musb_base, 0x10 + MUSB_RXTYPE,
+ musb_context.index_regs[i].rxtype);
+ musb_writeb(musb_base, 0x10 + MUSB_RXINTERVAL,
+ musb_context.index_regs[i].rxinterval);
+ musb_write_txfunaddr(musb_base, i,
+ musb_context.index_regs[i].txfunaddr);
+ musb_write_txhubaddr(musb_base, i,
+ musb_context.index_regs[i].txhubaddr);
+ musb_write_txhubport(musb_base, i,
+ musb_context.index_regs[i].txhubport);
+
+ ep_target_regs =
+ musb_read_target_reg_base(i, musb_base);
+
+ musb_write_rxfunaddr(ep_target_regs,
+ musb_context.index_regs[i].rxfunaddr);
+ musb_write_rxhubaddr(ep_target_regs,
+ musb_context.index_regs[i].rxhubaddr);
+ musb_write_rxhubport(ep_target_regs,
+ musb_context.index_regs[i].rxhubport);
+ }
+ }
+
+ musb_writeb(musb_base, MUSB_INDEX, musb_context.index);
+}
+
static int musb_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -2194,6 +2324,8 @@ static int musb_suspend(struct device *dev)
*/
}
+ musb_save_context(musb);
+
if (musb->set_clock)
musb->set_clock(musb->clock, 0);
else
@@ -2215,6 +2347,8 @@ static int musb_resume_noirq(struct device *dev)
else
clk_enable(musb->clock);
+ musb_restore_context(musb);
+
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index 03d50909b07..d849fb81c13 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -52,6 +52,15 @@ struct musb;
struct musb_hw_ep;
struct musb_ep;
+/* Helper defines for struct musb->hwvers */
+#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
+#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
+#define MUSB_HWVERS_RC 0x8000
+#define MUSB_HWVERS_1300 0x52C
+#define MUSB_HWVERS_1400 0x590
+#define MUSB_HWVERS_1800 0x720
+#define MUSB_HWVERS_1900 0x784
+#define MUSB_HWVERS_2000 0x800
#include "musb_debug.h"
#include "musb_dma.h"
@@ -322,13 +331,6 @@ struct musb {
struct clk *clock;
irqreturn_t (*isr)(int, void *);
struct work_struct irq_work;
-#define MUSB_HWVERS_MAJOR(x) ((x >> 10) & 0x1f)
-#define MUSB_HWVERS_MINOR(x) (x & 0x3ff)
-#define MUSB_HWVERS_RC 0x8000
-#define MUSB_HWVERS_1300 0x52C
-#define MUSB_HWVERS_1400 0x590
-#define MUSB_HWVERS_1800 0x720
-#define MUSB_HWVERS_2000 0x800
u16 hwvers;
/* this hub status bit is reserved by USB 2.0 and not seen by usbcore */
@@ -411,22 +413,15 @@ struct musb {
unsigned hb_iso_rx:1; /* high bandwidth iso rx? */
unsigned hb_iso_tx:1; /* high bandwidth iso tx? */
+ unsigned dyn_fifo:1; /* dynamic FIFO supported? */
-#ifdef C_MP_TX
- unsigned bulk_split:1;
+ unsigned bulk_split:1;
#define can_bulk_split(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
-#else
-#define can_bulk_split(musb, type) 0
-#endif
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_split)
-#ifdef C_MP_RX
- unsigned bulk_combine:1;
+ unsigned bulk_combine:1;
#define can_bulk_combine(musb,type) \
- (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
-#else
-#define can_bulk_combine(musb, type) 0
-#endif
+ (((type) == USB_ENDPOINT_XFER_BULK) && (musb)->bulk_combine)
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
/* is_suspended means USB B_PERIPHERAL suspend */
@@ -461,6 +456,45 @@ struct musb {
#endif
};
+#ifdef CONFIG_PM
+struct musb_csr_regs {
+ /* FIFO registers */
+ u16 txmaxp, txcsr, rxmaxp, rxcsr;
+ u16 rxfifoadd, txfifoadd;
+ u8 txtype, txinterval, rxtype, rxinterval;
+ u8 rxfifosz, txfifosz;
+ u8 txfunaddr, txhubaddr, txhubport;
+ u8 rxfunaddr, rxhubaddr, rxhubport;
+};
+
+struct musb_context_registers {
+
+#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
+ u32 otg_sysconfig, otg_forcestandby;
+#endif
+ u8 power;
+ u16 intrtxe, intrrxe;
+ u8 intrusbe;
+ u16 frame;
+ u8 index, testmode;
+
+ u8 devctl, misc;
+
+ struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
+};
+
+#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
+extern void musb_platform_save_context(struct musb *musb,
+ struct musb_context_registers *musb_context);
+extern void musb_platform_restore_context(struct musb *musb,
+ struct musb_context_registers *musb_context);
+#else
+#define musb_platform_save_context(m, x) do {} while (0)
+#define musb_platform_restore_context(m, x) do {} while (0)
+#endif
+
+#endif
+
static inline void musb_set_vbus(struct musb *musb, int is_on)
{
musb->board_set_vbus(musb, is_on);
@@ -562,7 +596,7 @@ extern void musb_hnp_stop(struct musb *musb);
extern int musb_platform_set_mode(struct musb *musb, u8 musb_mode);
#if defined(CONFIG_USB_TUSB6010) || defined(CONFIG_BLACKFIN) || \
- defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP34XX)
+ defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3)
extern void musb_platform_try_idle(struct musb *musb, unsigned long timeout);
#else
#define musb_platform_try_idle(x, y) do {} while (0)
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index cbcf14a236e..a9f288cd70e 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -895,7 +895,14 @@ static int musb_gadget_enable(struct usb_ep *ep,
/* REVISIT if can_bulk_split(), use by updating "tmp";
* likewise high bandwidth periodic tx
*/
- musb_writew(regs, MUSB_TXMAXP, tmp);
+ /* Set TXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode. Currently, it seems that double
+ * buffering has problems if the musb RTL revision number is < 2.0.
+ */
+ if (musb->hwvers < MUSB_HWVERS_2000)
+ musb_writew(regs, MUSB_TXMAXP, hw_ep->max_packet_sz_tx);
+ else
+ musb_writew(regs, MUSB_TXMAXP, tmp);
csr = MUSB_TXCSR_MODE | MUSB_TXCSR_CLRDATATOG;
if (musb_readw(regs, MUSB_TXCSR)
@@ -925,7 +932,13 @@ static int musb_gadget_enable(struct usb_ep *ep,
/* REVISIT if can_bulk_combine() use by updating "tmp"
* likewise high bandwidth periodic rx
*/
- musb_writew(regs, MUSB_RXMAXP, tmp);
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffering mode.
+ */
+ if (musb->hwvers < MUSB_HWVERS_2000)
+ musb_writew(regs, MUSB_RXMAXP, hw_ep->max_packet_sz_rx);
+ else
+ musb_writew(regs, MUSB_RXMAXP, tmp);
/* force shared fifo to OUT-only mode */
if (hw_ep->is_shared_fifo) {
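
A stand-alone sketch of the selection made in both hunks above: on RTL revisions before 2.0 the endpoint's FIFO size is programmed into TXMAXP/RXMAXP instead of the requested maxpacket value, which turns double buffering off (MUSB_HWVERS_2000 as defined in musb_core.h; the sizes in main() are illustrative):

#include <stdio.h>
#include <stdint.h>

#define MUSB_HWVERS_2000	0x800

/* value to program into TXMAXP/RXMAXP */
static uint16_t pick_maxp(uint16_t hwvers, uint16_t fifo_size, uint16_t maxp)
{
	return (hwvers < MUSB_HWVERS_2000) ? fifo_size : maxp;
}

int main(void)
{
	printf("RTL 1.9: %u\n", pick_maxp(0x784, 512, 64));	/* 512, double buffering disabled */
	printf("RTL 2.0: %u\n", pick_maxp(0x800, 512, 64));	/* 64 */
	return 0;
}
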
@@ -1697,8 +1710,7 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
return -EINVAL;
/* driver must be initialized to support peripheral mode */
- if (!musb || !(musb->board_mode == MUSB_OTG
- || musb->board_mode != MUSB_OTG)) {
+ if (!musb) {
DBG(1, "%s, no dev??\n", __func__);
return -ENODEV;
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 74c4c3698f1..3421cf9858b 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -605,8 +605,14 @@ musb_rx_reinit(struct musb *musb, struct musb_qh *qh, struct musb_hw_ep *ep)
musb_writeb(ep->regs, MUSB_RXTYPE, qh->type_reg);
musb_writeb(ep->regs, MUSB_RXINTERVAL, qh->intv_reg);
/* NOTE: bulk combining rewrites high bits of maxpacket */
- musb_writew(ep->regs, MUSB_RXMAXP,
- qh->maxpacket | ((qh->hb_mult - 1) << 11));
+ /* Set RXMAXP with the FIFO size of the endpoint
+ * to disable double buffer mode.
+ */
+ if (musb->hwvers < MUSB_HWVERS_2000)
+ musb_writew(ep->regs, MUSB_RXMAXP, ep->max_packet_sz_rx);
+ else
+ musb_writew(ep->regs, MUSB_RXMAXP,
+ qh->maxpacket | ((qh->hb_mult - 1) << 11));
ep->rx_reinit = 0;
}
@@ -1771,6 +1777,9 @@ static int musb_schedule(
int best_end, epnum;
struct musb_hw_ep *hw_ep = NULL;
struct list_head *head = NULL;
+ u8 toggle;
+ u8 txtype;
+ struct urb *urb = next_urb(qh);
/* use fixed hardware for control and bulk */
if (qh->type == USB_ENDPOINT_XFER_CONTROL) {
@@ -1809,6 +1818,27 @@ static int musb_schedule(
diff -= (qh->maxpacket * qh->hb_mult);
if (diff >= 0 && best_diff > diff) {
+
+ /*
+ * Mentor controller has a bug in that if we schedule
+ * a BULK Tx transfer on an endpoint that had earlier
+ * handled ISOC then the BULK transfer has to start on
+ * a zero toggle. If the BULK transfer starts on a 1
+ * toggle then this transfer will fail as the mentor
+ * controller starts the Bulk transfer on a 0 toggle
+ * irrespective of the programming of the toggle bits
+ * in the TXCSR register. Check for this condition
+ * while allocating the EP for a Tx Bulk transfer. If
+ * so skip this EP.
+ */
+ hw_ep = musb->endpoints + epnum;
+ toggle = usb_gettoggle(urb->dev, qh->epnum, !is_in);
+ txtype = (musb_readb(hw_ep->regs, MUSB_TXTYPE)
+ >> 4) & 0x3;
+ if (!is_in && (qh->type == USB_ENDPOINT_XFER_BULK) &&
+ toggle && (txtype == USB_ENDPOINT_XFER_ISOC))
+ continue;
+
best_diff = diff;
best_end = epnum;
}
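
The skip condition added above, reduced to a stand-alone predicate: a host BULK OUT transfer that would start on data toggle 1 must not be scheduled on an endpoint whose TXTYPE still records ISOC. The transfer-type values below mirror the USB encoding the driver compares against; the 0x15 register value in main() is illustrative.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define USB_ENDPOINT_XFER_ISOC	1
#define USB_ENDPOINT_XFER_BULK	2

static bool must_skip_ep(bool is_in, int qh_type, int toggle, uint8_t txtype_reg)
{
	int last_proto = (txtype_reg >> 4) & 0x3;	/* protocol last programmed on this EP */

	return !is_in && qh_type == USB_ENDPOINT_XFER_BULK &&
	       toggle && last_proto == USB_ENDPOINT_XFER_ISOC;
}

int main(void)
{
	/* OUT bulk, toggle 1, endpoint previously ran ISOC -> skip it */
	printf("%d\n", must_skip_ep(false, USB_ENDPOINT_XFER_BULK, 1, 0x15));
	return 0;
}
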
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 473a94ef905..292894a2c24 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -72,6 +72,10 @@
#define MUSB_DEVCTL_HR 0x02
#define MUSB_DEVCTL_SESSION 0x01
+/* MUSB ULPI VBUSCONTROL */
+#define MUSB_ULPI_USE_EXTVBUS 0x01
+#define MUSB_ULPI_USE_EXTVBUSIND 0x02
+
/* TESTMODE */
#define MUSB_TEST_FORCE_HOST 0x80
#define MUSB_TEST_FIFO_ACCESS 0x40
@@ -246,6 +250,7 @@
/* REVISIT: vctrl/vstatus: optional vendor utmi+phy register at 0x68 */
#define MUSB_HWVERS 0x6C /* 8 bit */
+#define MUSB_ULPI_BUSCONTROL 0x70 /* 8 bit */
#define MUSB_EPINFO 0x78 /* 8 bit */
#define MUSB_RAMINFO 0x79 /* 8 bit */
@@ -321,6 +326,26 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
musb_writew(mbase, MUSB_RXFIFOADD, c_off);
}
+static inline u8 musb_read_txfifosz(void __iomem *mbase)
+{
+ return musb_readb(mbase, MUSB_TXFIFOSZ);
+}
+
+static inline u16 musb_read_txfifoadd(void __iomem *mbase)
+{
+ return musb_readw(mbase, MUSB_TXFIFOADD);
+}
+
+static inline u8 musb_read_rxfifosz(void __iomem *mbase)
+{
+ return musb_readb(mbase, MUSB_RXFIFOSZ);
+}
+
+static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
+{
+ return musb_readw(mbase, MUSB_RXFIFOADD);
+}
+
static inline u8 musb_read_configdata(void __iomem *mbase)
{
musb_writeb(mbase, MUSB_INDEX, 0);
@@ -376,6 +401,36 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
qh_h_port_reg);
}
+static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXFUNCADDR));
+}
+
+static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBADDR));
+}
+
+static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_RXHUBPORT));
+}
+
+static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXFUNCADDR));
+}
+
+static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBADDR));
+}
+
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
+{
+ return musb_readb(mbase, MUSB_BUSCTL_OFFSET(epnum, MUSB_TXHUBPORT));
+}
+
#else /* CONFIG_BLACKFIN */
#define USB_BASE USB_FADDR
@@ -455,6 +510,22 @@ static inline void musb_write_rxfifoadd(void __iomem *mbase, u16 c_off)
{
}
+static inline u8 musb_read_txfifosz(void __iomem *mbase)
+{
+}
+
+static inline u16 musb_read_txfifoadd(void __iomem *mbase)
+{
+}
+
+static inline u8 musb_read_rxfifosz(void __iomem *mbase)
+{
+}
+
+static inline u16 musb_read_rxfifoadd(void __iomem *mbase)
+{
+}
+
static inline u8 musb_read_configdata(void __iomem *mbase)
{
return 0;
@@ -462,7 +533,11 @@ static inline u8 musb_read_configdata(void __iomem *mbase)
static inline u16 musb_read_hwvers(void __iomem *mbase)
{
- return 0;
+ /*
+ * This register is invisible on Blackfin; the MUSB RTL version
+ * on Blackfin is 1.9, so just hardcode its value.
+ */
+ return MUSB_HWVERS_1900;
}
static inline void __iomem *musb_read_target_reg_base(u8 i, void __iomem *mbase)
@@ -500,6 +575,30 @@ static inline void musb_write_txhubport(void __iomem *mbase, u8 epnum,
{
}
+static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
+{
+}
+
+static inline u8 musb_read_txhubport(void __iomem *mbase, u8 epnum)
+{
+}
+
#endif /* CONFIG_BLACKFIN */
#endif /* __MUSB_REGS_H__ */
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index a237550f91b..2fa7d5c00f3 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -250,20 +250,39 @@ static irqreturn_t dma_controller_irq(int irq, void *private_data)
u8 bchannel;
u8 int_hsdma;
- u32 addr;
+ u32 addr, count;
u16 csr;
spin_lock_irqsave(&musb->lock, flags);
int_hsdma = musb_readb(mbase, MUSB_HSDMA_INTR);
- if (!int_hsdma)
- goto done;
#ifdef CONFIG_BLACKFIN
/* Clear DMA interrupt flags */
musb_writeb(mbase, MUSB_HSDMA_INTR, int_hsdma);
#endif
+ if (!int_hsdma) {
+ DBG(2, "spurious DMA irq\n");
+
+ for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
+ musb_channel = (struct musb_dma_channel *)
+ &(controller->channel[bchannel]);
+ channel = &musb_channel->channel;
+ if (channel->status == MUSB_DMA_STATUS_BUSY) {
+ count = musb_read_hsdma_count(mbase, bchannel);
+
+ if (count == 0)
+ int_hsdma |= (1 << bchannel);
+ }
+ }
+
+ DBG(2, "int_hsdma = 0x%x\n", int_hsdma);
+
+ if (!int_hsdma)
+ goto done;
+ }
+
for (bchannel = 0; bchannel < MUSB_HSDMA_CHANNELS; bchannel++) {
if (int_hsdma & (1 << bchannel)) {
musb_channel = (struct musb_dma_channel *)
diff --git a/drivers/usb/musb/musbhsdma.h b/drivers/usb/musb/musbhsdma.h
index 1299d92dc83..613f95a058f 100644
--- a/drivers/usb/musb/musbhsdma.h
+++ b/drivers/usb/musb/musbhsdma.h
@@ -55,6 +55,10 @@
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_ADDRESS), \
addr)
+#define musb_read_hsdma_count(mbase, bchannel) \
+ musb_readl(mbase, \
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT))
+
#define musb_write_hsdma_count(mbase, bchannel, len) \
musb_writel(mbase, \
MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT), \
@@ -96,6 +100,19 @@ static inline void musb_write_hsdma_addr(void __iomem *mbase,
((u16)(((u32) dma_addr >> 16) & 0xFFFF)));
}
+static inline u32 musb_read_hsdma_count(void __iomem *mbase, u8 bchannel)
+{
+ u32 count = musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_HIGH));
+
+ count = count << 16;
+
+ count |= musb_readw(mbase,
+ MUSB_HSDMA_CHANNEL_OFFSET(bchannel, MUSB_HSDMA_COUNT_LOW));
+
+ return count;
+}
+
static inline void musb_write_hsdma_count(void __iomem *mbase,
u8 bchannel, u32 len)
{
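
A user-space sketch of the 32-bit count assembly musb_read_hsdma_count() performs in the Blackfin case: the high and low 16-bit halves are read from separate registers and combined (register accessors are left out, only the arithmetic is shown; sample values illustrative):

#include <stdio.h>
#include <stdint.h>

static uint32_t hsdma_count(uint16_t count_high, uint16_t count_low)
{
	return ((uint32_t)count_high << 16) | count_low;
}

int main(void)
{
	printf("0x%08x\n", hsdma_count(0x0001, 0x2345));	/* 0x00012345 */
	return 0;
}
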
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 83beeac5e7b..3fe16867b5a 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -220,7 +220,7 @@ int __init musb_platform_init(struct musb *musb)
musb_platform_resume(musb);
- l = omap_readl(OTG_SYSCONFIG);
+ l = musb_readl(musb->mregs, OTG_SYSCONFIG);
l &= ~ENABLEWAKEUP; /* disable wakeup */
l &= ~NOSTDBY; /* remove possible nostdby */
l |= SMARTSTDBY; /* enable smart standby */
@@ -233,17 +233,19 @@ int __init musb_platform_init(struct musb *musb)
*/
if (!cpu_is_omap3430())
l |= AUTOIDLE; /* enable auto idle */
- omap_writel(l, OTG_SYSCONFIG);
+ musb_writel(musb->mregs, OTG_SYSCONFIG, l);
- l = omap_readl(OTG_INTERFSEL);
+ l = musb_readl(musb->mregs, OTG_INTERFSEL);
l |= ULPI_12PIN;
- omap_writel(l, OTG_INTERFSEL);
+ musb_writel(musb->mregs, OTG_INTERFSEL, l);
pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, "
"sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n",
- omap_readl(OTG_REVISION), omap_readl(OTG_SYSCONFIG),
- omap_readl(OTG_SYSSTATUS), omap_readl(OTG_INTERFSEL),
- omap_readl(OTG_SIMENABLE));
+ musb_readl(musb->mregs, OTG_REVISION),
+ musb_readl(musb->mregs, OTG_SYSCONFIG),
+ musb_readl(musb->mregs, OTG_SYSSTATUS),
+ musb_readl(musb->mregs, OTG_INTERFSEL),
+ musb_readl(musb->mregs, OTG_SIMENABLE));
omap_vbus_power(musb, musb->board_mode == MUSB_HOST, 1);
@@ -255,6 +257,22 @@ int __init musb_platform_init(struct musb *musb)
return 0;
}
+#ifdef CONFIG_PM
+void musb_platform_save_context(struct musb *musb,
+ struct musb_context_registers *musb_context)
+{
+ musb_context->otg_sysconfig = musb_readl(musb->mregs, OTG_SYSCONFIG);
+ musb_context->otg_forcestandby = musb_readl(musb->mregs, OTG_FORCESTDBY);
+}
+
+void musb_platform_restore_context(struct musb *musb,
+ struct musb_context_registers *musb_context)
+{
+ musb_writel(musb->mregs, OTG_SYSCONFIG, musb_context->otg_sysconfig);
+ musb_writel(musb->mregs, OTG_FORCESTDBY, musb_context->otg_forcestandby);
+}
+#endif
+
int musb_platform_suspend(struct musb *musb)
{
u32 l;
@@ -263,13 +281,13 @@ int musb_platform_suspend(struct musb *musb)
return 0;
/* in any role */
- l = omap_readl(OTG_FORCESTDBY);
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l |= ENABLEFORCE; /* enable MSTANDBY */
- omap_writel(l, OTG_FORCESTDBY);
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
- l = omap_readl(OTG_SYSCONFIG);
+ l = musb_readl(musb->mregs, OTG_SYSCONFIG);
l |= ENABLEWAKEUP; /* enable wakeup */
- omap_writel(l, OTG_SYSCONFIG);
+ musb_writel(musb->mregs, OTG_SYSCONFIG, l);
otg_set_suspend(musb->xceiv, 1);
@@ -295,13 +313,13 @@ static int musb_platform_resume(struct musb *musb)
else
clk_enable(musb->clock);
- l = omap_readl(OTG_SYSCONFIG);
+ l = musb_readl(musb->mregs, OTG_SYSCONFIG);
l &= ~ENABLEWAKEUP; /* disable wakeup */
- omap_writel(l, OTG_SYSCONFIG);
+ musb_writel(musb->mregs, OTG_SYSCONFIG, l);
- l = omap_readl(OTG_FORCESTDBY);
+ l = musb_readl(musb->mregs, OTG_FORCESTDBY);
l &= ~ENABLEFORCE; /* disable MSTANDBY */
- omap_writel(l, OTG_FORCESTDBY);
+ musb_writel(musb->mregs, OTG_FORCESTDBY, l);
return 0;
}
diff --git a/drivers/usb/musb/omap2430.h b/drivers/usb/musb/omap2430.h
index fbede7798ae..40b3c02ae9f 100644
--- a/drivers/usb/musb/omap2430.h
+++ b/drivers/usb/musb/omap2430.h
@@ -10,47 +10,43 @@
#ifndef __MUSB_OMAP243X_H__
#define __MUSB_OMAP243X_H__
-#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3430)
-#include <mach/hardware.h>
#include <plat/usb.h>
/*
* OMAP2430-specific definitions
*/
-#define MENTOR_BASE_OFFSET 0
-#if defined(CONFIG_ARCH_OMAP2430)
-#define OMAP_HSOTG_BASE (OMAP243X_HS_BASE)
-#elif defined(CONFIG_ARCH_OMAP3430)
-#define OMAP_HSOTG_BASE (OMAP34XX_HSUSB_OTG_BASE)
-#endif
-#define OMAP_HSOTG(offset) (OMAP_HSOTG_BASE + 0x400 + (offset))
-#define OTG_REVISION OMAP_HSOTG(0x0)
-#define OTG_SYSCONFIG OMAP_HSOTG(0x4)
+#define OTG_REVISION 0x400
+
+#define OTG_SYSCONFIG 0x404
# define MIDLEMODE 12 /* bit position */
# define FORCESTDBY (0 << MIDLEMODE)
# define NOSTDBY (1 << MIDLEMODE)
# define SMARTSTDBY (2 << MIDLEMODE)
+
# define SIDLEMODE 3 /* bit position */
# define FORCEIDLE (0 << SIDLEMODE)
# define NOIDLE (1 << SIDLEMODE)
# define SMARTIDLE (2 << SIDLEMODE)
+
# define ENABLEWAKEUP (1 << 2)
# define SOFTRST (1 << 1)
# define AUTOIDLE (1 << 0)
-#define OTG_SYSSTATUS OMAP_HSOTG(0x8)
+
+#define OTG_SYSSTATUS 0x408
# define RESETDONE (1 << 0)
-#define OTG_INTERFSEL OMAP_HSOTG(0xc)
+
+#define OTG_INTERFSEL 0x40c
# define EXTCP (1 << 2)
-# define PHYSEL 0 /* bit position */
+# define PHYSEL 0 /* bit position */
# define UTMI_8BIT (0 << PHYSEL)
# define ULPI_12PIN (1 << PHYSEL)
# define ULPI_8PIN (2 << PHYSEL)
-#define OTG_SIMENABLE OMAP_HSOTG(0x10)
+
+#define OTG_SIMENABLE 0x410
# define TM1 (1 << 0)
-#define OTG_FORCESTDBY OMAP_HSOTG(0x14)
-# define ENABLEFORCE (1 << 0)
-#endif /* CONFIG_ARCH_OMAP2430 */
+#define OTG_FORCESTDBY 0x414
+# define ENABLEFORCE (1 << 0)
#endif /* __MUSB_OMAP243X_H__ */
diff --git a/drivers/usb/musb/tusb6010.c b/drivers/usb/musb/tusb6010.c
index 88b587c703e..ab776a8d98c 100644
--- a/drivers/usb/musb/tusb6010.c
+++ b/drivers/usb/musb/tusb6010.c
@@ -1118,7 +1118,7 @@ int __init musb_platform_init(struct musb *musb)
}
musb->sync = mem->start;
- sync = ioremap(mem->start, mem->end - mem->start + 1);
+ sync = ioremap(mem->start, resource_size(mem));
if (!sync) {
pr_debug("ioremap for sync failed\n");
ret = -ENOMEM;
diff --git a/drivers/usb/musb/tusb6010_omap.c b/drivers/usb/musb/tusb6010_omap.c
index e13c77052e5..1c868096bd6 100644
--- a/drivers/usb/musb/tusb6010_omap.c
+++ b/drivers/usb/musb/tusb6010_omap.c
@@ -648,7 +648,7 @@ void dma_controller_destroy(struct dma_controller *c)
}
}
- if (!tusb_dma->multichannel && tusb_dma && tusb_dma->ch >= 0)
+ if (tusb_dma && !tusb_dma->multichannel && tusb_dma->ch >= 0)
omap_free_dma(tusb_dma->ch);
kfree(tusb_dma);
diff --git a/drivers/usb/otg/twl4030-usb.c b/drivers/usb/otg/twl4030-usb.c
index 2be9f2fa41f..3e4e9f434d7 100644
--- a/drivers/usb/otg/twl4030-usb.c
+++ b/drivers/usb/otg/twl4030-usb.c
@@ -36,7 +36,7 @@
#include <linux/i2c/twl.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
-
+#include <linux/notifier.h>
/* Register defines */
@@ -236,15 +236,6 @@
#define PMBR1 0x0D
#define GPIO_USB_4PIN_ULPI_2430C (3 << 0)
-
-
-enum linkstat {
- USB_LINK_UNKNOWN = 0,
- USB_LINK_NONE,
- USB_LINK_VBUS,
- USB_LINK_ID,
-};
-
struct twl4030_usb {
struct otg_transceiver otg;
struct device *dev;
@@ -347,10 +338,10 @@ twl4030_usb_clear_bits(struct twl4030_usb *twl, u8 reg, u8 bits)
/*-------------------------------------------------------------------------*/
-static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
+static enum usb_xceiv_events twl4030_usb_linkstat(struct twl4030_usb *twl)
{
int status;
- int linkstat = USB_LINK_UNKNOWN;
+ int linkstat = USB_EVENT_NONE;
/*
* For ID/VBUS sensing, see manual section 15.4.8 ...
@@ -368,11 +359,11 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
dev_err(twl->dev, "USB link status err %d\n", status);
else if (status & (BIT(7) | BIT(2))) {
if (status & BIT(2))
- linkstat = USB_LINK_ID;
+ linkstat = USB_EVENT_ID;
else
- linkstat = USB_LINK_VBUS;
+ linkstat = USB_EVENT_VBUS;
} else
- linkstat = USB_LINK_NONE;
+ linkstat = USB_EVENT_NONE;
dev_dbg(twl->dev, "HW_CONDITIONS 0x%02x/%d; link %d\n",
status, status, linkstat);
@@ -383,7 +374,7 @@ static enum linkstat twl4030_usb_linkstat(struct twl4030_usb *twl)
spin_lock_irq(&twl->lock);
twl->linkstat = linkstat;
- if (linkstat == USB_LINK_ID) {
+ if (linkstat == USB_EVENT_ID) {
twl->otg.default_a = true;
twl->otg.state = OTG_STATE_A_IDLE;
} else {
@@ -564,7 +555,7 @@ static ssize_t twl4030_usb_vbus_show(struct device *dev,
spin_lock_irqsave(&twl->lock, flags);
ret = sprintf(buf, "%s\n",
- (twl->linkstat == USB_LINK_VBUS) ? "on" : "off");
+ (twl->linkstat == USB_EVENT_VBUS) ? "on" : "off");
spin_unlock_irqrestore(&twl->lock, flags);
return ret;
@@ -576,17 +567,8 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
struct twl4030_usb *twl = _twl;
int status;
-#ifdef CONFIG_LOCKDEP
- /* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
- * we don't want and can't tolerate. Although it might be
- * friendlier not to borrow this thread context...
- */
- local_irq_enable();
-#endif
-
status = twl4030_usb_linkstat(twl);
- if (status != USB_LINK_UNKNOWN) {
-
+ if (status >= 0) {
/* FIXME add a set_power() method so that B-devices can
* configure the charger appropriately. It's not always
* correct to consume VBUS power, and how much current to
@@ -598,12 +580,13 @@ static irqreturn_t twl4030_usb_irq(int irq, void *_twl)
* USB_LINK_VBUS state. musb_hdrc won't care until it
* starts to handle softconnect right.
*/
- if (status == USB_LINK_NONE)
+ if (status == USB_EVENT_NONE)
twl4030_phy_suspend(twl, 0);
else
twl4030_phy_resume(twl);
- twl4030charger_usb_en(status == USB_LINK_VBUS);
+ blocking_notifier_call_chain(&twl->otg.notifier, status,
+ twl->otg.gadget);
}
sysfs_notify(&twl->dev->kobj, NULL, "vbus");
@@ -693,6 +676,8 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
if (device_create_file(&pdev->dev, &dev_attr_vbus))
dev_warn(&pdev->dev, "could not create sysfs file\n");
+ BLOCKING_INIT_NOTIFIER_HEAD(&twl->otg.notifier);
+
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
*
@@ -702,7 +687,7 @@ static int __devinit twl4030_usb_probe(struct platform_device *pdev)
* need both handles, otherwise just one suffices.
*/
twl->irq_enabled = true;
- status = request_irq(twl->irq, twl4030_usb_irq,
+ status = request_threaded_irq(twl->irq, NULL, twl4030_usb_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
"twl4030_usb", twl);
if (status < 0) {
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c480ea4c19f..c78b255e3f8 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -472,6 +472,17 @@ config USB_SERIAL_OTI6858
To compile this driver as a module, choose M here: the
module will be called oti6858.
+config USB_SERIAL_QCAUX
+ tristate "USB Qualcomm Auxiliary Serial Port Driver"
+ ---help---
+ Say Y here if you want to use the auxiliary serial ports provided
+ by many modems based on Qualcomm chipsets. These ports often use
+ a proprietary protocol called DM and cannot be used for AT- or
+ PPP-based communication.
+
+ To compile this driver as a module, choose M here: the
+ module will be called qcaux. If unsure, choose N.
+
config USB_SERIAL_QUALCOMM
tristate "USB Qualcomm Serial modem"
help
@@ -600,6 +611,14 @@ config USB_SERIAL_OPTICON
To compile this driver as a module, choose M here: the
module will be called opticon.
+config USB_SERIAL_VIVOPAY_SERIAL
+ tristate "USB ViVOpay serial interface driver"
+ help
+ Say Y here if you want to use a ViVOtech ViVOpay USB device.
+
+ To compile this driver as a module, choose M here: the
+ module will be called vivopay-serial.
+
config USB_SERIAL_DEBUG
tristate "USB Debugging Device"
help
diff --git a/drivers/usb/serial/Makefile b/drivers/usb/serial/Makefile
index 66619beb6cc..83c9e431a56 100644
--- a/drivers/usb/serial/Makefile
+++ b/drivers/usb/serial/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_USB_SERIAL_OPTICON) += opticon.o
obj-$(CONFIG_USB_SERIAL_OPTION) += option.o
obj-$(CONFIG_USB_SERIAL_OTI6858) += oti6858.o
obj-$(CONFIG_USB_SERIAL_PL2303) += pl2303.o
+obj-$(CONFIG_USB_SERIAL_QCAUX) += qcaux.o
obj-$(CONFIG_USB_SERIAL_QUALCOMM) += qcserial.o
obj-$(CONFIG_USB_SERIAL_SAFE) += safe_serial.o
obj-$(CONFIG_USB_SERIAL_SIEMENS_MPI) += siemens_mpi.o
@@ -55,4 +56,5 @@ obj-$(CONFIG_USB_SERIAL_TI) += ti_usb_3410_5052.o
obj-$(CONFIG_USB_SERIAL_VISOR) += visor.o
obj-$(CONFIG_USB_SERIAL_WHITEHEAT) += whiteheat.o
obj-$(CONFIG_USB_SERIAL_XIRCOM) += keyspan_pda.o
+obj-$(CONFIG_USB_SERIAL_VIVOPAY_SERIAL) += vivopay-serial.o
diff --git a/drivers/usb/serial/aircable.c b/drivers/usb/serial/aircable.c
index b10ac840941..365db1097bf 100644
--- a/drivers/usb/serial/aircable.c
+++ b/drivers/usb/serial/aircable.c
@@ -78,7 +78,7 @@ static int debug;
#define DRIVER_DESC "AIRcable USB Driver"
/* ID table that will be registered with USB core */
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(AIRCABLE_VID, AIRCABLE_USB_PID) },
{ },
};
@@ -468,10 +468,6 @@ static void aircable_read_bulk_callback(struct urb *urb)
if (status) {
dbg("%s - urb status = %d", __func__, status);
- if (!port->port.count) {
- dbg("%s - port is closed, exiting.", __func__);
- return;
- }
if (status == -EPROTO) {
dbg("%s - caught -EPROTO, resubmitting the urb",
__func__);
@@ -530,23 +526,19 @@ static void aircable_read_bulk_callback(struct urb *urb)
}
tty_kref_put(tty);
- /* Schedule the next read _if_ we are still open */
- if (port->port.count) {
- usb_fill_bulk_urb(port->read_urb, port->serial->dev,
- usb_rcvbulkpipe(port->serial->dev,
- port->bulk_in_endpointAddress),
- port->read_urb->transfer_buffer,
- port->read_urb->transfer_buffer_length,
- aircable_read_bulk_callback, port);
-
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev,
- "%s - failed resubmitting read urb, error %d\n",
- __func__, result);
- }
-
- return;
+ /* Schedule the next read */
+ usb_fill_bulk_urb(port->read_urb, port->serial->dev,
+ usb_rcvbulkpipe(port->serial->dev,
+ port->bulk_in_endpointAddress),
+ port->read_urb->transfer_buffer,
+ port->read_urb->transfer_buffer_length,
+ aircable_read_bulk_callback, port);
+
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result && result != -EPERM)
+ dev_err(&urb->dev->dev,
+ "%s - failed resubmitting read urb, error %d\n",
+ __func__, result);
}
/* Based on ftdi_sio.c throttle */
diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c
index a9c2dec8e3f..547c9448c28 100644
--- a/drivers/usb/serial/ark3116.c
+++ b/drivers/usb/serial/ark3116.c
@@ -50,7 +50,7 @@ static int debug;
/* usb timeout of 1 second */
#define ARK_TIMEOUT (1*HZ)
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x6547, 0x0232) },
{ USB_DEVICE(0x18ec, 0x3118) }, /* USB to IrDA adapter */
{ },
@@ -733,7 +733,6 @@ static void ark3116_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty) {
- tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (unlikely(lsr & UART_LSR_OE))
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
diff --git a/drivers/usb/serial/belkin_sa.c b/drivers/usb/serial/belkin_sa.c
index a0467bc6162..1295e44e3f1 100644
--- a/drivers/usb/serial/belkin_sa.c
+++ b/drivers/usb/serial/belkin_sa.c
@@ -103,7 +103,7 @@ static int belkin_sa_tiocmset(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(BELKIN_SA_VID, BELKIN_SA_PID) },
{ USB_DEVICE(BELKIN_OLD_VID, BELKIN_OLD_PID) },
{ USB_DEVICE(PERACOM_VID, PERACOM_PID) },
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 59eff721fcc..9f4fed1968b 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -22,6 +22,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include <linux/serial.h>
+#include <asm/unaligned.h>
#define DEFAULT_BAUD_RATE 9600
#define DEFAULT_TIMEOUT 1000
@@ -70,7 +71,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x4348, 0x5523) },
{ USB_DEVICE(0x1a86, 0x7523) },
{ },
@@ -392,16 +393,22 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
struct usb_serial_port *port = tty->driver_data;
int r;
uint16_t reg_contents;
- uint8_t break_reg[2];
+ uint8_t *break_reg;
dbg("%s()", __func__);
+ break_reg = kmalloc(2, GFP_KERNEL);
+ if (!break_reg) {
+ dev_err(&port->dev, "%s - kmalloc failed\n", __func__);
+ return;
+ }
+
r = ch341_control_in(port->serial->dev, CH341_REQ_READ_REG,
- ch341_break_reg, 0, break_reg, sizeof(break_reg));
+ ch341_break_reg, 0, break_reg, 2);
if (r < 0) {
- printk(KERN_WARNING "%s: USB control read error whilst getting"
- " break register contents.\n", __FILE__);
- return;
+ dev_err(&port->dev, "%s - USB control read error (%d)\n",
+ __func__, r);
+ goto out;
}
dbg("%s - initial ch341 break register contents - reg1: %x, reg2: %x",
__func__, break_reg[0], break_reg[1]);
@@ -416,12 +423,14 @@ static void ch341_break_ctl(struct tty_struct *tty, int break_state)
}
dbg("%s - New ch341 break register contents - reg1: %x, reg2: %x",
__func__, break_reg[0], break_reg[1]);
- reg_contents = (uint16_t)break_reg[0] | ((uint16_t)break_reg[1] << 8);
+ reg_contents = get_unaligned_le16(break_reg);
r = ch341_control_out(port->serial->dev, CH341_REQ_WRITE_REG,
ch341_break_reg, reg_contents);
if (r < 0)
- printk(KERN_WARNING "%s: USB control write error whilst setting"
- " break register contents.\n", __FILE__);
+ dev_err(&port->dev, "%s - USB control write error (%d)\n",
+ __func__, r);
+out:
+ kfree(break_reg);
}
static int ch341_tiocmset(struct tty_struct *tty, struct file *file,
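
For reference, the get_unaligned_le16() call introduced above reads the two break-register bytes as one little-endian 16-bit value (byte 0 is the low byte). A stand-alone equivalent, with illustrative register contents:

#include <stdio.h>
#include <stdint.h>

static uint16_t le16_from_bytes(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

int main(void)
{
	uint8_t break_reg[2] = { 0x9c, 0xd9 };	/* illustrative values only */

	printf("0x%04x\n", le16_from_bytes(break_reg));	/* 0xd99c */
	return 0;
}
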
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index bd254ec97d1..507382b0a9e 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -55,7 +55,7 @@ static int cp210x_carrier_raised(struct usb_serial_port *p);
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */
{ USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
{ USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */
@@ -91,11 +91,12 @@ static struct usb_device_id id_table [] = {
{ USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */
{ USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */
{ USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */
+ { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */
{ USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */
{ USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */
{ USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demostration module */
- { USB_DEVICE(0x10c4, 0x8293) }, /* Telegesys ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesys ETRX2USB */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
{ USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. */
@@ -612,7 +613,7 @@ static void cp210x_set_termios(struct tty_struct *tty,
baud);
if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
- dbg("Baud rate requested not supported by device\n");
+ dbg("Baud rate requested not supported by device");
baud = tty_termios_baud_rate(old_termios);
}
}
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index b0f6402a91c..f744ab7a3b1 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -70,7 +70,7 @@ static void cyberjack_read_int_callback(struct urb *urb);
static void cyberjack_read_bulk_callback(struct urb *urb);
static void cyberjack_write_bulk_callback(struct urb *urb);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(CYBERJACK_VENDOR_ID, CYBERJACK_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -391,11 +391,10 @@ static void cyberjack_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (!tty) {
- dbg("%s - ignoring since device not open\n", __func__);
+ dbg("%s - ignoring since device not open", __func__);
return;
}
if (urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index a591ebec0f8..baf74b44e6e 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -66,17 +66,15 @@
#include <linux/serial.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include "cypress_m8.h"
-#ifdef CONFIG_USB_SERIAL_DEBUG
- static int debug = 1;
-#else
- static int debug;
-#endif
+static int debug;
static int stats;
static int interval;
+static int unstable_bauds;
/*
* Version Information
@@ -89,24 +87,24 @@ static int interval;
#define CYPRESS_BUF_SIZE 1024
#define CYPRESS_CLOSING_WAIT (30*HZ)
-static struct usb_device_id id_table_earthmate [] = {
+static const struct usb_device_id id_table_earthmate[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_cyphidcomrs232 [] = {
+static const struct usb_device_id id_table_cyphidcomrs232[] = {
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
{ USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_nokiaca42v2 [] = {
+static const struct usb_device_id id_table_nokiaca42v2[] = {
{ USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB) },
{ USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) },
{ USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) },
@@ -295,6 +293,9 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate)
struct cypress_private *priv;
priv = usb_get_serial_port_data(port);
+ if (unstable_bauds)
+ return new_rate;
+
/*
* The general purpose firmware for the Cypress M8 allows for
* a maximum speed of 57600bps (I have no idea whether DeLorme
@@ -344,7 +345,8 @@ static int cypress_serial_control(struct tty_struct *tty,
{
int new_baudrate = 0, retval = 0, tries = 0;
struct cypress_private *priv;
- __u8 feature_buffer[5];
+ u8 *feature_buffer;
+ const unsigned int feature_len = 5;
unsigned long flags;
dbg("%s", __func__);
@@ -354,17 +356,18 @@ static int cypress_serial_control(struct tty_struct *tty,
if (!priv->comm_is_ok)
return -ENODEV;
+ feature_buffer = kcalloc(feature_len, sizeof(u8), GFP_KERNEL);
+ if (!feature_buffer)
+ return -ENOMEM;
+
switch (cypress_request_type) {
case CYPRESS_SET_CONFIG:
- new_baudrate = priv->baud_rate;
/* 0 means 'Hang up' so doesn't change the true bit rate */
- if (baud_rate == 0)
- new_baudrate = priv->baud_rate;
- /* Change of speed ? */
- else if (baud_rate != priv->baud_rate) {
+ new_baudrate = priv->baud_rate;
+ if (baud_rate && baud_rate != priv->baud_rate) {
dbg("%s - baud rate is changing", __func__);
retval = analyze_baud_rate(port, baud_rate);
- if (retval >= 0) {
+ if (retval >= 0) {
new_baudrate = retval;
dbg("%s - New baud rate set to %d",
__func__, new_baudrate);
@@ -373,9 +376,8 @@ static int cypress_serial_control(struct tty_struct *tty,
dbg("%s - baud rate is being sent as %d",
__func__, new_baudrate);
- memset(feature_buffer, 0, sizeof(feature_buffer));
/* fill the feature_buffer with new configuration */
- *((u_int32_t *)feature_buffer) = new_baudrate;
+ put_unaligned_le32(new_baudrate, feature_buffer);
feature_buffer[4] |= data_bits; /* assign data bits in 2 bit space ( max 3 ) */
/* 1 bit gap */
feature_buffer[4] |= (stop_bits << 3); /* assign stop bits in 1 bit space */
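
A user-space sketch of the 5-byte feature buffer built above: bytes 0-3 carry the baud rate little-endian (what put_unaligned_le32() writes) and byte 4 packs the line settings. Only the data-bit and stop-bit fields visible in this hunk are modelled, and the values in main() are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static void fill_feature_buffer(uint8_t buf[5], uint32_t baud,
				uint8_t data_bits, uint8_t stop_bits)
{
	memset(buf, 0, 5);
	buf[0] = baud & 0xff;		/* little-endian baud rate, as put_unaligned_le32() */
	buf[1] = (baud >> 8) & 0xff;
	buf[2] = (baud >> 16) & 0xff;
	buf[3] = (baud >> 24) & 0xff;
	buf[4] |= data_bits;		/* data bits in a 2-bit field */
	/* 1 bit gap */
	buf[4] |= stop_bits << 3;	/* stop bits in a 1-bit field */
}

int main(void)
{
	uint8_t buf[5];

	fill_feature_buffer(buf, 4800, 3, 0);
	printf("%02x %02x %02x %02x %02x\n",
	       buf[0], buf[1], buf[2], buf[3], buf[4]);	/* c0 12 00 00 03 */
	return 0;
}
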
@@ -397,15 +399,15 @@ static int cypress_serial_control(struct tty_struct *tty,
HID_REQ_SET_REPORT,
USB_DIR_OUT | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0x0300, 0, feature_buffer,
- sizeof(feature_buffer), 500);
+ feature_len, 500);
if (tries++ >= 3)
break;
- } while (retval != sizeof(feature_buffer) &&
+ } while (retval != feature_len &&
retval != -ENODEV);
- if (retval != sizeof(feature_buffer)) {
+ if (retval != feature_len) {
dev_err(&port->dev, "%s - failed sending serial "
"line settings - %d\n", __func__, retval);
cypress_set_dead(port);
@@ -425,43 +427,42 @@ static int cypress_serial_control(struct tty_struct *tty,
/* Not implemented for this device,
and if we try to do it we're likely
to crash the hardware. */
- return -ENOTTY;
+ retval = -ENOTTY;
+ goto out;
}
dbg("%s - retreiving serial line settings", __func__);
- /* set initial values in feature buffer */
- memset(feature_buffer, 0, sizeof(feature_buffer));
-
do {
retval = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
HID_REQ_GET_REPORT,
USB_DIR_IN | USB_RECIP_INTERFACE | USB_TYPE_CLASS,
0x0300, 0, feature_buffer,
- sizeof(feature_buffer), 500);
+ feature_len, 500);
if (tries++ >= 3)
break;
- } while (retval != sizeof(feature_buffer)
+ } while (retval != feature_len
&& retval != -ENODEV);
- if (retval != sizeof(feature_buffer)) {
+ if (retval != feature_len) {
dev_err(&port->dev, "%s - failed to retrieve serial "
"line settings - %d\n", __func__, retval);
cypress_set_dead(port);
- return retval;
+ goto out;
} else {
spin_lock_irqsave(&priv->lock, flags);
/* store the config in one byte, and later
use bit masks to check values */
priv->current_config = feature_buffer[4];
- priv->baud_rate = *((u_int32_t *)feature_buffer);
+ priv->baud_rate = get_unaligned_le32(feature_buffer);
spin_unlock_irqrestore(&priv->lock, flags);
}
}
spin_lock_irqsave(&priv->lock, flags);
++priv->cmd_count;
spin_unlock_irqrestore(&priv->lock, flags);
-
+out:
+ kfree(feature_buffer);
return retval;
} /* cypress_serial_control */
@@ -690,7 +691,6 @@ static void cypress_dtr_rts(struct usb_serial_port *port, int on)
{
struct cypress_private *priv = usb_get_serial_port_data(port);
/* drop dtr and rts */
- priv = usb_get_serial_port_data(port);
spin_lock_irq(&priv->lock);
if (on == 0)
priv->line_control = 0;
@@ -1307,13 +1307,9 @@ static void cypress_read_int_callback(struct urb *urb)
spin_unlock_irqrestore(&priv->lock, flags);
/* process read if there is data other than line status */
- if (tty && (bytes > i)) {
- bytes = tty_buffer_request_room(tty, bytes);
- for (; i < bytes ; ++i) {
- dbg("pushing byte number %d - %d - %c", i, data[i],
- data[i]);
- tty_insert_flip_char(tty, data[i], tty_flag);
- }
+ if (tty && bytes > i) {
+ tty_insert_flip_string_fixed_flag(tty, data + i,
+ bytes - i, tty_flag);
tty_flip_buffer_push(tty);
}
@@ -1325,9 +1321,9 @@ static void cypress_read_int_callback(struct urb *urb)
continue_read:
tty_kref_put(tty);
- /* Continue trying to always read... unless the port has closed. */
+ /* Continue trying to always read */
- if (port->port.count > 0 && priv->comm_is_ok) {
+ if (priv->comm_is_ok) {
usb_fill_int_urb(port->interrupt_in_urb, port->serial->dev,
usb_rcvintpipe(port->serial->dev,
port->interrupt_in_endpointAddress),
@@ -1336,7 +1332,7 @@ continue_read:
cypress_read_int_callback, port,
priv->read_urb_interval);
result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
- if (result) {
+ if (result && result != -EPERM) {
dev_err(&urb->dev->dev, "%s - failed resubmitting "
"read urb, error %d\n", __func__,
result);
@@ -1650,3 +1646,5 @@ module_param(stats, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(stats, "Enable statistics or not");
module_param(interval, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(interval, "Overrides interrupt interval");
+module_param(unstable_bauds, bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(unstable_bauds, "Allow unstable baud rates");
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c
index 68e80be6b9e..68b0aa5e516 100644
--- a/drivers/usb/serial/digi_acceleport.c
+++ b/drivers/usb/serial/digi_acceleport.c
@@ -470,18 +470,18 @@ static int digi_read_oob_callback(struct urb *urb);
static int debug;
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_2 [] = {
+static const struct usb_device_id id_table_2[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_4 [] = {
+static const struct usb_device_id id_table_4[] = {
{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
{ } /* Terminating entry */
};
@@ -1262,10 +1262,10 @@ static void digi_write_bulk_callback(struct urb *urb)
return;
}
- /* try to send any buffered data on this port, if it is open */
+ /* try to send any buffered data on this port */
spin_lock(&priv->dp_port_lock);
priv->dp_write_urb_in_use = 0;
- if (port->port.count && priv->dp_out_buf_len > 0) {
+ if (priv->dp_out_buf_len > 0) {
*((unsigned char *)(port->write_urb->transfer_buffer))
= (unsigned char)DIGI_CMD_SEND_DATA;
*((unsigned char *)(port->write_urb->transfer_buffer) + 1)
@@ -1288,7 +1288,7 @@ static void digi_write_bulk_callback(struct urb *urb)
schedule_work(&priv->dp_wakeup_work);
spin_unlock(&priv->dp_port_lock);
- if (ret)
+ if (ret && ret != -EPERM)
dev_err(&port->dev,
"%s: usb_submit_urb failed, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
@@ -1353,8 +1353,7 @@ static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
struct digi_port *priv = usb_get_serial_port_data(port);
struct ktermios not_termios;
- dbg("digi_open: TOP: port=%d, open_count=%d",
- priv->dp_port_num, port->port.count);
+ dbg("digi_open: TOP: port=%d", priv->dp_port_num);
/* be sure the device is started up */
if (digi_startup_device(port->serial) != 0)
@@ -1393,8 +1392,7 @@ static void digi_close(struct usb_serial_port *port)
unsigned char buf[32];
struct digi_port *priv = usb_get_serial_port_data(port);
- dbg("digi_close: TOP: port=%d, open_count=%d",
- priv->dp_port_num, port->port.count);
+ dbg("digi_close: TOP: port=%d", priv->dp_port_num);
mutex_lock(&port->serial->disc_mutex);
/* if disconnected, just clear flags */
@@ -1629,7 +1627,7 @@ static void digi_read_bulk_callback(struct urb *urb)
/* continue read */
urb->dev = port->serial->dev;
ret = usb_submit_urb(urb, GFP_ATOMIC);
- if (ret != 0) {
+ if (ret != 0 && ret != -EPERM) {
dev_err(&port->dev,
"%s: failed resubmitting urb, ret=%d, port=%d\n",
__func__, ret, priv->dp_port_num);
@@ -1658,12 +1656,11 @@ static int digi_read_inb_callback(struct urb *urb)
int port_status = ((unsigned char *)urb->transfer_buffer)[2];
unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
int flag, throttled;
- int i;
int status = urb->status;
/* do not process callbacks on closed ports */
/* but do continue the read chain */
- if (port->port.count == 0)
+ if (urb->status == -ENOENT)
return 0;
/* short/multiple packet check */
@@ -1705,17 +1702,9 @@ static int digi_read_inb_callback(struct urb *urb)
/* data length is len-1 (one byte of len is port_status) */
--len;
-
- len = tty_buffer_request_room(tty, len);
if (len > 0) {
- /* Hot path */
- if (flag == TTY_NORMAL)
- tty_insert_flip_string(tty, data, len);
- else {
- for (i = 0; i < len; i++)
- tty_insert_flip_char(tty,
- data[i], flag);
- }
+ tty_insert_flip_string_fixed_flag(tty, data, len,
+ flag);
tty_flip_buffer_push(tty);
}
}
@@ -1776,8 +1765,7 @@ static int digi_read_oob_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
rts = 0;
- if (port->port.count)
- rts = tty->termios->c_cflag & CRTSCTS;
+ rts = tty->termios->c_cflag & CRTSCTS;
if (opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
spin_lock(&priv->dp_port_lock);
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index 7dd0e3eadbe..5f740a1eaca 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -93,7 +93,7 @@ static void empeg_init_termios(struct tty_struct *tty);
static void empeg_write_bulk_callback(struct urb *urb);
static void empeg_read_bulk_callback(struct urb *urb);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(EMPEG_VENDOR_ID, EMPEG_PRODUCT_ID) },
{ } /* Terminating entry */
};
@@ -346,7 +346,6 @@ static void empeg_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
bytes_in += urb->actual_length;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 7638828e731..6af0dfa5f5a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -33,12 +33,12 @@
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
-#include <linux/smp_lock.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
+#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/serial.h>
@@ -88,10 +88,10 @@ struct ftdi_private {
unsigned int latency; /* latency setting in use */
spinlock_t tx_lock; /* spinlock for transmit state */
- unsigned long tx_bytes;
unsigned long tx_outstanding_bytes;
unsigned long tx_outstanding_urbs;
unsigned short max_packet_size;
+ struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() */
};
/* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -614,6 +614,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(FTDI_VID, FTDI_OCEANIC_PID) },
{ USB_DEVICE(TTI_VID, TTI_QL355P_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_RM_CANVIEW_PID) },
+ { USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USOTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USTL4_PID) },
{ USB_DEVICE(BANDB_VID, BANDB_USO9ML2_PID) },
@@ -737,6 +738,10 @@ static struct usb_device_id id_table_combined [] = {
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
{ USB_DEVICE(FTDI_VID, HAMEG_HO870_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_SR_RADIO_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_HD_RADIO_PID) },
+ { USB_DEVICE(FTDI_VID, MJSG_XM_RADIO_PID) },
{ }, /* Optional parameter entry */
{ } /* Terminating entry */
};
@@ -812,7 +817,7 @@ static struct usb_serial_driver ftdi_sio_device = {
.name = "ftdi_sio",
},
.description = "FTDI USB Serial Device",
- .usb_driver = &ftdi_driver ,
+ .usb_driver = &ftdi_driver,
.id_table = id_table_combined,
.num_ports = 1,
.probe = ftdi_sio_probe,
@@ -828,8 +833,8 @@ static struct usb_serial_driver ftdi_sio_device = {
.chars_in_buffer = ftdi_chars_in_buffer,
.read_bulk_callback = ftdi_read_bulk_callback,
.write_bulk_callback = ftdi_write_bulk_callback,
- .tiocmget = ftdi_tiocmget,
- .tiocmset = ftdi_tiocmset,
+ .tiocmget = ftdi_tiocmget,
+ .tiocmset = ftdi_tiocmset,
.ioctl = ftdi_ioctl,
.set_termios = ftdi_set_termios,
.break_ctl = ftdi_break_ctl,
@@ -935,7 +940,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
unsigned int clear)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char *buf;
unsigned urb_value;
int rv;
@@ -944,10 +948,6 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
return 0; /* no change */
}
- buf = kmalloc(1, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
clear &= ~set; /* 'set' takes precedence over 'clear' */
urb_value = 0;
if (clear & TIOCM_DTR)
@@ -963,9 +963,7 @@ static int update_mctrl(struct usb_serial_port *port, unsigned int set,
FTDI_SIO_SET_MODEM_CTRL_REQUEST,
FTDI_SIO_SET_MODEM_CTRL_REQUEST_TYPE,
urb_value, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
- kfree(buf);
+ NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dbg("%s Error from MODEM_CTRL urb: DTR %s, RTS %s",
__func__,
@@ -1124,16 +1122,11 @@ static __u32 get_ftdi_divisor(struct tty_struct *tty,
static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char *buf;
__u16 urb_value;
__u16 urb_index;
__u32 urb_index_value;
int rv;
- buf = kmalloc(1, GFP_NOIO);
- if (!buf)
- return -ENOMEM;
-
urb_index_value = get_ftdi_divisor(tty, port);
urb_value = (__u16)urb_index_value;
urb_index = (__u16)(urb_index_value >> 16);
@@ -1146,9 +1139,7 @@ static int change_speed(struct tty_struct *tty, struct usb_serial_port *port)
FTDI_SIO_SET_BAUDRATE_REQUEST,
FTDI_SIO_SET_BAUDRATE_REQUEST_TYPE,
urb_value, urb_index,
- buf, 0, WDR_SHORT_TIMEOUT);
-
- kfree(buf);
+ NULL, 0, WDR_SHORT_TIMEOUT);
return rv;
}
@@ -1156,8 +1147,7 @@ static int write_latency_timer(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- char buf[1];
- int rv = 0;
+ int rv;
int l = priv->latency;
if (priv->flags & ASYNC_LOW_LATENCY)
@@ -1170,8 +1160,7 @@ static int write_latency_timer(struct usb_serial_port *port)
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
l, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
+ NULL, 0, WDR_TIMEOUT);
if (rv < 0)
dev_err(&port->dev, "Unable to write latency timer: %i\n", rv);
return rv;
@@ -1181,24 +1170,29 @@ static int read_latency_timer(struct usb_serial_port *port)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- unsigned short latency = 0;
- int rv = 0;
-
+ unsigned char *buf;
+ int rv;
dbg("%s", __func__);
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
rv = usb_control_msg(udev,
usb_rcvctrlpipe(udev, 0),
FTDI_SIO_GET_LATENCY_TIMER_REQUEST,
FTDI_SIO_GET_LATENCY_TIMER_REQUEST_TYPE,
0, priv->interface,
- (char *) &latency, 1, WDR_TIMEOUT);
-
- if (rv < 0) {
+ buf, 1, WDR_TIMEOUT);
+ if (rv < 0)
dev_err(&port->dev, "Unable to read latency timer: %i\n", rv);
- return -EIO;
- }
- return latency;
+ else
+ priv->latency = buf[0];
+
+ kfree(buf);
+
+ return rv;
}
static int get_serial_info(struct usb_serial_port *port,
@@ -1229,7 +1223,7 @@ static int set_serial_info(struct tty_struct *tty,
if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
return -EFAULT;
- lock_kernel();
+ mutex_lock(&priv->cfg_lock);
old_priv = *priv;
/* Do error checking and permission checking */
@@ -1237,7 +1231,7 @@ static int set_serial_info(struct tty_struct *tty,
if (!capable(CAP_SYS_ADMIN)) {
if (((new_serial.flags & ~ASYNC_USR_MASK) !=
(priv->flags & ~ASYNC_USR_MASK))) {
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
return -EPERM;
}
priv->flags = ((priv->flags & ~ASYNC_USR_MASK) |
@@ -1248,7 +1242,7 @@ static int set_serial_info(struct tty_struct *tty,
if ((new_serial.baud_base != priv->baud_base) &&
(new_serial.baud_base < 9600)) {
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
return -EINVAL;
}
@@ -1278,11 +1272,11 @@ check_and_exit:
(priv->flags & ASYNC_SPD_MASK)) ||
(((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
(old_priv.custom_divisor != priv->custom_divisor))) {
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
change_speed(tty, port);
}
else
- unlock_kernel();
+ mutex_unlock(&priv->cfg_lock);
return 0;
} /* set_serial_info */
@@ -1338,20 +1332,20 @@ static void ftdi_determine_type(struct usb_serial_port *port)
__func__);
}
} else if (version < 0x200) {
- /* Old device. Assume its the original SIO. */
+ /* Old device. Assume it's the original SIO. */
priv->chip_type = SIO;
priv->baud_base = 12000000 / 16;
priv->write_offset = 1;
} else if (version < 0x400) {
- /* Assume its an FT8U232AM (or FT8U245AM) */
+ /* Assume it's an FT8U232AM (or FT8U245AM) */
/* (It might be a BM because of the iSerialNumber bug,
* but it will still work as an AM device.) */
priv->chip_type = FT8U232AM;
} else if (version < 0x600) {
- /* Assume its an FT232BM (or FT245BM) */
+ /* Assume it's an FT232BM (or FT245BM) */
priv->chip_type = FT232BM;
} else {
- /* Assume its an FT232R */
+ /* Assume it's an FT232R */
priv->chip_type = FT232RL;
}
dev_info(&udev->dev, "Detected %s\n", ftdi_chip_name[priv->chip_type]);
@@ -1371,7 +1365,7 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
unsigned num_endpoints;
- int i = 0;
+ int i;
num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
@@ -1423,7 +1417,7 @@ static ssize_t store_latency_timer(struct device *dev,
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
int v = simple_strtoul(valbuf, NULL, 10);
- int rv = 0;
+ int rv;
priv->latency = v;
rv = write_latency_timer(port);
@@ -1440,9 +1434,8 @@ static ssize_t store_event_char(struct device *dev,
struct usb_serial_port *port = to_usb_serial_port(dev);
struct ftdi_private *priv = usb_get_serial_port_data(port);
struct usb_device *udev = port->serial->dev;
- char buf[1];
int v = simple_strtoul(valbuf, NULL, 10);
- int rv = 0;
+ int rv;
dbg("%s: setting event char = %i", __func__, v);
@@ -1451,8 +1444,7 @@ static ssize_t store_event_char(struct device *dev,
FTDI_SIO_SET_EVENT_CHAR_REQUEST,
FTDI_SIO_SET_EVENT_CHAR_REQUEST_TYPE,
v, priv->interface,
- buf, 0, WDR_TIMEOUT);
-
+ NULL, 0, WDR_TIMEOUT);
if (rv < 0) {
dbg("Unable to write event character: %i", rv);
return -EIO;
@@ -1551,9 +1543,9 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
kref_init(&priv->kref);
spin_lock_init(&priv->tx_lock);
+ mutex_init(&priv->cfg_lock);
init_waitqueue_head(&priv->delta_msr_wait);
- /* This will push the characters through immediately rather
- than queue a task to deliver them */
+
priv->flags = ASYNC_LOW_LATENCY;
if (quirk && quirk->port_probe)
@@ -1585,7 +1577,8 @@ static int ftdi_sio_port_probe(struct usb_serial_port *port)
ftdi_determine_type(port);
ftdi_set_max_packet_size(port);
- read_latency_timer(port);
+ if (read_latency_timer(port) < 0)
+ priv->latency = 16;
create_sysfs_attrs(port);
return 0;
}
@@ -1630,8 +1623,6 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial)
{
struct usb_device *udev = serial->dev;
int latency = ndi_latency_timer;
- int rv = 0;
- char buf[1];
if (latency == 0)
latency = 1;
@@ -1641,10 +1632,11 @@ static int ftdi_NDI_device_setup(struct usb_serial *serial)
dbg("%s setting NDI device latency to %d", __func__, latency);
dev_info(&udev->dev, "NDI device with a latency value of %d", latency);
- rv = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
+ /* FIXME: errors are not returned */
+ usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
FTDI_SIO_SET_LATENCY_TIMER_REQUEST,
FTDI_SIO_SET_LATENCY_TIMER_REQUEST_TYPE,
- latency, 0, buf, 0, WDR_TIMEOUT);
+ latency, 0, NULL, 0, WDR_TIMEOUT);
return 0;
}
@@ -1720,7 +1712,7 @@ static int ftdi_submit_read_urb(struct usb_serial_port *port, gfp_t mem_flags)
urb->transfer_buffer_length,
ftdi_read_bulk_callback, port);
result = usb_submit_urb(urb, mem_flags);
- if (result)
+ if (result && result != -EPERM)
dev_err(&port->dev,
"%s - failed submitting read urb, error %d\n",
__func__, result);
@@ -1732,16 +1724,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
struct usb_device *dev = port->serial->dev;
struct ftdi_private *priv = usb_get_serial_port_data(port);
unsigned long flags;
-
- int result = 0;
- char buf[1]; /* Needed for the usb_control_msg I think */
+ int result;
dbg("%s", __func__);
- spin_lock_irqsave(&priv->tx_lock, flags);
- priv->tx_bytes = 0;
- spin_unlock_irqrestore(&priv->tx_lock, flags);
-
write_latency_timer(port);
/* No error checking for this (will get errors later anyway) */
@@ -1749,7 +1735,7 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
FTDI_SIO_RESET_REQUEST, FTDI_SIO_RESET_REQUEST_TYPE,
FTDI_SIO_RESET_SIO,
- priv->interface, buf, 0, WDR_TIMEOUT);
+ priv->interface, NULL, 0, WDR_TIMEOUT);
/* Termios defaults are set by usb_serial_init. We don't change
port->tty->termios - this would lose speed settings, etc.
@@ -1777,7 +1763,6 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
{
struct ftdi_private *priv = usb_get_serial_port_data(port);
- char buf[1];
mutex_lock(&port->serial->disc_mutex);
if (!port->serial->disconnected) {
@@ -1786,7 +1771,7 @@ static void ftdi_dtr_rts(struct usb_serial_port *port, int on)
usb_sndctrlpipe(port->serial->dev, 0),
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
- 0, priv->interface, buf, 0,
+ 0, priv->interface, NULL, 0,
WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "error from flowcontrol urb\n");
}
@@ -1847,7 +1832,7 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
spin_lock_irqsave(&priv->tx_lock, flags);
if (priv->tx_outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->tx_lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
priv->tx_outstanding_urbs++;
@@ -1927,7 +1912,6 @@ static int ftdi_write(struct tty_struct *tty, struct usb_serial_port *port,
} else {
spin_lock_irqsave(&priv->tx_lock, flags);
priv->tx_outstanding_bytes += count;
- priv->tx_bytes += count;
spin_unlock_irqrestore(&priv->tx_lock, flags);
}
@@ -2154,8 +2138,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- __u16 urb_value = 0;
- char buf[1];
+ __u16 urb_value;
/* break_state = -1 to turn on break, and 0 to turn off break */
/* see drivers/char/tty_io.c to see it used */
@@ -2171,7 +2154,7 @@ static void ftdi_break_ctl(struct tty_struct *tty, int break_state)
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
urb_value , priv->interface,
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "%s FAILED to enable/disable break state "
"(state was %d)\n", __func__, break_state);
}
@@ -2195,7 +2178,6 @@ static void ftdi_set_termios(struct tty_struct *tty,
struct ktermios *termios = tty->termios;
unsigned int cflag = termios->c_cflag;
__u16 urb_value; /* will hold the new flags */
- char buf[1]; /* Perhaps I should dynamically alloc this? */
/* Added for xon/xoff support */
unsigned int iflag = termios->c_iflag;
@@ -2246,12 +2228,10 @@ static void ftdi_set_termios(struct tty_struct *tty,
}
if (cflag & CSIZE) {
switch (cflag & CSIZE) {
- case CS5: urb_value |= 5; dbg("Setting CS5"); break;
- case CS6: urb_value |= 6; dbg("Setting CS6"); break;
case CS7: urb_value |= 7; dbg("Setting CS7"); break;
case CS8: urb_value |= 8; dbg("Setting CS8"); break;
default:
- dev_err(&port->dev, "CSIZE was set but not CS5-CS8\n");
+ dev_err(&port->dev, "CSIZE was set but not CS7-CS8\n");
}
}
@@ -2263,7 +2243,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_DATA_REQUEST,
FTDI_SIO_SET_DATA_REQUEST_TYPE,
urb_value , priv->interface,
- buf, 0, WDR_SHORT_TIMEOUT) < 0) {
+ NULL, 0, WDR_SHORT_TIMEOUT) < 0) {
dev_err(&port->dev, "%s FAILED to set "
"databits/stopbits/parity\n", __func__);
}
@@ -2275,7 +2255,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0, priv->interface,
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev,
"%s error from disable flowcontrol urb\n",
__func__);
@@ -2301,7 +2281,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0 , (FTDI_SIO_RTS_CTS_HS | priv->interface),
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev,
"urb failed to set to rts/cts flow control\n");
}
@@ -2333,7 +2313,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
urb_value , (FTDI_SIO_XON_XOFF_HS
| priv->interface),
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev, "urb failed to set to "
"xon/xoff flow control\n");
}
@@ -2347,7 +2327,7 @@ static void ftdi_set_termios(struct tty_struct *tty,
FTDI_SIO_SET_FLOW_CTRL_REQUEST,
FTDI_SIO_SET_FLOW_CTRL_REQUEST_TYPE,
0, priv->interface,
- buf, 0, WDR_TIMEOUT) < 0) {
+ NULL, 0, WDR_TIMEOUT) < 0) {
dev_err(&port->dev,
"urb failed to clear flow control\n");
}
@@ -2361,21 +2341,22 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
{
struct usb_serial_port *port = tty->driver_data;
struct ftdi_private *priv = usb_get_serial_port_data(port);
- unsigned char buf[2];
+ unsigned char *buf;
+ int len;
int ret;
dbg("%s TIOCMGET", __func__);
+
+ buf = kmalloc(2, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ /*
+ * The 8U232AM returns a two byte value (the SIO a 1 byte value) in
+ * the same format as the data returned from the IN endpoint.
+ */
switch (priv->chip_type) {
case SIO:
- /* Request the status from the device */
- ret = usb_control_msg(port->serial->dev,
- usb_rcvctrlpipe(port->serial->dev, 0),
- FTDI_SIO_GET_MODEM_STATUS_REQUEST,
- FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
- 0, 0,
- buf, 1, WDR_TIMEOUT);
- if (ret < 0)
- return ret;
+ len = 1;
break;
case FT8U232AM:
case FT232BM:
@@ -2383,27 +2364,30 @@ static int ftdi_tiocmget(struct tty_struct *tty, struct file *file)
case FT232RL:
case FT2232H:
case FT4232H:
- /* the 8U232AM returns a two byte value (the sio is a 1 byte
- value) - in the same format as the data returned from the in
- point */
- ret = usb_control_msg(port->serial->dev,
- usb_rcvctrlpipe(port->serial->dev, 0),
- FTDI_SIO_GET_MODEM_STATUS_REQUEST,
- FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
- 0, priv->interface,
- buf, 2, WDR_TIMEOUT);
- if (ret < 0)
- return ret;
+ len = 2;
break;
default:
- return -EFAULT;
+ ret = -EFAULT;
+ goto out;
}
- return (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
+ ret = usb_control_msg(port->serial->dev,
+ usb_rcvctrlpipe(port->serial->dev, 0),
+ FTDI_SIO_GET_MODEM_STATUS_REQUEST,
+ FTDI_SIO_GET_MODEM_STATUS_REQUEST_TYPE,
+ 0, priv->interface,
+ buf, len, WDR_TIMEOUT);
+ if (ret < 0)
+ goto out;
+
+ ret = (buf[0] & FTDI_SIO_DSR_MASK ? TIOCM_DSR : 0) |
(buf[0] & FTDI_SIO_CTS_MASK ? TIOCM_CTS : 0) |
(buf[0] & FTDI_SIO_RI_MASK ? TIOCM_RI : 0) |
(buf[0] & FTDI_SIO_RLSD_MASK ? TIOCM_CD : 0) |
priv->last_dtr_rts;
+out:
+ kfree(buf);
+ return ret;
}
static int ftdi_tiocmset(struct tty_struct *tty, struct file *file,
@@ -2508,8 +2492,7 @@ void ftdi_unthrottle(struct tty_struct *tty)
port->throttled = port->throttle_req = 0;
spin_unlock_irqrestore(&port->lock, flags);
- /* Resubmit urb if throttled and open. */
- if (was_throttled && test_bit(ASYNCB_INITIALIZED, &port->port.flags))
+ if (was_throttled)
ftdi_submit_read_urb(port, GFP_KERNEL);
}
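
The recurring change in the ftdi_sio.c hunks above (and in several of the drivers that follow) is that buffers handed to usb_control_msg() are kmalloc()'d instead of living on the stack, because USB transfer buffers must be DMA-able, and requests that carry no payload simply pass NULL with length 0. A minimal sketch of both shapes, assuming ordinary kernel context; the example_* helpers and the 5000 ms timeout are illustrative and not part of the driver:

#include <linux/slab.h>
#include <linux/usb.h>

#define EXAMPLE_CTRL_TIMEOUT	5000	/* ms, arbitrary for this sketch */

/* Read one byte via a vendor control request into a kmalloc()'d buffer;
 * a stack array here would not be DMA-safe. */
static int example_vendor_read_byte(struct usb_device *udev, u8 request,
				    u8 reqtype, u16 index, u8 *value)
{
	u8 *buf;
	int rv;

	buf = kmalloc(1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	rv = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request, reqtype,
			     0, index, buf, 1, EXAMPLE_CTRL_TIMEOUT);
	if (rv >= 0)
		*value = buf[0];

	kfree(buf);
	return rv;
}

/* A request with no payload needs no buffer at all, which is why the dummy
 * char buf[1] arrays above are replaced by NULL/0. */
static int example_vendor_write_nodata(struct usb_device *udev, u8 request,
				       u8 reqtype, u16 value, u16 index)
{
	return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request,
			       reqtype, value, index, NULL, 0,
			       EXAMPLE_CTRL_TIMEOUT);
}
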
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index b0e0d64f822..ff9bf80327a 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -28,13 +28,13 @@
#define FTDI_SIO_SET_FLOW_CTRL 2 /* Set flow control register */
#define FTDI_SIO_SET_BAUD_RATE 3 /* Set baud rate */
#define FTDI_SIO_SET_DATA 4 /* Set the data characteristics of the port */
-#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modern status register */
+#define FTDI_SIO_GET_MODEM_STATUS 5 /* Retrieve current value of modem status register */
#define FTDI_SIO_SET_EVENT_CHAR 6 /* Set the event character */
#define FTDI_SIO_SET_ERROR_CHAR 7 /* Set the error character */
#define FTDI_SIO_SET_LATENCY_TIMER 9 /* Set the latency timer */
#define FTDI_SIO_GET_LATENCY_TIMER 10 /* Get the latency timer */
-/* Interface indicies for FT2232, FT2232H and FT4232H devices*/
+/* Interface indices for FT2232, FT2232H and FT4232H devices */
#define INTERFACE_A 1
#define INTERFACE_B 2
#define INTERFACE_C 3
@@ -270,7 +270,7 @@ typedef enum {
* BmRequestType: 0100 0000b
* bRequest: FTDI_SIO_SET_FLOW_CTRL
* wValue: Xoff/Xon
- * wIndex: Protocol/Port - hIndex is protocl / lIndex is port
+ * wIndex: Protocol/Port - hIndex is protocol / lIndex is port
* wLength: 0
* Data: None
*
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index c8951aeed98..0727e198503 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -22,7 +22,7 @@
#define FTDI_8U232AM_ALT_PID 0x6006 /* FTDI's alternate PID for above */
#define FTDI_8U2232C_PID 0x6010 /* Dual channel device */
#define FTDI_4232H_PID 0x6011 /* Quad channel hi-speed device */
-#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
+#define FTDI_SIO_PID 0x8372 /* Product Id SIO application of 8U100AX */
#define FTDI_232RL_PID 0xFBFA /* Product ID for FT232RL */
@@ -49,7 +49,7 @@
#define LMI_LM3S_DEVEL_BOARD_PID 0xbcd8
#define LMI_LM3S_EVAL_BOARD_PID 0xbcd9
-#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmBH */
+#define FTDI_TURTELIZER_PID 0xBDC8 /* JTAG/RS-232 adapter by egnite GmbH */
/* OpenDCC (www.opendcc.de) product id */
#define FTDI_OPENDCC_PID 0xBFD8
@@ -185,7 +185,7 @@
#define FTDI_ELV_TFD128_PID 0xE0EC /* ELV Temperatur-Feuchte-Datenlogger TFD 128 */
#define FTDI_ELV_FM3RX_PID 0xE0ED /* ELV Messwertuebertragung FM3 RX */
#define FTDI_ELV_WS777_PID 0xE0EE /* Conrad WS 777 */
-#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Engery monitor EM 1010 PC */
+#define FTDI_ELV_EM1010PC_PID 0xE0EF /* Energy monitor EM 1010 PC */
#define FTDI_ELV_CSI8_PID 0xE0F0 /* Computer-Schalt-Interface (CSI 8) */
#define FTDI_ELV_EM1000DL_PID 0xE0F1 /* PC-Datenlogger fuer Energiemonitor (EM 1000 DL) */
#define FTDI_ELV_PCK100_PID 0xE0F2 /* PC-Kabeltester (PCK 100) */
@@ -212,8 +212,8 @@
* drivers, or possibly the Comedi drivers in some cases. */
#define FTDI_ELV_CLI7000_PID 0xFB59 /* Computer-Light-Interface (CLI 7000) */
#define FTDI_ELV_PPS7330_PID 0xFB5C /* Processor-Power-Supply (PPS 7330) */
-#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperartur-Feuchte Messgeraet (TFM 100) */
-#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkurh (UDF 77) */
+#define FTDI_ELV_TFM100_PID 0xFB5D /* Temperatur-Feuchte-Messgeraet (TFM 100) */
+#define FTDI_ELV_UDF77_PID 0xFB5E /* USB DCF Funkuhr (UDF 77) */
#define FTDI_ELV_UIO88_PID 0xFB5F /* USB-I/O Interface (UIO 88) */
/*
@@ -320,7 +320,7 @@
/*
* 4N-GALAXY.DE PIDs for CAN-USB, USB-RS232, USB-RS422, USB-RS485,
- * USB-TTY activ, USB-TTY passiv. Some PIDs are used by several devices
+ * USB-TTY aktiv, USB-TTY passiv. Some PIDs are used by several devices
* and I'm not entirely sure which are used by which.
*/
#define FTDI_4N_GALAXY_DE_1_PID 0xF3C0
@@ -330,10 +330,10 @@
* Linx Technologies product ids
*/
#define LINX_SDMUSBQSS_PID 0xF448 /* Linx SDM-USB-QS-S */
-#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
-#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
-#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
-#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
+#define LINX_MASTERDEVEL2_PID 0xF449 /* Linx Master Development 2.0 */
+#define LINX_FUTURE_0_PID 0xF44A /* Linx future device */
+#define LINX_FUTURE_1_PID 0xF44B /* Linx future device */
+#define LINX_FUTURE_2_PID 0xF44C /* Linx future device */
/*
* Oceanic product ids
@@ -494,6 +494,13 @@
#define RATOC_PRODUCT_ID_USB60F 0xb020
/*
+ * Contec products (http://www.contec.com)
+ * Submitted by Daniel Sangorrin
+ */
+#define CONTEC_VID 0x06CE /* Vendor ID */
+#define CONTEC_COM1USBH_PID 0x8311 /* COM-1(USB)H */
+
+/*
* Definitions for B&B Electronics products.
*/
#define BANDB_VID 0x0856 /* B&B Electronics Vendor ID */
@@ -642,7 +649,7 @@
#define FALCOM_TWIST_PID 0x0001 /* Falcom Twist USB GPRS modem */
#define FALCOM_SAMBA_PID 0x0005 /* Falcom Samba USB GPRS modem */
-/* Larsen and Brusgaard AltiTrack/USBtrack */
+/* Larsen and Brusgaard AltiTrack/USBtrack */
#define LARSENBRUSGAARD_VID 0x0FD8
#define LB_ALTITRACK_PID 0x0001
@@ -971,7 +978,7 @@
#define ALTI2_N3_PID 0x6001 /* Neptune 3 */
/*
- * Dresden Elektronic Sensor Terminal Board
+ * Dresden Elektronik Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
#define STB_PID 0x0001 /* Sensor Terminal Board */
@@ -1002,3 +1009,11 @@
#define EVO_8U232AM_PID 0x02FF /* Evolution robotics RCM2 (FT232AM)*/
#define EVO_HYBRID_PID 0x0302 /* Evolution robotics RCM4 PID (FT232BM)*/
#define EVO_RCM4_PID 0x0303 /* Evolution robotics RCM4 PID */
+
+/*
+ * MJS Gadgets HD Radio / XM Radio / Sirius Radio interfaces (using VID 0x0403)
+ */
+#define MJSG_GENERIC_PID 0x9378
+#define MJSG_SR_RADIO_PID 0x9379
+#define MJSG_XM_RADIO_PID 0x937A
+#define MJSG_HD_RADIO_PID 0x937C
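
New IDs such as CONTEC_COM1USBH_PID and the MJSG_* values above only take effect once they are listed in the driver's device ID table in ftdi_sio.c. A cut-down, illustrative table showing how such defines are consumed (the real table is far longer; FTDI_VID and FTDI_8U232AM_PID are existing defines from this header):

#include <linux/module.h>
#include <linux/usb.h>

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(FTDI_VID, FTDI_8U232AM_PID) },
	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
	{ USB_DEVICE(FTDI_VID, MJSG_GENERIC_PID) },
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_id_table);
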
diff --git a/drivers/usb/serial/funsoft.c b/drivers/usb/serial/funsoft.c
index d30f736d2cc..e21ce9ddfc6 100644
--- a/drivers/usb/serial/funsoft.c
+++ b/drivers/usb/serial/funsoft.c
@@ -18,7 +18,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1404, 0xcddc) },
{ },
};
diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index 5ac900e5a50..a42b29a695b 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -210,7 +210,7 @@ static unsigned char const PRIVATE_REQ[]
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
/* the same device id seems to be used by all
usb enabled GPS devices */
{ USB_DEVICE(GARMIN_VENDOR_ID, 3) },
@@ -271,7 +271,6 @@ static void send_to_tty(struct usb_serial_port *port,
usb_serial_debug_data(debug, &port->dev,
__func__, actual_length, data);
- tty_buffer_request_room(tty, actual_length);
tty_insert_flip_string(tty, data, actual_length);
tty_flip_buffer_push(tty);
}
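
tty_insert_flip_string() sizes the flip buffer itself and returns the number of bytes it actually queued, so the tty_buffer_request_room() calls dropped here and in the drivers below were redundant. A minimal sketch of the resulting receive path; the helper name is illustrative:

#include <linux/device.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static void example_tty_recv(struct device *dev, struct tty_struct *tty,
			     const unsigned char *data, int length)
{
	int queued;

	/* The flip buffer grows as needed; only the return value has to be
	 * checked to detect dropped bytes. */
	queued = tty_insert_flip_string(tty, data, length);
	if (queued < length)
		dev_err(dev, "dropping data, %d bytes lost\n", length - queued);
	tty_flip_buffer_push(tty);
}
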
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 83443d6306d..89fac36684c 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -20,6 +20,7 @@
#include <linux/usb/serial.h>
#include <linux/uaccess.h>
#include <linux/kfifo.h>
+#include <linux/serial.h>
static int debug;
@@ -41,7 +42,7 @@ static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
/* we want to look at all devices, as the vendor/product id can change
* depending on the command line argument */
-static struct usb_device_id generic_serial_ids[] = {
+static const struct usb_device_id generic_serial_ids[] = {
{.driver_info = 42},
{}
};
@@ -194,7 +195,7 @@ static int usb_serial_multi_urb_write(struct tty_struct *tty,
if (port->urbs_in_flight >
port->serial->type->max_in_flight_urbs) {
spin_unlock_irqrestore(&port->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return bwrite;
}
port->tx_bytes_flight += towrite;
@@ -585,7 +586,7 @@ int usb_serial_generic_resume(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
- if (!port->port.count)
+ if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags))
continue;
if (port->read_urb) {
diff --git a/drivers/usb/serial/hp4x.c b/drivers/usb/serial/hp4x.c
index 43132927513..809379159b0 100644
--- a/drivers/usb/serial/hp4x.c
+++ b/drivers/usb/serial/hp4x.c
@@ -29,7 +29,7 @@
#define HP_VENDOR_ID 0x03f0
#define HP49GP_PRODUCT_ID 0x0121
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(HP_VENDOR_ID, HP49GP_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index b97960ac92f..3ef8df0ef88 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -364,42 +364,6 @@ static void update_edgeport_E2PROM(struct edgeport_serial *edge_serial)
release_firmware(fw);
}
-
-/************************************************************************
- * *
- * Get string descriptor from device *
- * *
- ************************************************************************/
-static int get_string(struct usb_device *dev, int Id, char *string, int buflen)
-{
- struct usb_string_descriptor StringDesc;
- struct usb_string_descriptor *pStringDesc;
-
- dbg("%s - USB String ID = %d", __func__, Id);
-
- if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
- &StringDesc, sizeof(StringDesc)))
- return 0;
-
- pStringDesc = kmalloc(StringDesc.bLength, GFP_KERNEL);
- if (!pStringDesc)
- return 0;
-
- if (!usb_get_descriptor(dev, USB_DT_STRING, Id,
- pStringDesc, StringDesc.bLength)) {
- kfree(pStringDesc);
- return 0;
- }
-
- unicode_to_ascii(string, buflen,
- pStringDesc->wData, pStringDesc->bLength/2);
-
- kfree(pStringDesc);
- dbg("%s - USB String %s", __func__, string);
- return strlen(string);
-}
-
-
#if 0
/************************************************************************
*
@@ -2007,7 +1971,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
return;
case IOSP_EXT_STATUS_RX_CHECK_RSP:
- dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============\n", __func__, edge_serial->rxPort, byte3);
+ dbg("%s ========== Port %u CHECK_RSP Sequence = %02x =============", __func__, edge_serial->rxPort, byte3);
/* Port->RxCheckRsp = true; */
return;
}
@@ -2075,7 +2039,7 @@ static void process_rcvd_status(struct edgeport_serial *edge_serial,
break;
default:
- dbg("%s - Unrecognized IOSP status code %u\n", __func__, code);
+ dbg("%s - Unrecognized IOSP status code %u", __func__, code);
break;
}
return;
@@ -2091,18 +2055,13 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
{
int cnt;
- do {
- cnt = tty_buffer_request_room(tty, length);
- if (cnt < length) {
- dev_err(dev, "%s - dropping data, %d bytes lost\n",
- __func__, length - cnt);
- if (cnt == 0)
- break;
- }
- tty_insert_flip_string(tty, data, cnt);
- data += cnt;
- length -= cnt;
- } while (length > 0);
+ cnt = tty_insert_flip_string(tty, data, length);
+ if (cnt < length) {
+ dev_err(dev, "%s - dropping data, %d bytes lost\n",
+ __func__, length - cnt);
+ }
+ data += cnt;
+ length -= cnt;
tty_flip_buffer_push(tty);
}
@@ -2530,7 +2489,7 @@ static int calc_baud_rate_divisor(int baudrate, int *divisor)
*divisor = custom;
- dbg("%s - Baud %d = %d\n", __func__, baudrate, custom);
+ dbg("%s - Baud %d = %d", __func__, baudrate, custom);
return 0;
}
@@ -2915,7 +2874,7 @@ static void load_application_firmware(struct edgeport_serial *edge_serial)
break;
case EDGE_DOWNLOAD_FILE_NONE:
- dbg ("No download file specified, skipping download\n");
+ dbg("No download file specified, skipping download");
return;
default:
@@ -2997,10 +2956,12 @@ static int edge_startup(struct usb_serial *serial)
usb_set_serial_data(serial, edge_serial);
/* get the name for the device from the device */
- i = get_string(dev, dev->descriptor.iManufacturer,
+ i = usb_string(dev, dev->descriptor.iManufacturer,
&edge_serial->name[0], MAX_NAME_LEN+1);
+ if (i < 0)
+ i = 0;
edge_serial->name[i++] = ' ';
- get_string(dev, dev->descriptor.iProduct,
+ usb_string(dev, dev->descriptor.iProduct,
&edge_serial->name[i], MAX_NAME_LEN+2 - i);
dev_info(&serial->dev->dev, "%s detected\n", edge_serial->name);
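
The driver's private get_string() helper is removed in favour of the core's usb_string(), which reads a string descriptor and converts it to a NUL-terminated C string in one call, returning the string length or a negative error. A minimal sketch of the replacement, assuming the usual device pointer; the wrapper name is illustrative:

#include <linux/usb.h>

static int example_read_manufacturer(struct usb_device *dev, char *name,
				     size_t len)
{
	int n = usb_string(dev, dev->descriptor.iManufacturer, name, len);

	if (n < 0)		/* treat failure as an empty string */
		n = 0;
	name[n] = '\0';
	return n;
}
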
diff --git a/drivers/usb/serial/io_tables.h b/drivers/usb/serial/io_tables.h
index 9241d314751..feb56a4ca79 100644
--- a/drivers/usb/serial/io_tables.h
+++ b/drivers/usb/serial/io_tables.h
@@ -14,7 +14,7 @@
#ifndef IO_TABLES_H
#define IO_TABLES_H
-static struct usb_device_id edgeport_2port_id_table [] = {
+static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_2I) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_421) },
@@ -23,7 +23,7 @@ static struct usb_device_id edgeport_2port_id_table [] = {
{ }
};
-static struct usb_device_id edgeport_4port_id_table [] = {
+static const struct usb_device_id edgeport_4port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
@@ -37,7 +37,7 @@ static struct usb_device_id edgeport_4port_id_table [] = {
{ }
};
-static struct usb_device_id edgeport_8port_id_table [] = {
+static const struct usb_device_id edgeport_8port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_16_DUAL_CPU) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_8I) },
@@ -47,7 +47,7 @@ static struct usb_device_id edgeport_8port_id_table [] = {
{ }
};
-static struct usb_device_id Epic_port_id_table [] = {
+static const struct usb_device_id Epic_port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0202) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0203) },
{ USB_DEVICE(USB_VENDOR_ID_NCR, NCR_DEVICE_ID_EPIC_0310) },
@@ -60,7 +60,7 @@ static struct usb_device_id Epic_port_id_table [] = {
};
/* Devices that this driver supports */
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_RAPIDPORT_4) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_EDGEPORT_4T) },
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index d4cc0f7af40..aa876f71f22 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -134,7 +134,7 @@ struct edgeport_serial {
/* Devices that this driver supports */
-static struct usb_device_id edgeport_1port_id_table [] = {
+static const struct usb_device_id edgeport_1port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -154,7 +154,7 @@ static struct usb_device_id edgeport_1port_id_table [] = {
{ }
};
-static struct usb_device_id edgeport_2port_id_table [] = {
+static const struct usb_device_id edgeport_2port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2C) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_2I) },
@@ -177,7 +177,7 @@ static struct usb_device_id edgeport_2port_id_table [] = {
};
/* Devices that this driver supports */
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1) },
{ USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_TI3410_EDGEPORT_1I) },
@@ -413,11 +413,18 @@ static int write_boot_mem(struct edgeport_serial *serial,
{
int status = 0;
int i;
- __u8 temp;
+ u8 *temp;
/* Must do a read before write */
if (!serial->TiReadI2C) {
- status = read_boot_mem(serial, 0, 1, &temp);
+ temp = kmalloc(1, GFP_KERNEL);
+ if (!temp) {
+ dev_err(&serial->serial->dev->dev,
+ "%s - out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ status = read_boot_mem(serial, 0, 1, temp);
+ kfree(temp);
if (status)
return status;
}
@@ -935,37 +942,47 @@ static int build_i2c_fw_hdr(__u8 *header, struct device *dev)
static int i2c_type_bootmode(struct edgeport_serial *serial)
{
int status;
- __u8 data;
+ u8 *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data) {
+ dev_err(&serial->serial->dev->dev,
+ "%s - out of memory\n", __func__);
+ return -ENOMEM;
+ }
/* Try to read type 2 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
- DTK_ADDR_SPACE_I2C_TYPE_II, 0, &data, 0x01);
+ DTK_ADDR_SPACE_I2C_TYPE_II, 0, data, 0x01);
if (status)
dbg("%s - read 2 status error = %d", __func__, status);
else
- dbg("%s - read 2 data = 0x%x", __func__, data);
- if ((!status) && (data == UMP5152 || data == UMP3410)) {
+ dbg("%s - read 2 data = 0x%x", __func__, *data);
+ if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dbg("%s - ROM_TYPE_II", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
- return 0;
+ goto out;
}
/* Try to read type 3 */
status = ti_vread_sync(serial->serial->dev, UMPC_MEMORY_READ,
- DTK_ADDR_SPACE_I2C_TYPE_III, 0, &data, 0x01);
+ DTK_ADDR_SPACE_I2C_TYPE_III, 0, data, 0x01);
if (status)
dbg("%s - read 3 status error = %d", __func__, status);
else
- dbg("%s - read 2 data = 0x%x", __func__, data);
- if ((!status) && (data == UMP5152 || data == UMP3410)) {
+ dbg("%s - read 2 data = 0x%x", __func__, *data);
+ if ((!status) && (*data == UMP5152 || *data == UMP3410)) {
dbg("%s - ROM_TYPE_III", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_III;
- return 0;
+ goto out;
}
dbg("%s - Unknown", __func__);
serial->TI_I2C_Type = DTK_ADDR_SPACE_I2C_TYPE_II;
- return -ENODEV;
+ status = -ENODEV;
+out:
+ kfree(data);
+ return status;
}
static int bulk_xfer(struct usb_serial *serial, void *buffer,
@@ -1113,7 +1130,7 @@ static int download_fw(struct edgeport_serial *serial)
I2C_DESC_TYPE_FIRMWARE_BASIC, rom_desc);
if (start_address != 0) {
struct ti_i2c_firmware_rec *firmware_version;
- __u8 record;
+ u8 *record;
dbg("%s - Found Type FIRMWARE (Type 2) record",
__func__);
@@ -1165,6 +1182,15 @@ static int download_fw(struct edgeport_serial *serial)
OperationalMajorVersion,
OperationalMinorVersion);
+ record = kmalloc(1, GFP_KERNEL);
+ if (!record) {
+ dev_err(dev, "%s - out of memory.\n",
+ __func__);
+ kfree(firmware_version);
+ kfree(rom_desc);
+ kfree(ti_manuf_desc);
+ return -ENOMEM;
+ }
/* In order to update the I2C firmware we must
* change the type 2 record to type 0xF2. This
* will force the UMP to come up in Boot Mode.
@@ -1177,13 +1203,14 @@ static int download_fw(struct edgeport_serial *serial)
* firmware will update the record type from
* 0xf2 to 0x02.
*/
- record = I2C_DESC_TYPE_FIRMWARE_BLANK;
+ *record = I2C_DESC_TYPE_FIRMWARE_BLANK;
/* Change the I2C Firmware record type to
0xf2 to trigger an update */
status = write_rom(serial, start_address,
- sizeof(record), &record);
+ sizeof(*record), record);
if (status) {
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1196,19 +1223,21 @@ static int download_fw(struct edgeport_serial *serial)
*/
status = read_rom(serial,
start_address,
- sizeof(record),
- &record);
+ sizeof(*record),
+ record);
if (status) {
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
return status;
}
- if (record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
+ if (*record != I2C_DESC_TYPE_FIRMWARE_BLANK) {
dev_err(dev,
"%s - error resetting device\n",
__func__);
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1226,6 +1255,7 @@ static int download_fw(struct edgeport_serial *serial)
__func__, status);
/* return an error on purpose. */
+ kfree(record);
kfree(firmware_version);
kfree(rom_desc);
kfree(ti_manuf_desc);
@@ -1686,7 +1716,7 @@ static void edge_interrupt_callback(struct urb *urb)
case TIUMP_INTERRUPT_CODE_MSR: /* MSR */
/* Copy MSR from UMP */
msr = data[1];
- dbg("%s - ===== Port %u MSR Status = %02x ======\n",
+ dbg("%s - ===== Port %u MSR Status = %02x ======",
__func__, port_number, msr);
handle_new_msr(edge_port, msr);
break;
@@ -1790,7 +1820,6 @@ static void edge_tty_recv(struct device *dev, struct tty_struct *tty,
{
int queued;
- tty_buffer_request_room(tty, length);
queued = tty_insert_flip_string(tty, data, length);
if (queued < length)
dev_err(dev, "%s - dropping data, %d bytes lost\n",
diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index d6231c38813..3fea9298eb1 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -747,7 +747,6 @@ static void ipaq_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
bytes_in += urb->actual_length;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index 727d323f092..e1d07840cee 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -134,7 +134,7 @@ enum {
#define IPW_WANTS_TO_SEND 0x30
-static struct usb_device_id usb_ipw_ids[] = {
+static const struct usb_device_id usb_ipw_ids[] = {
{ USB_DEVICE(IPW_VID, IPW_PID) },
{ },
};
@@ -172,7 +172,6 @@ static void ipw_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 95d8d26b9a4..4a0f5197423 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -100,7 +100,7 @@ static u8 ir_baud;
static u8 ir_xbof;
static u8 ir_add_bof;
-static struct usb_device_id ir_id_table[] = {
+static const struct usb_device_id ir_id_table[] = {
{ USB_DEVICE(0x050f, 0x0180) }, /* KC Technology, KC-180 */
{ USB_DEVICE(0x08e9, 0x0100) }, /* XTNDAccess */
{ USB_DEVICE(0x09c4, 0x0011) }, /* ACTiSys ACT-IR2000U */
@@ -445,11 +445,6 @@ static void ir_read_bulk_callback(struct urb *urb)
dbg("%s - port %d", __func__, port->number);
- if (!port->port.count) {
- dbg("%s - port closed.", __func__);
- return;
- }
-
switch (status) {
case 0: /* Successful */
/*
@@ -462,10 +457,8 @@ static void ir_read_bulk_callback(struct urb *urb)
usb_serial_debug_data(debug, &port->dev, __func__,
urb->actual_length, data);
tty = tty_port_tty_get(&port->port);
- if (tty_buffer_request_room(tty, urb->actual_length - 1)) {
- tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, data+1, urb->actual_length - 1);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
/*
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index e6e02b178d2..43f13cf2f01 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -43,7 +43,7 @@ static int debug;
#define DRIVER_VERSION "v0.11"
#define DRIVER_DESC "Infinity USB Unlimited Phoenix driver"
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(IUU_USB_VENDOR_ID, IUU_USB_PRODUCT_ID)},
{} /* Terminating entry */
};
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index f8c4b07033f..297163c3c61 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -464,13 +464,9 @@ static void usa26_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
- return;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
/* Outdat handling is common for all devices */
@@ -483,8 +479,7 @@ static void usa2x_outdat_callback(struct urb *urb)
p_priv = usb_get_serial_port_data(port);
dbg("%s - urb %d", __func__, urb == p_priv->out_urbs[1]);
- if (port->port.count)
- usb_serial_port_softint(port);
+ usb_serial_port_softint(port);
}
static void usa26_inack_callback(struct urb *urb)
@@ -615,12 +610,10 @@ static void usa28_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)",
+ __func__, err);
p_priv->in_flip ^= 1;
urb = p_priv->in_urbs[p_priv->in_flip];
@@ -856,12 +849,9 @@ static void usa49_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
static void usa49wg_indat_callback(struct urb *urb)
@@ -904,11 +894,7 @@ static void usa49wg_indat_callback(struct urb *urb)
/* no error on any byte */
i++;
for (x = 1; x < len ; ++x)
- if (port->port.count)
- tty_insert_flip_char(tty,
- data[i++], 0);
- else
- i++;
+ tty_insert_flip_char(tty, data[i++], 0);
} else {
/*
* some bytes had errors, every byte has status
@@ -922,14 +908,12 @@ static void usa49wg_indat_callback(struct urb *urb)
if (stat & RXERROR_PARITY)
flag |= TTY_PARITY;
/* XXX should handle break (0x10) */
- if (port->port.count)
- tty_insert_flip_char(tty,
+ tty_insert_flip_char(tty,
data[i+1], flag);
i += 2;
}
}
- if (port->port.count)
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
}
@@ -1013,13 +997,9 @@ static void usa90_indat_callback(struct urb *urb)
/* Resubmit urb so we continue receiving */
urb->dev = port->serial->dev;
- if (port->port.count) {
- err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err != 0)
- dbg("%s - resubmit read urb failed. (%d)",
- __func__, err);
- }
- return;
+ err = usb_submit_urb(urb, GFP_ATOMIC);
+ if (err != 0)
+ dbg("%s - resubmit read urb failed. (%d)", __func__, err);
}
@@ -2418,8 +2398,7 @@ static int keyspan_usa90_send_setup(struct usb_serial *serial,
msg.portEnabled = 0;
/* Sending intermediate configs */
else {
- if (port->port.count)
- msg.portEnabled = 1;
+ msg.portEnabled = 1;
msg.txBreak = (p_priv->break_on);
}
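
The port.count tests removed throughout keyspan.c used the tty-layer open count to decide whether to resubmit read URBs or push data; the callbacks now do so unconditionally and rely on the USB serial core to kill the URBs when the port is closed. A minimal sketch of the resubmit tail of a read completion handler, assuming the driver's usual urb->context wiring to the port; the function name is illustrative:

#include <linux/usb.h>
#include <linux/usb/serial.h>

static void example_indat_resubmit(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	int err;

	/* ... received data has already been pushed to the tty ... */

	/* Resubmit so the device keeps receiving; no open-count check. */
	urb->dev = port->serial->dev;
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err)
		dev_dbg(&port->dev, "resubmit read urb failed (%d)\n", err);
}
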
diff --git a/drivers/usb/serial/keyspan.h b/drivers/usb/serial/keyspan.h
index 30771e5b397..bf3297ddd18 100644
--- a/drivers/usb/serial/keyspan.h
+++ b/drivers/usb/serial/keyspan.h
@@ -456,7 +456,7 @@ static const struct keyspan_device_details *keyspan_devices[] = {
NULL,
};
-static struct usb_device_id keyspan_ids_combined[] = {
+static const struct usb_device_id keyspan_ids_combined[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19w_pre_product_id) },
@@ -497,7 +497,7 @@ static struct usb_driver keyspan_driver = {
};
/* usb_device_id table for the pre-firmware download keyspan devices */
-static struct usb_device_id keyspan_pre_ids[] = {
+static const struct usb_device_id keyspan_pre_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_pre_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_pre_product_id) },
@@ -513,7 +513,7 @@ static struct usb_device_id keyspan_pre_ids[] = {
{ } /* Terminating entry */
};
-static struct usb_device_id keyspan_1port_ids[] = {
+static const struct usb_device_id keyspan_1port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa18x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa19qi_product_id) },
@@ -524,7 +524,7 @@ static struct usb_device_id keyspan_1port_ids[] = {
{ } /* Terminating entry */
};
-static struct usb_device_id keyspan_2port_ids[] = {
+static const struct usb_device_id keyspan_2port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28x_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa28xa_product_id) },
@@ -532,7 +532,7 @@ static struct usb_device_id keyspan_2port_ids[] = {
{ } /* Terminating entry */
};
-static struct usb_device_id keyspan_4port_ids[] = {
+static const struct usb_device_id keyspan_4port_ids[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49w_product_id) },
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wlc_product_id)},
{ USB_DEVICE(KEYSPAN_VENDOR_ID, keyspan_usa49wg_product_id)},
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 1296a097f5c..185fe9a7d4e 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -125,7 +125,7 @@ struct keyspan_pda_private {
#define ENTREGRA_VENDOR_ID 0x1645
#define ENTREGRA_FAKE_ID 0x8093
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
#ifdef KEYSPAN
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
#endif
@@ -147,20 +147,20 @@ static struct usb_driver keyspan_pda_driver = {
.no_dynamic_id = 1,
};
-static struct usb_device_id id_table_std [] = {
+static const struct usb_device_id id_table_std[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_ID) },
{ } /* Terminating entry */
};
#ifdef KEYSPAN
-static struct usb_device_id id_table_fake [] = {
+static const struct usb_device_id id_table_fake[] = {
{ USB_DEVICE(KEYSPAN_VENDOR_ID, KEYSPAN_PDA_FAKE_ID) },
{ } /* Terminating entry */
};
#endif
#ifdef XIRCOM
-static struct usb_device_id id_table_fake_xircom [] = {
+static const struct usb_device_id id_table_fake_xircom[] = {
{ USB_DEVICE(XIRCOM_VENDOR_ID, XIRCOM_FAKE_ID) },
{ USB_DEVICE(ENTREGRA_VENDOR_ID, ENTREGRA_FAKE_ID) },
{ }
@@ -429,13 +429,20 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
unsigned char *value)
{
int rc;
- unsigned char data;
+ u8 *data;
+
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
3, /* get pins */
USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
- 0, 0, &data, 1, 2000);
+ 0, 0, data, 1, 2000);
if (rc >= 0)
- *value = data;
+ *value = *data;
+
+ kfree(data);
return rc;
}
@@ -543,7 +550,14 @@ static int keyspan_pda_write(struct tty_struct *tty,
device how much room it really has. This is done only on
scheduler time, since usb_control_msg() sleeps. */
if (count > priv->tx_room && !in_interrupt()) {
- unsigned char room;
+ u8 *room;
+
+ room = kmalloc(1, GFP_KERNEL);
+ if (!room) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+
rc = usb_control_msg(serial->dev,
usb_rcvctrlpipe(serial->dev, 0),
6, /* write_room */
@@ -551,9 +565,14 @@ static int keyspan_pda_write(struct tty_struct *tty,
| USB_DIR_IN,
0, /* value: 0 means "remaining room" */
0, /* index */
- &room,
+ room,
1,
2000);
+ if (rc > 0) {
+ dbg(" roomquery says %d", *room);
+ priv->tx_room = *room;
+ }
+ kfree(room);
if (rc < 0) {
dbg(" roomquery failed");
goto exit;
@@ -563,8 +582,6 @@ static int keyspan_pda_write(struct tty_struct *tty,
rc = -EIO; /* device didn't return any data */
goto exit;
}
- dbg(" roomquery says %d", room);
- priv->tx_room = room;
}
if (count > priv->tx_room) {
/* we're about to completely fill the Tx buffer, so
@@ -684,18 +701,22 @@ static int keyspan_pda_open(struct tty_struct *tty,
struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
- unsigned char room;
+ u8 *room;
int rc = 0;
struct keyspan_pda_private *priv;
/* find out how much room is in the Tx ring */
+ room = kmalloc(1, GFP_KERNEL);
+ if (!room)
+ return -ENOMEM;
+
rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
6, /* write_room */
USB_TYPE_VENDOR | USB_RECIP_INTERFACE
| USB_DIR_IN,
0, /* value */
0, /* index */
- &room,
+ room,
1,
2000);
if (rc < 0) {
@@ -708,8 +729,8 @@ static int keyspan_pda_open(struct tty_struct *tty,
goto error;
}
priv = usb_get_serial_port_data(port);
- priv->tx_room = room;
- priv->tx_throttled = room ? 0 : 1;
+ priv->tx_room = *room;
+ priv->tx_throttled = *room ? 0 : 1;
/*Start reading from the device*/
port->interrupt_in_urb->dev = serial->dev;
@@ -718,8 +739,8 @@ static int keyspan_pda_open(struct tty_struct *tty,
dbg("%s - usb_submit_urb(read int) failed", __func__);
goto error;
}
-
error:
+ kfree(room);
return rc;
}
static void keyspan_pda_close(struct usb_serial_port *port)
@@ -789,6 +810,13 @@ static int keyspan_pda_fake_startup(struct usb_serial *serial)
return 1;
}
+#ifdef KEYSPAN
+MODULE_FIRMWARE("keyspan_pda/keyspan_pda.fw");
+#endif
+#ifdef XIRCOM
+MODULE_FIRMWARE("keyspan_pda/xircom_pgs.fw");
+#endif
+
static int keyspan_pda_startup(struct usb_serial *serial)
{
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 3a7873806f4..8eef91ba4b1 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -94,7 +94,7 @@ static int klsi_105_tiocmset(struct tty_struct *tty, struct file *file,
/*
* All of the device info needed for the KLSI converters.
*/
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PALMCONNECT_VID, PALMCONNECT_PID) },
{ USB_DEVICE(KLSI_VID, KLSI_KL5KUSB105D_PID) },
{ } /* Terminating entry */
@@ -212,10 +212,19 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
unsigned long *line_state_p)
{
int rc;
- __u8 status_buf[KLSI_STATUSBUF_LEN] = { -1, -1};
+ u8 *status_buf;
__u16 status;
dev_info(&port->serial->dev->dev, "sending SIO Poll request\n");
+
+ status_buf = kmalloc(KLSI_STATUSBUF_LEN, GFP_KERNEL);
+ if (!status_buf) {
+ dev_err(&port->dev, "%s - out of memory for status buffer.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ status_buf[0] = 0xff;
+ status_buf[1] = 0xff;
rc = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
KL5KUSB105A_SIO_POLL,
@@ -236,6 +245,8 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
*line_state_p = klsi_105_status2linestate(status);
}
+
+ kfree(status_buf);
return rc;
}
@@ -364,7 +375,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
int rc;
int i;
unsigned long line_state;
- struct klsi_105_port_settings cfg;
+ struct klsi_105_port_settings *cfg;
unsigned long flags;
dbg("%s port %d", __func__, port->number);
@@ -376,12 +387,18 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
* Then read the modem line control and store values in
* priv->line_state.
*/
- cfg.pktlen = 5;
- cfg.baudrate = kl5kusb105a_sio_b9600;
- cfg.databits = kl5kusb105a_dtb_8;
- cfg.unknown1 = 0;
- cfg.unknown2 = 1;
- klsi_105_chg_port_settings(port, &cfg);
+ cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ dev_err(&port->dev, "%s - out of memory for config buffer.\n",
+ __func__);
+ return -ENOMEM;
+ }
+ cfg->pktlen = 5;
+ cfg->baudrate = kl5kusb105a_sio_b9600;
+ cfg->databits = kl5kusb105a_dtb_8;
+ cfg->unknown1 = 0;
+ cfg->unknown2 = 1;
+ klsi_105_chg_port_settings(port, cfg);
/* set up termios structure */
spin_lock_irqsave(&priv->lock, flags);
@@ -391,11 +408,11 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
priv->termios.c_lflag = tty->termios->c_lflag;
for (i = 0; i < NCCS; i++)
priv->termios.c_cc[i] = tty->termios->c_cc[i];
- priv->cfg.pktlen = cfg.pktlen;
- priv->cfg.baudrate = cfg.baudrate;
- priv->cfg.databits = cfg.databits;
- priv->cfg.unknown1 = cfg.unknown1;
- priv->cfg.unknown2 = cfg.unknown2;
+ priv->cfg.pktlen = cfg->pktlen;
+ priv->cfg.baudrate = cfg->baudrate;
+ priv->cfg.databits = cfg->databits;
+ priv->cfg.unknown1 = cfg->unknown1;
+ priv->cfg.unknown2 = cfg->unknown2;
spin_unlock_irqrestore(&priv->lock, flags);
/* READ_ON and urb submission */
@@ -441,6 +458,7 @@ static int klsi_105_open(struct tty_struct *tty, struct usb_serial_port *port)
retval = rc;
exit:
+ kfree(cfg);
return retval;
} /* klsi_105_open */
@@ -681,7 +699,6 @@ static void klsi_105_read_bulk_callback(struct urb *urb)
bytes_sent = urb->actual_length - 2;
}
- tty_buffer_request_room(tty, bytes_sent);
tty_insert_flip_string(tty, data + 2, bytes_sent);
tty_flip_buffer_push(tty);
tty_kref_put(tty);
@@ -714,10 +731,17 @@ static void klsi_105_set_termios(struct tty_struct *tty,
unsigned int old_iflag = old_termios->c_iflag;
unsigned int cflag = tty->termios->c_cflag;
unsigned int old_cflag = old_termios->c_cflag;
- struct klsi_105_port_settings cfg;
+ struct klsi_105_port_settings *cfg;
unsigned long flags;
speed_t baud;
+ cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
+ if (!cfg) {
+ dev_err(&port->dev, "%s - out of memory for config buffer.\n",
+ __func__);
+ return;
+ }
+
/* lock while we are modifying the settings */
spin_lock_irqsave(&priv->lock, flags);
@@ -793,11 +817,11 @@ static void klsi_105_set_termios(struct tty_struct *tty,
case CS5:
dbg("%s - 5 bits/byte not supported", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
- return ;
+ goto err;
case CS6:
dbg("%s - 6 bits/byte not supported", __func__);
spin_unlock_irqrestore(&priv->lock, flags);
- return ;
+ goto err;
case CS7:
priv->cfg.databits = kl5kusb105a_dtb_7;
break;
@@ -856,11 +880,13 @@ static void klsi_105_set_termios(struct tty_struct *tty,
#endif
;
}
- memcpy(&cfg, &priv->cfg, sizeof(cfg));
+ memcpy(cfg, &priv->cfg, sizeof(*cfg));
spin_unlock_irqrestore(&priv->lock, flags);
/* now commit changes to device */
- klsi_105_chg_port_settings(port, &cfg);
+ klsi_105_chg_port_settings(port, cfg);
+err:
+ kfree(cfg);
} /* klsi_105_set_termios */
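
klsi_105_open() and klsi_105_set_termios() now allocate the klsi_105_port_settings block they pass to klsi_105_chg_port_settings() with kmalloc(), for the same reason as the one-byte buffers earlier in the series: the structure ends up in a usb_control_msg() transfer and therefore must not live on the stack. A minimal sketch of the allocate/fill/commit/free shape; struct example_cfg and the commit callback stand in for the driver's types and are purely illustrative:

#include <linux/slab.h>
#include <linux/types.h>

struct example_cfg {
	u8 pktlen;
	u8 baudrate;
	u8 databits;
	u8 unknown1;
	u8 unknown2;
};

static int example_apply_settings(void (*commit)(struct example_cfg *cfg))
{
	struct example_cfg *cfg;

	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);	/* DMA-safe storage */
	if (!cfg)
		return -ENOMEM;

	cfg->pktlen   = 5;
	cfg->baudrate = 0;	/* the driver uses kl5kusb105a_sio_b9600 */
	cfg->databits = 8;	/* the driver uses kl5kusb105a_dtb_8 */
	cfg->unknown1 = 0;
	cfg->unknown2 = 1;

	commit(cfg);		/* e.g. klsi_105_chg_port_settings() */

	kfree(cfg);
	return 0;
}
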
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 45ea694b3ae..c113a2a0e10 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -86,7 +86,7 @@ static void kobil_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old);
static void kobil_init_termios(struct tty_struct *tty);
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_B_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_ADAPTER_K_PRODUCT_ID) },
{ USB_DEVICE(KOBIL_VENDOR_ID, KOBIL_USBTWIN_PRODUCT_ID) },
@@ -388,7 +388,6 @@ static void kobil_read_int_callback(struct urb *urb)
*/
/* END DEBUG */
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
@@ -624,7 +623,6 @@ static void kobil_set_termios(struct tty_struct *tty,
unsigned short urb_val = 0;
int c_cflag = tty->termios->c_cflag;
speed_t speed;
- void *settings;
priv = usb_get_serial_port_data(port);
if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
@@ -647,25 +645,13 @@ static void kobil_set_termios(struct tty_struct *tty,
}
urb_val |= (c_cflag & CSTOPB) ? SUSBCR_SPASB_2StopBits :
SUSBCR_SPASB_1StopBit;
-
- settings = kzalloc(50, GFP_KERNEL);
- if (!settings)
- return;
-
- sprintf(settings, "%d ", speed);
-
if (c_cflag & PARENB) {
- if (c_cflag & PARODD) {
+ if (c_cflag & PARODD)
urb_val |= SUSBCR_SPASB_OddParity;
- strcat(settings, "Odd Parity");
- } else {
+ else
urb_val |= SUSBCR_SPASB_EvenParity;
- strcat(settings, "Even Parity");
- }
- } else {
+ } else
urb_val |= SUSBCR_SPASB_NoParity;
- strcat(settings, "No Parity");
- }
tty->termios->c_cflag &= ~CMSPAR;
tty_encode_baud_rate(tty, speed, speed);
@@ -675,11 +661,10 @@ static void kobil_set_termios(struct tty_struct *tty,
USB_TYPE_VENDOR | USB_RECIP_ENDPOINT | USB_DIR_OUT,
urb_val,
0,
- settings,
+ NULL,
0,
KOBIL_TIMEOUT
);
- kfree(settings);
}
static int kobil_ioctl(struct tty_struct *tty, struct file *file,
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index cd009cb280a..2849f8c3201 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -75,6 +75,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
+#include <asm/unaligned.h>
#include <linux/usb.h>
#include <linux/usb/serial.h>
#include "mct_u232.h"
@@ -110,7 +111,7 @@ static void mct_u232_unthrottle(struct tty_struct *tty);
/*
* All of the device info needed for the MCT USB-RS232 converter.
*/
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(MCT_U232_VID, MCT_U232_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_SITECOM_PID) },
{ USB_DEVICE(MCT_U232_VID, MCT_U232_DU_H3SP_PID) },
@@ -231,19 +232,22 @@ static int mct_u232_calculate_baud_rate(struct usb_serial *serial,
static int mct_u232_set_baud_rate(struct tty_struct *tty,
struct usb_serial *serial, struct usb_serial_port *port, speed_t value)
{
- __le32 divisor;
+ unsigned int divisor;
int rc;
- unsigned char zero_byte = 0;
+ unsigned char *buf;
unsigned char cts_enable_byte = 0;
speed_t speed;
- divisor = cpu_to_le32(mct_u232_calculate_baud_rate(serial, value,
- &speed));
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ divisor = mct_u232_calculate_baud_rate(serial, value, &speed);
+ put_unaligned_le32(divisor, buf);
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_BAUD_RATE_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &divisor, MCT_U232_SET_BAUD_RATE_SIZE,
+ 0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE,
WDR_TIMEOUT);
if (rc < 0) /*FIXME: What value speed results */
dev_err(&port->dev, "Set BAUD RATE %d failed (error = %d)\n",
@@ -269,10 +273,11 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
a device which is not asserting 'CTS'.
*/
+ buf[0] = 0;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_UNKNOWN1_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &zero_byte, MCT_U232_SET_UNKNOWN1_SIZE,
+ 0, 0, buf, MCT_U232_SET_UNKNOWN1_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
@@ -284,30 +289,40 @@ static int mct_u232_set_baud_rate(struct tty_struct *tty,
dbg("set_baud_rate: send second control message, data = %02X",
cts_enable_byte);
+ buf[0] = cts_enable_byte;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_CTS_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &cts_enable_byte, MCT_U232_SET_CTS_SIZE,
+ 0, 0, buf, MCT_U232_SET_CTS_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&port->dev, "Sending USB device request code %d "
"failed (error = %d)\n", MCT_U232_SET_CTS_REQUEST, rc);
+ kfree(buf);
return rc;
} /* mct_u232_set_baud_rate */
static int mct_u232_set_line_ctrl(struct usb_serial *serial, unsigned char lcr)
{
int rc;
+ unsigned char *buf;
+
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+
+ buf[0] = lcr;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_LINE_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &lcr, MCT_U232_SET_LINE_CTRL_SIZE,
+ 0, 0, buf, MCT_U232_SET_LINE_CTRL_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&serial->dev->dev,
"Set LINE CTRL 0x%x failed (error = %d)\n", lcr, rc);
dbg("set_line_ctrl: 0x%x", lcr);
+ kfree(buf);
return rc;
} /* mct_u232_set_line_ctrl */
@@ -315,23 +330,31 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
unsigned int control_state)
{
int rc;
- unsigned char mcr = MCT_U232_MCR_NONE;
+ unsigned char mcr;
+ unsigned char *buf;
+
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL)
+ return -ENOMEM;
+ mcr = MCT_U232_MCR_NONE;
if (control_state & TIOCM_DTR)
mcr |= MCT_U232_MCR_DTR;
if (control_state & TIOCM_RTS)
mcr |= MCT_U232_MCR_RTS;
+ buf[0] = mcr;
rc = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0),
MCT_U232_SET_MODEM_CTRL_REQUEST,
MCT_U232_SET_REQUEST_TYPE,
- 0, 0, &mcr, MCT_U232_SET_MODEM_CTRL_SIZE,
+ 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
WDR_TIMEOUT);
if (rc < 0)
dev_err(&serial->dev->dev,
"Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
+ kfree(buf);
return rc;
} /* mct_u232_set_modem_ctrl */
@@ -339,17 +362,27 @@ static int mct_u232_get_modem_stat(struct usb_serial *serial,
unsigned char *msr)
{
int rc;
+ unsigned char *buf;
+
+ buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
+ if (buf == NULL) {
+ *msr = 0;
+ return -ENOMEM;
+ }
rc = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
MCT_U232_GET_MODEM_STAT_REQUEST,
MCT_U232_GET_REQUEST_TYPE,
- 0, 0, msr, MCT_U232_GET_MODEM_STAT_SIZE,
+ 0, 0, buf, MCT_U232_GET_MODEM_STAT_SIZE,
WDR_TIMEOUT);
if (rc < 0) {
dev_err(&serial->dev->dev,
"Get MODEM STATus failed (error = %d)\n", rc);
*msr = 0;
+ } else {
+ *msr = buf[0];
}
dbg("get_modem_stat: 0x%x", *msr);
+ kfree(buf);
return rc;
} /* mct_u232_get_modem_stat */
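
mct_u232 now stages every control-transfer payload in a kmalloc()'d buffer of MCT_U232_MAX_SIZE bytes, and multi-byte values such as the baud-rate divisor are written into it with put_unaligned_le32(), which takes a CPU-order value and stores it little-endian on any host (which is also why divisor is no longer declared __le32). A minimal sketch of packing and sending the divisor; apart from the kernel helpers and the MCT_U232_* constants, the names are illustrative:

#include <linux/slab.h>
#include <linux/usb.h>
#include <asm/unaligned.h>

static int example_send_divisor(struct usb_device *udev, u8 request,
				u8 reqtype, unsigned int divisor, int timeout)
{
	unsigned char *buf;
	int rc;

	buf = kmalloc(MCT_U232_MAX_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Stores the CPU-order divisor in little-endian byte order; no
	 * separate cpu_to_le32() conversion is needed. */
	put_unaligned_le32(divisor, buf);

	rc = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request, reqtype,
			     0, 0, buf, MCT_U232_SET_BAUD_RATE_SIZE, timeout);

	kfree(buf);
	return rc;
}
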
diff --git a/drivers/usb/serial/mct_u232.h b/drivers/usb/serial/mct_u232.h
index 07b6bec31dc..7417d5ce1e2 100644
--- a/drivers/usb/serial/mct_u232.h
+++ b/drivers/usb/serial/mct_u232.h
@@ -73,6 +73,8 @@
#define MCT_U232_SET_CTS_REQUEST 12
#define MCT_U232_SET_CTS_SIZE 1
+#define MCT_U232_MAX_SIZE 4 /* of MCT_XXX_SIZE */
+
/*
* Baud rate (divisor)
* Actually, there are two of them, MCT website calls them "Philips solution"
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 763e32a44be..0d47f2c4d59 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -81,12 +81,15 @@ struct moschip_serial {
static int debug;
+static struct usb_serial_driver moschip7720_2port_driver;
+
#define USB_VENDOR_ID_MOSCHIP 0x9710
#define MOSCHIP_DEVICE_ID_7720 0x7720
#define MOSCHIP_DEVICE_ID_7715 0x7715
-static struct usb_device_id moschip_port_id_table[] = {
+static const struct usb_device_id moschip_port_id_table[] = {
{ USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7720) },
+ { USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7715) },
{ } /* terminating entry */
};
MODULE_DEVICE_TABLE(usb, moschip_port_id_table);
@@ -106,7 +109,7 @@ static void mos7720_interrupt_callback(struct urb *urb)
__u8 sp1;
__u8 sp2;
- dbg("%s", " : Entering\n");
+ dbg(" : Entering");
switch (status) {
case 0:
@@ -186,6 +189,75 @@ exit:
}
/*
+ * mos7715_interrupt_callback
+ * this is the 7715's callback function for when we have received data on
+ * the interrupt endpoint.
+ */
+static void mos7715_interrupt_callback(struct urb *urb)
+{
+ int result;
+ int length;
+ int status = urb->status;
+ __u8 *data;
+ __u8 iir;
+
+ switch (status) {
+ case 0:
+ /* success */
+ break;
+ case -ECONNRESET:
+ case -ENOENT:
+ case -ESHUTDOWN:
+ /* this urb is terminated, clean up */
+ dbg("%s - urb shutting down with status: %d", __func__,
+ status);
+ return;
+ default:
+ dbg("%s - nonzero urb status received: %d", __func__,
+ status);
+ goto exit;
+ }
+
+ length = urb->actual_length;
+ data = urb->transfer_buffer;
+
+ /* Structure of data from 7715 device:
+ * Byte 1: IIR serial Port
+ * Byte 2: unused

+ * Byte 3: DSR parallel port
+ * Byte 4: FIFO status for both */
+
+ if (unlikely(length != 4)) {
+ dbg("Wrong data !!!");
+ return;
+ }
+
+ iir = data[0];
+ if (!(iir & 0x01)) { /* serial port interrupt pending */
+ switch (iir & 0x0f) {
+ case SERIAL_IIR_RLS:
+ dbg("Serial Port: Receiver status error or address "
+ "bit detected in 9-bit mode\n");
+ break;
+ case SERIAL_IIR_CTI:
+ dbg("Serial Port: Receiver time out");
+ break;
+ case SERIAL_IIR_MS:
+ dbg("Serial Port: Modem status change");
+ break;
+ }
+ }
+
+exit:
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result)
+ dev_err(&urb->dev->dev,
+ "%s - Error %d submitting control urb\n",
+ __func__, result);
+ return;
+}
+
+/*
* mos7720_bulk_in_callback
* this is the callback function for when we have received data on the
* bulk in endpoint.
@@ -206,7 +278,7 @@ static void mos7720_bulk_in_callback(struct urb *urb)
mos7720_port = urb->context;
if (!mos7720_port) {
- dbg("%s", "NULL mos7720_port pointer \n");
+ dbg("NULL mos7720_port pointer");
return ;
}
@@ -218,7 +290,6 @@ static void mos7720_bulk_in_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
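The tty_buffer_request_room() calls dropped here and in the later callbacks are redundant: tty_insert_flip_string() sizes the flip buffer itself and returns how many bytes it actually accepted (ti_recv() further down loops on that return value when it must not drop data). A minimal sketch of the resulting receive path, with an illustrative helper name that is not part of the driver:

static void push_rx_sketch(struct usb_serial_port *port,
			   const unsigned char *data, int len)
{
	struct tty_struct *tty = tty_port_tty_get(&port->port);

	if (tty && len) {
		/* allocates the room it needs, returns bytes copied */
		tty_insert_flip_string(tty, data, len);
		tty_flip_buffer_push(tty);
	}
	tty_kref_put(tty);	/* NULL-safe */
}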
@@ -275,17 +346,15 @@ static void mos7720_bulk_out_data_callback(struct urb *urb)
* this function will be used for sending command to device
*/
static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
- __u16 index, void *data)
+ __u16 index, u8 *data)
{
int status;
- unsigned int pipe;
+ u8 *buf;
u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
- __u8 requesttype;
- __u16 size = 0x0000;
if (value < MOS_MAX_PORT) {
if (product == MOSCHIP_DEVICE_ID_7715)
- value = value*0x100+0x100;
+ value = 0x0200; /* identifies the 7715's serial port */
else
value = value*0x100+0x200;
} else {
@@ -298,27 +367,58 @@ static int send_mos_cmd(struct usb_serial *serial, __u8 request, __u16 value,
}
if (request == MOS_WRITE) {
- request = (__u8)MOS_WRITE;
- requesttype = (__u8)0x40;
- value = value + (__u16)*((unsigned char *)data);
- data = NULL;
- pipe = usb_sndctrlpipe(serial->dev, 0);
+ value = value + *data;
+ status = usb_control_msg(serial->dev,
+ usb_sndctrlpipe(serial->dev, 0), MOS_WRITE,
+ 0x40, value, index, NULL, 0, MOS_WDR_TIMEOUT);
} else {
- request = (__u8)MOS_READ;
- requesttype = (__u8)0xC0;
- size = 0x01;
- pipe = usb_rcvctrlpipe(serial->dev, 0);
+ buf = kmalloc(1, GFP_KERNEL);
+ if (!buf) {
+ status = -ENOMEM;
+ goto out;
+ }
+ status = usb_control_msg(serial->dev,
+ usb_rcvctrlpipe(serial->dev, 0), MOS_READ,
+ 0xc0, value, index, buf, 1, MOS_WDR_TIMEOUT);
+ *data = *buf;
+ kfree(buf);
}
-
- status = usb_control_msg(serial->dev, pipe, request, requesttype,
- value, index, data, size, MOS_WDR_TIMEOUT);
-
+out:
if (status < 0)
- dbg("Command Write failed Value %x index %x\n", value, index);
+ dbg("Command Write failed Value %x index %x", value, index);
return status;
}
+
+/*
+ * mos77xx_probe
+ * this function installs the appropriate read interrupt endpoint callback
+ * depending on whether the device is a 7720 or 7715, thus avoiding costly
+ * run-time checks in the high-frequency callback routine itself.
+ */
+static int mos77xx_probe(struct usb_serial *serial,
+ const struct usb_device_id *id)
+{
+ if (id->idProduct == MOSCHIP_DEVICE_ID_7715)
+ moschip7720_2port_driver.read_int_callback =
+ mos7715_interrupt_callback;
+ else
+ moschip7720_2port_driver.read_int_callback =
+ mos7720_interrupt_callback;
+
+ return 0;
+}
+
+static int mos77xx_calc_num_ports(struct usb_serial *serial)
+{
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
+ if (product == MOSCHIP_DEVICE_ID_7715)
+ return 1;
+
+ return 2;
+}
+
static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
{
struct usb_serial *serial;
@@ -390,7 +490,7 @@ static int mos7720_open(struct tty_struct *tty, struct usb_serial_port *port)
*/
port_number = port->number - port->serial->minor;
send_mos_cmd(port->serial, MOS_READ, port_number, UART_LSR, &data);
- dbg("SS::%p LSR:%x\n", mos7720_port, data);
+ dbg("SS::%p LSR:%x", mos7720_port, data);
dbg("Check:Sending Command ..........");
@@ -729,7 +829,7 @@ static void mos7720_throttle(struct tty_struct *tty)
struct moschip_port *mos7720_port;
int status;
- dbg("%s- port %d\n", __func__, port->number);
+ dbg("%s- port %d", __func__, port->number);
mos7720_port = usb_get_serial_port_data(port);
@@ -1208,7 +1308,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
return;
}
- dbg("%s\n", "setting termios - ASPIRE");
+ dbg("setting termios - ASPIRE");
cflag = tty->termios->c_cflag;
@@ -1226,7 +1326,7 @@ static void mos7720_set_termios(struct tty_struct *tty,
change_port_settings(tty, mos7720_port, old_termios);
if (!port->read_urb) {
- dbg("%s", "URB KILLED !!!!!\n");
+ dbg("URB KILLED !!!!!");
return;
}
@@ -1495,6 +1595,7 @@ static int mos7720_startup(struct usb_serial *serial)
struct usb_device *dev;
int i;
char data;
+ u16 product = le16_to_cpu(serial->dev->descriptor.idProduct);
dbg("%s: Entering ..........", __func__);
@@ -1514,6 +1615,29 @@ static int mos7720_startup(struct usb_serial *serial)
usb_set_serial_data(serial, mos7720_serial);
+ /*
+ * The 7715 uses the first bulk in/out endpoint pair for the parallel
+ * port, and the second for the serial port. Because the usbserial core
+ * assumes both pairs are serial ports, we must engage in a bit of
+ * subterfuge and swap the pointers for ports 0 and 1 in order to make
+ * port 0 point to the serial port. However, both moschip devices use a
+ * single interrupt-in endpoint for both ports (as mentioned a little
+ * further down), and this endpoint was assigned to port 0. So after
+ * the swap, we must copy the interrupt endpoint elements from port 1
+ * (as newly assigned) to port 0, and null out port 1 pointers.
+ */
+ if (product == MOSCHIP_DEVICE_ID_7715) {
+ struct usb_serial_port *tmp = serial->port[0];
+ serial->port[0] = serial->port[1];
+ serial->port[1] = tmp;
+ serial->port[0]->interrupt_in_urb = tmp->interrupt_in_urb;
+ serial->port[0]->interrupt_in_buffer = tmp->interrupt_in_buffer;
+ serial->port[0]->interrupt_in_endpointAddress =
+ tmp->interrupt_in_endpointAddress;
+ serial->port[1]->interrupt_in_urb = NULL;
+ serial->port[1]->interrupt_in_buffer = NULL;
+ }
+
/* we set up the pointers to the endpoints in the mos7720_open *
* function, as the structures aren't created yet. */
@@ -1529,7 +1653,7 @@ static int mos7720_startup(struct usb_serial *serial)
/* Initialize all port interrupt end point to port 0 int
* endpoint. Our device has only one interrupt endpoint
- * comman to all ports */
+ * common to all ports */
serial->port[i]->interrupt_in_endpointAddress =
serial->port[0]->interrupt_in_endpointAddress;
@@ -1584,11 +1708,12 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.description = "Moschip 2 port adapter",
.usb_driver = &usb_driver,
.id_table = moschip_port_id_table,
- .num_ports = 2,
+ .calc_num_ports = mos77xx_calc_num_ports,
.open = mos7720_open,
.close = mos7720_close,
.throttle = mos7720_throttle,
.unthrottle = mos7720_unthrottle,
+ .probe = mos77xx_probe,
.attach = mos7720_startup,
.release = mos7720_release,
.ioctl = mos7720_ioctl,
@@ -1600,7 +1725,7 @@ static struct usb_serial_driver moschip7720_2port_driver = {
.chars_in_buffer = mos7720_chars_in_buffer,
.break_ctl = mos7720_break,
.read_bulk_callback = mos7720_bulk_in_callback,
- .read_int_callback = mos7720_interrupt_callback,
+ .read_int_callback = NULL /* dynamically assigned in probe() */
};
static int __init moschip7720_init(void)
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 2cfe2451ed9..2fda1c0182b 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -181,7 +181,7 @@
#define URB_TRANSFER_BUFFER_SIZE 32 /* URB Size */
-static struct usb_device_id moschip_port_id_table[] = {
+static const struct usb_device_id moschip_port_id_table[] = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -198,7 +198,7 @@ static struct usb_device_id moschip_port_id_table[] = {
{} /* terminating entry */
};
-static __devinitdata struct usb_device_id moschip_id_table_combined[] = {
+static const struct usb_device_id moschip_id_table_combined[] __devinitconst = {
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
{USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
{USB_DEVICE(USB_VENDOR_ID_BANDB, BANDB_DEVICE_ID_USO9ML2_2)},
@@ -283,12 +283,19 @@ static int mos7840_get_reg_sync(struct usb_serial_port *port, __u16 reg,
{
struct usb_device *dev = port->serial->dev;
int ret = 0;
+ u8 *buf;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, 0, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, 0, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
+ *val = buf[0];
dbg("mos7840_get_reg_sync offset is %x, return val %x", reg, *val);
- *val = (*val) & 0x00ff;
+
+ kfree(buf);
return ret;
}
@@ -341,6 +348,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
struct usb_device *dev = port->serial->dev;
int ret = 0;
__u16 Wval;
+ u8 *buf;
+
+ buf = kmalloc(VENDOR_READ_LENGTH, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
/* dbg("application number is %4x",
(((__u16)port->number - (__u16)(port->serial->minor))+1)<<8); */
@@ -364,9 +376,11 @@ static int mos7840_get_uart_reg(struct usb_serial_port *port, __u16 reg,
}
}
ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), MCS_RDREQ,
- MCS_RD_RTYPE, Wval, reg, val, VENDOR_READ_LENGTH,
+ MCS_RD_RTYPE, Wval, reg, buf, VENDOR_READ_LENGTH,
MOS_WDR_TIMEOUT);
- *val = (*val) & 0x00ff;
+ *val = buf[0];
+
+ kfree(buf);
return ret;
}
@@ -750,7 +764,6 @@ static void mos7840_bulk_in_callback(struct urb *urb)
if (urb->actual_length) {
tty = tty_port_tty_get(&mos7840_port->port->port);
if (tty) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
dbg(" %s ", data);
tty_flip_buffer_push(tty);
diff --git a/drivers/usb/serial/moto_modem.c b/drivers/usb/serial/moto_modem.c
index 99bd00f5188..cf1718394e1 100644
--- a/drivers/usb/serial/moto_modem.c
+++ b/drivers/usb/serial/moto_modem.c
@@ -21,7 +21,7 @@
#include <linux/usb.h>
#include <linux/usb/serial.h>
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */
{ USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */
{ USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */
diff --git a/drivers/usb/serial/navman.c b/drivers/usb/serial/navman.c
index 5ceaa4c6be0..04a6cbbed2c 100644
--- a/drivers/usb/serial/navman.c
+++ b/drivers/usb/serial/navman.c
@@ -22,7 +22,7 @@
static int debug;
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0a99, 0x0001) }, /* Talon Technology device */
{ },
};
@@ -66,7 +66,6 @@ static void navman_read_int_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
}
diff --git a/drivers/usb/serial/omninet.c b/drivers/usb/serial/omninet.c
index 062265038bf..89c724c0ac0 100644
--- a/drivers/usb/serial/omninet.c
+++ b/drivers/usb/serial/omninet.c
@@ -75,7 +75,7 @@ static void omninet_disconnect(struct usb_serial *serial);
static void omninet_release(struct usb_serial *serial);
static int omninet_attach(struct usb_serial *serial);
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(ZYXEL_VENDOR_ID, ZYXEL_OMNINET_ID) },
{ USB_DEVICE(ZYXEL_VENDOR_ID, BT_IGNITIONPRO_ID) },
{ } /* Terminating entry */
@@ -218,8 +218,8 @@ static void omninet_read_bulk_callback(struct urb *urb)
if (debug && header->oh_xxx != 0x30) {
if (urb->actual_length) {
- printk(KERN_DEBUG __FILE__
- ": omninet_read %d: ", header->oh_len);
+ printk(KERN_DEBUG "%s: omninet_read %d: ",
+ __FILE__, header->oh_len);
for (i = 0; i < (header->oh_len +
OMNINET_HEADERLEN); i++)
printk("%.2x ", data[i]);
@@ -332,7 +332,7 @@ static void omninet_write_bulk_callback(struct urb *urb)
struct usb_serial_port *port = urb->context;
int status = urb->status;
- dbg("%s - port %0x\n", __func__, port->number);
+ dbg("%s - port %0x", __func__, port->number);
port->write_urb_busy = 0;
if (status) {
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 4cdb975caa8..f37476e2268 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -22,7 +22,7 @@
static int debug;
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x065a, 0x0009) },
{ },
};
@@ -55,7 +55,6 @@ static void opticon_bulk_callback(struct urb *urb)
int status = urb->status;
struct tty_struct *tty;
int result;
- int available_room = 0;
int data_length;
dbg("%s - port %d", __func__, port->number);
@@ -96,13 +95,9 @@ static void opticon_bulk_callback(struct urb *urb)
/* real data, send it to the tty layer */
tty = tty_port_tty_get(&port->port);
if (tty) {
- available_room = tty_buffer_request_room(tty,
- data_length);
- if (available_room) {
- tty_insert_flip_string(tty, data,
- available_room);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, data,
+ data_length);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
} else {
@@ -217,7 +212,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
priv->outstanding_urbs++;
@@ -288,7 +283,7 @@ static int opticon_write_room(struct tty_struct *tty)
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 6e94a6711f0..847b805d63a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -336,15 +336,42 @@ static int option_resume(struct usb_serial *serial);
#define AIRPLUS_VENDOR_ID 0x1011
#define AIRPLUS_PRODUCT_MCD650 0x3198
+/* Longcheer/Longsung vendor ID; makes whitelabel devices that
+ * many other vendors like 4G Systems, Alcatel, ChinaBird,
+ * Mobidata, etc sell under their own brand names.
+ */
+#define LONGCHEER_VENDOR_ID 0x1c9e
+
/* 4G Systems products */
-#define FOUR_G_SYSTEMS_VENDOR_ID 0x1c9e
+/* This is the 4G XS Stick W14 a.k.a. Mobilcom Debitel Surf-Stick *
+ * It seems to contain a Qualcomm QSC6240/6290 chipset */
#define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
/* Haier products */
#define HAIER_VENDOR_ID 0x201e
#define HAIER_PRODUCT_CE100 0x2009
-static struct usb_device_id option_ids[] = {
+/* some device interfaces need special handling for a number of reasons */
+enum option_blacklist_reason {
+ OPTION_BLACKLIST_NONE = 0,
+ OPTION_BLACKLIST_SENDSETUP = 1,
+ OPTION_BLACKLIST_RESERVED_IF = 2
+};
+
+struct option_blacklist_info {
+ const u32 infolen; /* number of interface numbers on blacklist */
+ const u8 *ifaceinfo; /* pointer to the array holding the numbers */
+ enum option_blacklist_reason reason;
+};
+
+static const u8 four_g_w14_no_sendsetup[] = { 0, 1 };
+static const struct option_blacklist_info four_g_w14_blacklist = {
+ .infolen = ARRAY_SIZE(four_g_w14_no_sendsetup),
+ .ifaceinfo = four_g_w14_no_sendsetup,
+ .reason = OPTION_BLACKLIST_SENDSETUP
+};
+
+static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA_LIGHT) },
@@ -644,7 +671,9 @@ static struct usb_device_id option_ids[] = {
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
{ USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) },
- { USB_DEVICE(FOUR_G_SYSTEMS_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14) },
+ { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
+ .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
+ },
{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
{ } /* Terminating entry */
};
@@ -709,6 +738,7 @@ struct option_intf_private {
spinlock_t susp_lock;
unsigned int suspended:1;
int in_flight;
+ struct option_blacklist_info *blacklist_info;
};
struct option_port_private {
@@ -778,9 +808,27 @@ static int option_probe(struct usb_serial *serial,
if (!data)
return -ENOMEM;
spin_lock_init(&data->susp_lock);
+ data->blacklist_info = (struct option_blacklist_info*) id->driver_info;
return 0;
}
+static enum option_blacklist_reason is_blacklisted(const u8 ifnum,
+ const struct option_blacklist_info *blacklist)
+{
+ const u8 *info;
+ int i;
+
+ if (blacklist) {
+ info = blacklist->ifaceinfo;
+
+ for (i = 0; i < blacklist->infolen; i++) {
+ if (info[i] == ifnum)
+ return blacklist->reason;
+ }
+ }
+ return OPTION_BLACKLIST_NONE;
+}
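As an illustration of the other blacklist reason, a device whose interface 3 must be kept away from the serial core could be described with the same structures; the declaration below is hypothetical and not part of the patch:

static const u8 example_reserved_ifaces[] = { 3 };
static const struct option_blacklist_info example_reserved_blacklist = {
	.infolen = ARRAY_SIZE(example_reserved_ifaces),
	.ifaceinfo = example_reserved_ifaces,
	.reason = OPTION_BLACKLIST_RESERVED_IF
};
/* wired up from a usb_device_id entry via
 *	.driver_info = (kernel_ulong_t)&example_reserved_blacklist
 * and queried with is_blacklisted(ifnum, intfdata->blacklist_info). */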
+
static void option_set_termios(struct tty_struct *tty,
struct usb_serial_port *port, struct ktermios *old_termios)
{
@@ -921,7 +969,6 @@ static void option_indat_callback(struct urb *urb)
} else {
tty = tty_port_tty_get(&port->port);
if (urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length);
tty_insert_flip_string(tty, data, urb->actual_length);
tty_flip_buffer_push(tty);
} else
@@ -929,9 +976,9 @@ static void option_indat_callback(struct urb *urb)
tty_kref_put(tty);
/* Resubmit urb so we continue receiving */
- if (port->port.count && status != -ESHUTDOWN) {
+ if (status != -ESHUTDOWN) {
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err && err != -EPERM)
printk(KERN_ERR "%s: resubmit read urb failed. "
"(%d)", __func__, err);
else
@@ -985,7 +1032,7 @@ static void option_instat_callback(struct urb *urb)
(struct usb_ctrlrequest *)urb->transfer_buffer;
if (!req_pkt) {
- dbg("%s: NULL req_pkt\n", __func__);
+ dbg("%s: NULL req_pkt", __func__);
return;
}
if ((req_pkt->bRequestType == 0xA1) &&
@@ -1211,11 +1258,19 @@ static void option_setup_urbs(struct usb_serial *serial)
static int option_send_setup(struct usb_serial_port *port)
{
struct usb_serial *serial = port->serial;
+ struct option_intf_private *intfdata =
+ (struct option_intf_private *) serial->private;
struct option_port_private *portdata;
int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
int val = 0;
dbg("%s", __func__);
+ if (is_blacklisted(ifNum, intfdata->blacklist_info) ==
+ OPTION_BLACKLIST_SENDSETUP) {
+ dbg("No send_setup on blacklisted interface #%d\n", ifNum);
+ return -EIO;
+ }
+
portdata = usb_get_serial_port_data(port);
if (portdata->dtr_state)
@@ -1401,7 +1456,7 @@ static int option_resume(struct usb_serial *serial)
for (i = 0; i < serial->num_ports; i++) {
port = serial->port[i];
if (!port->interrupt_in_urb) {
- dbg("%s: No interrupt URB for port %d\n", __func__, i);
+ dbg("%s: No interrupt URB for port %d", __func__, i);
continue;
}
err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
diff --git a/drivers/usb/serial/oti6858.c b/drivers/usb/serial/oti6858.c
index c644e26394b..deeacdea05d 100644
--- a/drivers/usb/serial/oti6858.c
+++ b/drivers/usb/serial/oti6858.c
@@ -58,7 +58,7 @@
#define OTI6858_AUTHOR "Tomasz Michal Lukaszewski <FIXME@FIXME>"
#define OTI6858_VERSION "0.1"
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(OTI6858_VENDOR_ID, OTI6858_PRODUCT_ID) },
{ }
};
@@ -302,7 +302,7 @@ void send_data(struct work_struct *work)
struct usb_serial_port *port = priv->port;
int count = 0, result;
unsigned long flags;
- unsigned char allow;
+ u8 *allow;
dbg("%s(port = %d)", __func__, port->number);
@@ -321,13 +321,20 @@ void send_data(struct work_struct *work)
count = port->bulk_out_size;
if (count != 0) {
+ allow = kmalloc(1, GFP_KERNEL);
+ if (!allow) {
+ dev_err(&port->dev, "%s(): kmalloc failed\n",
+ __func__);
+ return;
+ }
result = usb_control_msg(port->serial->dev,
usb_rcvctrlpipe(port->serial->dev, 0),
OTI6858_REQ_T_CHECK_TXBUFF,
OTI6858_REQ_CHECK_TXBUFF,
- count, 0, &allow, 1, 100);
- if (result != 1 || allow != 0)
+ count, 0, allow, 1, 100);
+ if (result != 1 || *allow != 0)
count = 0;
+ kfree(allow);
}
if (count == 0) {
@@ -578,9 +585,6 @@ static int oti6858_open(struct tty_struct *tty, struct usb_serial_port *port)
usb_clear_halt(serial->dev, port->write_urb->pipe);
usb_clear_halt(serial->dev, port->read_urb->pipe);
- if (port->port.count != 1)
- return 0;
-
buf = kmalloc(OTI6858_CTRL_PKT_SIZE, GFP_KERNEL);
if (buf == NULL) {
dev_err(&port->dev, "%s(): out of memory!\n", __func__);
@@ -927,10 +931,6 @@ static void oti6858_read_bulk_callback(struct urb *urb)
spin_unlock_irqrestore(&priv->lock, flags);
if (status != 0) {
- if (!port->port.count) {
- dbg("%s(): port is closed, exiting", __func__);
- return;
- }
/*
if (status == -EPROTO) {
* PL2303 mysteriously fails with -EPROTO reschedule
@@ -954,14 +954,12 @@ static void oti6858_read_bulk_callback(struct urb *urb)
}
tty_kref_put(tty);
- /* schedule the interrupt urb if we are still open */
- if (port->port.count != 0) {
- port->interrupt_in_urb->dev = port->serial->dev;
- result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
- if (result != 0) {
- dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
- " error %d\n", __func__, result);
- }
+ /* schedule the interrupt urb */
+ port->interrupt_in_urb->dev = port->serial->dev;
+ result = usb_submit_urb(port->interrupt_in_urb, GFP_ATOMIC);
+ if (result != 0 && result != -EPERM) {
+ dev_err(&port->dev, "%s(): usb_submit_urb() failed,"
+ " error %d\n", __func__, result);
}
}
diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
index 9ec1a49e236..73d5f346d3e 100644
--- a/drivers/usb/serial/pl2303.c
+++ b/drivers/usb/serial/pl2303.c
@@ -50,7 +50,7 @@ struct pl2303_buf {
char *buf_put;
};
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
{ USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
@@ -451,7 +451,6 @@ static void pl2303_send(struct usb_serial_port *port)
port->write_urb->transfer_buffer);
port->write_urb->transfer_buffer_length = count;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result) {
dev_err(&port->dev, "%s - failed submitting write urb,"
@@ -769,7 +768,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
pl2303_set_termios(tty, port, &tmp_termios);
dbg("%s - submitting read urb", __func__);
- port->read_urb->dev = serial->dev;
result = usb_submit_urb(port->read_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting read urb,"
@@ -779,7 +777,6 @@ static int pl2303_open(struct tty_struct *tty, struct usb_serial_port *port)
}
dbg("%s - submitting interrupt urb", __func__);
- port->interrupt_in_urb->dev = serial->dev;
result = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
if (result) {
dev_err(&port->dev, "%s - failed submitting interrupt urb,"
@@ -895,10 +892,23 @@ static int wait_modem_info(struct usb_serial_port *port, unsigned int arg)
static int pl2303_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
+ struct serial_struct ser;
struct usb_serial_port *port = tty->driver_data;
dbg("%s (%d) cmd = 0x%04x", __func__, port->number, cmd);
switch (cmd) {
+ case TIOCGSERIAL:
+ memset(&ser, 0, sizeof ser);
+ ser.type = PORT_16654;
+ ser.line = port->serial->minor;
+ ser.port = port->number;
+ ser.baud_base = 460800;
+
+ if (copy_to_user((void __user *)arg, &ser, sizeof ser))
+ return -EFAULT;
+
+ return 0;
+
case TIOCMIWAIT:
dbg("%s (%d) TIOCMIWAIT", __func__, port->number);
return wait_modem_info(port, arg);
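With the TIOCGSERIAL case added above, userspace can read back the synthetic UART parameters the driver reports. A small hedged example (device node and error handling are illustrative only):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/serial.h>

int main(void)
{
	struct serial_struct ser;
	int fd = open("/dev/ttyUSB0", O_RDONLY | O_NOCTTY);

	if (fd < 0 || ioctl(fd, TIOCGSERIAL, &ser) < 0) {
		perror("TIOCGSERIAL");
		return 1;
	}
	printf("type=%d line=%d baud_base=%d\n",
	       ser.type, ser.line, ser.baud_base);
	close(fd);
	return 0;
}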
@@ -1042,7 +1052,6 @@ static void pl2303_push_data(struct tty_struct *tty,
tty_flag = TTY_FRAME;
dbg("%s - tty_flag = %d", __func__, tty_flag);
- tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (line_status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
@@ -1072,16 +1081,11 @@ static void pl2303_read_bulk_callback(struct urb *urb)
if (status) {
dbg("%s - urb status = %d", __func__, status);
- if (!port->port.count) {
- dbg("%s - port is closed, exiting.", __func__);
- return;
- }
if (status == -EPROTO) {
/* PL2303 mysteriously fails with -EPROTO reschedule
* the read */
dbg("%s - caught -EPROTO, resubmitting the urb",
__func__);
- urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev, "%s - failed"
@@ -1108,15 +1112,10 @@ static void pl2303_read_bulk_callback(struct urb *urb)
}
tty_kref_put(tty);
/* Schedule the next read _if_ we are still open */
- if (port->port.count) {
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb, GFP_ATOMIC);
- if (result)
- dev_err(&urb->dev->dev, "%s - failed resubmitting"
- " read urb, error %d\n", __func__, result);
- }
-
- return;
+ result = usb_submit_urb(urb, GFP_ATOMIC);
+ if (result && result != -EPERM)
+ dev_err(&urb->dev->dev, "%s - failed resubmitting"
+ " read urb, error %d\n", __func__, result);
}
static void pl2303_write_bulk_callback(struct urb *urb)
@@ -1146,7 +1145,6 @@ static void pl2303_write_bulk_callback(struct urb *urb)
dbg("%s - nonzero write bulk status received: %d", __func__,
status);
port->write_urb->transfer_buffer_length = 1;
- port->write_urb->dev = port->serial->dev;
result = usb_submit_urb(port->write_urb, GFP_ATOMIC);
if (result)
dev_err(&urb->dev->dev, "%s - failed resubmitting write"
diff --git a/drivers/usb/serial/qcaux.c b/drivers/usb/serial/qcaux.c
new file mode 100644
index 00000000000..0b936206171
--- /dev/null
+++ b/drivers/usb/serial/qcaux.c
@@ -0,0 +1,96 @@
+/*
+ * Qualcomm USB Auxiliary Serial Port driver
+ *
+ * Copyright (C) 2008 Greg Kroah-Hartman <greg@kroah.com>
+ * Copyright (C) 2010 Dan Williams <dcbw@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Devices listed here usually provide a CDC ACM port on which normal modem
+ * AT commands and PPP can be used. But when that port is in use by PPP it
+ * cannot be used simultaneously for status or signal strength. Instead, the
+ * ports here can be queried for that information using the Qualcomm DM
+ * protocol.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+/* NOTE: for now, only use this driver for devices that provide a CDC-ACM port
+ * for normal AT commands, but also provide secondary USB interfaces for the
+ * QCDM-capable ports. Devices that do not provide a CDC-ACM port should
+ * probably be driven by option.ko.
+ */
+
+/* UTStarcom/Pantech/Curitel devices */
+#define UTSTARCOM_VENDOR_ID 0x106c
+#define UTSTARCOM_PRODUCT_PC5740 0x3701
+#define UTSTARCOM_PRODUCT_PC5750 0x3702 /* aka Pantech PX-500 */
+#define UTSTARCOM_PRODUCT_UM150 0x3711
+#define UTSTARCOM_PRODUCT_UM175_V1 0x3712
+#define UTSTARCOM_PRODUCT_UM175_V2 0x3714
+#define UTSTARCOM_PRODUCT_UM175_ALLTEL 0x3715
+
+/* CMOTECH devices */
+#define CMOTECH_VENDOR_ID 0x16d8
+#define CMOTECH_PRODUCT_CDU550 0x5553
+#define CMOTECH_PRODUCT_CDX650 0x6512
+
+static struct usb_device_id id_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5740, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_PC5750, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM150, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V1, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_V2, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, UTSTARCOM_PRODUCT_UM175_ALLTEL, 0xff, 0x00, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDU550, 0xff, 0xff, 0x00) },
+ { USB_DEVICE_AND_INTERFACE_INFO(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CDX650, 0xff, 0xff, 0x00) },
+ { },
+};
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver qcaux_driver = {
+ .name = "qcaux",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static struct usb_serial_driver qcaux_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "qcaux",
+ },
+ .id_table = id_table,
+ .num_ports = 1,
+};
+
+static int __init qcaux_init(void)
+{
+ int retval;
+
+ retval = usb_serial_register(&qcaux_device);
+ if (retval)
+ return retval;
+ retval = usb_register(&qcaux_driver);
+ if (retval)
+ usb_serial_deregister(&qcaux_device);
+ return retval;
+}
+
+static void __exit qcaux_exit(void)
+{
+ usb_deregister(&qcaux_driver);
+ usb_serial_deregister(&qcaux_device);
+}
+
+module_init(qcaux_init);
+module_exit(qcaux_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 7528b8d57f1..310ff6ec656 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -21,7 +21,7 @@
static int debug;
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{USB_DEVICE(0x05c6, 0x9211)}, /* Acer Gobi QDL device */
{USB_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
{USB_DEVICE(0x03f0, 0x1f1d)}, /* HP un2400 Gobi Modem Device */
diff --git a/drivers/usb/serial/siemens_mpi.c b/drivers/usb/serial/siemens_mpi.c
index 951ea0c6ba7..cb8195cabfd 100644
--- a/drivers/usb/serial/siemens_mpi.c
+++ b/drivers/usb/serial/siemens_mpi.c
@@ -22,7 +22,7 @@
#define DRIVER_DESC "Driver for Siemens USB/MPI adapter"
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
/* Vendor and product id for 6ES7-972-0CB20-0XA0 */
{ USB_DEVICE(0x908, 0x0004) },
{ },
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 3eb6143bb64..34e6f894cba 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -226,7 +226,7 @@ static const struct sierra_iface_info direct_ip_interface_blacklist = {
.ifaceinfo = direct_ip_non_serial_ifaces,
};
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */
{ USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */
{ USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */
@@ -304,16 +304,6 @@ static struct usb_device_id id_table [] = {
};
MODULE_DEVICE_TABLE(usb, id_table);
-static struct usb_driver sierra_driver = {
- .name = "sierra",
- .probe = usb_serial_probe,
- .disconnect = usb_serial_disconnect,
- .suspend = usb_serial_suspend,
- .resume = usb_serial_resume,
- .id_table = id_table,
- .no_dynamic_id = 1,
- .supports_autosuspend = 1,
-};
struct sierra_port_private {
spinlock_t lock; /* lock the structure */
@@ -477,7 +467,7 @@ static void sierra_outdat_callback(struct urb *urb)
static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count)
{
- struct sierra_port_private *portdata = usb_get_serial_port_data(port);
+ struct sierra_port_private *portdata;
struct sierra_intf_private *intfdata;
struct usb_serial *serial = port->serial;
unsigned long flags;
@@ -604,14 +594,15 @@ static void sierra_indat_callback(struct urb *urb)
} else {
if (urb->actual_length) {
tty = tty_port_tty_get(&port->port);
-
- tty_buffer_request_room(tty, urb->actual_length);
- tty_insert_flip_string(tty, data, urb->actual_length);
- tty_flip_buffer_push(tty);
-
- tty_kref_put(tty);
- usb_serial_debug_data(debug, &port->dev, __func__,
- urb->actual_length, data);
+ if (tty) {
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
+
+ tty_kref_put(tty);
+ usb_serial_debug_data(debug, &port->dev,
+ __func__, urb->actual_length, data);
+ }
} else {
dev_dbg(&port->dev, "%s: empty read urb"
" received\n", __func__);
@@ -619,10 +610,10 @@ static void sierra_indat_callback(struct urb *urb)
}
/* Resubmit urb so we continue receiving */
- if (port->port.count && status != -ESHUTDOWN && status != -EPERM) {
+ if (status != -ESHUTDOWN && status != -EPERM) {
usb_mark_last_busy(port->serial->dev);
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err && err != -EPERM)
dev_err(&port->dev, "resubmit read urb failed."
"(%d)\n", err);
}
@@ -681,11 +672,11 @@ static void sierra_instat_callback(struct urb *urb)
dev_dbg(&port->dev, "%s: error %d\n", __func__, status);
/* Resubmit urb so we continue receiving IRQ data */
- if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) {
+ if (status != -ESHUTDOWN && status != -ENOENT) {
usb_mark_last_busy(serial->dev);
urb->dev = serial->dev;
err = usb_submit_urb(urb, GFP_ATOMIC);
- if (err)
+ if (err && err != -EPERM)
dev_err(&port->dev, "%s: resubmit intr urb "
"failed. (%d)\n", __func__, err);
}
@@ -1061,11 +1052,31 @@ static int sierra_resume(struct usb_serial *serial)
return ec ? -EIO : 0;
}
+
+static int sierra_reset_resume(struct usb_interface *intf)
+{
+ struct usb_serial *serial = usb_get_intfdata(intf);
+ dev_err(&serial->dev->dev, "%s\n", __func__);
+ return usb_serial_resume(intf);
+}
#else
#define sierra_suspend NULL
#define sierra_resume NULL
+#define sierra_reset_resume NULL
#endif
+static struct usb_driver sierra_driver = {
+ .name = "sierra",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .suspend = usb_serial_suspend,
+ .resume = usb_serial_resume,
+ .reset_resume = sierra_reset_resume,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+ .supports_autosuspend = 1,
+};
+
static struct usb_serial_driver sierra_device = {
.driver = {
.owner = THIS_MODULE,
diff --git a/drivers/usb/serial/spcp8x5.c b/drivers/usb/serial/spcp8x5.c
index 1e58220403d..5d39191e724 100644
--- a/drivers/usb/serial/spcp8x5.c
+++ b/drivers/usb/serial/spcp8x5.c
@@ -45,7 +45,7 @@ static int debug;
#define SPCP8x5_835_VID 0x04fc
#define SPCP8x5_835_PID 0x0231
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(SPCP8x5_PHILIPS_VID , SPCP8x5_PHILIPS_PID)},
{ USB_DEVICE(SPCP8x5_INTERMATIC_VID, SPCP8x5_INTERMATIC_PID)},
{ USB_DEVICE(SPCP8x5_835_VID, SPCP8x5_835_PID)},
@@ -609,7 +609,7 @@ static void spcp8x5_set_termios(struct tty_struct *tty,
if (i < 0)
dev_err(&port->dev, "Set UART format %#x failed (error = %d)\n",
uartdata, i);
- dbg("0x21:0x40:0:0 %d\n", i);
+ dbg("0x21:0x40:0:0 %d", i);
if (cflag & CRTSCTS) {
/* enable hardware flow control */
@@ -677,7 +677,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
struct tty_struct *tty;
unsigned char *data = urb->transfer_buffer;
unsigned long flags;
- int i;
int result = urb->status;
u8 status;
char tty_flag;
@@ -687,8 +686,6 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
/* check the urb status */
if (result) {
- if (!port->port.count)
- return;
if (result == -EPROTO) {
/* spcp8x5 mysteriously fails with -EPROTO */
/* reschedule the read */
@@ -726,26 +723,20 @@ static void spcp8x5_read_bulk_callback(struct urb *urb)
tty = tty_port_tty_get(&port->port);
if (tty && urb->actual_length) {
- tty_buffer_request_room(tty, urb->actual_length + 1);
/* overrun is special, not associated with a char */
if (status & UART_OVERRUN_ERROR)
tty_insert_flip_char(tty, 0, TTY_OVERRUN);
- for (i = 0; i < urb->actual_length; ++i)
- tty_insert_flip_char(tty, data[i], tty_flag);
+ tty_insert_flip_string_fixed_flag(tty, data,
+ urb->actual_length, tty_flag);
tty_flip_buffer_push(tty);
}
tty_kref_put(tty);
- /* Schedule the next read _if_ we are still open */
- if (port->port.count) {
- urb->dev = port->serial->dev;
- result = usb_submit_urb(urb , GFP_ATOMIC);
- if (result)
- dev_dbg(&port->dev, "failed submitting read urb %d\n",
- result);
- }
-
- return;
+ /* Schedule the next read */
+ urb->dev = port->serial->dev;
+ result = usb_submit_urb(urb , GFP_ATOMIC);
+ if (result)
+ dev_dbg(&port->dev, "failed submitting read urb %d\n", result);
}
/* get data from ring buffer and then write to usb bus */
diff --git a/drivers/usb/serial/symbolserial.c b/drivers/usb/serial/symbolserial.c
index b282c0f2d8e..72398888858 100644
--- a/drivers/usb/serial/symbolserial.c
+++ b/drivers/usb/serial/symbolserial.c
@@ -21,7 +21,7 @@
static int debug;
-static struct usb_device_id id_table[] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x05e0, 0x0600) },
{ },
};
@@ -51,7 +51,6 @@ static void symbol_int_callback(struct urb *urb)
int status = urb->status;
struct tty_struct *tty;
int result;
- int available_room = 0;
int data_length;
dbg("%s - port %d", __func__, port->number);
@@ -89,13 +88,8 @@ static void symbol_int_callback(struct urb *urb)
*/
tty = tty_port_tty_get(&port->port);
if (tty) {
- available_room = tty_buffer_request_room(tty,
- data_length);
- if (available_room) {
- tty_insert_flip_string(tty, &data[1],
- available_room);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, &data[1], data_length);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
} else {
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index 1e9dc882169..0afe5c71c17 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -1271,14 +1271,13 @@ static void ti_recv(struct device *dev, struct tty_struct *tty,
int cnt;
do {
- cnt = tty_buffer_request_room(tty, length);
+ cnt = tty_insert_flip_string(tty, data, length);
if (cnt < length) {
dev_err(dev, "%s - dropping data, %d bytes lost\n",
__func__, length - cnt);
if (cnt == 0)
break;
}
- tty_insert_flip_string(tty, data, cnt);
tty_flip_buffer_push(tty);
data += cnt;
length -= cnt;
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 33c85f7084f..3873660d821 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -358,10 +358,6 @@ static int serial_write(struct tty_struct *tty, const unsigned char *buf,
dbg("%s - port %d, %d byte(s)", __func__, port->number, count);
- /* count is managed under the mutex lock for the tty so cannot
- drop to zero until after the last close completes */
- WARN_ON(!port->port.count);
-
/* pass on to the driver specific version of this function */
retval = port->serial->type->write(tty, port, buf, count);
@@ -373,7 +369,6 @@ static int serial_write_room(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function */
return port->serial->type->write_room(tty);
}
@@ -381,7 +376,7 @@ static int serial_write_room(struct tty_struct *tty)
static int serial_chars_in_buffer(struct tty_struct *tty)
{
struct usb_serial_port *port = tty->driver_data;
- dbg("%s = port %d", __func__, port->number);
+ dbg("%s - port %d", __func__, port->number);
/* if the device was unplugged then any remaining characters
fell out of the connector ;) */
@@ -396,7 +391,6 @@ static void serial_throttle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function */
if (port->serial->type->throttle)
port->serial->type->throttle(tty);
@@ -407,7 +401,6 @@ static void serial_unthrottle(struct tty_struct *tty)
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function */
if (port->serial->type->unthrottle)
port->serial->type->unthrottle(tty);
@@ -421,8 +414,6 @@ static int serial_ioctl(struct tty_struct *tty, struct file *file,
dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd);
- WARN_ON(!port->port.count);
-
/* pass on to the driver specific version of this function
if it is available */
if (port->serial->type->ioctl) {
@@ -437,7 +428,6 @@ static void serial_set_termios(struct tty_struct *tty, struct ktermios *old)
struct usb_serial_port *port = tty->driver_data;
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function
if it is available */
if (port->serial->type->set_termios)
@@ -452,7 +442,6 @@ static int serial_break(struct tty_struct *tty, int break_state)
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
/* pass on to the driver specific version of this function
if it is available */
if (port->serial->type->break_ctl)
@@ -513,7 +502,6 @@ static int serial_tiocmget(struct tty_struct *tty, struct file *file)
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
if (port->serial->type->tiocmget)
return port->serial->type->tiocmget(tty, file);
return -EINVAL;
@@ -526,7 +514,6 @@ static int serial_tiocmset(struct tty_struct *tty, struct file *file,
dbg("%s - port %d", __func__, port->number);
- WARN_ON(!port->port.count);
if (port->serial->type->tiocmset)
return port->serial->type->tiocmset(tty, file, set, clear);
return -EINVAL;
diff --git a/drivers/usb/serial/usb_debug.c b/drivers/usb/serial/usb_debug.c
index 7b5bfc4edd3..252cc2d993b 100644
--- a/drivers/usb/serial/usb_debug.c
+++ b/drivers/usb/serial/usb_debug.c
@@ -29,7 +29,7 @@ static char USB_DEBUG_BRK[USB_DEBUG_BRK_SIZE] = {
0xff,
};
-static struct usb_device_id id_table [] = {
+static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x0525, 0x127a) },
{ },
};
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index ad1f9232292..094942707c7 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -368,7 +368,7 @@ static int visor_write(struct tty_struct *tty, struct usb_serial_port *port,
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
priv->outstanding_urbs++;
@@ -446,7 +446,7 @@ static int visor_write_room(struct tty_struct *tty)
spin_lock_irqsave(&priv->lock, flags);
if (priv->outstanding_urbs > URB_UPPER_LIMIT * 2 / 3) {
spin_unlock_irqrestore(&priv->lock, flags);
- dbg("%s - write limit hit\n", __func__);
+ dbg("%s - write limit hit", __func__);
return 0;
}
spin_unlock_irqrestore(&priv->lock, flags);
@@ -503,13 +503,9 @@ static void visor_read_bulk_callback(struct urb *urb)
if (urb->actual_length) {
tty = tty_port_tty_get(&port->port);
if (tty) {
- available_room = tty_buffer_request_room(tty,
- urb->actual_length);
- if (available_room) {
- tty_insert_flip_string(tty, data,
- available_room);
- tty_flip_buffer_push(tty);
- }
+ tty_insert_flip_string(tty, data,
+ urb->actual_length);
+ tty_flip_buffer_push(tty);
tty_kref_put(tty);
}
spin_lock(&priv->lock);
@@ -807,10 +803,14 @@ static int clie_3_5_startup(struct usb_serial *serial)
{
struct device *dev = &serial->dev->dev;
int result;
- u8 data;
+ u8 *data;
dbg("%s", __func__);
+ data = kmalloc(1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
/*
* Note that PEG-300 series devices expect the following two calls.
*/
@@ -818,36 +818,42 @@ static int clie_3_5_startup(struct usb_serial *serial)
/* get the config number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_CONFIGURATION, USB_DIR_IN,
- 0, 0, &data, 1, 3000);
+ 0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get config number failed: %d\n",
__func__, result);
- return result;
+ goto out;
}
if (result != 1) {
dev_err(dev, "%s: get config number bad return length: %d\n",
__func__, result);
- return -EIO;
+ result = -EIO;
+ goto out;
}
/* get the interface number */
result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
USB_REQ_GET_INTERFACE,
USB_DIR_IN | USB_RECIP_INTERFACE,
- 0, 0, &data, 1, 3000);
+ 0, 0, data, 1, 3000);
if (result < 0) {
dev_err(dev, "%s: get interface number failed: %d\n",
__func__, result);
- return result;
+ goto out;
}
if (result != 1) {
dev_err(dev,
"%s: get interface number bad return length: %d\n",
__func__, result);
- return -EIO;
+ result = -EIO;
+ goto out;
}
- return generic_startup(serial);
+ result = generic_startup(serial);
+out:
+ kfree(data);
+
+ return result;
}
static int treo_attach(struct usb_serial *serial)
diff --git a/drivers/usb/serial/vivopay-serial.c b/drivers/usb/serial/vivopay-serial.c
new file mode 100644
index 00000000000..f719d00972f
--- /dev/null
+++ b/drivers/usb/serial/vivopay-serial.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2001-2005 Greg Kroah-Hartman (greg@kroah.com)
+ * Copyright (C) 2009 Outpost Embedded, LLC
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/module.h>
+#include <linux/usb.h>
+#include <linux/usb/serial.h>
+
+
+#define DRIVER_VERSION "v1.0"
+#define DRIVER_DESC "ViVOpay USB Serial Driver"
+
+#define VIVOPAY_VENDOR_ID 0x1d5f
+
+
+static struct usb_device_id id_table [] = {
+ /* ViVOpay 8800 */
+ { USB_DEVICE(VIVOPAY_VENDOR_ID, 0x1004) },
+ { },
+};
+
+MODULE_DEVICE_TABLE(usb, id_table);
+
+static struct usb_driver vivopay_serial_driver = {
+ .name = "vivopay-serial",
+ .probe = usb_serial_probe,
+ .disconnect = usb_serial_disconnect,
+ .id_table = id_table,
+ .no_dynamic_id = 1,
+};
+
+static struct usb_serial_driver vivopay_serial_device = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = "vivopay-serial",
+ },
+ .id_table = id_table,
+ .usb_driver = &vivopay_serial_driver,
+ .num_ports = 1,
+};
+
+static int __init vivopay_serial_init(void)
+{
+ int retval;
+ retval = usb_serial_register(&vivopay_serial_device);
+ if (retval)
+ goto failed_usb_serial_register;
+ retval = usb_register(&vivopay_serial_driver);
+ if (retval)
+ goto failed_usb_register;
+ printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
+ DRIVER_DESC "\n");
+ return 0;
+failed_usb_register:
+ usb_serial_deregister(&vivopay_serial_device);
+failed_usb_serial_register:
+ return retval;
+}
+
+static void __exit vivopay_serial_exit(void)
+{
+ usb_deregister(&vivopay_serial_driver);
+ usb_serial_deregister(&vivopay_serial_device);
+}
+
+module_init(vivopay_serial_init);
+module_exit(vivopay_serial_exit);
+
+MODULE_AUTHOR("Forest Bond <forest.bond@outpostembedded.com>");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c
index 1093d2eb046..12ed8209ca7 100644
--- a/drivers/usb/serial/whiteheat.c
+++ b/drivers/usb/serial/whiteheat.c
@@ -111,17 +111,17 @@ static int debug;
separate ID tables, and then a third table that combines them
just for the purpose of exporting the autoloading information.
*/
-static struct usb_device_id id_table_std [] = {
+static const struct usb_device_id id_table_std[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_prerenumeration [] = {
+static const struct usb_device_id id_table_prerenumeration[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
{ } /* Terminating entry */
};
-static struct usb_device_id id_table_combined [] = {
+static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_WHITE_HEAT_ID) },
{ USB_DEVICE(CONNECT_TECH_VENDOR_ID, CONNECT_TECH_FAKE_WHITE_HEAT_ID) },
{ } /* Terminating entry */
@@ -1492,21 +1492,9 @@ static void rx_data_softint(struct work_struct *work)
wrap = list_entry(tmp, struct whiteheat_urb_wrap, list);
urb = wrap->urb;
- if (tty && urb->actual_length) {
- int len = tty_buffer_request_room(tty,
- urb->actual_length);
- /* This stuff can go away now I suspect */
- if (unlikely(len < urb->actual_length)) {
- spin_lock_irqsave(&info->lock, flags);
- list_add(tmp, &info->rx_urb_q);
- spin_unlock_irqrestore(&info->lock, flags);
- tty_flip_buffer_push(tty);
- schedule_work(&info->rx_work);
- goto out;
- }
- tty_insert_flip_string(tty, urb->transfer_buffer, len);
- sent += len;
- }
+ if (tty && urb->actual_length)
+ sent += tty_insert_flip_string(tty,
+ urb->transfer_buffer, urb->actual_length);
urb->dev = port->serial->dev;
result = usb_submit_urb(urb, GFP_ATOMIC);
diff --git a/drivers/usb/storage/onetouch.c b/drivers/usb/storage/onetouch.c
index 80e65f29921..198bb3ed95b 100644
--- a/drivers/usb/storage/onetouch.c
+++ b/drivers/usb/storage/onetouch.c
@@ -202,7 +202,7 @@ static int onetouch_connect_input(struct us_data *ss)
goto fail1;
onetouch->data = usb_buffer_alloc(udev, ONETOUCH_PKT_LEN,
- GFP_ATOMIC, &onetouch->data_dma);
+ GFP_KERNEL, &onetouch->data_dma);
if (!onetouch->data)
goto fail1;
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index aadc16b5eed..4cc035562cc 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -133,7 +133,7 @@ static int slave_configure(struct scsi_device *sdev)
if (us->fflags & US_FL_MAX_SECTORS_MIN)
max_sectors = PAGE_CACHE_SIZE >> 9;
- if (queue_max_sectors(sdev->request_queue) > max_sectors)
+ if (queue_max_hw_sectors(sdev->request_queue) > max_sectors)
blk_queue_max_hw_sectors(sdev->request_queue,
max_sectors);
} else if (sdev->type == TYPE_TAPE) {
@@ -484,7 +484,7 @@ static ssize_t show_max_sectors(struct device *dev, struct device_attribute *att
{
struct scsi_device *sdev = to_scsi_device(dev);
- return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue));
+ return sprintf(buf, "%u\n", queue_max_hw_sectors(sdev->request_queue));
}
/* Input routine for the sysfs max_sectors file */
@@ -494,9 +494,9 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at
struct scsi_device *sdev = to_scsi_device(dev);
unsigned short ms;
- if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) {
+ if (sscanf(buf, "%hu", &ms) > 0) {
blk_queue_max_hw_sectors(sdev->request_queue, ms);
- return strlen(buf);
+ return count;
}
return -EINVAL;
}
@@ -539,7 +539,7 @@ struct scsi_host_template usb_stor_host_template = {
.slave_configure = slave_configure,
/* lots of sg segments can be handled */
- .sg_tablesize = SG_ALL,
+ .sg_tablesize = SCSI_MAX_SG_CHAIN_SEGMENTS,
/* limit the total size of a transfer to 120 KB */
.max_sectors = 240,
diff --git a/drivers/usb/storage/shuttle_usbat.c b/drivers/usb/storage/shuttle_usbat.c
index b62a28814eb..bd3f415893d 100644
--- a/drivers/usb/storage/shuttle_usbat.c
+++ b/drivers/usb/storage/shuttle_usbat.c
@@ -1628,10 +1628,10 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
return USB_STOR_TRANSPORT_ERROR;
}
- if ( (result = usbat_multiple_write(us,
- registers, data, 7)) != USB_STOR_TRANSPORT_GOOD) {
+ result = usbat_multiple_write(us, registers, data, 7);
+
+ if (result != USB_STOR_TRANSPORT_GOOD)
return result;
- }
/*
* Write the 12-byte command header.
@@ -1643,12 +1643,11 @@ static int usbat_hp8200e_transport(struct scsi_cmnd *srb, struct us_data *us)
* AT SPEED 4 IS UNRELIABLE!!!
*/
- if ((result = usbat_write_block(us,
- USBAT_ATA, srb->cmnd, 12,
- (srb->cmnd[0]==GPCMD_BLANK ? 75 : 10), 0) !=
- USB_STOR_TRANSPORT_GOOD)) {
+ result = usbat_write_block(us, USBAT_ATA, srb->cmnd, 12,
+ srb->cmnd[0] == GPCMD_BLANK ? 75 : 10, 0);
+
+ if (result != USB_STOR_TRANSPORT_GOOD)
return result;
- }
/* If there is response data to be read in then do it here. */
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index cc313d16d72..468038126e5 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -47,6 +47,8 @@
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/usb/quirks.h>
+
#include <scsi/scsi.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
@@ -1297,6 +1299,10 @@ int usb_stor_port_reset(struct us_data *us)
{
int result;
+ /* For these devices we must use the class-specific method */
+ if (us->pusb_dev->quirks & USB_QUIRK_RESET_MORPHS)
+ return -EPERM;
+
result = usb_lock_device_for_reset(us->pusb_dev, us->pusb_intf);
if (result < 0)
US_DEBUGP("unable to lock device for reset: %d\n", result);
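USB_QUIRK_RESET_MORPHS itself is set from the USB core's quirk table rather than from usb-storage; an entry for a hypothetical device that re-enumerates as a different product after a port reset would look roughly like the sketch below (placeholder VID/PID, mirroring the usb_quirk_list in drivers/usb/core/quirks.c and assuming <linux/usb/quirks.h>):

static const struct usb_device_id example_quirk_list[] = {
	/* hypothetical device that morphs into a different product on reset */
	{ USB_DEVICE(0x1234, 0x5678), .driver_info = USB_QUIRK_RESET_MORPHS },
	{ }	/* terminating entry */
};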
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 49575fba375..98b549b1cab 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1147,8 +1147,8 @@ UNUSUAL_DEV( 0x0af0, 0x7401, 0x0000, 0x0000,
0 ),
/* Reported by Jan Dumon <j.dumon@option.com>
- * This device (wrongly) has a vendor-specific device descriptor.
- * The entry is needed so usb-storage can bind to it's mass-storage
+ * These devices (wrongly) have a vendor-specific device descriptor.
+ * These entries are needed so usb-storage can bind to their mass-storage
* interface as an interface driver */
UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
"Option",
@@ -1156,6 +1156,90 @@ UNUSUAL_DEV( 0x0af0, 0x7501, 0x0000, 0x0000,
US_SC_DEVICE, US_PR_DEVICE, NULL,
0 ),
+UNUSUAL_DEV( 0x0af0, 0x7701, 0x0000, 0x0000,
+ "Option",
+ "GI 0451 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7706, 0x0000, 0x0000,
+ "Option",
+ "GI 0451 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7901, 0x0000, 0x0000,
+ "Option",
+ "GI 0452 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A01, 0x0000, 0x0000,
+ "Option",
+ "GI 0461 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x7A05, 0x0000, 0x0000,
+ "Option",
+ "GI 0461 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8300, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8302, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0x8304, 0x0000, 0x0000,
+ "Option",
+ "GI 033x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xc100, 0x0000, 0x0000,
+ "Option",
+ "GI 070x SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd057, 0x0000, 0x0000,
+ "Option",
+ "GI 1505 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd058, 0x0000, 0x0000,
+ "Option",
+ "GI 1509 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd157, 0x0000, 0x0000,
+ "Option",
+ "GI 1515 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd257, 0x0000, 0x0000,
+ "Option",
+ "GI 1215 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
+UNUSUAL_DEV( 0x0af0, 0xd357, 0x0000, 0x0000,
+ "Option",
+ "GI 1505 SD-Card",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ 0 ),
+
/* Reported by Ben Efros <ben@pc-doctor.com> */
UNUSUAL_DEV( 0x0bc2, 0x3010, 0x0000, 0x0000,
"Seagate",
diff --git a/drivers/usb/usb-skeleton.c b/drivers/usb/usb-skeleton.c
index b1e579c5c97..61522787f39 100644
--- a/drivers/usb/usb-skeleton.c
+++ b/drivers/usb/usb-skeleton.c
@@ -28,7 +28,7 @@
#define USB_SKEL_PRODUCT_ID 0xfff0
/* table of devices that work with this driver */
-static struct usb_device_id skel_table[] = {
+static const struct usb_device_id skel_table[] = {
{ USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/wusbcore/cbaf.c b/drivers/usb/wusbcore/cbaf.c
index 25eae405f62..51a8e0d5789 100644
--- a/drivers/usb/wusbcore/cbaf.c
+++ b/drivers/usb/wusbcore/cbaf.c
@@ -641,7 +641,7 @@ static void cbaf_disconnect(struct usb_interface *iface)
kzfree(cbaf);
}
-static struct usb_device_id cbaf_id_table[] = {
+static const struct usb_device_id cbaf_id_table[] = {
{ USB_INTERFACE_INFO(0xef, 0x03, 0x01), },
{ },
};
diff --git a/drivers/usb/wusbcore/devconnect.c b/drivers/usb/wusbcore/devconnect.c
index dced419f7ab..1c918286159 100644
--- a/drivers/usb/wusbcore/devconnect.c
+++ b/drivers/usb/wusbcore/devconnect.c
@@ -868,7 +868,7 @@ static struct usb_wireless_cap_descriptor wusb_cap_descr_default = {
* reference that we'll drop.
*
* First we need to determine if the device is a WUSB device (else we
- * ignore it). For that we use the speed setting (USB_SPEED_VARIABLE)
+ * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS)
* [FIXME: maybe we'd need something more definitive]. If so, we track
* it's usb_busd and from there, the WUSB HC.
*
diff --git a/drivers/usb/wusbcore/mmc.c b/drivers/usb/wusbcore/mmc.c
index 3b52161e6e9..2d827397e30 100644
--- a/drivers/usb/wusbcore/mmc.c
+++ b/drivers/usb/wusbcore/mmc.c
@@ -263,7 +263,7 @@ int wusbhc_chid_set(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid)
{
int result = 0;
- if (memcmp(chid, &wusb_ckhdid_zero, sizeof(chid)) == 0)
+ if (memcmp(chid, &wusb_ckhdid_zero, sizeof(*chid)) == 0)
chid = NULL;
mutex_lock(&wusbhc->mutex);
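
The mmc.c change above fixes the classic sizeof-on-a-pointer mistake: chid is a pointer, so sizeof(chid) is the pointer size (4 or 8 bytes) and only part of the CHID was being compared, while sizeof(*chid) covers the whole object. A standalone illustration (the 16-byte struct here is made up; only the sizes matter):

    #include <stdio.h>
    #include <string.h>

    struct ckhdid { unsigned char data[16]; };

    int main(void)
    {
        static const struct ckhdid zero;
        const struct ckhdid *chid = &zero;

        printf("sizeof(chid)  = %zu\n", sizeof(chid));   /* pointer size, e.g. 8 */
        printf("sizeof(*chid) = %zu\n", sizeof(*chid));  /* full object, 16 */

        /* The correct comparison uses the pointed-to size. */
        return memcmp(chid, &zero, sizeof(*chid)) == 0 ? 0 : 1;
    }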
diff --git a/drivers/vhost/Kconfig b/drivers/vhost/Kconfig
new file mode 100644
index 00000000000..e4e2fd1b510
--- /dev/null
+++ b/drivers/vhost/Kconfig
@@ -0,0 +1,11 @@
+config VHOST_NET
+ tristate "Host kernel accelerator for virtio net (EXPERIMENTAL)"
+ depends on NET && EVENTFD && (TUN || !TUN) && (MACVTAP || !MACVTAP) && EXPERIMENTAL
+ ---help---
+ This kernel module can be loaded in the host kernel to accelerate
+ guest networking with virtio_net. Not to be confused with the
+ virtio_net module itself, which needs to be loaded in the guest kernel.
+
+ To compile this driver as a module, choose M here: the module will
+ be called vhost_net.
+
diff --git a/drivers/vhost/Makefile b/drivers/vhost/Makefile
new file mode 100644
index 00000000000..72dd02050bb
--- /dev/null
+++ b/drivers/vhost/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_VHOST_NET) += vhost_net.o
+vhost_net-y := vhost.o net.o
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
new file mode 100644
index 00000000000..ad37da2b6cb
--- /dev/null
+++ b/drivers/vhost/net.c
@@ -0,0 +1,669 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * virtio-net server in host kernel.
+ */
+
+#include <linux/compat.h>
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+#include <linux/mmu_context.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/file.h>
+
+#include <linux/net.h>
+#include <linux/if_packet.h>
+#include <linux/if_arp.h>
+#include <linux/if_tun.h>
+#include <linux/if_macvlan.h>
+
+#include <net/sock.h>
+
+#include "vhost.h"
+
+/* Max number of bytes transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving others. */
+#define VHOST_NET_WEIGHT 0x80000
+
+enum {
+ VHOST_NET_VQ_RX = 0,
+ VHOST_NET_VQ_TX = 1,
+ VHOST_NET_VQ_MAX = 2,
+};
+
+enum vhost_net_poll_state {
+ VHOST_NET_POLL_DISABLED = 0,
+ VHOST_NET_POLL_STARTED = 1,
+ VHOST_NET_POLL_STOPPED = 2,
+};
+
+struct vhost_net {
+ struct vhost_dev dev;
+ struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
+ struct vhost_poll poll[VHOST_NET_VQ_MAX];
+ /* Tells us whether we are polling a socket for TX.
+ * We only do this when socket buffer fills up.
+ * Protected by tx vq lock. */
+ enum vhost_net_poll_state tx_poll_state;
+};
+
+/* Pop first len bytes from iovec. Return number of segments used. */
+static int move_iovec_hdr(struct iovec *from, struct iovec *to,
+ size_t len, int iov_count)
+{
+ int seg = 0;
+ size_t size;
+ while (len && seg < iov_count) {
+ size = min(from->iov_len, len);
+ to->iov_base = from->iov_base;
+ to->iov_len = size;
+ from->iov_len -= size;
+ from->iov_base += size;
+ len -= size;
+ ++from;
+ ++to;
+ ++seg;
+ }
+ return seg;
+}
+
+/* Caller must have TX VQ lock */
+static void tx_poll_stop(struct vhost_net *net)
+{
+ if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
+ return;
+ vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
+ net->tx_poll_state = VHOST_NET_POLL_STOPPED;
+}
+
+/* Caller must have TX VQ lock */
+static void tx_poll_start(struct vhost_net *net, struct socket *sock)
+{
+ if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
+ return;
+ vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
+ net->tx_poll_state = VHOST_NET_POLL_STARTED;
+}
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_tx(struct vhost_net *net)
+{
+ struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
+ unsigned head, out, in, s;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL,
+ .msg_controllen = 0,
+ .msg_iov = vq->iov,
+ .msg_flags = MSG_DONTWAIT,
+ };
+ size_t len, total_len = 0;
+ int err, wmem;
+ size_t hdr_size;
+ struct socket *sock = rcu_dereference(vq->private_data);
+ if (!sock)
+ return;
+
+ wmem = atomic_read(&sock->sk->sk_wmem_alloc);
+ if (wmem >= sock->sk->sk_sndbuf) {
+ mutex_lock(&vq->mutex);
+ tx_poll_start(net, sock);
+ mutex_unlock(&vq->mutex);
+ return;
+ }
+
+ use_mm(net->dev.mm);
+ mutex_lock(&vq->mutex);
+ vhost_disable_notify(vq);
+
+ if (wmem < sock->sk->sk_sndbuf * 2)
+ tx_poll_stop(net);
+ hdr_size = vq->hdr_size;
+
+ for (;;) {
+ head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
+ ARRAY_SIZE(vq->iov),
+ &out, &in,
+ NULL, NULL);
+ /* Nothing new? Wait for eventfd to tell us they refilled. */
+ if (head == vq->num) {
+ wmem = atomic_read(&sock->sk->sk_wmem_alloc);
+ if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
+ tx_poll_start(net, sock);
+ set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
+ break;
+ }
+ if (unlikely(vhost_enable_notify(vq))) {
+ vhost_disable_notify(vq);
+ continue;
+ }
+ break;
+ }
+ if (in) {
+ vq_err(vq, "Unexpected descriptor format for TX: "
+ "out %d, int %d\n", out, in);
+ break;
+ }
+ /* Skip header. TODO: support TSO. */
+ s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
+ msg.msg_iovlen = out;
+ len = iov_length(vq->iov, out);
+ /* Sanity check */
+ if (!len) {
+ vq_err(vq, "Unexpected header len for TX: "
+ "%zd expected %zd\n",
+ iov_length(vq->hdr, s), hdr_size);
+ break;
+ }
+ /* TODO: Check specific error and bomb out unless ENOBUFS? */
+ err = sock->ops->sendmsg(NULL, sock, &msg, len);
+ if (unlikely(err < 0)) {
+ vhost_discard_vq_desc(vq);
+ tx_poll_start(net, sock);
+ break;
+ }
+ if (err != len)
+ pr_err("Truncated TX packet: "
+ " len %d != %zd\n", err, len);
+ vhost_add_used_and_signal(&net->dev, vq, head, 0);
+ total_len += len;
+ if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+ vhost_poll_queue(&vq->poll);
+ break;
+ }
+ }
+
+ mutex_unlock(&vq->mutex);
+ unuse_mm(net->dev.mm);
+}
+
+/* Expects to be always run from workqueue - which acts as
+ * read-side critical section for our kind of RCU. */
+static void handle_rx(struct vhost_net *net)
+{
+ struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
+ unsigned head, out, in, log, s;
+ struct vhost_log *vq_log;
+ struct msghdr msg = {
+ .msg_name = NULL,
+ .msg_namelen = 0,
+ .msg_control = NULL, /* FIXME: get and handle RX aux data. */
+ .msg_controllen = 0,
+ .msg_iov = vq->iov,
+ .msg_flags = MSG_DONTWAIT,
+ };
+
+ struct virtio_net_hdr hdr = {
+ .flags = 0,
+ .gso_type = VIRTIO_NET_HDR_GSO_NONE
+ };
+
+ size_t len, total_len = 0;
+ int err;
+ size_t hdr_size;
+ struct socket *sock = rcu_dereference(vq->private_data);
+ if (!sock || skb_queue_empty(&sock->sk->sk_receive_queue))
+ return;
+
+ use_mm(net->dev.mm);
+ mutex_lock(&vq->mutex);
+ vhost_disable_notify(vq);
+ hdr_size = vq->hdr_size;
+
+ vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
+ vq->log : NULL;
+
+ for (;;) {
+ head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
+ ARRAY_SIZE(vq->iov),
+ &out, &in,
+ vq_log, &log);
+ /* OK, now we need to know about added descriptors. */
+ if (head == vq->num) {
+ if (unlikely(vhost_enable_notify(vq))) {
+ /* They have slipped one in as we were
+ * doing that: check again. */
+ vhost_disable_notify(vq);
+ continue;
+ }
+ /* Nothing new? Wait for eventfd to tell us
+ * they refilled. */
+ break;
+ }
+ /* We don't need to be notified again. */
+ if (out) {
+ vq_err(vq, "Unexpected descriptor format for RX: "
+ "out %d, int %d\n",
+ out, in);
+ break;
+ }
+ /* Skip header. TODO: support TSO/mergeable rx buffers. */
+ s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, in);
+ msg.msg_iovlen = in;
+ len = iov_length(vq->iov, in);
+ /* Sanity check */
+ if (!len) {
+ vq_err(vq, "Unexpected header len for RX: "
+ "%zd expected %zd\n",
+ iov_length(vq->hdr, s), hdr_size);
+ break;
+ }
+ err = sock->ops->recvmsg(NULL, sock, &msg,
+ len, MSG_DONTWAIT | MSG_TRUNC);
+ /* TODO: Check specific error and bomb out unless EAGAIN? */
+ if (err < 0) {
+ vhost_discard_vq_desc(vq);
+ break;
+ }
+ /* TODO: Should check and handle checksum. */
+ if (err > len) {
+ pr_err("Discarded truncated rx packet: "
+ " len %d > %zd\n", err, len);
+ vhost_discard_vq_desc(vq);
+ continue;
+ }
+ len = err;
+ err = memcpy_toiovec(vq->hdr, (unsigned char *)&hdr, hdr_size);
+ if (err) {
+ vq_err(vq, "Unable to write vnet_hdr at addr %p: %d\n",
+ vq->iov->iov_base, err);
+ break;
+ }
+ len += hdr_size;
+ vhost_add_used_and_signal(&net->dev, vq, head, len);
+ if (unlikely(vq_log))
+ vhost_log_write(vq, vq_log, log, len);
+ total_len += len;
+ if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+ vhost_poll_queue(&vq->poll);
+ break;
+ }
+ }
+
+ mutex_unlock(&vq->mutex);
+ unuse_mm(net->dev.mm);
+}
+
+static void handle_tx_kick(struct work_struct *work)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_net *net;
+ vq = container_of(work, struct vhost_virtqueue, poll.work);
+ net = container_of(vq->dev, struct vhost_net, dev);
+ handle_tx(net);
+}
+
+static void handle_rx_kick(struct work_struct *work)
+{
+ struct vhost_virtqueue *vq;
+ struct vhost_net *net;
+ vq = container_of(work, struct vhost_virtqueue, poll.work);
+ net = container_of(vq->dev, struct vhost_net, dev);
+ handle_rx(net);
+}
+
+static void handle_tx_net(struct work_struct *work)
+{
+ struct vhost_net *net;
+ net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_TX].work);
+ handle_tx(net);
+}
+
+static void handle_rx_net(struct work_struct *work)
+{
+ struct vhost_net *net;
+ net = container_of(work, struct vhost_net, poll[VHOST_NET_VQ_RX].work);
+ handle_rx(net);
+}
+
+static int vhost_net_open(struct inode *inode, struct file *f)
+{
+ struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
+ int r;
+ if (!n)
+ return -ENOMEM;
+ n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
+ n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
+ r = vhost_dev_init(&n->dev, n->vqs, VHOST_NET_VQ_MAX);
+ if (r < 0) {
+ kfree(n);
+ return r;
+ }
+
+ vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT);
+ vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN);
+ n->tx_poll_state = VHOST_NET_POLL_DISABLED;
+
+ f->private_data = n;
+
+ return 0;
+}
+
+static void vhost_net_disable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ if (!vq->private_data)
+ return;
+ if (vq == n->vqs + VHOST_NET_VQ_TX) {
+ tx_poll_stop(n);
+ n->tx_poll_state = VHOST_NET_POLL_DISABLED;
+ } else
+ vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
+}
+
+static void vhost_net_enable_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct socket *sock = vq->private_data;
+ if (!sock)
+ return;
+ if (vq == n->vqs + VHOST_NET_VQ_TX) {
+ n->tx_poll_state = VHOST_NET_POLL_STOPPED;
+ tx_poll_start(n, sock);
+ } else
+ vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+}
+
+static struct socket *vhost_net_stop_vq(struct vhost_net *n,
+ struct vhost_virtqueue *vq)
+{
+ struct socket *sock;
+
+ mutex_lock(&vq->mutex);
+ sock = vq->private_data;
+ vhost_net_disable_vq(n, vq);
+ rcu_assign_pointer(vq->private_data, NULL);
+ mutex_unlock(&vq->mutex);
+ return sock;
+}
+
+static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
+ struct socket **rx_sock)
+{
+ *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
+ *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
+}
+
+static void vhost_net_flush_vq(struct vhost_net *n, int index)
+{
+ vhost_poll_flush(n->poll + index);
+ vhost_poll_flush(&n->dev.vqs[index].poll);
+}
+
+static void vhost_net_flush(struct vhost_net *n)
+{
+ vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
+ vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
+}
+
+static int vhost_net_release(struct inode *inode, struct file *f)
+{
+ struct vhost_net *n = f->private_data;
+ struct socket *tx_sock;
+ struct socket *rx_sock;
+
+ vhost_net_stop(n, &tx_sock, &rx_sock);
+ vhost_net_flush(n);
+ vhost_dev_cleanup(&n->dev);
+ if (tx_sock)
+ fput(tx_sock->file);
+ if (rx_sock)
+ fput(rx_sock->file);
+ /* We do an extra flush before freeing memory,
+ * since jobs can re-queue themselves. */
+ vhost_net_flush(n);
+ kfree(n);
+ return 0;
+}
+
+static struct socket *get_raw_socket(int fd)
+{
+ struct {
+ struct sockaddr_ll sa;
+ char buf[MAX_ADDR_LEN];
+ } uaddr;
+ int uaddr_len = sizeof uaddr, r;
+ struct socket *sock = sockfd_lookup(fd, &r);
+ if (!sock)
+ return ERR_PTR(-ENOTSOCK);
+
+ /* Parameter checking */
+ if (sock->sk->sk_type != SOCK_RAW) {
+ r = -ESOCKTNOSUPPORT;
+ goto err;
+ }
+
+ r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
+ &uaddr_len, 0);
+ if (r)
+ goto err;
+
+ if (uaddr.sa.sll_family != AF_PACKET) {
+ r = -EPFNOSUPPORT;
+ goto err;
+ }
+ return sock;
+err:
+ fput(sock->file);
+ return ERR_PTR(r);
+}
+
+static struct socket *get_tap_socket(int fd)
+{
+ struct file *file = fget(fd);
+ struct socket *sock;
+ if (!file)
+ return ERR_PTR(-EBADF);
+ sock = tun_get_socket(file);
+ if (!IS_ERR(sock))
+ return sock;
+ sock = macvtap_get_socket(file);
+ if (IS_ERR(sock))
+ fput(file);
+ return sock;
+}
+
+static struct socket *get_socket(int fd)
+{
+ struct socket *sock;
+ /* special case to disable backend */
+ if (fd == -1)
+ return NULL;
+ sock = get_raw_socket(fd);
+ if (!IS_ERR(sock))
+ return sock;
+ sock = get_tap_socket(fd);
+ if (!IS_ERR(sock))
+ return sock;
+ return ERR_PTR(-ENOTSOCK);
+}
+
+static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
+{
+ struct socket *sock, *oldsock;
+ struct vhost_virtqueue *vq;
+ int r;
+
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_check_owner(&n->dev);
+ if (r)
+ goto err;
+
+ if (index >= VHOST_NET_VQ_MAX) {
+ r = -ENOBUFS;
+ goto err;
+ }
+ vq = n->vqs + index;
+ mutex_lock(&vq->mutex);
+
+ /* Verify that ring has been setup correctly. */
+ if (!vhost_vq_access_ok(vq)) {
+ r = -EFAULT;
+ goto err_vq;
+ }
+ sock = get_socket(fd);
+ if (IS_ERR(sock)) {
+ r = PTR_ERR(sock);
+ goto err_vq;
+ }
+
+ /* start polling new socket */
+ oldsock = vq->private_data;
+ if (sock == oldsock) {
+ mutex_unlock(&vq->mutex);
+ goto done;
+ }
+
+ vhost_net_disable_vq(n, vq);
+ rcu_assign_pointer(vq->private_data, sock);
+ vhost_net_enable_vq(n, vq);
+ mutex_unlock(&vq->mutex);
+done:
+ if (oldsock) {
+ vhost_net_flush_vq(n, index);
+ fput(oldsock->file);
+ }
+ mutex_unlock(&n->dev.mutex);
+ return r;
+
+err_vq:
+ mutex_unlock(&vq->mutex);
+err:
+ mutex_unlock(&n->dev.mutex);
+ return r;
+}
+
+static long vhost_net_reset_owner(struct vhost_net *n)
+{
+ struct socket *tx_sock = NULL;
+ struct socket *rx_sock = NULL;
+ long err;
+ mutex_lock(&n->dev.mutex);
+ err = vhost_dev_check_owner(&n->dev);
+ if (err)
+ goto done;
+ vhost_net_stop(n, &tx_sock, &rx_sock);
+ vhost_net_flush(n);
+ err = vhost_dev_reset_owner(&n->dev);
+done:
+ mutex_unlock(&n->dev.mutex);
+ if (tx_sock)
+ fput(tx_sock->file);
+ if (rx_sock)
+ fput(rx_sock->file);
+ return err;
+}
+
+static int vhost_net_set_features(struct vhost_net *n, u64 features)
+{
+ size_t hdr_size = features & (1 << VHOST_NET_F_VIRTIO_NET_HDR) ?
+ sizeof(struct virtio_net_hdr) : 0;
+ int i;
+ mutex_lock(&n->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&n->dev)) {
+ mutex_unlock(&n->dev.mutex);
+ return -EFAULT;
+ }
+ n->dev.acked_features = features;
+ smp_wmb();
+ for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
+ mutex_lock(&n->vqs[i].mutex);
+ n->vqs[i].hdr_size = hdr_size;
+ mutex_unlock(&n->vqs[i].mutex);
+ }
+ vhost_net_flush(n);
+ mutex_unlock(&n->dev.mutex);
+ return 0;
+}
+
+static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ struct vhost_net *n = f->private_data;
+ void __user *argp = (void __user *)arg;
+ u64 __user *featurep = argp;
+ struct vhost_vring_file backend;
+ u64 features;
+ int r;
+ switch (ioctl) {
+ case VHOST_NET_SET_BACKEND:
+ r = copy_from_user(&backend, argp, sizeof backend);
+ if (r < 0)
+ return r;
+ return vhost_net_set_backend(n, backend.index, backend.fd);
+ case VHOST_GET_FEATURES:
+ features = VHOST_FEATURES;
+ return copy_to_user(featurep, &features, sizeof features);
+ case VHOST_SET_FEATURES:
+ r = copy_from_user(&features, featurep, sizeof features);
+ if (r < 0)
+ return r;
+ if (features & ~VHOST_FEATURES)
+ return -EOPNOTSUPP;
+ return vhost_net_set_features(n, features);
+ case VHOST_RESET_OWNER:
+ return vhost_net_reset_owner(n);
+ default:
+ mutex_lock(&n->dev.mutex);
+ r = vhost_dev_ioctl(&n->dev, ioctl, arg);
+ vhost_net_flush(n);
+ mutex_unlock(&n->dev.mutex);
+ return r;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
+ unsigned long arg)
+{
+ return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
+}
+#endif
+
+static const struct file_operations vhost_net_fops = {
+ .owner = THIS_MODULE,
+ .release = vhost_net_release,
+ .unlocked_ioctl = vhost_net_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vhost_net_compat_ioctl,
+#endif
+ .open = vhost_net_open,
+};
+
+static struct miscdevice vhost_net_misc = {
+ VHOST_NET_MINOR,
+ "vhost-net",
+ &vhost_net_fops,
+};
+
+int vhost_net_init(void)
+{
+ int r = vhost_init();
+ if (r)
+ goto err_init;
+ r = misc_register(&vhost_net_misc);
+ if (r)
+ goto err_reg;
+ return 0;
+err_reg:
+ vhost_cleanup();
+err_init:
+ return r;
+
+}
+module_init(vhost_net_init);
+
+void vhost_net_exit(void)
+{
+ misc_deregister(&vhost_net_misc);
+ vhost_cleanup();
+}
+module_exit(vhost_net_exit);
+
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Michael S. Tsirkin");
+MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
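
The new character device is driven entirely through ioctls on /dev/vhost-net. A rough userspace sketch of the setup sequence, assuming the request macros and the struct vhost_vring_file layout from the new <linux/vhost.h> UAPI header (not part of this hunk); error handling and the memory-table and vring setup calls handled by vhost.c are omitted:

    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <linux/vhost.h>

    /* tap_fd: an already open tap or macvtap fd. Returns 0 or -1. */
    static int vhost_net_attach(int tap_fd)
    {
        struct vhost_vring_file backend;
        int vhost_fd = open("/dev/vhost-net", O_RDWR);

        if (vhost_fd < 0)
            return -1;

        /* Claim the device for this process; vhost_dev_set_owner() records our mm. */
        if (ioctl(vhost_fd, VHOST_SET_OWNER, NULL))
            return -1;

        /* VHOST_SET_MEM_TABLE and the VHOST_SET_VRING_* calls describing
         * guest memory and the rings would normally go here. */

        /* Point the TX queue at the tap socket. */
        backend.index = 1;      /* VHOST_NET_VQ_TX in this patch */
        backend.fd = tap_fd;
        return ioctl(vhost_fd, VHOST_NET_SET_BACKEND, &backend);
    }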
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
new file mode 100644
index 00000000000..7cd55e07879
--- /dev/null
+++ b/drivers/vhost/vhost.c
@@ -0,0 +1,1104 @@
+/* Copyright (C) 2009 Red Hat, Inc.
+ * Copyright (C) 2006 Rusty Russell IBM Corporation
+ *
+ * Author: Michael S. Tsirkin <mst@redhat.com>
+ *
+ * Inspiration, some code, and most witty comments come from
+ * Documentation/lguest/lguest.c, by Rusty Russell
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ *
+ * Generic code for virtio server in host kernel.
+ */
+
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/virtio_net.h>
+#include <linux/mm.h>
+#include <linux/miscdevice.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/rcupdate.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/highmem.h>
+
+#include <linux/net.h>
+#include <linux/if_packet.h>
+#include <linux/if_arp.h>
+
+#include <net/sock.h>
+
+#include "vhost.h"
+
+enum {
+ VHOST_MEMORY_MAX_NREGIONS = 64,
+ VHOST_MEMORY_F_LOG = 0x1,
+};
+
+static struct workqueue_struct *vhost_workqueue;
+
+static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
+ poll_table *pt)
+{
+ struct vhost_poll *poll;
+ poll = container_of(pt, struct vhost_poll, table);
+
+ poll->wqh = wqh;
+ add_wait_queue(wqh, &poll->wait);
+}
+
+static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
+ void *key)
+{
+ struct vhost_poll *poll;
+ poll = container_of(wait, struct vhost_poll, wait);
+ if (!((unsigned long)key & poll->mask))
+ return 0;
+
+ queue_work(vhost_workqueue, &poll->work);
+ return 0;
+}
+
+/* Init poll structure */
+void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
+ unsigned long mask)
+{
+ INIT_WORK(&poll->work, func);
+ init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
+ init_poll_funcptr(&poll->table, vhost_poll_func);
+ poll->mask = mask;
+}
+
+/* Start polling a file. We add ourselves to file's wait queue. The caller must
+ * keep a reference to a file until after vhost_poll_stop is called. */
+void vhost_poll_start(struct vhost_poll *poll, struct file *file)
+{
+ unsigned long mask;
+ mask = file->f_op->poll(file, &poll->table);
+ if (mask)
+ vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+}
+
+/* Stop polling a file. After this function returns, it becomes safe to drop the
+ * file reference. You must also flush afterwards. */
+void vhost_poll_stop(struct vhost_poll *poll)
+{
+ remove_wait_queue(poll->wqh, &poll->wait);
+}
+
+/* Flush any work that has been scheduled. When calling this, don't hold any
+ * locks that are also used by the callback. */
+void vhost_poll_flush(struct vhost_poll *poll)
+{
+ flush_work(&poll->work);
+}
+
+void vhost_poll_queue(struct vhost_poll *poll)
+{
+ queue_work(vhost_workqueue, &poll->work);
+}
+
+static void vhost_vq_reset(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq)
+{
+ vq->num = 1;
+ vq->desc = NULL;
+ vq->avail = NULL;
+ vq->used = NULL;
+ vq->last_avail_idx = 0;
+ vq->avail_idx = 0;
+ vq->last_used_idx = 0;
+ vq->used_flags = 0;
+ vq->log_used = false;
+ vq->log_addr = -1ull;
+ vq->hdr_size = 0;
+ vq->private_data = NULL;
+ vq->log_base = NULL;
+ vq->error_ctx = NULL;
+ vq->error = NULL;
+ vq->kick = NULL;
+ vq->call_ctx = NULL;
+ vq->call = NULL;
+ vq->log_ctx = NULL;
+}
+
+long vhost_dev_init(struct vhost_dev *dev,
+ struct vhost_virtqueue *vqs, int nvqs)
+{
+ int i;
+ dev->vqs = vqs;
+ dev->nvqs = nvqs;
+ mutex_init(&dev->mutex);
+ dev->log_ctx = NULL;
+ dev->log_file = NULL;
+ dev->memory = NULL;
+ dev->mm = NULL;
+
+ for (i = 0; i < dev->nvqs; ++i) {
+ dev->vqs[i].dev = dev;
+ mutex_init(&dev->vqs[i].mutex);
+ vhost_vq_reset(dev, dev->vqs + i);
+ if (dev->vqs[i].handle_kick)
+ vhost_poll_init(&dev->vqs[i].poll,
+ dev->vqs[i].handle_kick,
+ POLLIN);
+ }
+ return 0;
+}
+
+/* Caller should have device mutex */
+long vhost_dev_check_owner(struct vhost_dev *dev)
+{
+ /* Are you the owner? If not, I don't think you mean to do that */
+ return dev->mm == current->mm ? 0 : -EPERM;
+}
+
+/* Caller should have device mutex */
+static long vhost_dev_set_owner(struct vhost_dev *dev)
+{
+ /* Is there an owner already? */
+ if (dev->mm)
+ return -EBUSY;
+ /* No owner, become one */
+ dev->mm = get_task_mm(current);
+ return 0;
+}
+
+/* Caller should have device mutex */
+long vhost_dev_reset_owner(struct vhost_dev *dev)
+{
+ struct vhost_memory *memory;
+
+ /* Restore memory to default empty mapping. */
+ memory = kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
+ if (!memory)
+ return -ENOMEM;
+
+ vhost_dev_cleanup(dev);
+
+ memory->nregions = 0;
+ dev->memory = memory;
+ return 0;
+}
+
+/* Caller should have device mutex */
+void vhost_dev_cleanup(struct vhost_dev *dev)
+{
+ int i;
+ for (i = 0; i < dev->nvqs; ++i) {
+ if (dev->vqs[i].kick && dev->vqs[i].handle_kick) {
+ vhost_poll_stop(&dev->vqs[i].poll);
+ vhost_poll_flush(&dev->vqs[i].poll);
+ }
+ if (dev->vqs[i].error_ctx)
+ eventfd_ctx_put(dev->vqs[i].error_ctx);
+ if (dev->vqs[i].error)
+ fput(dev->vqs[i].error);
+ if (dev->vqs[i].kick)
+ fput(dev->vqs[i].kick);
+ if (dev->vqs[i].call_ctx)
+ eventfd_ctx_put(dev->vqs[i].call_ctx);
+ if (dev->vqs[i].call)
+ fput(dev->vqs[i].call);
+ vhost_vq_reset(dev, dev->vqs + i);
+ }
+ if (dev->log_ctx)
+ eventfd_ctx_put(dev->log_ctx);
+ dev->log_ctx = NULL;
+ if (dev->log_file)
+ fput(dev->log_file);
+ dev->log_file = NULL;
+ /* No one will access memory at this point */
+ kfree(dev->memory);
+ dev->memory = NULL;
+ if (dev->mm)
+ mmput(dev->mm);
+ dev->mm = NULL;
+}
+
+static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
+{
+ u64 a = addr / VHOST_PAGE_SIZE / 8;
+ /* Make sure 64 bit math will not overflow. */
+ if (a > ULONG_MAX - (unsigned long)log_base ||
+ a + (unsigned long)log_base > ULONG_MAX)
+ return -EFAULT;
+
+ return access_ok(VERIFY_WRITE, log_base + a,
+ (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
+}
+
+/* Caller should have vq mutex and device mutex. */
+static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
+ int log_all)
+{
+ int i;
+ for (i = 0; i < mem->nregions; ++i) {
+ struct vhost_memory_region *m = mem->regions + i;
+ unsigned long a = m->userspace_addr;
+ if (m->memory_size > ULONG_MAX)
+ return 0;
+ else if (!access_ok(VERIFY_WRITE, (void __user *)a,
+ m->memory_size))
+ return 0;
+ else if (log_all && !log_access_ok(log_base,
+ m->guest_phys_addr,
+ m->memory_size))
+ return 0;
+ }
+ return 1;
+}
+
+/* Can we switch to this memory table? */
+/* Caller should have device mutex but not vq mutex */
+static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
+ int log_all)
+{
+ int i;
+ for (i = 0; i < d->nvqs; ++i) {
+ int ok;
+ mutex_lock(&d->vqs[i].mutex);
+ /* If ring is inactive, will check when it's enabled. */
+ if (d->vqs[i].private_data)
+ ok = vq_memory_access_ok(d->vqs[i].log_base, mem,
+ log_all);
+ else
+ ok = 1;
+ mutex_unlock(&d->vqs[i].mutex);
+ if (!ok)
+ return 0;
+ }
+ return 1;
+}
+
+static int vq_access_ok(unsigned int num,
+ struct vring_desc __user *desc,
+ struct vring_avail __user *avail,
+ struct vring_used __user *used)
+{
+ return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
+ access_ok(VERIFY_READ, avail,
+ sizeof *avail + num * sizeof *avail->ring) &&
+ access_ok(VERIFY_WRITE, used,
+ sizeof *used + num * sizeof *used->ring);
+}
+
+/* Can we log writes? */
+/* Caller should have device mutex but not vq mutex */
+int vhost_log_access_ok(struct vhost_dev *dev)
+{
+ return memory_access_ok(dev, dev->memory, 1);
+}
+
+/* Verify access for write logging. */
+/* Caller should have vq mutex and device mutex */
+static int vq_log_access_ok(struct vhost_virtqueue *vq, void __user *log_base)
+{
+ return vq_memory_access_ok(log_base, vq->dev->memory,
+ vhost_has_feature(vq->dev, VHOST_F_LOG_ALL)) &&
+ (!vq->log_used || log_access_ok(log_base, vq->log_addr,
+ sizeof *vq->used +
+ vq->num * sizeof *vq->used->ring));
+}
+
+/* Can we start vq? */
+/* Caller should have vq mutex and device mutex */
+int vhost_vq_access_ok(struct vhost_virtqueue *vq)
+{
+ return vq_access_ok(vq->num, vq->desc, vq->avail, vq->used) &&
+ vq_log_access_ok(vq, vq->log_base);
+}
+
+static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
+{
+ struct vhost_memory mem, *newmem, *oldmem;
+ unsigned long size = offsetof(struct vhost_memory, regions);
+ long r;
+ r = copy_from_user(&mem, m, size);
+ if (r)
+ return r;
+ if (mem.padding)
+ return -EOPNOTSUPP;
+ if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
+ return -E2BIG;
+ newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
+ if (!newmem)
+ return -ENOMEM;
+
+ memcpy(newmem, &mem, size);
+ r = copy_from_user(newmem->regions, m->regions,
+ mem.nregions * sizeof *m->regions);
+ if (r) {
+ kfree(newmem);
+ return r;
+ }
+
+ if (!memory_access_ok(d, newmem, vhost_has_feature(d, VHOST_F_LOG_ALL))) {
+ kfree(newmem);
+ return -EFAULT;
+ }
+ oldmem = d->memory;
+ rcu_assign_pointer(d->memory, newmem);
+ synchronize_rcu();
+ kfree(oldmem);
+ return 0;
+}
+
+static int init_used(struct vhost_virtqueue *vq,
+ struct vring_used __user *used)
+{
+ int r = put_user(vq->used_flags, &used->flags);
+ if (r)
+ return r;
+ return get_user(vq->last_used_idx, &used->idx);
+}
+
+static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
+{
+ struct file *eventfp, *filep = NULL,
+ *pollstart = NULL, *pollstop = NULL;
+ struct eventfd_ctx *ctx = NULL;
+ u32 __user *idxp = argp;
+ struct vhost_virtqueue *vq;
+ struct vhost_vring_state s;
+ struct vhost_vring_file f;
+ struct vhost_vring_addr a;
+ u32 idx;
+ long r;
+
+ r = get_user(idx, idxp);
+ if (r < 0)
+ return r;
+ if (idx >= d->nvqs)
+ return -ENOBUFS;
+
+ vq = d->vqs + idx;
+
+ mutex_lock(&vq->mutex);
+
+ switch (ioctl) {
+ case VHOST_SET_VRING_NUM:
+ /* Resizing ring with an active backend?
+ * You don't want to do that. */
+ if (vq->private_data) {
+ r = -EBUSY;
+ break;
+ }
+ r = copy_from_user(&s, argp, sizeof s);
+ if (r < 0)
+ break;
+ if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
+ r = -EINVAL;
+ break;
+ }
+ vq->num = s.num;
+ break;
+ case VHOST_SET_VRING_BASE:
+ /* Moving base with an active backend?
+ * You don't want to do that. */
+ if (vq->private_data) {
+ r = -EBUSY;
+ break;
+ }
+ r = copy_from_user(&s, argp, sizeof s);
+ if (r < 0)
+ break;
+ if (s.num > 0xffff) {
+ r = -EINVAL;
+ break;
+ }
+ vq->last_avail_idx = s.num;
+ /* Forget the cached index value. */
+ vq->avail_idx = vq->last_avail_idx;
+ break;
+ case VHOST_GET_VRING_BASE:
+ s.index = idx;
+ s.num = vq->last_avail_idx;
+ r = copy_to_user(argp, &s, sizeof s);
+ break;
+ case VHOST_SET_VRING_ADDR:
+ r = copy_from_user(&a, argp, sizeof a);
+ if (r < 0)
+ break;
+ if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
+ r = -EOPNOTSUPP;
+ break;
+ }
+ /* For 32bit, verify that the top 32bits of the user
+ data are set to zero. */
+ if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
+ (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
+ (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
+ r = -EFAULT;
+ break;
+ }
+ if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
+ (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
+ (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+ r = -EINVAL;
+ break;
+ }
+
+ /* We only verify access here if backend is configured.
+ * If it is not, we don't as size might not have been setup.
+ * We will verify when backend is configured. */
+ if (vq->private_data) {
+ if (!vq_access_ok(vq->num,
+ (void __user *)(unsigned long)a.desc_user_addr,
+ (void __user *)(unsigned long)a.avail_user_addr,
+ (void __user *)(unsigned long)a.used_user_addr)) {
+ r = -EINVAL;
+ break;
+ }
+
+ /* Also validate log access for used ring if enabled. */
+ if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
+ !log_access_ok(vq->log_base, a.log_guest_addr,
+ sizeof *vq->used +
+ vq->num * sizeof *vq->used->ring)) {
+ r = -EINVAL;
+ break;
+ }
+ }
+
+ r = init_used(vq, (struct vring_used __user *)(unsigned long)
+ a.used_user_addr);
+ if (r)
+ break;
+ vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
+ vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
+ vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
+ vq->log_addr = a.log_guest_addr;
+ vq->used = (void __user *)(unsigned long)a.used_user_addr;
+ break;
+ case VHOST_SET_VRING_KICK:
+ r = copy_from_user(&f, argp, sizeof f);
+ if (r < 0)
+ break;
+ eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+ if (IS_ERR(eventfp)) {
+ r = PTR_ERR(eventfp);
+ break;
+ }
+ if (eventfp != vq->kick) {
+ pollstop = filep = vq->kick;
+ pollstart = vq->kick = eventfp;
+ } else
+ filep = eventfp;
+ break;
+ case VHOST_SET_VRING_CALL:
+ r = copy_from_user(&f, argp, sizeof f);
+ if (r < 0)
+ break;
+ eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+ if (IS_ERR(eventfp)) {
+ r = PTR_ERR(eventfp);
+ break;
+ }
+ if (eventfp != vq->call) {
+ filep = vq->call;
+ ctx = vq->call_ctx;
+ vq->call = eventfp;
+ vq->call_ctx = eventfp ?
+ eventfd_ctx_fileget(eventfp) : NULL;
+ } else
+ filep = eventfp;
+ break;
+ case VHOST_SET_VRING_ERR:
+ r = copy_from_user(&f, argp, sizeof f);
+ if (r < 0)
+ break;
+ eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
+ if (IS_ERR(eventfp)) {
+ r = PTR_ERR(eventfp);
+ break;
+ }
+ if (eventfp != vq->error) {
+ filep = vq->error;
+ vq->error = eventfp;
+ ctx = vq->error_ctx;
+ vq->error_ctx = eventfp ?
+ eventfd_ctx_fileget(eventfp) : NULL;
+ } else
+ filep = eventfp;
+ break;
+ default:
+ r = -ENOIOCTLCMD;
+ }
+
+ if (pollstop && vq->handle_kick)
+ vhost_poll_stop(&vq->poll);
+
+ if (ctx)
+ eventfd_ctx_put(ctx);
+ if (filep)
+ fput(filep);
+
+ if (pollstart && vq->handle_kick)
+ vhost_poll_start(&vq->poll, vq->kick);
+
+ mutex_unlock(&vq->mutex);
+
+ if (pollstop && vq->handle_kick)
+ vhost_poll_flush(&vq->poll);
+ return r;
+}
+
+/* Caller must have device mutex */
+long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+ struct file *eventfp, *filep = NULL;
+ struct eventfd_ctx *ctx = NULL;
+ u64 p;
+ long r;
+ int i, fd;
+
+ /* If you are not the owner, you can become one */
+ if (ioctl == VHOST_SET_OWNER) {
+ r = vhost_dev_set_owner(d);
+ goto done;
+ }
+
+ /* You must be the owner to do anything else */
+ r = vhost_dev_check_owner(d);
+ if (r)
+ goto done;
+
+ switch (ioctl) {
+ case VHOST_SET_MEM_TABLE:
+ r = vhost_set_memory(d, argp);
+ break;
+ case VHOST_SET_LOG_BASE:
+ r = copy_from_user(&p, argp, sizeof p);
+ if (r < 0)
+ break;
+ if ((u64)(unsigned long)p != p) {
+ r = -EFAULT;
+ break;
+ }
+ for (i = 0; i < d->nvqs; ++i) {
+ struct vhost_virtqueue *vq;
+ void __user *base = (void __user *)(unsigned long)p;
+ vq = d->vqs + i;
+ mutex_lock(&vq->mutex);
+ /* If ring is inactive, will check when it's enabled. */
+ if (vq->private_data && !vq_log_access_ok(vq, base))
+ r = -EFAULT;
+ else
+ vq->log_base = base;
+ mutex_unlock(&vq->mutex);
+ }
+ break;
+ case VHOST_SET_LOG_FD:
+ r = get_user(fd, (int __user *)argp);
+ if (r < 0)
+ break;
+ eventfp = fd == -1 ? NULL : eventfd_fget(fd);
+ if (IS_ERR(eventfp)) {
+ r = PTR_ERR(eventfp);
+ break;
+ }
+ if (eventfp != d->log_file) {
+ filep = d->log_file;
+ ctx = d->log_ctx;
+ d->log_ctx = eventfp ?
+ eventfd_ctx_fileget(eventfp) : NULL;
+ } else
+ filep = eventfp;
+ for (i = 0; i < d->nvqs; ++i) {
+ mutex_lock(&d->vqs[i].mutex);
+ d->vqs[i].log_ctx = d->log_ctx;
+ mutex_unlock(&d->vqs[i].mutex);
+ }
+ if (ctx)
+ eventfd_ctx_put(ctx);
+ if (filep)
+ fput(filep);
+ break;
+ default:
+ r = vhost_set_vring(d, ioctl, argp);
+ break;
+ }
+done:
+ return r;
+}
+
+static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
+ __u64 addr, __u32 len)
+{
+ struct vhost_memory_region *reg;
+ int i;
+ /* linear search is not brilliant, but we really have on the order of 6
+ * regions in practice */
+ for (i = 0; i < mem->nregions; ++i) {
+ reg = mem->regions + i;
+ if (reg->guest_phys_addr <= addr &&
+ reg->guest_phys_addr + reg->memory_size - 1 >= addr)
+ return reg;
+ }
+ return NULL;
+}
+
+/* TODO: This is really inefficient. We need something like get_user()
+ * (instruction directly accesses the data, with an exception table entry
+ * returning -EFAULT). See Documentation/x86/exception-tables.txt.
+ */
+static int set_bit_to_user(int nr, void __user *addr)
+{
+ unsigned long log = (unsigned long)addr;
+ struct page *page;
+ void *base;
+ int bit = nr + (log % PAGE_SIZE) * 8;
+ int r;
+ r = get_user_pages_fast(log, 1, 1, &page);
+ if (r < 0)
+ return r;
+ BUG_ON(r != 1);
+ base = kmap_atomic(page, KM_USER0);
+ set_bit(bit, base);
+ kunmap_atomic(base, KM_USER0);
+ set_page_dirty_lock(page);
+ put_page(page);
+ return 0;
+}
+
+static int log_write(void __user *log_base,
+ u64 write_address, u64 write_length)
+{
+ int r;
+ if (!write_length)
+ return 0;
+ write_address /= VHOST_PAGE_SIZE;
+ for (;;) {
+ u64 base = (u64)(unsigned long)log_base;
+ u64 log = base + write_address / 8;
+ int bit = write_address % 8;
+ if ((u64)(unsigned long)log != log)
+ return -EFAULT;
+ r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
+ if (r < 0)
+ return r;
+ if (write_length <= VHOST_PAGE_SIZE)
+ break;
+ write_length -= VHOST_PAGE_SIZE;
+ write_address += VHOST_PAGE_SIZE;
+ }
+ return r;
+}
+
+int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
+ unsigned int log_num, u64 len)
+{
+ int i, r;
+
+ /* Make sure data written is seen before log. */
+ smp_wmb();
+ for (i = 0; i < log_num; ++i) {
+ u64 l = min(log[i].len, len);
+ r = log_write(vq->log_base, log[i].addr, l);
+ if (r < 0)
+ return r;
+ len -= l;
+ if (!len)
+ return 0;
+ }
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ /* Length written exceeds what we have stored. This is a bug. */
+ BUG();
+ return 0;
+}
+
+int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
+ struct iovec iov[], int iov_size)
+{
+ const struct vhost_memory_region *reg;
+ struct vhost_memory *mem;
+ struct iovec *_iov;
+ u64 s = 0;
+ int ret = 0;
+
+ rcu_read_lock();
+
+ mem = rcu_dereference(dev->memory);
+ while ((u64)len > s) {
+ u64 size;
+ if (ret >= iov_size) {
+ ret = -ENOBUFS;
+ break;
+ }
+ reg = find_region(mem, addr, len);
+ if (!reg) {
+ ret = -EFAULT;
+ break;
+ }
+ _iov = iov + ret;
+ size = reg->memory_size - addr + reg->guest_phys_addr;
+ _iov->iov_len = min((u64)len, size);
+ _iov->iov_base = (void *)(unsigned long)
+ (reg->userspace_addr + addr - reg->guest_phys_addr);
+ s += size;
+ addr += size;
+ ++ret;
+ }
+
+ rcu_read_unlock();
+ return ret;
+}
+
+/* Each buffer in the virtqueues is actually a chain of descriptors. This
+ * function returns the next descriptor in the chain,
+ * or -1U if we're at the end. */
+static unsigned next_desc(struct vring_desc *desc)
+{
+ unsigned int next;
+
+ /* If this descriptor says it doesn't chain, we're done. */
+ if (!(desc->flags & VRING_DESC_F_NEXT))
+ return -1U;
+
+ /* Check they're not leading us off end of descriptors. */
+ next = desc->next;
+ /* Make sure compiler knows to grab that: we don't want it changing! */
+ /* We will use the result as an index in an array, so most
+ * architectures only need a compiler barrier here. */
+ read_barrier_depends();
+
+ return next;
+}
+
+static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num,
+ struct vring_desc *indirect)
+{
+ struct vring_desc desc;
+ unsigned int i = 0, count, found = 0;
+ int ret;
+
+ /* Sanity check */
+ if (indirect->len % sizeof desc) {
+ vq_err(vq, "Invalid length in indirect descriptor: "
+ "len 0x%llx not multiple of 0x%zx\n",
+ (unsigned long long)indirect->len,
+ sizeof desc);
+ return -EINVAL;
+ }
+
+ ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
+ ARRAY_SIZE(vq->indirect));
+ if (ret < 0) {
+ vq_err(vq, "Translation failure %d in indirect.\n", ret);
+ return ret;
+ }
+
+ /* We will use the result as an address to read from, so most
+ * architectures only need a compiler barrier here. */
+ read_barrier_depends();
+
+ count = indirect->len / sizeof desc;
+ /* Buffers are chained via a 16 bit next field, so
+ * we can have at most 2^16 of these. */
+ if (count > USHORT_MAX + 1) {
+ vq_err(vq, "Indirect buffer length too big: %d\n",
+ indirect->len);
+ return -E2BIG;
+ }
+
+ do {
+ unsigned iov_count = *in_num + *out_num;
+ if (++found > count) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "indirect size %u\n",
+ i, count);
+ return -EINVAL;
+ }
+ if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
+ sizeof desc)) {
+ vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
+ i, (size_t)indirect->addr + i * sizeof desc);
+ return -EINVAL;
+ }
+ if (desc.flags & VRING_DESC_F_INDIRECT) {
+ vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
+ i, (size_t)indirect->addr + i * sizeof desc);
+ return -EINVAL;
+ }
+
+ ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+ iov_size - iov_count);
+ if (ret < 0) {
+ vq_err(vq, "Translation failure %d indirect idx %d\n",
+ ret, i);
+ return ret;
+ }
+ /* If this is an input descriptor, increment that count. */
+ if (desc.flags & VRING_DESC_F_WRITE) {
+ *in_num += ret;
+ if (unlikely(log)) {
+ log[*log_num].addr = desc.addr;
+ log[*log_num].len = desc.len;
+ ++*log_num;
+ }
+ } else {
+ /* If it's an output descriptor, they're all supposed
+ * to come before any input descriptors. */
+ if (*in_num) {
+ vq_err(vq, "Indirect descriptor "
+ "has out after in: idx %d\n", i);
+ return -EINVAL;
+ }
+ *out_num += ret;
+ }
+ } while ((i = next_desc(&desc)) != -1);
+ return 0;
+}
+
+/* This looks in the virtqueue for the first available buffer, and converts
+ * it to an iovec for convenient access. Since descriptors consist of some
+ * number of output then some number of input descriptors, it's actually two
+ * iovecs, but we pack them into one and note how many of each there were.
+ *
+ * This function returns the descriptor number found, or vq->num (which
+ * is never a valid descriptor number) if none was found. */
+unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+ struct iovec iov[], unsigned int iov_size,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num)
+{
+ struct vring_desc desc;
+ unsigned int i, head, found = 0;
+ u16 last_avail_idx;
+ int ret;
+
+ /* Check it isn't doing very strange things with descriptor numbers. */
+ last_avail_idx = vq->last_avail_idx;
+ if (get_user(vq->avail_idx, &vq->avail->idx)) {
+ vq_err(vq, "Failed to access avail idx at %p\n",
+ &vq->avail->idx);
+ return vq->num;
+ }
+
+ if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
+ vq_err(vq, "Guest moved used index from %u to %u",
+ last_avail_idx, vq->avail_idx);
+ return vq->num;
+ }
+
+ /* If there's nothing new since last we looked, return invalid. */
+ if (vq->avail_idx == last_avail_idx)
+ return vq->num;
+
+ /* Only get avail ring entries after they have been exposed by guest. */
+ smp_rmb();
+
+ /* Grab the next descriptor number they're advertising, and increment
+ * the index we've seen. */
+ if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
+ vq_err(vq, "Failed to read head: idx %d address %p\n",
+ last_avail_idx,
+ &vq->avail->ring[last_avail_idx % vq->num]);
+ return vq->num;
+ }
+
+ /* If their number is silly, that's an error. */
+ if (head >= vq->num) {
+ vq_err(vq, "Guest says index %u > %u is available",
+ head, vq->num);
+ return vq->num;
+ }
+
+ /* When we start there are none of either input nor output. */
+ *out_num = *in_num = 0;
+ if (unlikely(log))
+ *log_num = 0;
+
+ i = head;
+ do {
+ unsigned iov_count = *in_num + *out_num;
+ if (i >= vq->num) {
+ vq_err(vq, "Desc index is %u > %u, head = %u",
+ i, vq->num, head);
+ return vq->num;
+ }
+ if (++found > vq->num) {
+ vq_err(vq, "Loop detected: last one at %u "
+ "vq size %u head %u\n",
+ i, vq->num, head);
+ return vq->num;
+ }
+ ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
+ if (ret) {
+ vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
+ i, vq->desc + i);
+ return vq->num;
+ }
+ if (desc.flags & VRING_DESC_F_INDIRECT) {
+ ret = get_indirect(dev, vq, iov, iov_size,
+ out_num, in_num,
+ log, log_num, &desc);
+ if (ret < 0) {
+ vq_err(vq, "Failure detected "
+ "in indirect descriptor at idx %d\n", i);
+ return vq->num;
+ }
+ continue;
+ }
+
+ ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
+ iov_size - iov_count);
+ if (ret < 0) {
+ vq_err(vq, "Translation failure %d descriptor idx %d\n",
+ ret, i);
+ return vq->num;
+ }
+ if (desc.flags & VRING_DESC_F_WRITE) {
+ /* If this is an input descriptor,
+ * increment that count. */
+ *in_num += ret;
+ if (unlikely(log)) {
+ log[*log_num].addr = desc.addr;
+ log[*log_num].len = desc.len;
+ ++*log_num;
+ }
+ } else {
+ /* If it's an output descriptor, they're all supposed
+ * to come before any input descriptors. */
+ if (*in_num) {
+ vq_err(vq, "Descriptor has out after in: "
+ "idx %d\n", i);
+ return vq->num;
+ }
+ *out_num += ret;
+ }
+ } while ((i = next_desc(&desc)) != -1);
+
+ /* On success, increment avail index. */
+ vq->last_avail_idx++;
+ return head;
+}
+
+/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
+void vhost_discard_vq_desc(struct vhost_virtqueue *vq)
+{
+ vq->last_avail_idx--;
+}
+
+/* After we've used one of their buffers, we tell them about it. We'll then
+ * want to notify the guest, using eventfd. */
+int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
+{
+ struct vring_used_elem *used;
+
+ /* The virtqueue contains a ring of used buffers. Get a pointer to the
+ * next entry in that used ring. */
+ used = &vq->used->ring[vq->last_used_idx % vq->num];
+ if (put_user(head, &used->id)) {
+ vq_err(vq, "Failed to write used id");
+ return -EFAULT;
+ }
+ if (put_user(len, &used->len)) {
+ vq_err(vq, "Failed to write used len");
+ return -EFAULT;
+ }
+ /* Make sure buffer is written before we update index. */
+ smp_wmb();
+ if (put_user(vq->last_used_idx + 1, &vq->used->idx)) {
+ vq_err(vq, "Failed to increment used idx");
+ return -EFAULT;
+ }
+ if (unlikely(vq->log_used)) {
+ /* Make sure data is seen before log. */
+ smp_wmb();
+ /* Log used ring entry write. */
+ log_write(vq->log_base,
+ vq->log_addr + ((void *)used - (void *)vq->used),
+ sizeof *used);
+ /* Log used index update. */
+ log_write(vq->log_base,
+ vq->log_addr + offsetof(struct vring_used, idx),
+ sizeof vq->used->idx);
+ if (vq->log_ctx)
+ eventfd_signal(vq->log_ctx, 1);
+ }
+ vq->last_used_idx++;
+ return 0;
+}
+
+/* This actually signals the guest, using eventfd. */
+void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
+{
+ __u16 flags = 0;
+ if (get_user(flags, &vq->avail->flags)) {
+ vq_err(vq, "Failed to get flags");
+ return;
+ }
+
+ /* If they don't want an interrupt, don't signal, unless empty. */
+ if ((flags & VRING_AVAIL_F_NO_INTERRUPT) &&
+ (vq->avail_idx != vq->last_avail_idx ||
+ !vhost_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY)))
+ return;
+
+ /* Signal the Guest, telling them we used something up. */
+ if (vq->call_ctx)
+ eventfd_signal(vq->call_ctx, 1);
+}
+
+/* And here's the combo meal deal. Supersize me! */
+void vhost_add_used_and_signal(struct vhost_dev *dev,
+ struct vhost_virtqueue *vq,
+ unsigned int head, int len)
+{
+ vhost_add_used(vq, head, len);
+ vhost_signal(dev, vq);
+}
+
+/* OK, now we need to know about added descriptors. */
+bool vhost_enable_notify(struct vhost_virtqueue *vq)
+{
+ u16 avail_idx;
+ int r;
+ if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
+ return false;
+ vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r) {
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+ return false;
+ }
+ /* They could have slipped one in as we were doing that: make
+ * sure it's written, then check again. */
+ smp_mb();
+ r = get_user(avail_idx, &vq->avail->idx);
+ if (r) {
+ vq_err(vq, "Failed to check avail idx at %p: %d\n",
+ &vq->avail->idx, r);
+ return false;
+ }
+
+ return avail_idx != vq->last_avail_idx;
+}
+
+/* We don't need to be notified again. */
+void vhost_disable_notify(struct vhost_virtqueue *vq)
+{
+ int r;
+ if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
+ return;
+ vq->used_flags |= VRING_USED_F_NO_NOTIFY;
+ r = put_user(vq->used_flags, &vq->used->flags);
+ if (r)
+ vq_err(vq, "Failed to enable notification at %p: %d\n",
+ &vq->used->flags, r);
+}
+
+int vhost_init(void)
+{
+ vhost_workqueue = create_singlethread_workqueue("vhost");
+ if (!vhost_workqueue)
+ return -ENOMEM;
+ return 0;
+}
+
+void vhost_cleanup(void)
+{
+ destroy_workqueue(vhost_workqueue);
+}
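
For reference, log_write() above records dirty guest pages in a userspace-supplied bitmap: one bit per VHOST_PAGE_SIZE bytes of guest physical address space, bit N covering address N * VHOST_PAGE_SIZE, set via set_bit() on the mapped page. A hedged sketch of the matching userspace test on a little-endian host (VHOST_PAGE_SIZE comes from the UAPI header, which is not part of this hunk; 4096 is assumed here):

    #include <stdint.h>

    #define VHOST_PAGE_SIZE 4096    /* assumed value, see <linux/vhost.h> */

    /* Nonzero if the page containing guest physical address gpa has been
     * marked dirty in the log bitmap written by log_write(). */
    static int vhost_log_test(const uint8_t *log_base, uint64_t gpa)
    {
        uint64_t page = gpa / VHOST_PAGE_SIZE;

        return log_base[page / 8] & (1u << (page % 8));
    }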
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
new file mode 100644
index 00000000000..44591ba9b07
--- /dev/null
+++ b/drivers/vhost/vhost.h
@@ -0,0 +1,161 @@
+#ifndef _VHOST_H
+#define _VHOST_H
+
+#include <linux/eventfd.h>
+#include <linux/vhost.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/skbuff.h>
+#include <linux/uio.h>
+#include <linux/virtio_config.h>
+#include <linux/virtio_ring.h>
+
+struct vhost_device;
+
+enum {
+ /* Enough room for all fragments, the head, and the virtio net header. */
+ VHOST_NET_MAX_SG = MAX_SKB_FRAGS + 2,
+};
+
+/* Poll a file (eventfd or socket) */
+/* Note: there's nothing vhost specific about this structure. */
+struct vhost_poll {
+ poll_table table;
+ wait_queue_head_t *wqh;
+ wait_queue_t wait;
+ /* struct which will handle all actual work. */
+ struct work_struct work;
+ unsigned long mask;
+};
+
+void vhost_poll_init(struct vhost_poll *poll, work_func_t func,
+ unsigned long mask);
+void vhost_poll_start(struct vhost_poll *poll, struct file *file);
+void vhost_poll_stop(struct vhost_poll *poll);
+void vhost_poll_flush(struct vhost_poll *poll);
+void vhost_poll_queue(struct vhost_poll *poll);
+
+struct vhost_log {
+ u64 addr;
+ u64 len;
+};
+
+/* The virtqueue structure describes a queue attached to a device. */
+struct vhost_virtqueue {
+ struct vhost_dev *dev;
+
+ /* The actual ring of buffers. */
+ struct mutex mutex;
+ unsigned int num;
+ struct vring_desc __user *desc;
+ struct vring_avail __user *avail;
+ struct vring_used __user *used;
+ struct file *kick;
+ struct file *call;
+ struct file *error;
+ struct eventfd_ctx *call_ctx;
+ struct eventfd_ctx *error_ctx;
+ struct eventfd_ctx *log_ctx;
+
+ struct vhost_poll poll;
+
+ /* The routine to call when the Guest pings us, or timeout. */
+ work_func_t handle_kick;
+
+ /* Last available index we saw. */
+ u16 last_avail_idx;
+
+ /* Caches available index value from user. */
+ u16 avail_idx;
+
+ /* Last index we used. */
+ u16 last_used_idx;
+
+ /* Used flags */
+ u16 used_flags;
+
+ /* Log writes to used structure. */
+ bool log_used;
+ u64 log_addr;
+
+ struct iovec indirect[VHOST_NET_MAX_SG];
+ struct iovec iov[VHOST_NET_MAX_SG];
+ struct iovec hdr[VHOST_NET_MAX_SG];
+ size_t hdr_size;
+ /* We use a kind of RCU to access private pointer.
+ * All readers access it from workqueue, which makes it possible to
+ * flush the workqueue instead of synchronize_rcu. Therefore readers do
+ * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
+ * work item execution acts instead of rcu_read_lock() and the end of
+ * work item execution acts instead of rcu_read_unlock().
+ * Writers use virtqueue mutex. */
+ void *private_data;
+ /* Log write descriptors */
+ void __user *log_base;
+ struct vhost_log log[VHOST_NET_MAX_SG];
+};
+
+struct vhost_dev {
+ /* Readers use RCU to access the memory table pointer,
+ * the log base pointer and the features.
+ * Writers use the mutex below. */
+ struct vhost_memory *memory;
+ struct mm_struct *mm;
+ struct mutex mutex;
+ unsigned acked_features;
+ struct vhost_virtqueue *vqs;
+ int nvqs;
+ struct file *log_file;
+ struct eventfd_ctx *log_ctx;
+};
+
+long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue *vqs, int nvqs);
+long vhost_dev_check_owner(struct vhost_dev *);
+long vhost_dev_reset_owner(struct vhost_dev *);
+void vhost_dev_cleanup(struct vhost_dev *);
+long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
+int vhost_vq_access_ok(struct vhost_virtqueue *vq);
+int vhost_log_access_ok(struct vhost_dev *);
+
+unsigned vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
+ struct iovec iov[], unsigned int iov_count,
+ unsigned int *out_num, unsigned int *in_num,
+ struct vhost_log *log, unsigned int *log_num);
+void vhost_discard_vq_desc(struct vhost_virtqueue *);
+
+int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
+void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
+void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
+ unsigned int head, int len);
+void vhost_disable_notify(struct vhost_virtqueue *);
+bool vhost_enable_notify(struct vhost_virtqueue *);
+
+int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
+ unsigned int log_num, u64 len);
+
+int vhost_init(void);
+void vhost_cleanup(void);
+
+#define vq_err(vq, fmt, ...) do { \
+ pr_debug(pr_fmt(fmt), ##__VA_ARGS__); \
+ if ((vq)->error_ctx) \
+ eventfd_signal((vq)->error_ctx, 1);\
+ } while (0)
+
+enum {
+ VHOST_FEATURES = (1 << VIRTIO_F_NOTIFY_ON_EMPTY) |
+ (1 << VIRTIO_RING_F_INDIRECT_DESC) |
+ (1 << VHOST_F_LOG_ALL) |
+ (1 << VHOST_NET_F_VIRTIO_NET_HDR),
+};
+
+static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
+{
+ unsigned acked_features = rcu_dereference(dev->acked_features);
+ return acked_features & (1 << bit);
+}
+
+#endif
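
The private_data comment above is the locking contract every vhost backend is expected to follow: readers touch the pointer only from a work item, writers swap it under the vq mutex and then flush the work instead of calling synchronize_rcu(). In outline, mirroring what net.c does (names abbreviated; a sketch rather than a complete driver):

    #include "vhost.h"

    /* Reader, always invoked from the vhost workqueue; work item execution
     * brackets the read side, so no rcu_read_lock() is needed. */
    static void example_handle_kick(struct vhost_virtqueue *vq)
    {
        void *priv = rcu_dereference(vq->private_data);

        if (!priv)
            return;
        /* ... use priv ... */
    }

    /* Writer, e.g. when changing the backend. */
    static void example_swap_backend(struct vhost_virtqueue *vq, void *new_priv)
    {
        mutex_lock(&vq->mutex);
        rcu_assign_pointer(vq->private_data, new_priv);
        mutex_unlock(&vq->mutex);
        /* Flushing the work acts as synchronize_rcu() here. */
        vhost_poll_flush(&vq->poll);
    }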
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index fc7d9bbb548..8e8f18d29d7 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -37,6 +37,7 @@ config VGACON_SOFT_SCROLLBACK
config VGACON_SOFT_SCROLLBACK_SIZE
int "Scrollback Buffer Size (in KB)"
depends on VGACON_SOFT_SCROLLBACK
+ range 1 1024
default "64"
help
Enter the amount of System RAM to allocate for the scrollback
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 3681c6a8821..b0a3fa00706 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -3025,6 +3025,20 @@ static int fbcon_fb_unregistered(struct fb_info *info)
return 0;
}
+static void fbcon_remap_all(int idx)
+{
+ int i;
+ for (i = first_fb_vc; i <= last_fb_vc; i++)
+ set_con2fb_map(i, idx, 0);
+
+ if (con_is_bound(&fb_con)) {
+ printk(KERN_INFO "fbcon: Remapping primary device, "
+ "fb%i, to tty %i-%i\n", idx,
+ first_fb_vc + 1, last_fb_vc + 1);
+ info_idx = idx;
+ }
+}
+
#ifdef CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
static void fbcon_select_primary(struct fb_info *info)
{
@@ -3225,6 +3239,10 @@ static int fbcon_event_notify(struct notifier_block *self,
caps = event->data;
fbcon_get_requirement(info, caps);
break;
+ case FB_EVENT_REMAP_ALL_CONSOLE:
+ idx = info->node;
+ fbcon_remap_all(idx);
+ break;
}
done:
return ret;
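
The new FB_EVENT_REMAP_ALL_CONSOLE case is only exercised if a framebuffer driver raises the event. A hedged sketch of what an emitting side might look like, using the existing fbmem notifier API (whether such a caller is added elsewhere in this series is not shown in this hunk):

    #include <linux/fb.h>

    /* Ask fbcon to remap all framebuffer consoles onto this device. */
    static void example_remap_consoles(struct fb_info *info)
    {
        struct fb_event event = { .info = info };

        fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event);
    }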
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index cc4bbbe44ac..182dd6f8aad 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -112,7 +112,7 @@ static int vga_video_font_height;
static int vga_scan_lines __read_mostly;
static unsigned int vga_rolled_over;
-int vgacon_text_mode_force = 0;
+static int vgacon_text_mode_force;
bool vgacon_text_force(void)
{
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index 99bbd282ce6..a15b44e9c00 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1513,7 +1513,6 @@ register_framebuffer(struct fb_info *fb_info)
fb_info->fix.id,
registered_fb[i]->fix.id);
unregister_framebuffer(registered_fb[i]);
- break;
}
}
}
diff --git a/drivers/video/omap/lcd_ams_delta.c b/drivers/video/omap/lcd_ams_delta.c
index 567db6ac32c..6978ae4ef83 100644
--- a/drivers/video/omap/lcd_ams_delta.c
+++ b/drivers/video/omap/lcd_ams_delta.c
@@ -24,6 +24,7 @@
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/delay.h>
+#include <linux/lcd.h>
#include <plat/board-ams-delta.h>
#include <mach/hardware.h>
@@ -32,6 +33,71 @@
#define AMS_DELTA_DEFAULT_CONTRAST 112
+#define AMS_DELTA_MAX_CONTRAST 0x00FF
+#define AMS_DELTA_LCD_POWER 0x0100
+
+
+/* LCD class device section */
+
+static int ams_delta_lcd;
+
+static int ams_delta_lcd_set_power(struct lcd_device *dev, int power)
+{
+ if (power == FB_BLANK_UNBLANK) {
+ if (!(ams_delta_lcd & AMS_DELTA_LCD_POWER)) {
+ omap_writeb(ams_delta_lcd & AMS_DELTA_MAX_CONTRAST,
+ OMAP_PWL_ENABLE);
+ omap_writeb(1, OMAP_PWL_CLK_ENABLE);
+ ams_delta_lcd |= AMS_DELTA_LCD_POWER;
+ }
+ } else {
+ if (ams_delta_lcd & AMS_DELTA_LCD_POWER) {
+ omap_writeb(0, OMAP_PWL_ENABLE);
+ omap_writeb(0, OMAP_PWL_CLK_ENABLE);
+ ams_delta_lcd &= ~AMS_DELTA_LCD_POWER;
+ }
+ }
+ return 0;
+}
+
+static int ams_delta_lcd_set_contrast(struct lcd_device *dev, int value)
+{
+ if ((value >= 0) && (value <= AMS_DELTA_MAX_CONTRAST)) {
+ omap_writeb(value, OMAP_PWL_ENABLE);
+ ams_delta_lcd &= ~AMS_DELTA_MAX_CONTRAST;
+ ams_delta_lcd |= value;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_LCD_CLASS_DEVICE
+static int ams_delta_lcd_get_power(struct lcd_device *dev)
+{
+ if (ams_delta_lcd & AMS_DELTA_LCD_POWER)
+ return FB_BLANK_UNBLANK;
+ else
+ return FB_BLANK_POWERDOWN;
+}
+
+static int ams_delta_lcd_get_contrast(struct lcd_device *dev)
+{
+ if (!(ams_delta_lcd & AMS_DELTA_LCD_POWER))
+ return 0;
+
+ return ams_delta_lcd & AMS_DELTA_MAX_CONTRAST;
+}
+
+static struct lcd_ops ams_delta_lcd_ops = {
+ .get_power = ams_delta_lcd_get_power,
+ .set_power = ams_delta_lcd_set_power,
+ .get_contrast = ams_delta_lcd_get_contrast,
+ .set_contrast = ams_delta_lcd_set_contrast,
+};
+#endif
+
+
+/* omapfb panel section */
+
static int ams_delta_panel_init(struct lcd_panel *panel,
struct omapfb_device *fbdev)
{
@@ -48,10 +114,6 @@ static int ams_delta_panel_enable(struct lcd_panel *panel)
AMS_DELTA_LATCH2_LCD_NDISP);
ams_delta_latch2_write(AMS_DELTA_LATCH2_LCD_VBLEN,
AMS_DELTA_LATCH2_LCD_VBLEN);
-
- omap_writeb(1, OMAP_PWL_CLK_ENABLE);
- omap_writeb(AMS_DELTA_DEFAULT_CONTRAST, OMAP_PWL_ENABLE);
-
return 0;
}
@@ -91,8 +153,31 @@ static struct lcd_panel ams_delta_panel = {
.get_caps = ams_delta_panel_get_caps,
};
+
+/* platform driver section */
+
static int ams_delta_panel_probe(struct platform_device *pdev)
{
+ struct lcd_device *lcd_device = NULL;
+#ifdef CONFIG_LCD_CLASS_DEVICE
+ int ret;
+
+ lcd_device = lcd_device_register("omapfb", &pdev->dev, NULL,
+ &ams_delta_lcd_ops);
+
+ if (IS_ERR(lcd_device)) {
+ ret = PTR_ERR(lcd_device);
+ dev_err(&pdev->dev, "failed to register device\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, lcd_device);
+ lcd_device->props.max_contrast = AMS_DELTA_MAX_CONTRAST;
+#endif
+
+ ams_delta_lcd_set_contrast(lcd_device, AMS_DELTA_DEFAULT_CONTRAST);
+ ams_delta_lcd_set_power(lcd_device, FB_BLANK_UNBLANK);
+
omapfb_register_panel(&ams_delta_panel);
return 0;
}
diff --git a/drivers/video/omap/omapfb_main.c b/drivers/video/omap/omapfb_main.c
index 2c4f470fa08..8ce60e1b220 100644
--- a/drivers/video/omap/omapfb_main.c
+++ b/drivers/video/omap/omapfb_main.c
@@ -486,10 +486,11 @@ static int set_color_mode(struct omapfb_plane_struct *plane,
return 0;
case 12:
var->bits_per_pixel = 16;
- plane->color_mode = OMAPFB_COLOR_RGB444;
- return 0;
case 16:
- plane->color_mode = OMAPFB_COLOR_RGB565;
+ if (plane->fbdev->panel->bpp == 12)
+ plane->color_mode = OMAPFB_COLOR_RGB444;
+ else
+ plane->color_mode = OMAPFB_COLOR_RGB565;
return 0;
default:
return -EINVAL;
diff --git a/drivers/video/omap2/displays/Kconfig b/drivers/video/omap2/displays/Kconfig
index b12a59c9c50..dfb57ee5086 100644
--- a/drivers/video/omap2/displays/Kconfig
+++ b/drivers/video/omap2/displays/Kconfig
@@ -13,10 +13,28 @@ config PANEL_SHARP_LS037V7DW01
help
LCD Panel used in TI's SDP3430 and EVM boards
+config PANEL_SHARP_LQ043T1DG01
+ tristate "Sharp LQ043T1DG01 LCD Panel"
+ depends on OMAP2_DSS
+ help
+ LCD Panel used in TI's OMAP3517 EVM boards
+
config PANEL_TAAL
tristate "Taal DSI Panel"
depends on OMAP2_DSS_DSI
help
Taal DSI command mode panel from TPO.
+config PANEL_TOPPOLY_TDO35S
+ tristate "Toppoly TDO35S LCD Panel support"
+ depends on OMAP2_DSS
+ help
+ LCD Panel used in CM-T35
+
+config PANEL_TPO_TD043MTEA1
+ tristate "TPO TD043MTEA1 LCD Panel"
+ depends on OMAP2_DSS && I2C
+ help
+ LCD Panel used in OMAP3 Pandora
+
endmenu
diff --git a/drivers/video/omap2/displays/Makefile b/drivers/video/omap2/displays/Makefile
index 955646440b3..e2bb32168de 100644
--- a/drivers/video/omap2/displays/Makefile
+++ b/drivers/video/omap2/displays/Makefile
@@ -1,4 +1,7 @@
obj-$(CONFIG_PANEL_GENERIC) += panel-generic.o
obj-$(CONFIG_PANEL_SHARP_LS037V7DW01) += panel-sharp-ls037v7dw01.o
+obj-$(CONFIG_PANEL_SHARP_LQ043T1DG01) += panel-sharp-lq043t1dg01.o
obj-$(CONFIG_PANEL_TAAL) += panel-taal.o
+obj-$(CONFIG_PANEL_TOPPOLY_TDO35S) += panel-toppoly-tdo35s.o
+obj-$(CONFIG_PANEL_TPO_TD043MTEA1) += panel-tpo-td043mtea1.o
diff --git a/drivers/video/omap2/displays/panel-generic.c b/drivers/video/omap2/displays/panel-generic.c
index eb48d1afd80..c59e4baed8b 100644
--- a/drivers/video/omap2/displays/panel-generic.c
+++ b/drivers/video/omap2/displays/panel-generic.c
@@ -35,6 +35,35 @@ static struct omap_video_timings generic_panel_timings = {
.vbp = 7,
};
+static int generic_panel_power_on(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto err1;
+ }
+
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
+ return r;
+}
+
+static void generic_panel_power_off(struct omap_dss_device *dssdev)
+{
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
static int generic_panel_probe(struct omap_dss_device *dssdev)
{
dssdev->panel.config = OMAP_DSS_LCD_TFT;
@@ -51,27 +80,40 @@ static int generic_panel_enable(struct omap_dss_device *dssdev)
{
int r = 0;
- if (dssdev->platform_enable)
- r = dssdev->platform_enable(dssdev);
+ r = generic_panel_power_on(dssdev);
+ if (r)
+ return r;
- return r;
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
}
static void generic_panel_disable(struct omap_dss_device *dssdev)
{
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
+ generic_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
static int generic_panel_suspend(struct omap_dss_device *dssdev)
{
- generic_panel_disable(dssdev);
+ generic_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
return 0;
}
static int generic_panel_resume(struct omap_dss_device *dssdev)
{
- return generic_panel_enable(dssdev);
+ int r = 0;
+
+ r = generic_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
}
static struct omap_dss_driver generic_driver = {
diff --git a/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c b/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
new file mode 100644
index 00000000000..10267461991
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-sharp-lq043t1dg01.c
@@ -0,0 +1,159 @@
+/*
+ * LCD panel driver for Sharp LQ043T1DG01
+ *
+ * Copyright (C) 2009 Texas Instruments Inc
+ * Author: Vaibhav Hiremath <hvaibhav@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/err.h>
+
+#include <plat/display.h>
+
+static struct omap_video_timings sharp_lq_timings = {
+ .x_res = 480,
+ .y_res = 272,
+
+ .pixel_clock = 9000,
+
+ .hsw = 42,
+ .hfp = 3,
+ .hbp = 2,
+
+ .vsw = 11,
+ .vfp = 3,
+ .vbp = 2,
+};
+
+static int sharp_lq_panel_power_on(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
+ /* wait a couple of vsyncs until enabling the LCD */
+ msleep(50);
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto err1;
+ }
+
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
+ return r;
+}
+
+static void sharp_lq_panel_power_off(struct omap_dss_device *dssdev)
+{
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ /* wait at least 5 vsyncs after disabling the LCD */
+ msleep(100);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int sharp_lq_panel_probe(struct omap_dss_device *dssdev)
+{
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO;
+ dssdev->panel.acb = 0x0;
+ dssdev->panel.timings = sharp_lq_timings;
+
+ return 0;
+}
+
+static void sharp_lq_panel_remove(struct omap_dss_device *dssdev)
+{
+}
+
+static int sharp_lq_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ r = sharp_lq_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void sharp_lq_panel_disable(struct omap_dss_device *dssdev)
+{
+ sharp_lq_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int sharp_lq_panel_suspend(struct omap_dss_device *dssdev)
+{
+ sharp_lq_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ return 0;
+}
+
+static int sharp_lq_panel_resume(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ r = sharp_lq_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static struct omap_dss_driver sharp_lq_driver = {
+ .probe = sharp_lq_panel_probe,
+ .remove = sharp_lq_panel_remove,
+
+ .enable = sharp_lq_panel_enable,
+ .disable = sharp_lq_panel_disable,
+ .suspend = sharp_lq_panel_suspend,
+ .resume = sharp_lq_panel_resume,
+
+ .driver = {
+ .name = "sharp_lq_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init sharp_lq_panel_drv_init(void)
+{
+ return omap_dss_register_driver(&sharp_lq_driver);
+}
+
+static void __exit sharp_lq_panel_drv_exit(void)
+{
+ omap_dss_unregister_driver(&sharp_lq_driver);
+}
+
+module_init(sharp_lq_panel_drv_init);
+module_exit(sharp_lq_panel_drv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
index bbe880bbe79..8d51a5e6341 100644
--- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c
@@ -20,19 +20,10 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
-#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <plat/display.h>
-struct sharp_data {
- /* XXX This regulator should actually be in SDP board file, not here,
- * as it doesn't actually power the LCD, but something else that
- * affects the output to LCD (I think. Somebody clarify). It doesn't do
- * harm here, as SDP is the only board using this currently */
- struct regulator *vdvi_reg;
-};
-
static struct omap_video_timings sharp_ls_timings = {
.x_res = 480,
.y_res = 640,
@@ -50,77 +41,81 @@ static struct omap_video_timings sharp_ls_timings = {
static int sharp_ls_panel_probe(struct omap_dss_device *dssdev)
{
- struct sharp_data *sd;
-
dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
OMAP_DSS_LCD_IHS;
dssdev->panel.acb = 0x28;
dssdev->panel.timings = sharp_ls_timings;
- sd = kzalloc(sizeof(*sd), GFP_KERNEL);
- if (!sd)
- return -ENOMEM;
-
- dev_set_drvdata(&dssdev->dev, sd);
-
- sd->vdvi_reg = regulator_get(&dssdev->dev, "vdvi");
- if (IS_ERR(sd->vdvi_reg)) {
- kfree(sd);
- pr_err("failed to get VDVI regulator\n");
- return PTR_ERR(sd->vdvi_reg);
- }
-
return 0;
}
static void sharp_ls_panel_remove(struct omap_dss_device *dssdev)
{
- struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
-
- regulator_put(sd->vdvi_reg);
-
- kfree(sd);
}
-static int sharp_ls_panel_enable(struct omap_dss_device *dssdev)
+static int sharp_ls_power_on(struct omap_dss_device *dssdev)
{
- struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
int r = 0;
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
/* wait couple of vsyncs until enabling the LCD */
msleep(50);
- regulator_enable(sd->vdvi_reg);
-
- if (dssdev->platform_enable)
+ if (dssdev->platform_enable) {
r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto err1;
+ }
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
return r;
}
-static void sharp_ls_panel_disable(struct omap_dss_device *dssdev)
+static void sharp_ls_power_off(struct omap_dss_device *dssdev)
{
- struct sharp_data *sd = dev_get_drvdata(&dssdev->dev);
-
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
- regulator_disable(sd->vdvi_reg);
-
/* wait at least 5 vsyncs after disabling the LCD */
msleep(100);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int sharp_ls_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+ r = sharp_ls_power_on(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ return r;
+}
+
+static void sharp_ls_panel_disable(struct omap_dss_device *dssdev)
+{
+ sharp_ls_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
static int sharp_ls_panel_suspend(struct omap_dss_device *dssdev)
{
- sharp_ls_panel_disable(dssdev);
+ sharp_ls_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
return 0;
}
static int sharp_ls_panel_resume(struct omap_dss_device *dssdev)
{
- return sharp_ls_panel_enable(dssdev);
+ int r;
+ r = sharp_ls_power_on(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ return r;
}
static struct omap_dss_driver sharp_ls_driver = {
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index 1f01dfc5e52..fcd6a61a91e 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -63,6 +63,8 @@
/* #define TAAL_USE_ESD_CHECK */
#define TAAL_ESD_CHECK_PERIOD msecs_to_jiffies(5000)
+static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
+
struct taal_data {
struct backlight_device *bldev;
@@ -510,15 +512,12 @@ static int taal_probe(struct omap_dss_device *dssdev)
if (td->esd_wq == NULL) {
dev_err(&dssdev->dev, "can't create ESD workqueue\n");
r = -ENOMEM;
- goto err2;
+ goto err1;
}
INIT_DELAYED_WORK_DEFERRABLE(&td->esd_work, taal_esd_work);
dev_set_drvdata(&dssdev->dev, td);
- dssdev->get_timings = taal_get_timings;
- dssdev->get_resolution = taal_get_resolution;
-
/* if no platform set_backlight() defined, presume DSI backlight
* control */
if (!dssdev->set_backlight)
@@ -528,7 +527,7 @@ static int taal_probe(struct omap_dss_device *dssdev)
&taal_bl_ops);
if (IS_ERR(bldev)) {
r = PTR_ERR(bldev);
- goto err1;
+ goto err2;
}
td->bldev = bldev;
@@ -621,14 +620,12 @@ static void taal_remove(struct omap_dss_device *dssdev)
kfree(td);
}
-static int taal_enable(struct omap_dss_device *dssdev)
+static int taal_power_on(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
u8 id1, id2, id3;
int r;
- dev_dbg(&dssdev->dev, "enable\n");
-
if (dssdev->platform_enable) {
r = dssdev->platform_enable(dssdev);
if (r)
@@ -638,6 +635,16 @@ static int taal_enable(struct omap_dss_device *dssdev)
/* it seems we have to wait a bit until taal is ready */
msleep(5);
+ dsi_bus_lock();
+
+ r = omapdss_dsi_display_enable(dssdev);
+ if (r) {
+ dev_err(&dssdev->dev, "failed to enable DSI\n");
+ goto err0;
+ }
+
+ omapdss_dsi_vc_enable_hs(TCH, false);
+
r = taal_sleep_out(td);
if (r)
goto err;
@@ -661,6 +668,10 @@ static int taal_enable(struct omap_dss_device *dssdev)
taal_dcs_write_0(DCS_DISPLAY_ON);
+ r = _taal_enable_te(dssdev, td->te_enabled);
+ if (r)
+ goto err;
+
#ifdef TAAL_USE_ESD_CHECK
queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
#endif
@@ -676,19 +687,27 @@ static int taal_enable(struct omap_dss_device *dssdev)
td->intro_printed = true;
}
+ omapdss_dsi_vc_enable_hs(TCH, true);
+
+ dsi_bus_unlock();
+
return 0;
err:
+ dsi_bus_unlock();
+
+ omapdss_dsi_display_disable(dssdev);
+err0:
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
return r;
}
-static void taal_disable(struct omap_dss_device *dssdev)
+static void taal_power_off(struct omap_dss_device *dssdev)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- dev_dbg(&dssdev->dev, "disable\n");
+ dsi_bus_lock();
cancel_delayed_work(&td->esd_work);
@@ -698,41 +717,124 @@ static void taal_disable(struct omap_dss_device *dssdev)
/* wait a bit so that the message goes through */
msleep(10);
+ omapdss_dsi_display_disable(dssdev);
+
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
td->enabled = 0;
+
+ dsi_bus_unlock();
+}
+
+static int taal_enable(struct omap_dss_device *dssdev)
+{
+ int r;
+ dev_dbg(&dssdev->dev, "enable\n");
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)
+ return -EINVAL;
+
+ r = taal_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return r;
+}
+
+static void taal_disable(struct omap_dss_device *dssdev)
+{
+ dev_dbg(&dssdev->dev, "disable\n");
+
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ taal_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
static int taal_suspend(struct omap_dss_device *dssdev)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- struct backlight_device *bldev = td->bldev;
+ dev_dbg(&dssdev->dev, "suspend\n");
- bldev->props.power = FB_BLANK_POWERDOWN;
- taal_bl_update_status(bldev);
+ if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
+ return -EINVAL;
+
+ taal_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
return 0;
}
static int taal_resume(struct omap_dss_device *dssdev)
{
+ int r;
+ dev_dbg(&dssdev->dev, "resume\n");
+
+ if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
+ return -EINVAL;
+
+ r = taal_power_on(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ return r;
+}
+
+static void taal_framedone_cb(int err, void *data)
+{
+ struct omap_dss_device *dssdev = data;
+ dev_dbg(&dssdev->dev, "framedone, err %d\n", err);
+ dsi_bus_unlock();
+}
+
+static int taal_update(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h)
+{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- struct backlight_device *bldev = td->bldev;
+ int r;
- bldev->props.power = FB_BLANK_UNBLANK;
- taal_bl_update_status(bldev);
+ dev_dbg(&dssdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
+
+ dsi_bus_lock();
+
+ if (!td->enabled) {
+ r = 0;
+ goto err;
+ }
+
+ r = omap_dsi_prepare_update(dssdev, &x, &y, &w, &h);
+ if (r)
+ goto err;
+
+ r = taal_set_update_window(x, y, w, h);
+ if (r)
+ goto err;
+
+ r = omap_dsi_update(dssdev, TCH, x, y, w, h,
+ taal_framedone_cb, dssdev);
+ if (r)
+ goto err;
+ /* note: no bus_unlock here. unlock is in framedone_cb */
return 0;
+err:
+ dsi_bus_unlock();
+ return r;
}
-static void taal_setup_update(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
+static int taal_sync(struct omap_dss_device *dssdev)
{
- taal_set_update_window(x, y, w, h);
+ dev_dbg(&dssdev->dev, "sync\n");
+
+ dsi_bus_lock();
+ dsi_bus_unlock();
+
+ dev_dbg(&dssdev->dev, "sync done\n");
+
+ return 0;
}
-static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
+static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable)
{
struct taal_data *td = dev_get_drvdata(&dssdev->dev);
int r;
@@ -744,25 +846,32 @@ static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
else
r = taal_dcs_write_0(DCS_TEAR_OFF);
+ omapdss_dsi_enable_te(dssdev, enable);
+
+ /* XXX for some reason, DSI TE breaks if we don't wait here.
+ * Panel bug? Needs more studying */
+ msleep(100);
+
return r;
}
-static int taal_wait_te(struct omap_dss_device *dssdev)
+static int taal_enable_te(struct omap_dss_device *dssdev, bool enable)
{
- struct taal_data *td = dev_get_drvdata(&dssdev->dev);
- long wait = msecs_to_jiffies(500);
+ int r;
- if (!td->use_ext_te || !td->te_enabled)
- return 0;
+ dsi_bus_lock();
- INIT_COMPLETION(td->te_completion);
- wait = wait_for_completion_timeout(&td->te_completion, wait);
- if (wait == 0) {
- dev_err(&dssdev->dev, "timeout waiting TE\n");
- return -ETIME;
- }
+ r = _taal_enable_te(dssdev, enable);
- return 0;
+ dsi_bus_unlock();
+
+ return r;
+}
+
+static int taal_get_te(struct omap_dss_device *dssdev)
+{
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+ return td->te_enabled;
}
static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
@@ -772,16 +881,21 @@ static int taal_rotate(struct omap_dss_device *dssdev, u8 rotate)
dev_dbg(&dssdev->dev, "rotate %d\n", rotate);
+ dsi_bus_lock();
+
if (td->enabled) {
r = taal_set_addr_mode(rotate, td->mirror);
-
if (r)
- return r;
+ goto err;
}
td->rotate = rotate;
+ dsi_bus_unlock();
return 0;
+err:
+ dsi_bus_unlock();
+ return r;
}
static u8 taal_get_rotate(struct omap_dss_device *dssdev)
@@ -797,16 +911,20 @@ static int taal_mirror(struct omap_dss_device *dssdev, bool enable)
dev_dbg(&dssdev->dev, "mirror %d\n", enable);
+ dsi_bus_lock();
if (td->enabled) {
r = taal_set_addr_mode(td->rotate, enable);
-
if (r)
- return r;
+ goto err;
}
td->mirror = enable;
+ dsi_bus_unlock();
return 0;
+err:
+ dsi_bus_unlock();
+ return r;
}
static bool taal_get_mirror(struct omap_dss_device *dssdev)
@@ -820,17 +938,23 @@ static int taal_run_test(struct omap_dss_device *dssdev, int test_num)
u8 id1, id2, id3;
int r;
+ dsi_bus_lock();
+
r = taal_dcs_read_1(DCS_GET_ID1, &id1);
if (r)
- return r;
+ goto err;
r = taal_dcs_read_1(DCS_GET_ID2, &id2);
if (r)
- return r;
+ goto err;
r = taal_dcs_read_1(DCS_GET_ID3, &id3);
if (r)
- return r;
+ goto err;
+ dsi_bus_unlock();
return 0;
+err:
+ dsi_bus_unlock();
+ return r;
}
static int taal_memory_read(struct omap_dss_device *dssdev,
@@ -841,6 +965,10 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
int first = 1;
int plen;
unsigned buf_used = 0;
+ struct taal_data *td = dev_get_drvdata(&dssdev->dev);
+
+ if (!td->enabled)
+ return -ENODEV;
if (size < w * h * 3)
return -ENOMEM;
@@ -849,6 +977,8 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
dssdev->panel.timings.x_res *
dssdev->panel.timings.y_res * 3);
+ dsi_bus_lock();
+
/* plen 1 or 2 goes into short packet. until checksum error is fixed,
* use short packets. plen 32 works, but bigger packets seem to cause
* an error. */
@@ -857,11 +987,11 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
else
plen = 2;
- taal_setup_update(dssdev, x, y, w, h);
+ taal_set_update_window(x, y, w, h);
r = dsi_vc_set_max_rx_packet_size(TCH, plen);
if (r)
- return r;
+ goto err0;
while (buf_used < size) {
u8 dcs_cmd = first ? 0x2e : 0x3e;
@@ -894,7 +1024,8 @@ static int taal_memory_read(struct omap_dss_device *dssdev,
err:
dsi_vc_set_max_rx_packet_size(TCH, 1);
-
+err0:
+ dsi_bus_unlock();
return r;
}
@@ -939,8 +1070,11 @@ static void taal_esd_work(struct work_struct *work)
}
/* Self-diagnostics result is also shown on TE GPIO line. We need
* to re-enable TE after self diagnostics */
- if (td->use_ext_te && td->te_enabled)
- taal_enable_te(dssdev, true);
+ if (td->use_ext_te && td->te_enabled) {
+ r = taal_dcs_write_1(DCS_TEAR_ON, 0);
+ if (r)
+ goto err;
+ }
dsi_bus_unlock();
@@ -958,6 +1092,20 @@ err:
queue_delayed_work(td->esd_wq, &td->esd_work, TAAL_ESD_CHECK_PERIOD);
}
+static int taal_set_update_mode(struct omap_dss_device *dssdev,
+ enum omap_dss_update_mode mode)
+{
+ if (mode != OMAP_DSS_UPDATE_MANUAL)
+ return -EINVAL;
+ return 0;
+}
+
+static enum omap_dss_update_mode taal_get_update_mode(
+ struct omap_dss_device *dssdev)
+{
+ return OMAP_DSS_UPDATE_MANUAL;
+}
+
static struct omap_dss_driver taal_driver = {
.probe = taal_probe,
.remove = taal_remove,
@@ -967,9 +1115,18 @@ static struct omap_dss_driver taal_driver = {
.suspend = taal_suspend,
.resume = taal_resume,
- .setup_update = taal_setup_update,
+ .set_update_mode = taal_set_update_mode,
+ .get_update_mode = taal_get_update_mode,
+
+ .update = taal_update,
+ .sync = taal_sync,
+
+ .get_resolution = taal_get_resolution,
+ .get_recommended_bpp = omapdss_default_get_recommended_bpp,
+
.enable_te = taal_enable_te,
- .wait_for_te = taal_wait_te,
+ .get_te = taal_get_te,
+
.set_rotate = taal_rotate,
.get_rotate = taal_get_rotate,
.set_mirror = taal_mirror,
@@ -977,6 +1134,8 @@ static struct omap_dss_driver taal_driver = {
.run_test = taal_run_test,
.memory_read = taal_memory_read,
+ .get_timings = taal_get_timings,
+
.driver = {
.name = "taal",
.owner = THIS_MODULE,
diff --git a/drivers/video/omap2/displays/panel-toppoly-tdo35s.c b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
new file mode 100644
index 00000000000..fa434ca6e4b
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-toppoly-tdo35s.c
@@ -0,0 +1,154 @@
+/*
+ * LCD panel driver for Toppoly TDO35S
+ *
+ * Copyright (C) 2009 CompuLab, Ltd.
+ * Author: Mike Rapoport <mike@compulab.co.il>
+ *
+ * Based on generic panel support
+ * Copyright (C) 2008 Nokia Corporation
+ * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <plat/display.h>
+
+static struct omap_video_timings toppoly_tdo_panel_timings = {
+ /* 640 x 480 @ 60 Hz Reduced blanking VESA CVT 0.31M3-R */
+ .x_res = 480,
+ .y_res = 640,
+
+ .pixel_clock = 26000,
+
+ .hfp = 104,
+ .hsw = 8,
+ .hbp = 8,
+
+ .vfp = 4,
+ .vsw = 2,
+ .vbp = 2,
+};
+
+static int toppoly_tdo_panel_power_on(struct omap_dss_device *dssdev)
+{
+ int r;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto err1;
+ }
+
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
+ return r;
+}
+
+static void toppoly_tdo_panel_power_off(struct omap_dss_device *dssdev)
+{
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int toppoly_tdo_panel_probe(struct omap_dss_device *dssdev)
+{
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
+ OMAP_DSS_LCD_IHS;
+ dssdev->panel.timings = toppoly_tdo_panel_timings;
+
+ return 0;
+}
+
+static void toppoly_tdo_panel_remove(struct omap_dss_device *dssdev)
+{
+}
+
+static int toppoly_tdo_panel_enable(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ r = toppoly_tdo_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void toppoly_tdo_panel_disable(struct omap_dss_device *dssdev)
+{
+ toppoly_tdo_panel_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int toppoly_tdo_panel_suspend(struct omap_dss_device *dssdev)
+{
+ toppoly_tdo_panel_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ return 0;
+}
+
+static int toppoly_tdo_panel_resume(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ r = toppoly_tdo_panel_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static struct omap_dss_driver generic_driver = {
+ .probe = toppoly_tdo_panel_probe,
+ .remove = toppoly_tdo_panel_remove,
+
+ .enable = toppoly_tdo_panel_enable,
+ .disable = toppoly_tdo_panel_disable,
+ .suspend = toppoly_tdo_panel_suspend,
+ .resume = toppoly_tdo_panel_resume,
+
+ .driver = {
+ .name = "toppoly_tdo35s_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int __init toppoly_tdo_panel_drv_init(void)
+{
+ return omap_dss_register_driver(&generic_driver);
+}
+
+static void __exit toppoly_tdo_panel_drv_exit(void)
+{
+ omap_dss_unregister_driver(&generic_driver);
+}
+
+module_init(toppoly_tdo_panel_drv_init);
+module_exit(toppoly_tdo_panel_drv_exit);
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
new file mode 100644
index 00000000000..d578feee355
--- /dev/null
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -0,0 +1,528 @@
+/*
+ * LCD panel driver for TPO TD043MTEA1
+ *
+ * Author: Gražvydas Ignotas <notasas@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio.h>
+#include <linux/err.h>
+
+#include <plat/display.h>
+
+#define TPO_R02_MODE(x) ((x) & 7)
+#define TPO_R02_MODE_800x480 7
+#define TPO_R02_NCLK_RISING BIT(3)
+#define TPO_R02_HSYNC_HIGH BIT(4)
+#define TPO_R02_VSYNC_HIGH BIT(5)
+
+#define TPO_R03_NSTANDBY BIT(0)
+#define TPO_R03_EN_CP_CLK BIT(1)
+#define TPO_R03_EN_VGL_PUMP BIT(2)
+#define TPO_R03_EN_PWM BIT(3)
+#define TPO_R03_DRIVING_CAP_100 BIT(4)
+#define TPO_R03_EN_PRE_CHARGE BIT(6)
+#define TPO_R03_SOFTWARE_CTL BIT(7)
+
+#define TPO_R04_NFLIP_H BIT(0)
+#define TPO_R04_NFLIP_V BIT(1)
+#define TPO_R04_CP_CLK_FREQ_1H BIT(2)
+#define TPO_R04_VGL_FREQ_1H BIT(4)
+
+#define TPO_R03_VAL_NORMAL (TPO_R03_NSTANDBY | TPO_R03_EN_CP_CLK | \
+ TPO_R03_EN_VGL_PUMP | TPO_R03_EN_PWM | \
+ TPO_R03_DRIVING_CAP_100 | TPO_R03_EN_PRE_CHARGE | \
+ TPO_R03_SOFTWARE_CTL)
+
+#define TPO_R03_VAL_STANDBY (TPO_R03_DRIVING_CAP_100 | \
+ TPO_R03_EN_PRE_CHARGE | TPO_R03_SOFTWARE_CTL)
+
+static const u16 tpo_td043_def_gamma[12] = {
+ 106, 200, 289, 375, 460, 543, 625, 705, 785, 864, 942, 1020
+};
+
+struct tpo_td043_device {
+ struct spi_device *spi;
+ struct regulator *vcc_reg;
+ u16 gamma[12];
+ u32 mode;
+ u32 hmirror:1;
+ u32 vmirror:1;
+};
+
+static int tpo_td043_write(struct spi_device *spi, u8 addr, u8 data)
+{
+ struct spi_message m;
+ struct spi_transfer xfer;
+ u16 w;
+ int r;
+
+ spi_message_init(&m);
+
+ memset(&xfer, 0, sizeof(xfer));
+
+ w = ((u16)addr << 10) | (1 << 8) | data;
+ xfer.tx_buf = &w;
+ xfer.bits_per_word = 16;
+ xfer.len = 2;
+ spi_message_add_tail(&xfer, &m);
+
+ r = spi_sync(spi, &m);
+ if (r < 0)
+ dev_warn(&spi->dev, "failed to write to LCD reg (%d)\n", r);
+ return r;
+}
+
+static void tpo_td043_write_gamma(struct spi_device *spi, u16 gamma[12])
+{
+ u8 i, val;
+
+ /* gamma bits [9:8] */
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i] & 0x300) >> ((i + 1) * 2);
+ tpo_td043_write(spi, 0x11, val);
+
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i+4] & 0x300) >> ((i + 1) * 2);
+ tpo_td043_write(spi, 0x12, val);
+
+ for (val = i = 0; i < 4; i++)
+ val |= (gamma[i+8] & 0x300) >> ((i + 1) * 2);
+ tpo_td043_write(spi, 0x13, val);
+
+ /* gamma bits [7:0] */
+ for (val = i = 0; i < 12; i++)
+ tpo_td043_write(spi, 0x14 + i, gamma[i] & 0xff);
+}
+
+static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v)
+{
+ u8 reg4 = TPO_R04_NFLIP_H | TPO_R04_NFLIP_V | \
+ TPO_R04_CP_CLK_FREQ_1H | TPO_R04_VGL_FREQ_1H;
+ if (h)
+ reg4 &= ~TPO_R04_NFLIP_H;
+ if (v)
+ reg4 &= ~TPO_R04_NFLIP_V;
+
+ return tpo_td043_write(spi, 4, reg4);
+}
+
+static int tpo_td043_set_hmirror(struct omap_dss_device *dssdev, bool enable)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+
+ tpo_td043->hmirror = enable;
+ return tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror,
+ tpo_td043->vmirror);
+}
+
+static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+
+ return tpo_td043->hmirror;
+}
+
+static ssize_t tpo_td043_vmirror_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", tpo_td043->vmirror);
+}
+
+static ssize_t tpo_td043_vmirror_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ ret = strict_strtol(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ ret = tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror, val);
+ if (ret < 0)
+ return ret;
+
+ tpo_td043->vmirror = val;
+
+ return count;
+}
+
+static ssize_t tpo_td043_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", tpo_td043->mode);
+}
+
+static ssize_t tpo_td043_mode_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
+ long val;
+ int ret;
+
+ ret = strict_strtol(buf, 0, &val);
+ if (ret != 0 || val & ~7)
+ return -EINVAL;
+
+ tpo_td043->mode = val;
+
+ val |= TPO_R02_NCLK_RISING;
+ tpo_td043_write(tpo_td043->spi, 2, val);
+
+ return count;
+}
+
+static ssize_t tpo_td043_gamma_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
+ ssize_t len = 0;
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(tpo_td043->gamma); i++) {
+ ret = snprintf(buf + len, PAGE_SIZE - len, "%u ",
+ tpo_td043->gamma[i]);
+ if (ret < 0)
+ return ret;
+ len += ret;
+ }
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t tpo_td043_gamma_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(dev);
+ unsigned int g[12];
+ int ret;
+ int i;
+
+ ret = sscanf(buf, "%u %u %u %u %u %u %u %u %u %u %u %u",
+ &g[0], &g[1], &g[2], &g[3], &g[4], &g[5],
+ &g[6], &g[7], &g[8], &g[9], &g[10], &g[11]);
+
+ if (ret != 12)
+ return -EINVAL;
+
+ for (i = 0; i < 12; i++)
+ tpo_td043->gamma[i] = g[i];
+
+ tpo_td043_write_gamma(tpo_td043->spi, tpo_td043->gamma);
+
+ return count;
+}
+
+static DEVICE_ATTR(vmirror, S_IRUGO | S_IWUSR,
+ tpo_td043_vmirror_show, tpo_td043_vmirror_store);
+static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR,
+ tpo_td043_mode_show, tpo_td043_mode_store);
+static DEVICE_ATTR(gamma, S_IRUGO | S_IWUSR,
+ tpo_td043_gamma_show, tpo_td043_gamma_store);
+
+static struct attribute *tpo_td043_attrs[] = {
+ &dev_attr_vmirror.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_gamma.attr,
+ NULL,
+};
+
+static struct attribute_group tpo_td043_attr_group = {
+ .attrs = tpo_td043_attrs,
+};
+
+static const struct omap_video_timings tpo_td043_timings = {
+ .x_res = 800,
+ .y_res = 480,
+
+ .pixel_clock = 36000,
+
+ .hsw = 1,
+ .hfp = 68,
+ .hbp = 214,
+
+ .vsw = 1,
+ .vfp = 39,
+ .vbp = 34,
+};
+
+static int tpo_td043_power_on(struct omap_dss_device *dssdev)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+ int nreset_gpio = dssdev->reset_gpio;
+ int r;
+
+ r = omapdss_dpi_display_enable(dssdev);
+ if (r)
+ goto err0;
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto err1;
+ }
+
+ regulator_enable(tpo_td043->vcc_reg);
+
+ /* wait for power up */
+ msleep(160);
+
+ if (gpio_is_valid(nreset_gpio))
+ gpio_set_value(nreset_gpio, 1);
+
+ tpo_td043_write(tpo_td043->spi, 2,
+ TPO_R02_MODE(tpo_td043->mode) | TPO_R02_NCLK_RISING);
+ tpo_td043_write(tpo_td043->spi, 3, TPO_R03_VAL_NORMAL);
+ tpo_td043_write(tpo_td043->spi, 0x20, 0xf0);
+ tpo_td043_write(tpo_td043->spi, 0x21, 0xf0);
+ tpo_td043_write_mirror(tpo_td043->spi, tpo_td043->hmirror,
+ tpo_td043->vmirror);
+ tpo_td043_write_gamma(tpo_td043->spi, tpo_td043->gamma);
+
+ return 0;
+err1:
+ omapdss_dpi_display_disable(dssdev);
+err0:
+ return r;
+}
+
+static void tpo_td043_power_off(struct omap_dss_device *dssdev)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+ int nreset_gpio = dssdev->reset_gpio;
+
+ tpo_td043_write(tpo_td043->spi, 3,
+ TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
+
+ if (gpio_is_valid(nreset_gpio))
+ gpio_set_value(nreset_gpio, 0);
+
+ /* wait for at least 2 vsyncs before cutting off power */
+ msleep(50);
+
+ tpo_td043_write(tpo_td043->spi, 3, TPO_R03_VAL_STANDBY);
+
+ regulator_disable(tpo_td043->vcc_reg);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
+ omapdss_dpi_display_disable(dssdev);
+}
+
+static int tpo_td043_enable(struct omap_dss_device *dssdev)
+{
+ int ret;
+
+ dev_dbg(&dssdev->dev, "enable\n");
+
+ ret = tpo_td043_power_on(dssdev);
+ if (ret)
+ return ret;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static void tpo_td043_disable(struct omap_dss_device *dssdev)
+{
+ dev_dbg(&dssdev->dev, "disable\n");
+
+ tpo_td043_power_off(dssdev);
+
+ dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+}
+
+static int tpo_td043_suspend(struct omap_dss_device *dssdev)
+{
+ tpo_td043_power_off(dssdev);
+ dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
+ return 0;
+}
+
+static int tpo_td043_resume(struct omap_dss_device *dssdev)
+{
+ int r = 0;
+
+ r = tpo_td043_power_on(dssdev);
+ if (r)
+ return r;
+
+ dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+
+ return 0;
+}
+
+static int tpo_td043_probe(struct omap_dss_device *dssdev)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+ int nreset_gpio = dssdev->reset_gpio;
+ int ret = 0;
+
+ dev_dbg(&dssdev->dev, "probe\n");
+
+ if (tpo_td043 == NULL) {
+ dev_err(&dssdev->dev, "missing tpo_td043_device\n");
+ return -ENODEV;
+ }
+
+ dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IHS |
+ OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IPC;
+ dssdev->panel.timings = tpo_td043_timings;
+ dssdev->ctrl.pixel_size = 24;
+
+ tpo_td043->mode = TPO_R02_MODE_800x480;
+ memcpy(tpo_td043->gamma, tpo_td043_def_gamma, sizeof(tpo_td043->gamma));
+
+ tpo_td043->vcc_reg = regulator_get(&dssdev->dev, "vcc");
+ if (IS_ERR(tpo_td043->vcc_reg)) {
+ dev_err(&dssdev->dev, "failed to get LCD VCC regulator\n");
+ ret = PTR_ERR(tpo_td043->vcc_reg);
+ goto fail_regulator;
+ }
+
+ if (gpio_is_valid(nreset_gpio)) {
+ ret = gpio_request(nreset_gpio, "lcd reset");
+ if (ret < 0) {
+ dev_err(&dssdev->dev, "couldn't request reset GPIO\n");
+ goto fail_gpio_req;
+ }
+
+ ret = gpio_direction_output(nreset_gpio, 0);
+ if (ret < 0) {
+ dev_err(&dssdev->dev, "couldn't set GPIO direction\n");
+ goto fail_gpio_direction;
+ }
+ }
+
+ ret = sysfs_create_group(&dssdev->dev.kobj, &tpo_td043_attr_group);
+ if (ret)
+ dev_warn(&dssdev->dev, "failed to create sysfs files\n");
+
+ return 0;
+
+fail_gpio_direction:
+ gpio_free(nreset_gpio);
+fail_gpio_req:
+ regulator_put(tpo_td043->vcc_reg);
+fail_regulator:
+ kfree(tpo_td043);
+ return ret;
+}
+
+static void tpo_td043_remove(struct omap_dss_device *dssdev)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&dssdev->dev);
+ int nreset_gpio = dssdev->reset_gpio;
+
+ dev_dbg(&dssdev->dev, "remove\n");
+
+ sysfs_remove_group(&dssdev->dev.kobj, &tpo_td043_attr_group);
+ regulator_put(tpo_td043->vcc_reg);
+ if (gpio_is_valid(nreset_gpio))
+ gpio_free(nreset_gpio);
+}
+
+static struct omap_dss_driver tpo_td043_driver = {
+ .probe = tpo_td043_probe,
+ .remove = tpo_td043_remove,
+
+ .enable = tpo_td043_enable,
+ .disable = tpo_td043_disable,
+ .suspend = tpo_td043_suspend,
+ .resume = tpo_td043_resume,
+ .set_mirror = tpo_td043_set_hmirror,
+ .get_mirror = tpo_td043_get_hmirror,
+
+ .driver = {
+ .name = "tpo_td043mtea1_panel",
+ .owner = THIS_MODULE,
+ },
+};
+
+static int tpo_td043_spi_probe(struct spi_device *spi)
+{
+ struct omap_dss_device *dssdev = spi->dev.platform_data;
+ struct tpo_td043_device *tpo_td043;
+ int ret;
+
+ if (dssdev == NULL) {
+ dev_err(&spi->dev, "missing dssdev\n");
+ return -ENODEV;
+ }
+
+ spi->bits_per_word = 16;
+ spi->mode = SPI_MODE_0;
+
+ ret = spi_setup(spi);
+ if (ret < 0) {
+ dev_err(&spi->dev, "spi_setup failed: %d\n", ret);
+ return ret;
+ }
+
+ tpo_td043 = kzalloc(sizeof(*tpo_td043), GFP_KERNEL);
+ if (tpo_td043 == NULL)
+ return -ENOMEM;
+
+ tpo_td043->spi = spi;
+ dev_set_drvdata(&spi->dev, tpo_td043);
+ dev_set_drvdata(&dssdev->dev, tpo_td043);
+
+ omap_dss_register_driver(&tpo_td043_driver);
+
+ return 0;
+}
+
+static int __devexit tpo_td043_spi_remove(struct spi_device *spi)
+{
+ struct tpo_td043_device *tpo_td043 = dev_get_drvdata(&spi->dev);
+
+ omap_dss_unregister_driver(&tpo_td043_driver);
+ kfree(tpo_td043);
+
+ return 0;
+}
+
+static struct spi_driver tpo_td043_spi_driver = {
+ .driver = {
+ .name = "tpo_td043mtea1_panel_spi",
+ .bus = &spi_bus_type,
+ .owner = THIS_MODULE,
+ },
+ .probe = tpo_td043_spi_probe,
+ .remove = __devexit_p(tpo_td043_spi_remove),
+};
+
+static int __init tpo_td043_init(void)
+{
+ return spi_register_driver(&tpo_td043_spi_driver);
+}
+
+static void __exit tpo_td043_exit(void)
+{
+ spi_unregister_driver(&tpo_td043_spi_driver);
+}
+
+module_init(tpo_td043_init);
+module_exit(tpo_td043_exit);
+
+MODULE_AUTHOR("Gražvydas Ignotas <notasas@gmail.com>");
+MODULE_DESCRIPTION("TPO TD043MTEA1 LCD Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index c63ce767b27..87afb81b2c4 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -30,19 +30,29 @@ config OMAP2_DSS_COLLECT_IRQ_STATS
depends on OMAP2_DSS_DEBUG_SUPPORT
default n
help
- Collect DSS IRQ statistics, printable via debugfs
+ Collect DSS IRQ statistics, printable via debugfs.
+
+ The statistics can be found from
+ <debugfs>/omapdss/dispc_irq for DISPC interrupts, and
+ <debugfs>/omapdss/dsi_irq for DSI interrupts.
config OMAP2_DSS_RFBI
bool "RFBI support"
default n
help
- MIPI DBI, or RFBI (Remote Framebuffer Interface), support.
+ MIPI DBI support (RFBI, Remote Framebuffer Interface, in Texas
+ Instruments' terminology).
+
+ DBI is a bus between the host processor and a peripheral,
+ such as a display or a framebuffer chip.
+
+ See http://www.mipi.org/ for DBI specifications.
config OMAP2_DSS_VENC
bool "VENC support"
default y
help
- OMAP Video Encoder support.
+ OMAP Video Encoder support for S-Video and composite TV-out.
config OMAP2_DSS_SDI
bool "SDI support"
@@ -51,12 +61,20 @@ config OMAP2_DSS_SDI
help
SDI (Serial Display Interface) support.
+ SDI is a high speed one-way display serial bus between the host
+ processor and a display.
+
config OMAP2_DSS_DSI
bool "DSI support"
depends on ARCH_OMAP3
default n
help
- MIPI DSI support.
+ MIPI DSI (Display Serial Interface) support.
+
+ DSI is a high speed half-duplex serial interface between the host
+ processor and a peripheral, such as a display or a framebuffer chip.
+
+ See http://www.mipi.org/ for DSI specifications.
config OMAP2_DSS_USE_DSI_PLL
bool "Use DSI PLL for PCLK (EXPERIMENTAL)"
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index 82918eec6d2..7ebe50b335e 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -31,6 +31,7 @@
#include <linux/debugfs.h>
#include <linux/io.h>
#include <linux/device.h>
+#include <linux/regulator/consumer.h>
#include <plat/display.h>
#include <plat/clock.h>
@@ -47,6 +48,10 @@ static struct {
struct clk *dss_54m_fck;
struct clk *dss_96m_fck;
unsigned num_clks_enabled;
+
+ struct regulator *vdds_dsi_reg;
+ struct regulator *vdds_sdi_reg;
+ struct regulator *vdda_dac_reg;
} core;
static void dss_clk_enable_all_no_ctx(void);
@@ -284,9 +289,11 @@ static void dss_clk_enable_no_ctx(enum dss_clock clks)
void dss_clk_enable(enum dss_clock clks)
{
+ bool check_ctx = core.num_clks_enabled == 0;
+
dss_clk_enable_no_ctx(clks);
- if (cpu_is_omap34xx() && dss_need_ctx_restore())
+ if (check_ctx && cpu_is_omap34xx() && dss_need_ctx_restore())
restore_all_ctx();
}
@@ -352,6 +359,50 @@ static void dss_clk_disable_all(void)
dss_clk_disable(clks);
}
+/* REGULATORS */
+
+struct regulator *dss_get_vdds_dsi(void)
+{
+ struct regulator *reg;
+
+ if (core.vdds_dsi_reg != NULL)
+ return core.vdds_dsi_reg;
+
+ reg = regulator_get(&core.pdev->dev, "vdds_dsi");
+ if (!IS_ERR(reg))
+ core.vdds_dsi_reg = reg;
+
+ return reg;
+}
+
+struct regulator *dss_get_vdds_sdi(void)
+{
+ struct regulator *reg;
+
+ if (core.vdds_sdi_reg != NULL)
+ return core.vdds_sdi_reg;
+
+ reg = regulator_get(&core.pdev->dev, "vdds_sdi");
+ if (!IS_ERR(reg))
+ core.vdds_sdi_reg = reg;
+
+ return reg;
+}
+
+struct regulator *dss_get_vdda_dac(void)
+{
+ struct regulator *reg;
+
+ if (core.vdda_dac_reg != NULL)
+ return core.vdda_dac_reg;
+
+ reg = regulator_get(&core.pdev->dev, "vdda_dac");
+ if (!IS_ERR(reg))
+ core.vdda_dac_reg = reg;
+
+ return reg;
+}
+
/* DEBUGFS */
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
static void dss_debug_dump_clocks(struct seq_file *s)
@@ -397,10 +448,12 @@ static int dss_initialize_debugfs(void)
debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
&dss_debug_dump_clocks, &dss_debug_fops);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
&dispc_dump_irqs, &dss_debug_fops);
+#endif
-#ifdef CONFIG_OMAP2_DSS_DSI
+#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
debugfs_create_file("dsi_irq", S_IRUGO, dss_debugfs_dir,
&dsi_dump_irqs, &dss_debug_fops);
#endif
@@ -473,7 +526,7 @@ static int omap_dss_probe(struct platform_device *pdev)
}
#endif
- r = dpi_init();
+ r = dpi_init(pdev);
if (r) {
DSSERR("Failed to initialize dpi\n");
goto fail0;
@@ -718,16 +771,14 @@ static int dss_driver_probe(struct device *dev)
dss_init_device(core.pdev, dssdev);
- /* skip this if the device is behind a ctrl */
- if (!dssdev->panel.ctrl) {
- force = pdata->default_device == dssdev;
- dss_recheck_connections(dssdev, force);
- }
+ force = pdata->default_device == dssdev;
+ dss_recheck_connections(dssdev, force);
r = dssdrv->probe(dssdev);
if (r) {
DSSERR("driver probe failed: %d\n", r);
+ dss_uninit_device(core.pdev, dssdev);
return r;
}
@@ -760,6 +811,13 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
dssdriver->driver.bus = &dss_bus_type;
dssdriver->driver.probe = dss_driver_probe;
dssdriver->driver.remove = dss_driver_remove;
+
+ if (dssdriver->get_resolution == NULL)
+ dssdriver->get_resolution = omapdss_default_get_resolution;
+ if (dssdriver->get_recommended_bpp == NULL)
+ dssdriver->get_recommended_bpp =
+ omapdss_default_get_recommended_bpp;
+
return driver_register(&dssdriver->driver);
}
EXPORT_SYMBOL(omap_dss_register_driver);
@@ -808,8 +866,6 @@ static void omap_dss_dev_release(struct device *dev)
int omap_dss_register_device(struct omap_dss_device *dssdev)
{
static int dev_num;
- static int panel_num;
- int r;
WARN_ON(!dssdev->driver_name);
@@ -818,36 +874,12 @@ int omap_dss_register_device(struct omap_dss_device *dssdev)
dssdev->dev.parent = &dss_bus;
dssdev->dev.release = omap_dss_dev_release;
dev_set_name(&dssdev->dev, "display%d", dev_num++);
- r = device_register(&dssdev->dev);
- if (r)
- return r;
-
- if (dssdev->ctrl.panel) {
- struct omap_dss_device *panel = dssdev->ctrl.panel;
-
- panel->panel.ctrl = dssdev;
-
- reset_device(&panel->dev, 1);
- panel->dev.bus = &dss_bus_type;
- panel->dev.parent = &dssdev->dev;
- panel->dev.release = omap_dss_dev_release;
- dev_set_name(&panel->dev, "panel%d", panel_num++);
- r = device_register(&panel->dev);
- if (r)
- return r;
- }
-
- return 0;
+ return device_register(&dssdev->dev);
}
void omap_dss_unregister_device(struct omap_dss_device *dssdev)
{
device_unregister(&dssdev->dev);
-
- if (dssdev->ctrl.panel) {
- struct omap_dss_device *panel = dssdev->ctrl.panel;
- device_unregister(&panel->dev);
- }
}
/* BUS */
@@ -901,6 +933,21 @@ static int __init omap_dss_init(void)
static void __exit omap_dss_exit(void)
{
+ if (core.vdds_dsi_reg != NULL) {
+ regulator_put(core.vdds_dsi_reg);
+ core.vdds_dsi_reg = NULL;
+ }
+
+ if (core.vdds_sdi_reg != NULL) {
+ regulator_put(core.vdds_sdi_reg);
+ core.vdds_sdi_reg = NULL;
+ }
+
+ if (core.vdda_dac_reg != NULL) {
+ regulator_put(core.vdda_dac_reg);
+ core.vdda_dac_reg = NULL;
+ }
+
platform_driver_unregister(&omap_dss_driver);
omap_dss_bus_unregister();
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index de8bfbac9e2..e777e352dbc 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -1725,7 +1725,7 @@ static void _enable_lcd_out(bool enable)
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0);
}
-void dispc_enable_lcd_out(bool enable)
+static void dispc_enable_lcd_out(bool enable)
{
struct completion frame_done_completion;
bool is_on;
@@ -1772,7 +1772,7 @@ static void _enable_digit_out(bool enable)
REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 1, 1);
}
-void dispc_enable_digit_out(bool enable)
+static void dispc_enable_digit_out(bool enable)
{
struct completion frame_done_completion;
int r;
@@ -1836,6 +1836,26 @@ void dispc_enable_digit_out(bool enable)
enable_clocks(0);
}
+bool dispc_is_channel_enabled(enum omap_channel channel)
+{
+ if (channel == OMAP_DSS_CHANNEL_LCD)
+ return !!REG_GET(DISPC_CONTROL, 0, 0);
+ else if (channel == OMAP_DSS_CHANNEL_DIGIT)
+ return !!REG_GET(DISPC_CONTROL, 1, 1);
+ else
+ BUG();
+}
+
+void dispc_enable_channel(enum omap_channel channel, bool enable)
+{
+ if (channel == OMAP_DSS_CHANNEL_LCD)
+ dispc_enable_lcd_out(enable);
+ else if (channel == OMAP_DSS_CHANNEL_DIGIT)
+ dispc_enable_digit_out(enable);
+ else
+ BUG();
+}
+
void dispc_lcd_enable_signal_polarity(bool act_high)
{
enable_clocks(1);
@@ -2198,7 +2218,7 @@ unsigned long dispc_fclk_rate(void)
{
unsigned long r = 0;
- if (dss_get_dispc_clk_source() == 0)
+ if (dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK)
r = dss_clk_get_rate(DSS_CLK_FCK1);
else
#ifdef CONFIG_OMAP2_DSS_DSI
@@ -2251,7 +2271,7 @@ void dispc_dump_clocks(struct seq_file *s)
seq_printf(s, "- DISPC -\n");
seq_printf(s, "dispc fclk source = %s\n",
- dss_get_dispc_clk_source() == 0 ?
+ dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
"dss1_alwon_fclk" : "dsi1_pll_fclk");
seq_printf(s, "fck\t\t%-16lu\n", dispc_fclk_rate());
@@ -2301,8 +2321,6 @@ void dispc_dump_irqs(struct seq_file *s)
PIS(WAKEUP);
#undef PIS
}
-#else
-void dispc_dump_irqs(struct seq_file *s) { }
#endif
void dispc_dump_regs(struct seq_file *s)
@@ -2854,12 +2872,13 @@ static void dispc_error_worker(struct work_struct *work)
manager = mgr;
enable = mgr->device->state ==
OMAP_DSS_DISPLAY_ACTIVE;
- mgr->device->disable(mgr->device);
+ mgr->device->driver->disable(mgr->device);
break;
}
}
if (manager) {
+ struct omap_dss_device *dssdev = manager->device;
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
@@ -2874,7 +2893,7 @@ static void dispc_error_worker(struct work_struct *work)
dispc_go(manager->id);
mdelay(50);
if (enable)
- manager->device->enable(manager->device);
+ dssdev->driver->enable(dssdev);
}
}
@@ -2892,12 +2911,13 @@ static void dispc_error_worker(struct work_struct *work)
manager = mgr;
enable = mgr->device->state ==
OMAP_DSS_DISPLAY_ACTIVE;
- mgr->device->disable(mgr->device);
+ mgr->device->driver->disable(mgr->device);
break;
}
}
if (manager) {
+ struct omap_dss_device *dssdev = manager->device;
for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
struct omap_overlay *ovl;
ovl = omap_dss_get_overlay(i);
@@ -2912,7 +2932,7 @@ static void dispc_error_worker(struct work_struct *work)
dispc_go(manager->id);
mdelay(50);
if (enable)
- manager->device->enable(manager->device);
+ dssdev->driver->enable(dssdev);
}
}
@@ -2923,7 +2943,7 @@ static void dispc_error_worker(struct work_struct *work)
mgr = omap_dss_get_overlay_manager(i);
if (mgr->caps & OMAP_DSS_OVL_CAP_DISPC)
- mgr->device->disable(mgr->device);
+ mgr->device->driver->disable(mgr->device);
}
}
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 3b92b84b956..6a74ea116d2 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -53,11 +53,11 @@ static ssize_t display_enabled_store(struct device *dev,
if (enabled != (dssdev->state != OMAP_DSS_DISPLAY_DISABLED)) {
if (enabled) {
- r = dssdev->enable(dssdev);
+ r = dssdev->driver->enable(dssdev);
if (r)
return r;
} else {
- dssdev->disable(dssdev);
+ dssdev->driver->disable(dssdev);
}
}
@@ -69,8 +69,8 @@ static ssize_t display_upd_mode_show(struct device *dev,
{
struct omap_dss_device *dssdev = to_dss_device(dev);
enum omap_dss_update_mode mode = OMAP_DSS_UPDATE_AUTO;
- if (dssdev->get_update_mode)
- mode = dssdev->get_update_mode(dssdev);
+ if (dssdev->driver->get_update_mode)
+ mode = dssdev->driver->get_update_mode(dssdev);
return snprintf(buf, PAGE_SIZE, "%d\n", mode);
}
@@ -94,7 +94,7 @@ static ssize_t display_upd_mode_store(struct device *dev,
return -EINVAL;
}
- r = dssdev->set_update_mode(dssdev, mode);
+ r = dssdev->driver->set_update_mode(dssdev, mode);
if (r)
return r;
@@ -106,7 +106,8 @@ static ssize_t display_tear_show(struct device *dev,
{
struct omap_dss_device *dssdev = to_dss_device(dev);
return snprintf(buf, PAGE_SIZE, "%d\n",
- dssdev->get_te ? dssdev->get_te(dssdev) : 0);
+ dssdev->driver->get_te ?
+ dssdev->driver->get_te(dssdev) : 0);
}
static ssize_t display_tear_store(struct device *dev,
@@ -116,12 +117,12 @@ static ssize_t display_tear_store(struct device *dev,
unsigned long te;
int r;
- if (!dssdev->enable_te || !dssdev->get_te)
+ if (!dssdev->driver->enable_te || !dssdev->driver->get_te)
return -ENOENT;
te = simple_strtoul(buf, NULL, 0);
- r = dssdev->enable_te(dssdev, te);
+ r = dssdev->driver->enable_te(dssdev, te);
if (r)
return r;
@@ -134,10 +135,10 @@ static ssize_t display_timings_show(struct device *dev,
struct omap_dss_device *dssdev = to_dss_device(dev);
struct omap_video_timings t;
- if (!dssdev->get_timings)
+ if (!dssdev->driver->get_timings)
return -ENOENT;
- dssdev->get_timings(dssdev, &t);
+ dssdev->driver->get_timings(dssdev, &t);
return snprintf(buf, PAGE_SIZE, "%u,%u/%u/%u/%u,%u/%u/%u/%u\n",
t.pixel_clock,
@@ -152,7 +153,7 @@ static ssize_t display_timings_store(struct device *dev,
struct omap_video_timings t;
int r, found;
- if (!dssdev->set_timings || !dssdev->check_timings)
+ if (!dssdev->driver->set_timings || !dssdev->driver->check_timings)
return -ENOENT;
found = 0;
@@ -171,11 +172,11 @@ static ssize_t display_timings_store(struct device *dev,
&t.y_res, &t.vfp, &t.vbp, &t.vsw) != 9)
return -EINVAL;
- r = dssdev->check_timings(dssdev, &t);
+ r = dssdev->driver->check_timings(dssdev, &t);
if (r)
return r;
- dssdev->set_timings(dssdev, &t);
+ dssdev->driver->set_timings(dssdev, &t);
return size;
}
@@ -185,9 +186,9 @@ static ssize_t display_rotate_show(struct device *dev,
{
struct omap_dss_device *dssdev = to_dss_device(dev);
int rotate;
- if (!dssdev->get_rotate)
+ if (!dssdev->driver->get_rotate)
return -ENOENT;
- rotate = dssdev->get_rotate(dssdev);
+ rotate = dssdev->driver->get_rotate(dssdev);
return snprintf(buf, PAGE_SIZE, "%u\n", rotate);
}
@@ -198,12 +199,12 @@ static ssize_t display_rotate_store(struct device *dev,
unsigned long rot;
int r;
- if (!dssdev->set_rotate || !dssdev->get_rotate)
+ if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
return -ENOENT;
rot = simple_strtoul(buf, NULL, 0);
- r = dssdev->set_rotate(dssdev, rot);
+ r = dssdev->driver->set_rotate(dssdev, rot);
if (r)
return r;
@@ -215,9 +216,9 @@ static ssize_t display_mirror_show(struct device *dev,
{
struct omap_dss_device *dssdev = to_dss_device(dev);
int mirror;
- if (!dssdev->get_mirror)
+ if (!dssdev->driver->get_mirror)
return -ENOENT;
- mirror = dssdev->get_mirror(dssdev);
+ mirror = dssdev->driver->get_mirror(dssdev);
return snprintf(buf, PAGE_SIZE, "%u\n", mirror);
}
@@ -228,12 +229,12 @@ static ssize_t display_mirror_store(struct device *dev,
unsigned long mirror;
int r;
- if (!dssdev->set_mirror || !dssdev->get_mirror)
+ if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
return -ENOENT;
mirror = simple_strtoul(buf, NULL, 0);
- r = dssdev->set_mirror(dssdev, mirror);
+ r = dssdev->driver->set_mirror(dssdev, mirror);
if (r)
return r;
@@ -246,10 +247,10 @@ static ssize_t display_wss_show(struct device *dev,
struct omap_dss_device *dssdev = to_dss_device(dev);
unsigned int wss;
- if (!dssdev->get_wss)
+ if (!dssdev->driver->get_wss)
return -ENOENT;
- wss = dssdev->get_wss(dssdev);
+ wss = dssdev->driver->get_wss(dssdev);
return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss);
}
@@ -261,7 +262,7 @@ static ssize_t display_wss_store(struct device *dev,
unsigned long wss;
int r;
- if (!dssdev->get_wss || !dssdev->set_wss)
+ if (!dssdev->driver->get_wss || !dssdev->driver->set_wss)
return -ENOENT;
if (strict_strtoul(buf, 0, &wss))
@@ -270,7 +271,7 @@ static ssize_t display_wss_store(struct device *dev,
if (wss > 0xfffff)
return -EINVAL;
- r = dssdev->set_wss(dssdev, wss);
+ r = dssdev->driver->set_wss(dssdev, wss);
if (r)
return r;
@@ -303,12 +304,13 @@ static struct device_attribute *display_sysfs_attrs[] = {
NULL
};
-static void default_get_resolution(struct omap_dss_device *dssdev,
+void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
u16 *xres, u16 *yres)
{
*xres = dssdev->panel.timings.x_res;
*yres = dssdev->panel.timings.y_res;
}
+EXPORT_SYMBOL(omapdss_default_get_resolution);
void default_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 fifo_size, enum omap_burst_size *burst_size,
@@ -323,24 +325,8 @@ void default_get_overlay_fifo_thresholds(enum omap_plane plane,
*fifo_low = fifo_size - burst_size_bytes;
}
-static int default_wait_vsync(struct omap_dss_device *dssdev)
+int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
{
- unsigned long timeout = msecs_to_jiffies(500);
- u32 irq;
-
- if (dssdev->type == OMAP_DISPLAY_TYPE_VENC)
- irq = DISPC_IRQ_EVSYNC_ODD;
- else
- irq = DISPC_IRQ_VSYNC;
-
- return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
-}
-
-static int default_get_recommended_bpp(struct omap_dss_device *dssdev)
-{
- if (dssdev->panel.recommended_bpp)
- return dssdev->panel.recommended_bpp;
-
switch (dssdev->type) {
case OMAP_DISPLAY_TYPE_DPI:
if (dssdev->phy.dpi.data_lines == 24)
@@ -362,6 +348,7 @@ static int default_get_recommended_bpp(struct omap_dss_device *dssdev)
BUG();
}
}
+EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
/* Checks if replication logic should be used. Only use for active matrix,
* when overlay is in RGB12U or RGB16 mode, and LCD interface is
@@ -425,10 +412,6 @@ void dss_init_device(struct platform_device *pdev,
return;
}
- dssdev->get_resolution = default_get_resolution;
- dssdev->get_recommended_bpp = default_get_recommended_bpp;
- dssdev->wait_vsync = default_wait_vsync;
-
switch (dssdev->type) {
case OMAP_DISPLAY_TYPE_DPI:
r = dpi_init_display(dssdev);
@@ -502,13 +485,13 @@ static int dss_suspend_device(struct device *dev, void *data)
return 0;
}
- if (!dssdev->suspend) {
+ if (!dssdev->driver->suspend) {
DSSERR("display '%s' doesn't implement suspend\n",
dssdev->name);
return -ENOSYS;
}
- r = dssdev->suspend(dssdev);
+ r = dssdev->driver->suspend(dssdev);
if (r)
return r;
@@ -537,8 +520,8 @@ static int dss_resume_device(struct device *dev, void *data)
int r;
struct omap_dss_device *dssdev = to_dss_device(dev);
- if (dssdev->activate_after_resume && dssdev->resume) {
- r = dssdev->resume(dssdev);
+ if (dssdev->activate_after_resume && dssdev->driver->resume) {
+ r = dssdev->driver->resume(dssdev);
if (r)
return r;
}
@@ -558,7 +541,7 @@ int dss_resume_all_devices(void)
static int dss_disable_device(struct device *dev, void *data)
{
struct omap_dss_device *dssdev = to_dss_device(dev);
- dssdev->disable(dssdev);
+ dssdev->driver->disable(dssdev);
return 0;
}
@@ -591,10 +574,6 @@ struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from)
int match(struct device *dev, void *data)
{
- /* skip panels connected to controllers */
- if (to_dss_device(dev)->panel.ctrl)
- return 0;
-
return 1;
}
@@ -626,45 +605,21 @@ EXPORT_SYMBOL(omap_dss_find_device);
int omap_dss_start_device(struct omap_dss_device *dssdev)
{
- int r;
-
if (!dssdev->driver) {
DSSDBG("no driver\n");
- r = -ENODEV;
- goto err0;
- }
-
- if (dssdev->ctrl.panel && !dssdev->ctrl.panel->driver) {
- DSSDBG("no panel driver\n");
- r = -ENODEV;
- goto err0;
+ return -ENODEV;
}
if (!try_module_get(dssdev->dev.driver->owner)) {
- r = -ENODEV;
- goto err0;
- }
-
- if (dssdev->ctrl.panel) {
- if (!try_module_get(dssdev->ctrl.panel->dev.driver->owner)) {
- r = -ENODEV;
- goto err1;
- }
+ return -ENODEV;
}
return 0;
-err1:
- module_put(dssdev->dev.driver->owner);
-err0:
- return r;
}
EXPORT_SYMBOL(omap_dss_start_device);
void omap_dss_stop_device(struct omap_dss_device *dssdev)
{
- if (dssdev->ctrl.panel)
- module_put(dssdev->ctrl.panel->dev.driver->owner);
-
module_put(dssdev->dev.driver->owner);
}
EXPORT_SYMBOL(omap_dss_stop_device);
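
The display.c hunks above move the per-device hooks (get_resolution, get_mirror, suspend, resume, disable, ...) from the omap_dss_device itself onto its driver ops. A minimal, hypothetical sketch of how a panel driver would now supply those hooks; only the fields dereferenced above are assumed to exist in struct omap_dss_driver, and the omapdss_default_* helpers are the ones exported in this patch:

/* Hypothetical panel driver ops under the new model; the struct name
 * and any hook not dereferenced in the hunks above is an assumption. */
static struct omap_dss_driver generic_panel_driver = {
	.get_resolution		= omapdss_default_get_resolution,
	.get_recommended_bpp	= omapdss_default_get_recommended_bpp,
	/* panel-specific enable/disable/suspend/resume would go here */
};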
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 2d71031baa2..960e977a8bf 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -25,7 +25,10 @@
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/delay.h>
+#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <plat/display.h>
#include <plat/cpu.h>
@@ -33,7 +36,7 @@
#include "dss.h"
static struct {
- int update_enabled;
+ struct regulator *vdds_dsi_reg;
} dpi;
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
@@ -53,7 +56,7 @@ static int dpi_set_dsi_clk(bool is_tft, unsigned long pck_req,
if (r)
return r;
- dss_select_clk_source(0, 1);
+ dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK);
r = dispc_set_clock_div(&dispc_cinfo);
if (r)
@@ -150,7 +153,7 @@ static int dpi_basic_init(struct omap_dss_device *dssdev)
return 0;
}
-static int dpi_display_enable(struct omap_dss_device *dssdev)
+int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
{
int r;
@@ -160,10 +163,10 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
- DSSERR("display already enabled\n");
- r = -EINVAL;
- goto err1;
+ if (cpu_is_omap34xx()) {
+ r = regulator_enable(dpi.vdds_dsi_reg);
+ if (r)
+ goto err1;
}
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -184,18 +187,10 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
mdelay(2);
- dispc_enable_lcd_out(1);
-
- r = dssdev->driver->enable(dssdev);
- if (r)
- goto err5;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
+ dssdev->manager->enable(dssdev->manager);
return 0;
-err5:
- dispc_enable_lcd_out(0);
err4:
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
dsi_pll_uninit();
@@ -204,78 +199,35 @@ err3:
#endif
err2:
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
+ if (cpu_is_omap34xx())
+ regulator_disable(dpi.vdds_dsi_reg);
err1:
omap_dss_stop_device(dssdev);
err0:
return r;
}
+EXPORT_SYMBOL(omapdss_dpi_display_enable);
-static int dpi_display_resume(struct omap_dss_device *dssdev);
-
-static void dpi_display_disable(struct omap_dss_device *dssdev)
+void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
{
- if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
- return;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
- dpi_display_resume(dssdev);
-
- dssdev->driver->disable(dssdev);
-
- dispc_enable_lcd_out(0);
+ dssdev->manager->disable(dssdev->manager);
#ifdef CONFIG_OMAP2_DSS_USE_DSI_PLL
- dss_select_clk_source(0, 0);
+ dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
dsi_pll_uninit();
dss_clk_disable(DSS_CLK_FCK2);
#endif
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
+ if (cpu_is_omap34xx())
+ regulator_disable(dpi.vdds_dsi_reg);
omap_dss_stop_device(dssdev);
}
+EXPORT_SYMBOL(omapdss_dpi_display_disable);
-static int dpi_display_suspend(struct omap_dss_device *dssdev)
-{
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return -EINVAL;
-
- DSSDBG("dpi_display_suspend\n");
-
- if (dssdev->driver->suspend)
- dssdev->driver->suspend(dssdev);
-
- dispc_enable_lcd_out(0);
-
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- return 0;
-}
-
-static int dpi_display_resume(struct omap_dss_device *dssdev)
-{
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
- return -EINVAL;
-
- DSSDBG("dpi_display_resume\n");
-
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
-
- dispc_enable_lcd_out(1);
-
- if (dssdev->driver->resume)
- dssdev->driver->resume(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-}
-
-static void dpi_set_timings(struct omap_dss_device *dssdev,
+void dpi_set_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
DSSDBG("dpi_set_timings\n");
@@ -285,8 +237,9 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
dispc_go(OMAP_DSS_CHANNEL_LCD);
}
}
+EXPORT_SYMBOL(dpi_set_timings);
-static int dpi_check_timings(struct omap_dss_device *dssdev,
+int dpi_check_timings(struct omap_dss_device *dssdev,
struct omap_video_timings *timings)
{
bool is_tft;
@@ -340,56 +293,25 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
return 0;
}
-
-static void dpi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
-
-static int dpi_display_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- if (mode == OMAP_DSS_UPDATE_MANUAL)
- return -EINVAL;
-
- if (mode == OMAP_DSS_UPDATE_DISABLED) {
- dispc_enable_lcd_out(0);
- dpi.update_enabled = 0;
- } else {
- dispc_enable_lcd_out(1);
- dpi.update_enabled = 1;
- }
-
- return 0;
-}
-
-static enum omap_dss_update_mode dpi_display_get_update_mode(
- struct omap_dss_device *dssdev)
-{
- return dpi.update_enabled ? OMAP_DSS_UPDATE_AUTO :
- OMAP_DSS_UPDATE_DISABLED;
-}
+EXPORT_SYMBOL(dpi_check_timings);
int dpi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
- dssdev->enable = dpi_display_enable;
- dssdev->disable = dpi_display_disable;
- dssdev->suspend = dpi_display_suspend;
- dssdev->resume = dpi_display_resume;
- dssdev->set_timings = dpi_set_timings;
- dssdev->check_timings = dpi_check_timings;
- dssdev->get_timings = dpi_get_timings;
- dssdev->set_update_mode = dpi_display_set_update_mode;
- dssdev->get_update_mode = dpi_display_get_update_mode;
-
return 0;
}
-int dpi_init(void)
+int dpi_init(struct platform_device *pdev)
{
+ if (cpu_is_omap34xx()) {
+ dpi.vdds_dsi_reg = dss_get_vdds_dsi();
+ if (IS_ERR(dpi.vdds_dsi_reg)) {
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(dpi.vdds_dsi_reg);
+ }
+ }
+
return 0;
}
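
With dpi_display_enable/disable turned into the exported omapdss_dpi_* entry points above, the enable/disable sequencing is expected to be driven from the panel driver. A rough sketch under that assumption, using only the symbols exported in this hunk; the panel-specific steps are placeholders:

/* Sketch of a DPI panel enable/disable path built on the exported
 * omapdss_dpi_* functions; power/GPIO handling is hypothetical. */
static int generic_dpi_panel_enable(struct omap_dss_device *dssdev)
{
	int r;

	r = omapdss_dpi_display_enable(dssdev);
	if (r)
		return r;

	/* hypothetical: panel-specific power-up here */

	return 0;
}

static void generic_dpi_panel_disable(struct omap_dss_device *dssdev)
{
	/* hypothetical: panel-specific power-down here */
	omapdss_dpi_display_disable(dssdev);
}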
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 6122178f5f8..3af207b2bde 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -27,11 +27,12 @@
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/semaphore.h>
#include <linux/seq_file.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
-#include <linux/kthread.h>
#include <linux/wait.h>
+#include <linux/workqueue.h>
#include <plat/display.h>
#include <plat/clock.h>
@@ -199,7 +200,6 @@ enum dsi_vc_mode {
};
struct dsi_update_region {
- bool dirty;
u16 x, y, w, h;
struct omap_dss_device *device;
};
@@ -224,29 +224,25 @@ static struct
enum dsi_vc_mode mode;
struct omap_dss_device *dssdev;
enum fifo_size fifo_size;
- int dest_per; /* destination peripheral 0-3 */
} vc[4];
struct mutex lock;
- struct mutex bus_lock;
+ struct semaphore bus_lock;
unsigned pll_locked;
struct completion bta_completion;
- struct task_struct *thread;
- wait_queue_head_t waitqueue;
-
- spinlock_t update_lock;
- bool framedone_received;
+ int update_channel;
struct dsi_update_region update_region;
- struct dsi_update_region active_update_region;
- struct completion update_completion;
- enum omap_dss_update_mode user_update_mode;
- enum omap_dss_update_mode update_mode;
bool te_enabled;
- bool use_ext_te;
+
+ struct work_struct framedone_work;
+ void (*framedone_callback)(int, void *);
+ void *framedone_data;
+
+ struct delayed_work framedone_timeout_work;
#ifdef DSI_CATCH_MISSING_TE
struct timer_list te_timer;
@@ -261,8 +257,6 @@ static struct
#ifdef DEBUG
ktime_t perf_setup_time;
ktime_t perf_start_time;
- ktime_t perf_start_time_auto;
- int perf_measure_frames;
#endif
int debug_read;
int debug_write;
@@ -299,16 +293,21 @@ void dsi_restore_context(void)
void dsi_bus_lock(void)
{
- mutex_lock(&dsi.bus_lock);
+ down(&dsi.bus_lock);
}
EXPORT_SYMBOL(dsi_bus_lock);
void dsi_bus_unlock(void)
{
- mutex_unlock(&dsi.bus_lock);
+ up(&dsi.bus_lock);
}
EXPORT_SYMBOL(dsi_bus_unlock);
+static bool dsi_bus_is_locked(void)
+{
+ return dsi.bus_lock.count == 0;
+}
+
static inline int wait_for_bit_change(const struct dsi_reg idx, int bitnum,
int value)
{
@@ -333,12 +332,6 @@ static void dsi_perf_mark_start(void)
dsi.perf_start_time = ktime_get();
}
-static void dsi_perf_mark_start_auto(void)
-{
- dsi.perf_measure_frames = 0;
- dsi.perf_start_time_auto = ktime_get();
-}
-
static void dsi_perf_show(const char *name)
{
ktime_t t, setup_time, trans_time;
@@ -348,9 +341,6 @@ static void dsi_perf_show(const char *name)
if (!dsi_perf)
return;
- if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED)
- return;
-
t = ktime_get();
setup_time = ktime_sub(dsi.perf_start_time, dsi.perf_setup_time);
@@ -365,76 +355,23 @@ static void dsi_perf_show(const char *name)
total_us = setup_us + trans_us;
- total_bytes = dsi.active_update_region.w *
- dsi.active_update_region.h *
- dsi.active_update_region.device->ctrl.pixel_size / 8;
-
- if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
- static u32 s_total_trans_us, s_total_setup_us;
- static u32 s_min_trans_us = 0xffffffff, s_min_setup_us;
- static u32 s_max_trans_us, s_max_setup_us;
- const int numframes = 100;
- ktime_t total_time_auto;
- u32 total_time_auto_us;
-
- dsi.perf_measure_frames++;
-
- if (setup_us < s_min_setup_us)
- s_min_setup_us = setup_us;
+ total_bytes = dsi.update_region.w *
+ dsi.update_region.h *
+ dsi.update_region.device->ctrl.pixel_size / 8;
- if (setup_us > s_max_setup_us)
- s_max_setup_us = setup_us;
-
- s_total_setup_us += setup_us;
-
- if (trans_us < s_min_trans_us)
- s_min_trans_us = trans_us;
-
- if (trans_us > s_max_trans_us)
- s_max_trans_us = trans_us;
-
- s_total_trans_us += trans_us;
-
- if (dsi.perf_measure_frames < numframes)
- return;
-
- total_time_auto = ktime_sub(t, dsi.perf_start_time_auto);
- total_time_auto_us = (u32)ktime_to_us(total_time_auto);
-
- printk(KERN_INFO "DSI(%s): %u fps, setup %u/%u/%u, "
- "trans %u/%u/%u\n",
- name,
- 1000 * 1000 * numframes / total_time_auto_us,
- s_min_setup_us,
- s_max_setup_us,
- s_total_setup_us / numframes,
- s_min_trans_us,
- s_max_trans_us,
- s_total_trans_us / numframes);
-
- s_total_setup_us = 0;
- s_min_setup_us = 0xffffffff;
- s_max_setup_us = 0;
- s_total_trans_us = 0;
- s_min_trans_us = 0xffffffff;
- s_max_trans_us = 0;
- dsi_perf_mark_start_auto();
- } else {
- printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
- "%u bytes, %u kbytes/sec\n",
- name,
- setup_us,
- trans_us,
- total_us,
- 1000*1000 / total_us,
- total_bytes,
- total_bytes * 1000 / total_us);
- }
+ printk(KERN_INFO "DSI(%s): %u us + %u us = %u us (%uHz), "
+ "%u bytes, %u kbytes/sec\n",
+ name,
+ setup_us,
+ trans_us,
+ total_us,
+ 1000*1000 / total_us,
+ total_bytes,
+ total_bytes * 1000 / total_us);
}
#else
#define dsi_perf_mark_setup()
#define dsi_perf_mark_start()
-#define dsi_perf_mark_start_auto()
#define dsi_perf_show(x)
#endif
@@ -774,7 +711,7 @@ static unsigned long dsi_fclk_rate(void)
{
unsigned long r;
- if (dss_get_dsi_clk_source() == 0) {
+ if (dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK) {
/* DSI FCLK source is DSS1_ALWON_FCK, which is dss1_fck */
r = dss_clk_get_rate(DSS_CLK_FCK1);
} else {
@@ -1227,17 +1164,19 @@ void dsi_dump_clocks(struct seq_file *s)
seq_printf(s, "dsi1_pll_fck\t%-16luregm3 %u\t(%s)\n",
cinfo->dsi1_pll_fclk,
cinfo->regm3,
- dss_get_dispc_clk_source() == 0 ? "off" : "on");
+ dss_get_dispc_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
+ "off" : "on");
seq_printf(s, "dsi2_pll_fck\t%-16luregm4 %u\t(%s)\n",
cinfo->dsi2_pll_fclk,
cinfo->regm4,
- dss_get_dsi_clk_source() == 0 ? "off" : "on");
+ dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
+ "off" : "on");
seq_printf(s, "- DSI -\n");
seq_printf(s, "dsi fclk source = %s\n",
- dss_get_dsi_clk_source() == 0 ?
+ dss_get_dsi_clk_source() == DSS_SRC_DSS1_ALWON_FCLK ?
"dss1_alwon_fclk" : "dsi2_pll_fclk");
seq_printf(s, "DSI_FCLK\t%lu\n", dsi_fclk_rate());
@@ -1756,29 +1695,10 @@ static int dsi_force_tx_stop_mode_io(void)
return 0;
}
-static void dsi_vc_print_status(int channel)
-{
- u32 r;
-
- r = dsi_read_reg(DSI_VC_CTRL(channel));
- DSSDBG("vc %d: TX_FIFO_NOT_EMPTY %d, BTA_EN %d, VC_BUSY %d, "
- "TX_FIFO_FULL %d, RX_FIFO_NOT_EMPTY %d, ",
- channel,
- FLD_GET(r, 5, 5),
- FLD_GET(r, 6, 6),
- FLD_GET(r, 15, 15),
- FLD_GET(r, 16, 16),
- FLD_GET(r, 20, 20));
-
- r = dsi_read_reg(DSI_TX_FIFO_VC_EMPTINESS);
- DSSDBG("EMPTINESS %d\n", (r >> (8 * channel)) & 0xff);
-}
-
static int dsi_vc_enable(int channel, bool enable)
{
- if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
- DSSDBG("dsi_vc_enable channel %d, enable %d\n",
- channel, enable);
+ DSSDBG("dsi_vc_enable channel %d, enable %d\n",
+ channel, enable);
enable = enable ? 1 : 0;
@@ -1859,10 +1779,12 @@ static void dsi_vc_config_vp(int channel)
}
-static void dsi_vc_enable_hs(int channel, bool enable)
+void omapdss_dsi_vc_enable_hs(int channel, bool enable)
{
DSSDBG("dsi_vc_enable_hs(%d, %d)\n", channel, enable);
+ WARN_ON(!dsi_bus_is_locked());
+
dsi_vc_enable(channel, 0);
dsi_if_enable(0);
@@ -1873,6 +1795,7 @@ static void dsi_vc_enable_hs(int channel, bool enable)
dsi_force_tx_stop_mode_io();
}
+EXPORT_SYMBOL(omapdss_dsi_vc_enable_hs);
static void dsi_vc_flush_long_data(int channel)
{
@@ -1955,11 +1878,10 @@ static u16 dsi_vc_flush_receive_data(int channel)
static int dsi_vc_send_bta(int channel)
{
- if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO &&
- (dsi.debug_write || dsi.debug_read))
+ if (dsi.debug_write || dsi.debug_read)
DSSDBG("dsi_vc_send_bta %d\n", channel);
- WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+ WARN_ON(!dsi_bus_is_locked());
if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) { /* RX_FIFO_NOT_EMPTY */
DSSERR("rx fifo not empty when sending BTA, dumping data:\n");
@@ -2010,10 +1932,9 @@ static inline void dsi_vc_write_long_header(int channel, u8 data_type,
u32 val;
u8 data_id;
- WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+ WARN_ON(!dsi_bus_is_locked());
- /*data_id = data_type | channel << 6; */
- data_id = data_type | dsi.vc[channel].dest_per << 6;
+ data_id = data_type | channel << 6;
val = FLD_VAL(data_id, 7, 0) | FLD_VAL(len, 23, 8) |
FLD_VAL(ecc, 31, 24);
@@ -2056,13 +1977,10 @@ static int dsi_vc_send_long(int channel, u8 data_type, u8 *data, u16 len,
dsi_vc_write_long_header(channel, data_type, len, ecc);
- /*dsi_vc_print_status(0); */
-
p = data;
for (i = 0; i < len >> 2; i++) {
if (dsi.debug_write)
DSSDBG("\tsending full packet %d\n", i);
- /*dsi_vc_print_status(0); */
b1 = *p++;
b2 = *p++;
@@ -2105,7 +2023,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
u32 r;
u8 data_id;
- WARN_ON(!mutex_is_locked(&dsi.bus_lock));
+ WARN_ON(!dsi_bus_is_locked());
if (dsi.debug_write)
DSSDBG("dsi_vc_send_short(ch%d, dt %#x, b1 %#x, b2 %#x)\n",
@@ -2119,7 +2037,7 @@ static int dsi_vc_send_short(int channel, u8 data_type, u16 data, u8 ecc)
return -EINVAL;
}
- data_id = data_type | dsi.vc[channel].dest_per << 6;
+ data_id = data_type | channel << 6;
r = (data_id << 0) | (data << 8) | (ecc << 24);
@@ -2163,14 +2081,35 @@ int dsi_vc_dcs_write(int channel, u8 *data, int len)
r = dsi_vc_dcs_write_nosync(channel, data, len);
if (r)
- return r;
+ goto err;
r = dsi_vc_send_bta_sync(channel);
+ if (r)
+ goto err;
+ return 0;
+err:
+ DSSERR("dsi_vc_dcs_write(ch %d, cmd 0x%02x, len %d) failed\n",
+ channel, data[0], len);
return r;
}
EXPORT_SYMBOL(dsi_vc_dcs_write);
+int dsi_vc_dcs_write_0(int channel, u8 dcs_cmd)
+{
+ return dsi_vc_dcs_write(channel, &dcs_cmd, 1);
+}
+EXPORT_SYMBOL(dsi_vc_dcs_write_0);
+
+int dsi_vc_dcs_write_1(int channel, u8 dcs_cmd, u8 param)
+{
+ u8 buf[2];
+ buf[0] = dcs_cmd;
+ buf[1] = param;
+ return dsi_vc_dcs_write(channel, buf, 2);
+}
+EXPORT_SYMBOL(dsi_vc_dcs_write_1);
+
int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
{
u32 val;
@@ -2182,16 +2121,17 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
r = dsi_vc_send_short(channel, DSI_DT_DCS_READ, dcs_cmd, 0);
if (r)
- return r;
+ goto err;
r = dsi_vc_send_bta_sync(channel);
if (r)
- return r;
+ goto err;
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(DSI_VC_CTRL(channel), 20, 20) == 0) {
DSSERR("RX fifo empty when trying to read.\n");
- return -EIO;
+ r = -EIO;
+ goto err;
}
val = dsi_read_reg(DSI_VC_SHORT_PACKET_HEADER(channel));
@@ -2201,15 +2141,18 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
if (dt == DSI_DT_RX_ACK_WITH_ERR) {
u16 err = FLD_GET(val, 23, 8);
dsi_show_rx_ack_with_err(err);
- return -EIO;
+ r = -EIO;
+ goto err;
} else if (dt == DSI_DT_RX_SHORT_READ_1) {
u8 data = FLD_GET(val, 15, 8);
if (dsi.debug_read)
DSSDBG("\tDCS short response, 1 byte: %02x\n", data);
- if (buflen < 1)
- return -EIO;
+ if (buflen < 1) {
+ r = -EIO;
+ goto err;
+ }
buf[0] = data;
@@ -2219,8 +2162,10 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
if (dsi.debug_read)
DSSDBG("\tDCS short response, 2 byte: %04x\n", data);
- if (buflen < 2)
- return -EIO;
+ if (buflen < 2) {
+ r = -EIO;
+ goto err;
+ }
buf[0] = data & 0xff;
buf[1] = (data >> 8) & 0xff;
@@ -2232,8 +2177,10 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
if (dsi.debug_read)
DSSDBG("\tDCS long response, len %d\n", len);
- if (len > buflen)
- return -EIO;
+ if (len > buflen) {
+ r = -EIO;
+ goto err;
+ }
/* two byte checksum ends the packet, not included in len */
for (w = 0; w < len + 2;) {
@@ -2255,14 +2202,52 @@ int dsi_vc_dcs_read(int channel, u8 dcs_cmd, u8 *buf, int buflen)
}
return len;
-
} else {
DSSERR("\tunknown datatype 0x%02x\n", dt);
- return -EIO;
+ r = -EIO;
+ goto err;
}
+
+ BUG();
+err:
+ DSSERR("dsi_vc_dcs_read(ch %d, cmd 0x%02x) failed\n",
+ channel, dcs_cmd);
+ return r;
+
}
EXPORT_SYMBOL(dsi_vc_dcs_read);
+int dsi_vc_dcs_read_1(int channel, u8 dcs_cmd, u8 *data)
+{
+ int r;
+
+ r = dsi_vc_dcs_read(channel, dcs_cmd, data, 1);
+
+ if (r < 0)
+ return r;
+
+ if (r != 1)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL(dsi_vc_dcs_read_1);
+
+int dsi_vc_dcs_read_2(int channel, u8 dcs_cmd, u16 *data)
+{
+ int r;
+
+ r = dsi_vc_dcs_read(channel, dcs_cmd, (u8 *)data, 2);
+
+ if (r < 0)
+ return r;
+
+ if (r != 2)
+ return -EIO;
+
+ return 0;
+}
+EXPORT_SYMBOL(dsi_vc_dcs_read_2);
int dsi_vc_set_max_rx_packet_size(int channel, u16 len)
{
@@ -2491,15 +2476,15 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
u32 r;
int buswidth = 0;
- dsi_config_tx_fifo(DSI_FIFO_SIZE_128,
- DSI_FIFO_SIZE_0,
- DSI_FIFO_SIZE_0,
- DSI_FIFO_SIZE_0);
+ dsi_config_tx_fifo(DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32);
- dsi_config_rx_fifo(DSI_FIFO_SIZE_128,
- DSI_FIFO_SIZE_0,
- DSI_FIFO_SIZE_0,
- DSI_FIFO_SIZE_0);
+ dsi_config_rx_fifo(DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32,
+ DSI_FIFO_SIZE_32);
/* XXX what values for the timeouts? */
dsi_set_stop_state_counter(1000);
@@ -2537,12 +2522,9 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
dsi_write_reg(DSI_CTRL, r);
dsi_vc_initial_config(0);
-
- /* set all vc targets to peripheral 0 */
- dsi.vc[0].dest_per = 0;
- dsi.vc[1].dest_per = 0;
- dsi.vc[2].dest_per = 0;
- dsi.vc[3].dest_per = 0;
+ dsi_vc_initial_config(1);
+ dsi_vc_initial_config(2);
+ dsi_vc_initial_config(3);
return 0;
}
@@ -2777,18 +2759,16 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
unsigned packet_payload;
unsigned packet_len;
u32 l;
- bool use_te_trigger;
- const unsigned channel = 0;
+ const unsigned channel = dsi.update_channel;
/* line buffer is 1024 x 24bits */
/* XXX: for some reason using full buffer size causes considerable TX
* slowdown with update sizes that fill the whole buffer */
const unsigned line_buf_size = 1023 * 3;
- use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
+ DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
+ x, y, w, h);
- if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
- DSSDBG("dsi_update_screen_dispc(%d,%d %dx%d)\n",
- x, y, w, h);
+ dsi_vc_config_vp(channel);
bytespp = dssdev->ctrl.pixel_size / 8;
bytespl = w * bytespp;
@@ -2808,15 +2788,12 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
if (bytespf % packet_payload)
total_len += (bytespf % packet_payload) + 1;
- if (0)
- dsi_vc_print_status(1);
-
l = FLD_VAL(total_len, 23, 0); /* TE_SIZE */
dsi_write_reg(DSI_VC_TE(channel), l);
dsi_vc_write_long_header(channel, DSI_DT_DCS_LONG_WRITE, packet_len, 0);
- if (use_te_trigger)
+ if (dsi.te_enabled)
l = FLD_MOD(l, 1, 30, 30); /* TE_EN */
else
l = FLD_MOD(l, 1, 31, 31); /* TE_START */
@@ -2830,9 +2807,14 @@ static void dsi_update_screen_dispc(struct omap_dss_device *dssdev,
*/
dispc_disable_sidle();
+ dsi_perf_mark_start();
+
+ schedule_delayed_work(&dsi.framedone_timeout_work,
+ msecs_to_jiffies(250));
+
dss_start_update(dssdev);
- if (use_te_trigger) {
+ if (dsi.te_enabled) {
/* disable LP_RX_TO, so that we can receive TE. Time to wait
* for TE is longer than the timer allows */
REG_FLD_MOD(DSI_TIMING2, 0, 15, 15); /* LP_RX_TO */
@@ -2852,110 +2834,64 @@ static void dsi_te_timeout(unsigned long arg)
}
#endif
-static void dsi_framedone_irq_callback(void *data, u32 mask)
+static void dsi_framedone_timeout_work_callback(struct work_struct *work)
{
- /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
- * turns itself off. However, DSI still has the pixels in its buffers,
- * and is sending the data.
- */
+ int r;
+ const int channel = dsi.update_channel;
+
+ DSSERR("Framedone not received for 250ms!\n");
/* SIDLEMODE back to smart-idle */
dispc_enable_sidle();
- dsi.framedone_received = true;
- wake_up(&dsi.waitqueue);
-}
-
-static void dsi_set_update_region(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
-{
- spin_lock(&dsi.update_lock);
- if (dsi.update_region.dirty) {
- dsi.update_region.x = min(x, dsi.update_region.x);
- dsi.update_region.y = min(y, dsi.update_region.y);
- dsi.update_region.w = max(w, dsi.update_region.w);
- dsi.update_region.h = max(h, dsi.update_region.h);
- } else {
- dsi.update_region.x = x;
- dsi.update_region.y = y;
- dsi.update_region.w = w;
- dsi.update_region.h = h;
+ if (dsi.te_enabled) {
+ /* enable LP_RX_TO again after the TE */
+ REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
- dsi.update_region.device = dssdev;
- dsi.update_region.dirty = true;
-
- spin_unlock(&dsi.update_lock);
-
-}
-
-static int dsi_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- int r = 0;
- int i;
-
- WARN_ON(!mutex_is_locked(&dsi.bus_lock));
-
- if (dsi.update_mode != mode) {
- dsi.update_mode = mode;
-
- /* Mark the overlays dirty, and do apply(), so that we get the
- * overlays configured properly after update mode change. */
- for (i = 0; i < omap_dss_get_num_overlays(); ++i) {
- struct omap_overlay *ovl;
- ovl = omap_dss_get_overlay(i);
- if (ovl->manager == dssdev->manager)
- ovl->info_dirty = true;
- }
-
- r = dssdev->manager->apply(dssdev->manager);
-
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
- mode == OMAP_DSS_UPDATE_AUTO) {
- u16 w, h;
-
- DSSDBG("starting auto update\n");
-
- dssdev->get_resolution(dssdev, &w, &h);
-
- dsi_set_update_region(dssdev, 0, 0, w, h);
-
- dsi_perf_mark_start_auto();
+ /* Send BTA after the frame. We need this for the TE to work, as TE
+ * trigger is only sent for BTAs without preceding packet. Thus we need
+ * to BTA after the pixel packets so that next BTA will cause TE
+ * trigger.
+ *
+ * This is not needed when TE is not in use, but we do it anyway to
+ * make sure that the transfer has been completed. It would be more
+ * optimal, but more complex, to wait only just before starting next
+ * transfer. */
+ r = dsi_vc_send_bta_sync(channel);
+ if (r)
+ DSSERR("BTA after framedone failed\n");
- wake_up(&dsi.waitqueue);
- }
+ /* RX_FIFO_NOT_EMPTY */
+ if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
+ DSSERR("Received error during frame transfer:\n");
+ dsi_vc_flush_receive_data(channel);
}
- return r;
+ dsi.framedone_callback(-ETIMEDOUT, dsi.framedone_data);
}
-static int dsi_set_te(struct omap_dss_device *dssdev, bool enable)
+static void dsi_framedone_irq_callback(void *data, u32 mask)
{
- int r = 0;
+ /* Note: We get FRAMEDONE when DISPC has finished sending pixels and
+ * turns itself off. However, DSI still has the pixels in its buffers,
+ * and is sending the data.
+ */
- if (dssdev->driver->enable_te) {
- r = dssdev->driver->enable_te(dssdev, enable);
- /* XXX for some reason, DSI TE breaks if we don't wait here.
- * Panel bug? Needs more studying */
- msleep(100);
- }
+ /* SIDLEMODE back to smart-idle */
+ dispc_enable_sidle();
- return r;
+ schedule_work(&dsi.framedone_work);
}
static void dsi_handle_framedone(void)
{
int r;
- const int channel = 0;
- bool use_te_trigger;
-
- use_te_trigger = dsi.te_enabled && !dsi.use_ext_te;
+ const int channel = dsi.update_channel;
- if (dsi.update_mode != OMAP_DSS_UPDATE_AUTO)
- DSSDBG("FRAMEDONE\n");
+ DSSDBG("FRAMEDONE\n");
- if (use_te_trigger) {
+ if (dsi.te_enabled) {
/* enable LP_RX_TO again after the TE */
REG_FLD_MOD(DSI_TIMING2, 1, 15, 15); /* LP_RX_TO */
}
@@ -2976,7 +2912,7 @@ static void dsi_handle_framedone(void)
/* RX_FIFO_NOT_EMPTY */
if (REG_GET(DSI_VC_CTRL(channel), 20, 20)) {
DSSERR("Received error during frame transfer:\n");
- dsi_vc_flush_receive_data(0);
+ dsi_vc_flush_receive_data(channel);
}
#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
@@ -2984,118 +2920,79 @@ static void dsi_handle_framedone(void)
#endif
}
-static int dsi_update_thread(void *data)
+static void dsi_framedone_work_callback(struct work_struct *work)
{
- unsigned long timeout;
- struct omap_dss_device *device;
- u16 x, y, w, h;
-
- while (1) {
- bool sched;
-
- wait_event_interruptible(dsi.waitqueue,
- dsi.update_mode == OMAP_DSS_UPDATE_AUTO ||
- (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
- dsi.update_region.dirty == true) ||
- kthread_should_stop());
-
- if (kthread_should_stop())
- break;
-
- dsi_bus_lock();
-
- if (dsi.update_mode == OMAP_DSS_UPDATE_DISABLED ||
- kthread_should_stop()) {
- dsi_bus_unlock();
- break;
- }
-
- dsi_perf_mark_setup();
-
- if (dsi.update_region.dirty) {
- spin_lock(&dsi.update_lock);
- dsi.active_update_region = dsi.update_region;
- dsi.update_region.dirty = false;
- spin_unlock(&dsi.update_lock);
- }
+ DSSDBGF();
- device = dsi.active_update_region.device;
- x = dsi.active_update_region.x;
- y = dsi.active_update_region.y;
- w = dsi.active_update_region.w;
- h = dsi.active_update_region.h;
+ cancel_delayed_work_sync(&dsi.framedone_timeout_work);
- if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+ dsi_handle_framedone();
- if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL)
- dss_setup_partial_planes(device,
- &x, &y, &w, &h);
+ dsi_perf_show("DISPC");
- dispc_set_lcd_size(w, h);
- }
+ dsi.framedone_callback(0, dsi.framedone_data);
+}
- if (dsi.active_update_region.dirty) {
- dsi.active_update_region.dirty = false;
- /* XXX TODO we don't need to send the coords, if they
- * are the same that are already programmed to the
- * panel. That should speed up manual update a bit */
- device->driver->setup_update(device, x, y, w, h);
- }
+int omap_dsi_prepare_update(struct omap_dss_device *dssdev,
+ u16 *x, u16 *y, u16 *w, u16 *h)
+{
+ u16 dw, dh;
- dsi_perf_mark_start();
+ dssdev->driver->get_resolution(dssdev, &dw, &dh);
- if (device->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- dsi_vc_config_vp(0);
+ if (*x > dw || *y > dh)
+ return -EINVAL;
- if (dsi.te_enabled && dsi.use_ext_te)
- device->driver->wait_for_te(device);
+ if (*x + *w > dw)
+ return -EINVAL;
- dsi.framedone_received = false;
+ if (*y + *h > dh)
+ return -EINVAL;
- dsi_update_screen_dispc(device, x, y, w, h);
+ if (*w == 1)
+ return -EINVAL;
- /* wait for framedone */
- timeout = msecs_to_jiffies(1000);
- wait_event_timeout(dsi.waitqueue,
- dsi.framedone_received == true,
- timeout);
+ if (*w == 0 || *h == 0)
+ return -EINVAL;
- if (!dsi.framedone_received) {
- DSSERR("framedone timeout\n");
- DSSERR("failed update %d,%d %dx%d\n",
- x, y, w, h);
+ dsi_perf_mark_setup();
- dispc_enable_sidle();
- dispc_enable_lcd_out(0);
+ if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+ dss_setup_partial_planes(dssdev, x, y, w, h);
+ dispc_set_lcd_size(*w, *h);
+ }
- dsi_reset_tx_fifo(0);
- } else {
- dsi_handle_framedone();
- dsi_perf_show("DISPC");
- }
- } else {
- dsi_update_screen_l4(device, x, y, w, h);
- dsi_perf_show("L4");
- }
+ return 0;
+}
+EXPORT_SYMBOL(omap_dsi_prepare_update);
- sched = atomic_read(&dsi.bus_lock.count) < 0;
+int omap_dsi_update(struct omap_dss_device *dssdev,
+ int channel,
+ u16 x, u16 y, u16 w, u16 h,
+ void (*callback)(int, void *), void *data)
+{
+ dsi.update_channel = channel;
- complete_all(&dsi.update_completion);
+ if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
+ dsi.framedone_callback = callback;
+ dsi.framedone_data = data;
- dsi_bus_unlock();
+ dsi.update_region.x = x;
+ dsi.update_region.y = y;
+ dsi.update_region.w = w;
+ dsi.update_region.h = h;
+ dsi.update_region.device = dssdev;
- /* XXX We need to give others chance to get the bus lock. Is
- * there a better way for this? */
- if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO && sched)
- schedule_timeout_interruptible(1);
+ dsi_update_screen_dispc(dssdev, x, y, w, h);
+ } else {
+ dsi_update_screen_l4(dssdev, x, y, w, h);
+ dsi_perf_show("L4");
+ callback(0, data);
}
- DSSDBG("update thread exiting\n");
-
return 0;
}
-
-
+EXPORT_SYMBOL(omap_dsi_update);
/* Display funcs */
@@ -3203,7 +3100,8 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
if (r)
goto err1;
- dss_select_clk_source(true, true);
+ dss_select_dispc_clk_source(DSS_SRC_DSI1_PLL_FCLK);
+ dss_select_dsi_clk_source(DSS_SRC_DSI2_PLL_FCLK);
DSSDBG("PLL OK\n");
@@ -3229,25 +3127,18 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
/* enable interface */
dsi_vc_enable(0, 1);
+ dsi_vc_enable(1, 1);
+ dsi_vc_enable(2, 1);
+ dsi_vc_enable(3, 1);
dsi_if_enable(1);
dsi_force_tx_stop_mode_io();
- if (dssdev->driver->enable) {
- r = dssdev->driver->enable(dssdev);
- if (r)
- goto err4;
- }
-
- /* enable high-speed after initial config */
- dsi_vc_enable_hs(0, 1);
-
return 0;
-err4:
- dsi_if_enable(0);
err3:
dsi_complexio_uninit();
err2:
- dss_select_clk_source(false, false);
+ dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
+ dss_select_dsi_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
err1:
dsi_pll_uninit();
err0:
@@ -3256,10 +3147,8 @@ err0:
static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev)
{
- if (dssdev->driver->disable)
- dssdev->driver->disable(dssdev);
-
- dss_select_clk_source(false, false);
+ dss_select_dispc_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
+ dss_select_dsi_clk_source(DSS_SRC_DSS1_ALWON_FCLK);
dsi_complexio_uninit();
dsi_pll_uninit();
}
@@ -3280,14 +3169,15 @@ static int dsi_core_init(void)
return 0;
}
-static int dsi_display_enable(struct omap_dss_device *dssdev)
+int omapdss_dsi_display_enable(struct omap_dss_device *dssdev)
{
int r = 0;
DSSDBG("dsi_display_enable\n");
+ WARN_ON(!dsi_bus_is_locked());
+
mutex_lock(&dsi.lock);
- dsi_bus_lock();
r = omap_dss_start_device(dssdev);
if (r) {
@@ -3295,100 +3185,47 @@ static int dsi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
- DSSERR("dssdev already enabled\n");
- r = -EINVAL;
- goto err1;
- }
-
enable_clocks(1);
dsi_enable_pll_clock(1);
r = _dsi_reset();
if (r)
- goto err2;
+ goto err1;
dsi_core_init();
r = dsi_display_init_dispc(dssdev);
if (r)
- goto err2;
+ goto err1;
r = dsi_display_init_dsi(dssdev);
if (r)
- goto err3;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- dsi.use_ext_te = dssdev->phy.dsi.ext_te;
- r = dsi_set_te(dssdev, dsi.te_enabled);
- if (r)
- goto err4;
-
- dsi_set_update_mode(dssdev, dsi.user_update_mode);
+ goto err2;
- dsi_bus_unlock();
mutex_unlock(&dsi.lock);
return 0;
-err4:
-
- dsi_display_uninit_dsi(dssdev);
-err3:
- dsi_display_uninit_dispc(dssdev);
err2:
+ dsi_display_uninit_dispc(dssdev);
+err1:
enable_clocks(0);
dsi_enable_pll_clock(0);
-err1:
omap_dss_stop_device(dssdev);
err0:
- dsi_bus_unlock();
mutex_unlock(&dsi.lock);
DSSDBG("dsi_display_enable FAILED\n");
return r;
}
+EXPORT_SYMBOL(omapdss_dsi_display_enable);
-static void dsi_display_disable(struct omap_dss_device *dssdev)
+void omapdss_dsi_display_disable(struct omap_dss_device *dssdev)
{
DSSDBG("dsi_display_disable\n");
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
-
- if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
- dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
- goto end;
-
- dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
- dsi_display_uninit_dispc(dssdev);
-
- dsi_display_uninit_dsi(dssdev);
-
- enable_clocks(0);
- dsi_enable_pll_clock(0);
-
- omap_dss_stop_device(dssdev);
-end:
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
-}
-
-static int dsi_display_suspend(struct omap_dss_device *dssdev)
-{
- DSSDBG("dsi_display_suspend\n");
+ WARN_ON(!dsi_bus_is_locked());
mutex_lock(&dsi.lock);
- dsi_bus_lock();
-
- if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED ||
- dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
- goto end;
-
- dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
dsi_display_uninit_dispc(dssdev);
@@ -3396,312 +3233,19 @@ static int dsi_display_suspend(struct omap_dss_device *dssdev)
enable_clocks(0);
dsi_enable_pll_clock(0);
-end:
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
-
- return 0;
-}
-
-static int dsi_display_resume(struct omap_dss_device *dssdev)
-{
- int r;
-
- DSSDBG("dsi_display_resume\n");
-
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
-
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
- DSSERR("dssdev not suspended\n");
- r = -EINVAL;
- goto err0;
- }
-
- enable_clocks(1);
- dsi_enable_pll_clock(1);
-
- r = _dsi_reset();
- if (r)
- goto err1;
-
- dsi_core_init();
-
- r = dsi_display_init_dispc(dssdev);
- if (r)
- goto err1;
-
- r = dsi_display_init_dsi(dssdev);
- if (r)
- goto err2;
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- r = dsi_set_te(dssdev, dsi.te_enabled);
- if (r)
- goto err2;
-
- dsi_set_update_mode(dssdev, dsi.user_update_mode);
-
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
-
- return 0;
-
-err2:
- dsi_display_uninit_dispc(dssdev);
-err1:
- enable_clocks(0);
- dsi_enable_pll_clock(0);
-err0:
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
- DSSDBG("dsi_display_resume FAILED\n");
- return r;
-}
-
-static int dsi_display_update(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
-{
- int r = 0;
- u16 dw, dh;
-
- DSSDBG("dsi_display_update(%d,%d %dx%d)\n", x, y, w, h);
- mutex_lock(&dsi.lock);
-
- if (dsi.update_mode != OMAP_DSS_UPDATE_MANUAL)
- goto end;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- goto end;
-
- dssdev->get_resolution(dssdev, &dw, &dh);
-
- if (x > dw || y > dh)
- goto end;
-
- if (x + w > dw)
- w = dw - x;
-
- if (y + h > dh)
- h = dh - y;
-
- if (w == 0 || h == 0)
- goto end;
-
- if (w == 1) {
- r = -EINVAL;
- goto end;
- }
-
- dsi_set_update_region(dssdev, x, y, w, h);
-
- wake_up(&dsi.waitqueue);
-
-end:
- mutex_unlock(&dsi.lock);
-
- return r;
-}
-
-static int dsi_display_sync(struct omap_dss_device *dssdev)
-{
- bool wait;
-
- DSSDBG("dsi_display_sync()\n");
-
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
-
- if (dsi.update_mode == OMAP_DSS_UPDATE_MANUAL &&
- dsi.update_region.dirty) {
- INIT_COMPLETION(dsi.update_completion);
- wait = true;
- } else {
- wait = false;
- }
-
- dsi_bus_unlock();
- mutex_unlock(&dsi.lock);
-
- if (wait)
- wait_for_completion_interruptible(&dsi.update_completion);
-
- DSSDBG("dsi_display_sync() done\n");
- return 0;
-}
-
-static int dsi_display_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- int r = 0;
-
- DSSDBGF("%d", mode);
-
- mutex_lock(&dsi.lock);
- dsi_bus_lock();
-
- dsi.user_update_mode = mode;
- r = dsi_set_update_mode(dssdev, mode);
+ omap_dss_stop_device(dssdev);
- dsi_bus_unlock();
mutex_unlock(&dsi.lock);
-
- return r;
}
+EXPORT_SYMBOL(omapdss_dsi_display_disable);
-static enum omap_dss_update_mode dsi_display_get_update_mode(
- struct omap_dss_device *dssdev)
+int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
{
- return dsi.update_mode;
-}
-
-
-static int dsi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
-{
- int r = 0;
-
- DSSDBGF("%d", enable);
-
- if (!dssdev->driver->enable_te)
- return -ENOENT;
-
- dsi_bus_lock();
-
dsi.te_enabled = enable;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- goto end;
-
- r = dsi_set_te(dssdev, enable);
-end:
- dsi_bus_unlock();
-
- return r;
-}
-
-static int dsi_display_get_te(struct omap_dss_device *dssdev)
-{
- return dsi.te_enabled;
-}
-
-static int dsi_display_set_rotate(struct omap_dss_device *dssdev, u8 rotate)
-{
-
- DSSDBGF("%d", rotate);
-
- if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
- return -EINVAL;
-
- dsi_bus_lock();
- dssdev->driver->set_rotate(dssdev, rotate);
- if (dsi.update_mode == OMAP_DSS_UPDATE_AUTO) {
- u16 w, h;
- /* the display dimensions may have changed, so set a new
- * update region */
- dssdev->get_resolution(dssdev, &w, &h);
- dsi_set_update_region(dssdev, 0, 0, w, h);
- }
- dsi_bus_unlock();
-
return 0;
}
-
-static u8 dsi_display_get_rotate(struct omap_dss_device *dssdev)
-{
- if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate)
- return 0;
-
- return dssdev->driver->get_rotate(dssdev);
-}
-
-static int dsi_display_set_mirror(struct omap_dss_device *dssdev, bool mirror)
-{
- DSSDBGF("%d", mirror);
-
- if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
- return -EINVAL;
-
- dsi_bus_lock();
- dssdev->driver->set_mirror(dssdev, mirror);
- dsi_bus_unlock();
-
- return 0;
-}
-
-static bool dsi_display_get_mirror(struct omap_dss_device *dssdev)
-{
- if (!dssdev->driver->set_mirror || !dssdev->driver->get_mirror)
- return 0;
-
- return dssdev->driver->get_mirror(dssdev);
-}
-
-static int dsi_display_run_test(struct omap_dss_device *dssdev, int test_num)
-{
- int r;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return -EIO;
-
- DSSDBGF("%d", test_num);
-
- dsi_bus_lock();
-
- /* run test first in low speed mode */
- dsi_vc_enable_hs(0, 0);
-
- if (dssdev->driver->run_test) {
- r = dssdev->driver->run_test(dssdev, test_num);
- if (r)
- goto end;
- }
-
- /* then in high speed */
- dsi_vc_enable_hs(0, 1);
-
- if (dssdev->driver->run_test) {
- r = dssdev->driver->run_test(dssdev, test_num);
- if (r)
- goto end;
- }
-
-end:
- dsi_vc_enable_hs(0, 1);
-
- dsi_bus_unlock();
-
- return r;
-}
-
-static int dsi_display_memory_read(struct omap_dss_device *dssdev,
- void *buf, size_t size,
- u16 x, u16 y, u16 w, u16 h)
-{
- int r;
-
- DSSDBGF("");
-
- if (!dssdev->driver->memory_read)
- return -EINVAL;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return -EIO;
-
- dsi_bus_lock();
-
- r = dssdev->driver->memory_read(dssdev, buf, size,
- x, y, w, h);
-
- /* Memory read usually changes the update area. This will
- * force the next update to re-set the update area */
- dsi.active_update_region.dirty = true;
-
- dsi_bus_unlock();
-
- return r;
-}
+EXPORT_SYMBOL(omapdss_dsi_enable_te);
void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 fifo_size, enum omap_burst_size *burst_size,
@@ -3720,26 +3264,6 @@ int dsi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("DSI init\n");
- dssdev->enable = dsi_display_enable;
- dssdev->disable = dsi_display_disable;
- dssdev->suspend = dsi_display_suspend;
- dssdev->resume = dsi_display_resume;
- dssdev->update = dsi_display_update;
- dssdev->sync = dsi_display_sync;
- dssdev->set_update_mode = dsi_display_set_update_mode;
- dssdev->get_update_mode = dsi_display_get_update_mode;
- dssdev->enable_te = dsi_display_enable_te;
- dssdev->get_te = dsi_display_get_te;
-
- dssdev->get_rotate = dsi_display_get_rotate;
- dssdev->set_rotate = dsi_display_set_rotate;
-
- dssdev->get_mirror = dsi_display_get_mirror;
- dssdev->set_mirror = dsi_display_set_mirror;
-
- dssdev->run_test = dsi_display_run_test;
- dssdev->memory_read = dsi_display_memory_read;
-
/* XXX these should be figured out dynamically */
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
@@ -3754,9 +3278,6 @@ int dsi_init(struct platform_device *pdev)
{
u32 rev;
int r;
- struct sched_param param = {
- .sched_priority = MAX_USER_RT_PRIO-1
- };
spin_lock_init(&dsi.errors_lock);
dsi.errors = 0;
@@ -3767,31 +3288,19 @@ int dsi_init(struct platform_device *pdev)
#endif
init_completion(&dsi.bta_completion);
- init_completion(&dsi.update_completion);
-
- dsi.thread = kthread_create(dsi_update_thread, NULL, "dsi");
- if (IS_ERR(dsi.thread)) {
- DSSERR("cannot create kthread\n");
- r = PTR_ERR(dsi.thread);
- goto err0;
- }
- sched_setscheduler(dsi.thread, SCHED_FIFO, &param);
-
- init_waitqueue_head(&dsi.waitqueue);
- spin_lock_init(&dsi.update_lock);
mutex_init(&dsi.lock);
- mutex_init(&dsi.bus_lock);
+ sema_init(&dsi.bus_lock, 1);
+
+ INIT_WORK(&dsi.framedone_work, dsi_framedone_work_callback);
+ INIT_DELAYED_WORK_DEFERRABLE(&dsi.framedone_timeout_work,
+ dsi_framedone_timeout_work_callback);
#ifdef DSI_CATCH_MISSING_TE
init_timer(&dsi.te_timer);
dsi.te_timer.function = dsi_te_timeout;
dsi.te_timer.data = 0;
#endif
-
- dsi.update_mode = OMAP_DSS_UPDATE_DISABLED;
- dsi.user_update_mode = OMAP_DSS_UPDATE_DISABLED;
-
dsi.base = ioremap(DSI_BASE, DSI_SZ_REGS);
if (!dsi.base) {
DSSERR("can't ioremap DSI\n");
@@ -3799,7 +3308,7 @@ int dsi_init(struct platform_device *pdev)
goto err1;
}
- dsi.vdds_dsi_reg = regulator_get(&pdev->dev, "vdds_dsi");
+ dsi.vdds_dsi_reg = dss_get_vdds_dsi();
if (IS_ERR(dsi.vdds_dsi_reg)) {
iounmap(dsi.base);
DSSERR("can't get VDDS_DSI regulator\n");
@@ -3815,23 +3324,15 @@ int dsi_init(struct platform_device *pdev)
enable_clocks(0);
- wake_up_process(dsi.thread);
-
return 0;
err2:
iounmap(dsi.base);
err1:
- kthread_stop(dsi.thread);
-err0:
return r;
}
void dsi_exit(void)
{
- kthread_stop(dsi.thread);
-
- regulator_put(dsi.vdds_dsi_reg);
-
iounmap(dsi.base);
DSSDBG("omap_dsi_exit\n");
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index 0a26b7d84d4..8254a4232a5 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -68,6 +68,9 @@ static struct {
struct dss_clock_info cache_dss_cinfo;
struct dispc_clock_info cache_dispc_cinfo;
+ enum dss_clk_source dsi_clk_source;
+ enum dss_clk_source dispc_clk_source;
+
u32 ctx[DSS_SZ_REGS / sizeof(u32)];
} dss;
@@ -247,23 +250,42 @@ void dss_dump_regs(struct seq_file *s)
#undef DUMPREG
}
-void dss_select_clk_source(bool dsi, bool dispc)
+void dss_select_dispc_clk_source(enum dss_clk_source clk_src)
+{
+ int b;
+
+ BUG_ON(clk_src != DSS_SRC_DSI1_PLL_FCLK &&
+ clk_src != DSS_SRC_DSS1_ALWON_FCLK);
+
+ b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
+
+ REG_FLD_MOD(DSS_CONTROL, b, 0, 0); /* DISPC_CLK_SWITCH */
+
+ dss.dispc_clk_source = clk_src;
+}
+
+void dss_select_dsi_clk_source(enum dss_clk_source clk_src)
{
- u32 r;
- r = dss_read_reg(DSS_CONTROL);
- r = FLD_MOD(r, dsi, 1, 1); /* DSI_CLK_SWITCH */
- r = FLD_MOD(r, dispc, 0, 0); /* DISPC_CLK_SWITCH */
- dss_write_reg(DSS_CONTROL, r);
+ int b;
+
+ BUG_ON(clk_src != DSS_SRC_DSI2_PLL_FCLK &&
+ clk_src != DSS_SRC_DSS1_ALWON_FCLK);
+
+ b = clk_src == DSS_SRC_DSS1_ALWON_FCLK ? 0 : 1;
+
+ REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */
+
+ dss.dsi_clk_source = clk_src;
}
-int dss_get_dsi_clk_source(void)
+enum dss_clk_source dss_get_dispc_clk_source(void)
{
- return FLD_GET(dss_read_reg(DSS_CONTROL), 1, 1);
+ return dss.dispc_clk_source;
}
-int dss_get_dispc_clk_source(void)
+enum dss_clk_source dss_get_dsi_clk_source(void)
{
- return FLD_GET(dss_read_reg(DSS_CONTROL), 0, 0);
+ return dss.dsi_clk_source;
}
/* calculate clock rates using dividers in cinfo */
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index 2bcb1245d6c..24326a5fd29 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -119,6 +119,12 @@ enum dss_clock {
DSS_CLK_96M = 1 << 4,
};
+enum dss_clk_source {
+ DSS_SRC_DSI1_PLL_FCLK,
+ DSS_SRC_DSI2_PLL_FCLK,
+ DSS_SRC_DSS1_ALWON_FCLK,
+};
+
struct dss_clock_info {
/* rates that we get with dividers below */
unsigned long fck;
@@ -169,6 +175,9 @@ unsigned long dss_clk_get_rate(enum dss_clock clk);
int dss_need_ctx_restore(void);
void dss_dump_clocks(struct seq_file *s);
struct bus_type *dss_get_bus(void);
+struct regulator *dss_get_vdds_dsi(void);
+struct regulator *dss_get_vdds_sdi(void);
+struct regulator *dss_get_vdda_dac(void);
/* display */
int dss_suspend_all_devices(void);
@@ -216,9 +225,11 @@ void dss_sdi_init(u8 datapairs);
int dss_sdi_enable(void);
void dss_sdi_disable(void);
-void dss_select_clk_source(bool dsi, bool dispc);
-int dss_get_dsi_clk_source(void);
-int dss_get_dispc_clk_source(void);
+void dss_select_dispc_clk_source(enum dss_clk_source clk_src);
+void dss_select_dsi_clk_source(enum dss_clk_source clk_src);
+enum dss_clk_source dss_get_dispc_clk_source(void);
+enum dss_clk_source dss_get_dsi_clk_source(void);
+
void dss_set_venc_output(enum omap_dss_venc_type type);
void dss_set_dac_pwrdn_bgz(bool enable);
@@ -261,7 +272,7 @@ void dsi_get_overlay_fifo_thresholds(enum omap_plane plane,
u32 *fifo_low, u32 *fifo_high);
/* DPI */
-int dpi_init(void);
+int dpi_init(struct platform_device *pdev);
void dpi_exit(void);
int dpi_init_display(struct omap_dss_device *dssdev);
@@ -313,8 +324,8 @@ int dispc_setup_plane(enum omap_plane plane,
bool dispc_go_busy(enum omap_channel channel);
void dispc_go(enum omap_channel channel);
-void dispc_enable_lcd_out(bool enable);
-void dispc_enable_digit_out(bool enable);
+void dispc_enable_channel(enum omap_channel channel, bool enable);
+bool dispc_is_channel_enabled(enum omap_channel channel);
int dispc_enable_plane(enum omap_plane plane, bool enable);
void dispc_enable_replication(enum omap_plane plane, bool enable);
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index 27d9c465c85..913142d4cab 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -501,6 +501,19 @@ static int omap_dss_unset_device(struct omap_overlay_manager *mgr)
return 0;
}
+static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr)
+{
+ unsigned long timeout = msecs_to_jiffies(500);
+ u32 irq;
+
+ if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC)
+ irq = DISPC_IRQ_EVSYNC_ODD;
+ else
+ irq = DISPC_IRQ_VSYNC;
+
+ return omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout);
+}
+
static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
{
unsigned long timeout = msecs_to_jiffies(500);
@@ -509,17 +522,18 @@ static int dss_mgr_wait_for_go(struct omap_overlay_manager *mgr)
u32 irq;
int r;
int i;
+ struct omap_dss_device *dssdev = mgr->device;
- if (!mgr->device)
+ if (!dssdev)
return 0;
- if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) {
+ if (dssdev->type == OMAP_DISPLAY_TYPE_VENC) {
irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
channel = OMAP_DSS_CHANNEL_DIGIT;
} else {
- if (mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
+ if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
enum omap_dss_update_mode mode;
- mode = mgr->device->get_update_mode(mgr->device);
+ mode = dssdev->driver->get_update_mode(dssdev);
if (mode != OMAP_DSS_UPDATE_AUTO)
return 0;
@@ -592,7 +606,7 @@ int dss_mgr_wait_for_go_ovl(struct omap_overlay *ovl)
} else {
if (dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
enum omap_dss_update_mode mode;
- mode = dssdev->get_update_mode(dssdev);
+ mode = dssdev->driver->get_update_mode(dssdev);
if (mode != OMAP_DSS_UPDATE_AUTO)
return 0;
@@ -1064,7 +1078,7 @@ void dss_start_update(struct omap_dss_device *dssdev)
mc->shadow_dirty = false;
}
- dispc_enable_lcd_out(1);
+ dssdev->manager->enable(dssdev->manager);
}
static void dss_apply_irq_handler(void *data, u32 mask)
@@ -1196,7 +1210,8 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
oc->manual_update =
dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
- dssdev->get_update_mode(dssdev) != OMAP_DSS_UPDATE_AUTO;
+ dssdev->driver->get_update_mode(dssdev) !=
+ OMAP_DSS_UPDATE_AUTO;
++num_planes_enabled;
}
@@ -1237,7 +1252,8 @@ static int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
mc->manual_update =
dssdev->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE &&
- dssdev->get_update_mode(dssdev) != OMAP_DSS_UPDATE_AUTO;
+ dssdev->driver->get_update_mode(dssdev) !=
+ OMAP_DSS_UPDATE_AUTO;
}
/* XXX TODO: Try to get fifomerge working. The problem is that it
@@ -1351,6 +1367,18 @@ static void omap_dss_mgr_get_info(struct omap_overlay_manager *mgr,
*info = mgr->info;
}
+static int dss_mgr_enable(struct omap_overlay_manager *mgr)
+{
+ dispc_enable_channel(mgr->id, 1);
+ return 0;
+}
+
+static int dss_mgr_disable(struct omap_overlay_manager *mgr)
+{
+ dispc_enable_channel(mgr->id, 0);
+ return 0;
+}
+
static void omap_dss_add_overlay_manager(struct omap_overlay_manager *manager)
{
++num_managers;
@@ -1394,6 +1422,10 @@ int dss_init_overlay_managers(struct platform_device *pdev)
mgr->set_manager_info = &omap_dss_mgr_set_info;
mgr->get_manager_info = &omap_dss_mgr_get_info;
mgr->wait_for_go = &dss_mgr_wait_for_go;
+ mgr->wait_for_vsync = &dss_mgr_wait_for_vsync;
+
+ mgr->enable = &dss_mgr_enable;
+ mgr->disable = &dss_mgr_disable;
mgr->caps = OMAP_DSS_OVL_MGR_CAP_DISPC;
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index b7f9a733984..0c5bea263ac 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -350,7 +350,7 @@ int dss_check_overlay(struct omap_overlay *ovl, struct omap_dss_device *dssdev)
return -EINVAL;
}
- dssdev->get_resolution(dssdev, &dw, &dh);
+ dssdev->driver->get_resolution(dssdev, &dw, &dh);
DSSDBG("check_overlay %d: (%d,%d %dx%d -> %dx%d) disp (%dx%d)\n",
ovl->id,
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index b936495c065..cc23f53cc62 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -36,8 +36,6 @@
#include <plat/display.h>
#include "dss.h"
-/*#define MEASURE_PERF*/
-
#define RFBI_BASE 0x48050800
struct rfbi_reg { u16 idx; };
@@ -66,8 +64,6 @@ struct rfbi_reg { u16 idx; };
#define RFBI_VSYNC_WIDTH RFBI_REG(0x0090)
#define RFBI_HSYNC_WIDTH RFBI_REG(0x0094)
-#define RFBI_CMD_FIFO_LEN_BYTES (16 * sizeof(struct update_param))
-
#define REG_FLD_MOD(idx, val, start, end) \
rfbi_write_reg(idx, FLD_MOD(rfbi_read_reg(idx), val, start, end))
@@ -102,7 +98,6 @@ enum update_cmd {
static int rfbi_convert_timings(struct rfbi_timings *t);
static void rfbi_get_clk_info(u32 *clk_period, u32 *max_clk_div);
-static void process_cmd_fifo(void);
static struct {
void __iomem *base;
@@ -125,11 +120,6 @@ static struct {
struct completion cmd_done;
atomic_t cmd_fifo_full;
atomic_t cmd_pending;
-#ifdef MEASURE_PERF
- unsigned perf_bytes;
- ktime_t perf_setup_time;
- ktime_t perf_start_time;
-#endif
} rfbi;
struct update_region {
@@ -139,16 +129,6 @@ struct update_region {
u16 h;
};
-struct update_param {
- u8 rfbi_module;
- u8 cmd;
-
- union {
- struct update_region r;
- struct completion *sync;
- } par;
-};
-
static inline void rfbi_write_reg(const struct rfbi_reg idx, u32 val)
{
__raw_writel(val, rfbi.base + idx.idx);
@@ -321,55 +301,6 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width,
}
EXPORT_SYMBOL(omap_rfbi_write_pixels);
-#ifdef MEASURE_PERF
-static void perf_mark_setup(void)
-{
- rfbi.perf_setup_time = ktime_get();
-}
-
-static void perf_mark_start(void)
-{
- rfbi.perf_start_time = ktime_get();
-}
-
-static void perf_show(const char *name)
-{
- ktime_t t, setup_time, trans_time;
- u32 total_bytes;
- u32 setup_us, trans_us, total_us;
-
- t = ktime_get();
-
- setup_time = ktime_sub(rfbi.perf_start_time, rfbi.perf_setup_time);
- setup_us = (u32)ktime_to_us(setup_time);
- if (setup_us == 0)
- setup_us = 1;
-
- trans_time = ktime_sub(t, rfbi.perf_start_time);
- trans_us = (u32)ktime_to_us(trans_time);
- if (trans_us == 0)
- trans_us = 1;
-
- total_us = setup_us + trans_us;
-
- total_bytes = rfbi.perf_bytes;
-
- DSSINFO("%s update %u us + %u us = %u us (%uHz), %u bytes, "
- "%u kbytes/sec\n",
- name,
- setup_us,
- trans_us,
- total_us,
- 1000*1000 / total_us,
- total_bytes,
- total_bytes * 1000 / total_us);
-}
-#else
-#define perf_mark_setup()
-#define perf_mark_start()
-#define perf_show(x)
-#endif
-
void rfbi_transfer_area(u16 width, u16 height,
void (callback)(void *data), void *data)
{
@@ -382,7 +313,7 @@ void rfbi_transfer_area(u16 width, u16 height,
dispc_set_lcd_size(width, height);
- dispc_enable_lcd_out(1);
+ dispc_enable_channel(OMAP_DSS_CHANNEL_LCD, true);
rfbi.framedone_callback = callback;
rfbi.framedone_callback_data = data;
@@ -396,8 +327,6 @@ void rfbi_transfer_area(u16 width, u16 height,
if (!rfbi.te_enabled)
l = FLD_MOD(l, 1, 4, 4); /* ITE */
- perf_mark_start();
-
rfbi_write_reg(RFBI_CONTROL, l);
}
@@ -407,8 +336,6 @@ static void framedone_callback(void *data, u32 mask)
DSSDBG("FRAMEDONE\n");
- perf_show("DISPC");
-
REG_FLD_MOD(RFBI_CONTROL, 0, 0, 0);
rfbi_enable_clocks(0);
@@ -416,11 +343,10 @@ static void framedone_callback(void *data, u32 mask)
callback = rfbi.framedone_callback;
rfbi.framedone_callback = NULL;
- /*callback(rfbi.framedone_callback_data);*/
+ if (callback != NULL)
+ callback(rfbi.framedone_callback_data);
atomic_set(&rfbi.cmd_pending, 0);
-
- process_cmd_fifo();
}
#if 1 /* VERBOSE */
@@ -937,52 +863,43 @@ int rfbi_configure(int rfbi_module, int bpp, int lines)
}
EXPORT_SYMBOL(rfbi_configure);
-static int rfbi_find_display(struct omap_dss_device *dssdev)
+int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
+ u16 *x, u16 *y, u16 *w, u16 *h)
{
- if (dssdev == rfbi.dssdev[0])
- return 0;
+ u16 dw, dh;
- if (dssdev == rfbi.dssdev[1])
- return 1;
+ dssdev->driver->get_resolution(dssdev, &dw, &dh);
- BUG();
- return -1;
-}
+ if (*x > dw || *y > dh)
+ return -EINVAL;
+ if (*x + *w > dw)
+ return -EINVAL;
-static void signal_fifo_waiters(void)
-{
- if (atomic_read(&rfbi.cmd_fifo_full) > 0) {
- /* DSSDBG("SIGNALING: Fifo not full for waiter!\n"); */
- complete(&rfbi.cmd_done);
- atomic_dec(&rfbi.cmd_fifo_full);
- }
-}
+ if (*y + *h > dh)
+ return -EINVAL;
-/* returns 1 for async op, and 0 for sync op */
-static int do_update(struct omap_dss_device *dssdev, struct update_region *upd)
-{
- u16 x = upd->x;
- u16 y = upd->y;
- u16 w = upd->w;
- u16 h = upd->h;
+ if (*w == 1)
+ return -EINVAL;
- perf_mark_setup();
+ if (*w == 0 || *h == 0)
+ return -EINVAL;
if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- /*dssdev->driver->enable_te(dssdev, 1); */
- dss_setup_partial_planes(dssdev, &x, &y, &w, &h);
+ dss_setup_partial_planes(dssdev, x, y, w, h);
+ dispc_set_lcd_size(*w, *h);
}
-#ifdef MEASURE_PERF
- rfbi.perf_bytes = w * h * 2; /* XXX always 16bit */
-#endif
-
- dssdev->driver->setup_update(dssdev, x, y, w, h);
+ return 0;
+}
+EXPORT_SYMBOL(omap_rfbi_prepare_update);
+int omap_rfbi_update(struct omap_dss_device *dssdev,
+ u16 x, u16 y, u16 w, u16 h,
+ void (*callback)(void *), void *data)
+{
if (dssdev->manager->caps & OMAP_DSS_OVL_MGR_CAP_DISPC) {
- rfbi_transfer_area(w, h, NULL, NULL);
- return 1;
+ rfbi_transfer_area(w, h, callback, data);
} else {
struct omap_overlay *ovl;
void __iomem *addr;
@@ -994,123 +911,12 @@ static int do_update(struct omap_dss_device *dssdev, struct update_region *upd)
omap_rfbi_write_pixels(addr, scr_width, x, y, w, h);
- perf_show("L4");
-
- return 0;
+ callback(data);
}
-}
-
-static void process_cmd_fifo(void)
-{
- int len;
- struct update_param p;
- struct omap_dss_device *dssdev;
- unsigned long flags;
-
- if (atomic_inc_return(&rfbi.cmd_pending) != 1)
- return;
-
- while (true) {
- spin_lock_irqsave(&rfbi.cmd_lock, flags);
-
- len = kfifo_out(&rfbi.cmd_fifo, (unsigned char *)&p,
- sizeof(struct update_param));
- if (len == 0) {
- DSSDBG("nothing more in fifo\n");
- atomic_set(&rfbi.cmd_pending, 0);
- spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
- break;
- }
-
- /* DSSDBG("fifo full %d\n", rfbi.cmd_fifo_full.counter);*/
-
- spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
-
- BUG_ON(len != sizeof(struct update_param));
- BUG_ON(p.rfbi_module > 1);
-
- dssdev = rfbi.dssdev[p.rfbi_module];
-
- if (p.cmd == RFBI_CMD_UPDATE) {
- if (do_update(dssdev, &p.par.r))
- break; /* async op */
- } else if (p.cmd == RFBI_CMD_SYNC) {
- DSSDBG("Signaling SYNC done!\n");
- complete(p.par.sync);
- } else
- BUG();
- }
-
- signal_fifo_waiters();
-}
-static void rfbi_push_cmd(struct update_param *p)
-{
- int ret;
-
- while (1) {
- unsigned long flags;
- int available;
-
- spin_lock_irqsave(&rfbi.cmd_lock, flags);
- available = RFBI_CMD_FIFO_LEN_BYTES -
- kfifo_len(&rfbi.cmd_fifo);
-
-/* DSSDBG("%d bytes left in fifo\n", available); */
- if (available < sizeof(struct update_param)) {
- DSSDBG("Going to wait because FIFO FULL..\n");
- spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
- atomic_inc(&rfbi.cmd_fifo_full);
- wait_for_completion(&rfbi.cmd_done);
- /*DSSDBG("Woke up because fifo not full anymore\n");*/
- continue;
- }
-
- ret = kfifo_in(&rfbi.cmd_fifo, (unsigned char *)p,
- sizeof(struct update_param));
-/* DSSDBG("pushed %d bytes\n", ret);*/
-
- spin_unlock_irqrestore(&rfbi.cmd_lock, flags);
-
- BUG_ON(ret != sizeof(struct update_param));
-
- break;
- }
-}
-
-static void rfbi_push_update(int rfbi_module, int x, int y, int w, int h)
-{
- struct update_param p;
-
- p.rfbi_module = rfbi_module;
- p.cmd = RFBI_CMD_UPDATE;
-
- p.par.r.x = x;
- p.par.r.y = y;
- p.par.r.w = w;
- p.par.r.h = h;
-
- DSSDBG("RFBI pushed %d,%d %dx%d\n", x, y, w, h);
-
- rfbi_push_cmd(&p);
-
- process_cmd_fifo();
-}
-
-static void rfbi_push_sync(int rfbi_module, struct completion *sync_comp)
-{
- struct update_param p;
-
- p.rfbi_module = rfbi_module;
- p.cmd = RFBI_CMD_SYNC;
- p.par.sync = sync_comp;
-
- rfbi_push_cmd(&p);
-
- DSSDBG("RFBI sync pushed to cmd fifo\n");
-
- process_cmd_fifo();
+ return 0;
}
+EXPORT_SYMBOL(omap_rfbi_update);
void rfbi_dump_regs(struct seq_file *s)
{
@@ -1155,12 +961,8 @@ int rfbi_init(void)
{
u32 rev;
u32 l;
- int r;
spin_lock_init(&rfbi.cmd_lock);
- r = kfifo_alloc(&rfbi.cmd_fifo, RFBI_CMD_FIFO_LEN_BYTES, GFP_KERNEL);
- if (r)
- return r;
init_completion(&rfbi.cmd_done);
atomic_set(&rfbi.cmd_fifo_full, 0);
@@ -1196,49 +998,10 @@ void rfbi_exit(void)
{
DSSDBG("rfbi_exit\n");
- kfifo_free(&rfbi.cmd_fifo);
-
iounmap(rfbi.base);
}
-/* struct omap_display support */
-static int rfbi_display_update(struct omap_dss_device *dssdev,
- u16 x, u16 y, u16 w, u16 h)
-{
- int rfbi_module;
-
- if (w == 0 || h == 0)
- return 0;
-
- rfbi_module = rfbi_find_display(dssdev);
-
- rfbi_push_update(rfbi_module, x, y, w, h);
-
- return 0;
-}
-
-static int rfbi_display_sync(struct omap_dss_device *dssdev)
-{
- struct completion sync_comp;
- int rfbi_module;
-
- rfbi_module = rfbi_find_display(dssdev);
-
- init_completion(&sync_comp);
- rfbi_push_sync(rfbi_module, &sync_comp);
- DSSDBG("Waiting for SYNC to happen...\n");
- wait_for_completion(&sync_comp);
- DSSDBG("Released from SYNC\n");
- return 0;
-}
-
-static int rfbi_display_enable_te(struct omap_dss_device *dssdev, bool enable)
-{
- dssdev->driver->enable_te(dssdev, enable);
- return 0;
-}
-
-static int rfbi_display_enable(struct omap_dss_device *dssdev)
+int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev)
{
int r;
@@ -1269,41 +1032,25 @@ static int rfbi_display_enable(struct omap_dss_device *dssdev)
&dssdev->ctrl.rfbi_timings);
- if (dssdev->driver->enable) {
- r = dssdev->driver->enable(dssdev);
- if (r)
- goto err2;
- }
-
return 0;
-err2:
- omap_dispc_unregister_isr(framedone_callback, NULL,
- DISPC_IRQ_FRAMEDONE);
err1:
omap_dss_stop_device(dssdev);
err0:
return r;
}
+EXPORT_SYMBOL(omapdss_rfbi_display_enable);
-static void rfbi_display_disable(struct omap_dss_device *dssdev)
+void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
{
- dssdev->driver->disable(dssdev);
omap_dispc_unregister_isr(framedone_callback, NULL,
DISPC_IRQ_FRAMEDONE);
omap_dss_stop_device(dssdev);
}
+EXPORT_SYMBOL(omapdss_rfbi_display_disable);
int rfbi_init_display(struct omap_dss_device *dssdev)
{
- dssdev->enable = rfbi_display_enable;
- dssdev->disable = rfbi_display_disable;
- dssdev->update = rfbi_display_update;
- dssdev->sync = rfbi_display_sync;
- dssdev->enable_te = rfbi_display_enable_te;
-
rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
-
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
-
return 0;
}
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index c24f307d3da..12eb4042dd8 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -41,7 +41,7 @@ static void sdi_basic_init(void)
dispc_lcd_enable_signal_polarity(1);
}
-static int sdi_display_enable(struct omap_dss_device *dssdev)
+int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
{
struct omap_video_timings *t = &dssdev->panel.timings;
struct dss_clock_info dss_cinfo;
@@ -57,12 +57,6 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
goto err0;
}
- if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
- DSSERR("dssdev already enabled\n");
- r = -EINVAL;
- goto err1;
- }
-
/* In case of skip_init sdi_init has already enabled the clocks */
if (!sdi.skip_init)
dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
@@ -119,7 +113,7 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
mdelay(2);
}
- dispc_enable_lcd_out(1);
+ dssdev->manager->enable(dssdev->manager);
if (dssdev->driver->enable) {
r = dssdev->driver->enable(dssdev);
@@ -127,13 +121,11 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
goto err3;
}
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
sdi.skip_init = 0;
return 0;
err3:
- dispc_enable_lcd_out(0);
+ dssdev->manager->disable(dssdev->manager);
err2:
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
err1:
@@ -141,120 +133,27 @@ err1:
err0:
return r;
}
+EXPORT_SYMBOL(omapdss_sdi_display_enable);
-static int sdi_display_resume(struct omap_dss_device *dssdev);
-
-static void sdi_display_disable(struct omap_dss_device *dssdev)
+void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
{
- if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED)
- return;
-
- if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
- if (sdi_display_resume(dssdev))
- return;
-
if (dssdev->driver->disable)
dssdev->driver->disable(dssdev);
- dispc_enable_lcd_out(0);
+ dssdev->manager->disable(dssdev->manager);
dss_sdi_disable();
dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-
omap_dss_stop_device(dssdev);
}
-
-static int sdi_display_suspend(struct omap_dss_device *dssdev)
-{
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
- return -EINVAL;
-
- if (dssdev->driver->suspend)
- dssdev->driver->suspend(dssdev);
-
- dispc_enable_lcd_out(0);
-
- dss_sdi_disable();
-
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-
- return 0;
-}
-
-static int sdi_display_resume(struct omap_dss_device *dssdev)
-{
- int r;
-
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED)
- return -EINVAL;
-
- dss_clk_enable(DSS_CLK_ICK | DSS_CLK_FCK1);
-
- r = dss_sdi_enable();
- if (r)
- goto err;
- mdelay(2);
-
- dispc_enable_lcd_out(1);
-
- if (dssdev->driver->resume)
- dssdev->driver->resume(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-
- return 0;
-err:
- dss_clk_disable(DSS_CLK_ICK | DSS_CLK_FCK1);
- return r;
-}
-
-static int sdi_display_set_update_mode(struct omap_dss_device *dssdev,
- enum omap_dss_update_mode mode)
-{
- if (mode == OMAP_DSS_UPDATE_MANUAL)
- return -EINVAL;
-
- if (mode == OMAP_DSS_UPDATE_DISABLED) {
- dispc_enable_lcd_out(0);
- sdi.update_enabled = 0;
- } else {
- dispc_enable_lcd_out(1);
- sdi.update_enabled = 1;
- }
-
- return 0;
-}
-
-static enum omap_dss_update_mode sdi_display_get_update_mode(
- struct omap_dss_device *dssdev)
-{
- return sdi.update_enabled ? OMAP_DSS_UPDATE_AUTO :
- OMAP_DSS_UPDATE_DISABLED;
-}
-
-static void sdi_get_timings(struct omap_dss_device *dssdev,
- struct omap_video_timings *timings)
-{
- *timings = dssdev->panel.timings;
-}
+EXPORT_SYMBOL(omapdss_sdi_display_disable);
int sdi_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("SDI init\n");
- dssdev->enable = sdi_display_enable;
- dssdev->disable = sdi_display_disable;
- dssdev->suspend = sdi_display_suspend;
- dssdev->resume = sdi_display_resume;
- dssdev->set_update_mode = sdi_display_set_update_mode;
- dssdev->get_update_mode = sdi_display_get_update_mode;
- dssdev->get_timings = sdi_get_timings;
-
return 0;
}
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 749a5a0f5be..f0ba5732d84 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -400,114 +400,6 @@ static const struct venc_config *venc_timings_to_config(
BUG();
}
-
-
-
-
-/* driver */
-static int venc_panel_probe(struct omap_dss_device *dssdev)
-{
- dssdev->panel.timings = omap_dss_pal_timings;
-
- return 0;
-}
-
-static void venc_panel_remove(struct omap_dss_device *dssdev)
-{
-}
-
-static int venc_panel_enable(struct omap_dss_device *dssdev)
-{
- int r = 0;
-
- /* wait couple of vsyncs until enabling the LCD */
- msleep(50);
-
- if (dssdev->platform_enable)
- r = dssdev->platform_enable(dssdev);
-
- return r;
-}
-
-static void venc_panel_disable(struct omap_dss_device *dssdev)
-{
- if (dssdev->platform_disable)
- dssdev->platform_disable(dssdev);
-
- /* wait at least 5 vsyncs after disabling the LCD */
-
- msleep(100);
-}
-
-static int venc_panel_suspend(struct omap_dss_device *dssdev)
-{
- venc_panel_disable(dssdev);
- return 0;
-}
-
-static int venc_panel_resume(struct omap_dss_device *dssdev)
-{
- return venc_panel_enable(dssdev);
-}
-
-static struct omap_dss_driver venc_driver = {
- .probe = venc_panel_probe,
- .remove = venc_panel_remove,
-
- .enable = venc_panel_enable,
- .disable = venc_panel_disable,
- .suspend = venc_panel_suspend,
- .resume = venc_panel_resume,
-
- .driver = {
- .name = "venc",
- .owner = THIS_MODULE,
- },
-};
-/* driver end */
-
-
-
-int venc_init(struct platform_device *pdev)
-{
- u8 rev_id;
-
- mutex_init(&venc.venc_lock);
-
- venc.wss_data = 0;
-
- venc.base = ioremap(VENC_BASE, SZ_1K);
- if (!venc.base) {
- DSSERR("can't ioremap VENC\n");
- return -ENOMEM;
- }
-
- venc.vdda_dac_reg = regulator_get(&pdev->dev, "vdda_dac");
- if (IS_ERR(venc.vdda_dac_reg)) {
- iounmap(venc.base);
- DSSERR("can't get VDDA_DAC regulator\n");
- return PTR_ERR(venc.vdda_dac_reg);
- }
-
- venc_enable_clocks(1);
-
- rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
- printk(KERN_INFO "OMAP VENC rev %d\n", rev_id);
-
- venc_enable_clocks(0);
-
- return omap_dss_register_driver(&venc_driver);
-}
-
-void venc_exit(void)
-{
- omap_dss_unregister_driver(&venc_driver);
-
- regulator_put(venc.vdda_dac_reg);
-
- iounmap(venc.base);
-}
-
static void venc_power_on(struct omap_dss_device *dssdev)
{
u32 l;
@@ -540,7 +432,7 @@ static void venc_power_on(struct omap_dss_device *dssdev)
if (dssdev->platform_enable)
dssdev->platform_enable(dssdev);
- dispc_enable_digit_out(1);
+ dssdev->manager->enable(dssdev->manager);
}
static void venc_power_off(struct omap_dss_device *dssdev)
@@ -548,7 +440,7 @@ static void venc_power_off(struct omap_dss_device *dssdev)
venc_write_reg(VENC_OUTPUT_CONTROL, 0);
dss_set_dac_pwrdn_bgz(0);
- dispc_enable_digit_out(0);
+ dssdev->manager->disable(dssdev->manager);
if (dssdev->platform_disable)
dssdev->platform_disable(dssdev);
@@ -558,7 +450,23 @@ static void venc_power_off(struct omap_dss_device *dssdev)
venc_enable_clocks(0);
}
-static int venc_enable_display(struct omap_dss_device *dssdev)
+
+
+
+
+/* driver */
+static int venc_panel_probe(struct omap_dss_device *dssdev)
+{
+ dssdev->panel.timings = omap_dss_pal_timings;
+
+ return 0;
+}
+
+static void venc_panel_remove(struct omap_dss_device *dssdev)
+{
+}
+
+static int venc_panel_enable(struct omap_dss_device *dssdev)
{
int r = 0;
@@ -568,7 +476,13 @@ static int venc_enable_display(struct omap_dss_device *dssdev)
if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
r = -EINVAL;
- goto err;
+ goto err1;
+ }
+
+ if (dssdev->platform_enable) {
+ r = dssdev->platform_enable(dssdev);
+ if (r)
+ goto err2;
}
venc_power_on(dssdev);
@@ -576,13 +490,21 @@ static int venc_enable_display(struct omap_dss_device *dssdev)
venc.wss_data = 0;
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-err:
+
+ /* wait couple of vsyncs until enabling the LCD */
+ msleep(50);
+
mutex_unlock(&venc.venc_lock);
return r;
+err2:
+ venc_power_off(dssdev);
+err1:
+ mutex_unlock(&venc.venc_lock);
+ return r;
}
-static void venc_disable_display(struct omap_dss_device *dssdev)
+static void venc_panel_disable(struct omap_dss_device *dssdev)
{
DSSDBG("venc_disable_display\n");
@@ -599,53 +521,40 @@ static void venc_disable_display(struct omap_dss_device *dssdev)
venc_power_off(dssdev);
+ /* wait at least 5 vsyncs after disabling the LCD */
+ msleep(100);
+
+ if (dssdev->platform_disable)
+ dssdev->platform_disable(dssdev);
+
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
end:
mutex_unlock(&venc.venc_lock);
}
-static int venc_display_suspend(struct omap_dss_device *dssdev)
+static int venc_panel_suspend(struct omap_dss_device *dssdev)
{
- int r = 0;
-
- DSSDBG("venc_display_suspend\n");
-
- mutex_lock(&venc.venc_lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
- r = -EINVAL;
- goto err;
- }
-
- venc_power_off(dssdev);
-
- dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
-err:
- mutex_unlock(&venc.venc_lock);
-
- return r;
+ venc_panel_disable(dssdev);
+ return 0;
}
-static int venc_display_resume(struct omap_dss_device *dssdev)
+static int venc_panel_resume(struct omap_dss_device *dssdev)
{
- int r = 0;
-
- DSSDBG("venc_display_resume\n");
-
- mutex_lock(&venc.venc_lock);
-
- if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
- r = -EINVAL;
- goto err;
- }
-
- venc_power_on(dssdev);
+ return venc_panel_enable(dssdev);
+}
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
-err:
- mutex_unlock(&venc.venc_lock);
+static enum omap_dss_update_mode venc_get_update_mode(
+ struct omap_dss_device *dssdev)
+{
+ return OMAP_DSS_UPDATE_AUTO;
+}
- return r;
+static int venc_set_update_mode(struct omap_dss_device *dssdev,
+ enum omap_dss_update_mode mode)
+{
+ if (mode != OMAP_DSS_UPDATE_AUTO)
+ return -EINVAL;
+ return 0;
}
static void venc_get_timings(struct omap_dss_device *dssdev,
@@ -666,8 +575,8 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
dssdev->panel.timings = *timings;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
/* turn the venc off and on to get new timings to use */
- venc_disable_display(dssdev);
- venc_enable_display(dssdev);
+ venc_panel_disable(dssdev);
+ venc_panel_enable(dssdev);
}
}
@@ -716,30 +625,79 @@ static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
return 0;
}
-static enum omap_dss_update_mode venc_display_get_update_mode(
- struct omap_dss_device *dssdev)
+static struct omap_dss_driver venc_driver = {
+ .probe = venc_panel_probe,
+ .remove = venc_panel_remove,
+
+ .enable = venc_panel_enable,
+ .disable = venc_panel_disable,
+ .suspend = venc_panel_suspend,
+ .resume = venc_panel_resume,
+
+ .get_resolution = omapdss_default_get_resolution,
+ .get_recommended_bpp = omapdss_default_get_recommended_bpp,
+
+ .set_update_mode = venc_set_update_mode,
+ .get_update_mode = venc_get_update_mode,
+
+ .get_timings = venc_get_timings,
+ .set_timings = venc_set_timings,
+ .check_timings = venc_check_timings,
+
+ .get_wss = venc_get_wss,
+ .set_wss = venc_set_wss,
+
+ .driver = {
+ .name = "venc",
+ .owner = THIS_MODULE,
+ },
+};
+/* driver end */
+
+
+
+int venc_init(struct platform_device *pdev)
{
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- return OMAP_DSS_UPDATE_AUTO;
- else
- return OMAP_DSS_UPDATE_DISABLED;
+ u8 rev_id;
+
+ mutex_init(&venc.venc_lock);
+
+ venc.wss_data = 0;
+
+ venc.base = ioremap(VENC_BASE, SZ_1K);
+ if (!venc.base) {
+ DSSERR("can't ioremap VENC\n");
+ return -ENOMEM;
+ }
+
+ venc.vdda_dac_reg = dss_get_vdda_dac();
+ if (IS_ERR(venc.vdda_dac_reg)) {
+ iounmap(venc.base);
+ DSSERR("can't get VDDA_DAC regulator\n");
+ return PTR_ERR(venc.vdda_dac_reg);
+ }
+
+ venc_enable_clocks(1);
+
+ rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
+ printk(KERN_INFO "OMAP VENC rev %d\n", rev_id);
+
+ venc_enable_clocks(0);
+
+ return omap_dss_register_driver(&venc_driver);
+}
+
+void venc_exit(void)
+{
+ omap_dss_unregister_driver(&venc_driver);
+
+ iounmap(venc.base);
}
int venc_init_display(struct omap_dss_device *dssdev)
{
DSSDBG("init_display\n");
- dssdev->enable = venc_enable_display;
- dssdev->disable = venc_disable_display;
- dssdev->suspend = venc_display_suspend;
- dssdev->resume = venc_display_resume;
- dssdev->get_timings = venc_get_timings;
- dssdev->set_timings = venc_set_timings;
- dssdev->check_timings = venc_check_timings;
- dssdev->get_wss = venc_get_wss;
- dssdev->set_wss = venc_set_wss;
- dssdev->get_update_mode = venc_display_get_update_mode;
-
return 0;
}
diff --git a/drivers/video/omap2/omapfb/Kconfig b/drivers/video/omap2/omapfb/Kconfig
index bb694cc52a5..43496d6c377 100644
--- a/drivers/video/omap2/omapfb/Kconfig
+++ b/drivers/video/omap2/omapfb/Kconfig
@@ -16,16 +16,7 @@ config FB_OMAP2_DEBUG_SUPPORT
depends on FB_OMAP2
help
Support for debug output. You have to enable the actual printing
- with debug module parameter.
-
-config FB_OMAP2_FORCE_AUTO_UPDATE
- bool "Force main display to automatic update mode"
- depends on FB_OMAP2
- help
- Forces main display to automatic update mode (if possible),
- and also enables tearsync (if possible). By default
- displays that support manual update are started in manual
- update mode.
+ with 'debug' module parameter.
config FB_OMAP2_NUM_FBS
int "Number of framebuffers"
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 4c4bafdfaa4..1ffa760b854 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -167,12 +167,12 @@ static int omapfb_update_window_nolock(struct fb_info *fbi,
if (w == 0 || h == 0)
return 0;
- display->get_resolution(display, &dw, &dh);
+ display->driver->get_resolution(display, &dw, &dh);
if (x + w > dw || y + h > dh)
return -EINVAL;
- return display->update(display, x, y, w, h);
+ return display->driver->update(display, x, y, w, h);
}
/* This function is exported for SGX driver use */
@@ -202,7 +202,7 @@ static int omapfb_set_update_mode(struct fb_info *fbi,
enum omap_dss_update_mode um;
int r;
- if (!display || !display->set_update_mode)
+ if (!display || !display->driver->set_update_mode)
return -EINVAL;
switch (mode) {
@@ -222,7 +222,7 @@ static int omapfb_set_update_mode(struct fb_info *fbi,
return -EINVAL;
}
- r = display->set_update_mode(display, um);
+ r = display->driver->set_update_mode(display, um);
return r;
}
@@ -233,10 +233,15 @@ static int omapfb_get_update_mode(struct fb_info *fbi,
struct omap_dss_device *display = fb2display(fbi);
enum omap_dss_update_mode m;
- if (!display || !display->get_update_mode)
+ if (!display)
return -EINVAL;
- m = display->get_update_mode(display);
+ if (!display->driver->get_update_mode) {
+ *mode = OMAPFB_AUTO_UPDATE;
+ return 0;
+ }
+
+ m = display->driver->get_update_mode(display);
switch (m) {
case OMAP_DSS_UPDATE_DISABLED:
@@ -374,7 +379,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
void *buf;
int r;
- if (!display || !display->memory_read)
+ if (!display || !display->driver->memory_read)
return -ENOENT;
if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
@@ -389,7 +394,7 @@ static int omapfb_memory_read(struct fb_info *fbi,
return -ENOMEM;
}
- r = display->memory_read(display, buf, mr->buffer_size,
+ r = display->driver->memory_read(display, buf, mr->buffer_size,
mr->x, mr->y, mr->w, mr->h);
if (r > 0) {
@@ -483,6 +488,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
struct omapfb_memory_read memory_read;
struct omapfb_vram_info vram_info;
struct omapfb_tearsync_info tearsync_info;
+ struct omapfb_display_info display_info;
} p;
int r = 0;
@@ -490,18 +496,18 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
switch (cmd) {
case OMAPFB_SYNC_GFX:
DBG("ioctl SYNC_GFX\n");
- if (!display || !display->sync) {
+ if (!display || !display->driver->sync) {
/* DSS1 never returns an error here, so we neither */
/*r = -EINVAL;*/
break;
}
- r = display->sync(display);
+ r = display->driver->sync(display);
break;
case OMAPFB_UPDATE_WINDOW_OLD:
DBG("ioctl UPDATE_WINDOW_OLD\n");
- if (!display || !display->update) {
+ if (!display || !display->driver->update) {
r = -EINVAL;
break;
}
@@ -519,7 +525,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
case OMAPFB_UPDATE_WINDOW:
DBG("ioctl UPDATE_WINDOW\n");
- if (!display || !display->update) {
+ if (!display || !display->driver->update) {
r = -EINVAL;
break;
}
@@ -648,7 +654,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
}
- r = display->wait_vsync(display);
+ r = display->manager->wait_for_vsync(display->manager);
break;
case OMAPFB_WAITFORGO:
@@ -669,12 +675,12 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
r = -EFAULT;
break;
}
- if (!display || !display->run_test) {
+ if (!display || !display->driver->run_test) {
r = -EINVAL;
break;
}
- r = display->run_test(display, p.test_num);
+ r = display->driver->run_test(display, p.test_num);
break;
@@ -684,12 +690,12 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
r = -EFAULT;
break;
}
- if (!display || !display->run_test) {
+ if (!display || !display->driver->run_test) {
r = -EINVAL;
break;
}
- r = display->run_test(display, p.test_num);
+ r = display->driver->run_test(display, p.test_num);
break;
@@ -731,13 +737,37 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
break;
}
- if (!display->enable_te) {
+ if (!display->driver->enable_te) {
r = -ENODEV;
break;
}
- r = display->enable_te(display, !!p.tearsync_info.enabled);
+ r = display->driver->enable_te(display,
+ !!p.tearsync_info.enabled);
+
+ break;
+ }
+
+ case OMAPFB_GET_DISPLAY_INFO: {
+ u16 xres, yres;
+ DBG("ioctl GET_DISPLAY_INFO\n");
+
+ if (display == NULL) {
+ r = -ENODEV;
+ break;
+ }
+
+ display->driver->get_resolution(display, &xres, &yres);
+
+ p.display_info.xres = xres;
+ p.display_info.yres = yres;
+ p.display_info.width = 0;
+ p.display_info.height = 0;
+
+ if (copy_to_user((void __user *)arg, &p.display_info,
+ sizeof(p.display_info)))
+ r = -EFAULT;
break;
}
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index d17caef6915..4a76917b7cc 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -54,6 +54,8 @@ module_param_named(test, omapfb_test_pattern, bool, 0644);
#endif
static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi);
+static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev,
+ struct omap_dss_device *dssdev);
#ifdef DEBUG
static void draw_pixel(struct fb_info *fbi, int x, int y, unsigned color)
@@ -152,9 +154,9 @@ static void fill_fb(struct fb_info *fbi)
}
#endif
-static unsigned omapfb_get_vrfb_offset(struct omapfb_info *ofbi, int rot)
+static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot)
{
- struct vrfb *vrfb = &ofbi->region.vrfb;
+ const struct vrfb *vrfb = &ofbi->region.vrfb;
unsigned offset;
switch (rot) {
@@ -179,7 +181,7 @@ static unsigned omapfb_get_vrfb_offset(struct omapfb_info *ofbi, int rot)
return offset;
}
-static u32 omapfb_get_region_rot_paddr(struct omapfb_info *ofbi, int rot)
+static u32 omapfb_get_region_rot_paddr(const struct omapfb_info *ofbi, int rot)
{
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
return ofbi->region.vrfb.paddr[rot]
@@ -189,7 +191,7 @@ static u32 omapfb_get_region_rot_paddr(struct omapfb_info *ofbi, int rot)
}
}
-static u32 omapfb_get_region_paddr(struct omapfb_info *ofbi)
+static u32 omapfb_get_region_paddr(const struct omapfb_info *ofbi)
{
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
return ofbi->region.vrfb.paddr[0];
@@ -197,7 +199,7 @@ static u32 omapfb_get_region_paddr(struct omapfb_info *ofbi)
return ofbi->region.paddr;
}
-static void __iomem *omapfb_get_region_vaddr(struct omapfb_info *ofbi)
+static void __iomem *omapfb_get_region_vaddr(const struct omapfb_info *ofbi)
{
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB)
return ofbi->region.vrfb.vaddr[0];
@@ -703,9 +705,9 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var)
var->width = -1;
var->grayscale = 0;
- if (display && display->get_timings) {
+ if (display && display->driver->get_timings) {
struct omap_video_timings timings;
- display->get_timings(display, &timings);
+ display->driver->get_timings(display, &timings);
/* pixclock in ps, the rest in pixclock */
var->pixclock = timings.pixel_clock != 0 ?
@@ -778,8 +780,8 @@ static int omapfb_release(struct fb_info *fbi, int user)
return 0;
}
-static unsigned calc_rotation_offset_dma(struct fb_var_screeninfo *var,
- struct fb_fix_screeninfo *fix, int rotation)
+static unsigned calc_rotation_offset_dma(const struct fb_var_screeninfo *var,
+ const struct fb_fix_screeninfo *fix, int rotation)
{
unsigned offset;
@@ -789,8 +791,8 @@ static unsigned calc_rotation_offset_dma(struct fb_var_screeninfo *var,
return offset;
}
-static unsigned calc_rotation_offset_vrfb(struct fb_var_screeninfo *var,
- struct fb_fix_screeninfo *fix, int rotation)
+static unsigned calc_rotation_offset_vrfb(const struct fb_var_screeninfo *var,
+ const struct fb_fix_screeninfo *fix, int rotation)
{
unsigned offset;
@@ -1221,11 +1223,11 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (display->state != OMAP_DSS_DISPLAY_SUSPENDED)
goto exit;
- if (display->resume)
- r = display->resume(display);
+ if (display->driver->resume)
+ r = display->driver->resume(display);
- if (r == 0 && display->get_update_mode &&
- display->get_update_mode(display) ==
+ if (r == 0 && display->driver->get_update_mode &&
+ display->driver->get_update_mode(display) ==
OMAP_DSS_UPDATE_MANUAL)
do_update = 1;
@@ -1240,8 +1242,8 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
if (display->state != OMAP_DSS_DISPLAY_ACTIVE)
goto exit;
- if (display->suspend)
- r = display->suspend(display);
+ if (display->driver->suspend)
+ r = display->driver->suspend(display);
break;
@@ -1252,11 +1254,11 @@ static int omapfb_blank(int blank, struct fb_info *fbi)
exit:
omapfb_unlock(fbdev);
- if (r == 0 && do_update && display->update) {
+ if (r == 0 && do_update && display->driver->update) {
u16 w, h;
- display->get_resolution(display, &w, &h);
+ display->driver->get_resolution(display, &w, &h);
- r = display->update(display, 0, 0, w, h);
+ r = display->driver->update(display, 0, 0, w, h);
}
return r;
@@ -1404,6 +1406,7 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
unsigned long paddr)
{
struct omapfb_info *ofbi = FB2OFB(fbi);
+ struct omapfb2_device *fbdev = ofbi->fbdev;
struct omap_dss_device *display;
int bytespp;
@@ -1412,7 +1415,7 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
if (!display)
return 0;
- switch (display->get_recommended_bpp(display)) {
+ switch (omapfb_get_recommended_bpp(fbdev, display)) {
case 16:
bytespp = 2;
break;
@@ -1427,7 +1430,7 @@ static int omapfb_alloc_fbmem_display(struct fb_info *fbi, unsigned long size,
if (!size) {
u16 w, h;
- display->get_resolution(display, &w, &h);
+ display->driver->get_resolution(display, &w, &h);
if (ofbi->rotation_type == OMAP_DSS_ROT_VRFB) {
size = max(omap_vrfb_min_phys_size(w, h, bytespp),
@@ -1636,8 +1639,8 @@ int omapfb_realloc_fbmem(struct fb_info *fbi, unsigned long size, int type)
if (old_size == size && old_type == type)
return 0;
- if (display && display->sync)
- display->sync(display);
+ if (display && display->driver->sync)
+ display->driver->sync(display);
omapfb_free_fbmem(fbi);
@@ -1745,7 +1748,7 @@ static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
u16 w, h;
int rotation = (var->rotate + ofbi->rotation[0]) % 4;
- display->get_resolution(display, &w, &h);
+ display->driver->get_resolution(display, &w, &h);
if (rotation == FB_ROTATE_CW ||
rotation == FB_ROTATE_CCW) {
@@ -1760,7 +1763,7 @@ static int omapfb_fb_init(struct omapfb2_device *fbdev, struct fb_info *fbi)
var->yres_virtual = var->yres;
if (!var->bits_per_pixel) {
- switch (display->get_recommended_bpp(display)) {
+ switch (omapfb_get_recommended_bpp(fbdev, display)) {
case 16:
var->bits_per_pixel = 16;
break;
@@ -1828,7 +1831,7 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
for (i = 0; i < fbdev->num_displays; i++) {
if (fbdev->displays[i]->state != OMAP_DSS_DISPLAY_DISABLED)
- fbdev->displays[i]->disable(fbdev->displays[i]);
+ fbdev->displays[i]->driver->disable(fbdev->displays[i]);
omap_dss_put_device(fbdev->displays[i]);
}
@@ -2011,7 +2014,8 @@ static int omapfb_mode_to_timings(const char *mode_str,
}
}
-static int omapfb_set_def_mode(struct omap_dss_device *display, char *mode_str)
+static int omapfb_set_def_mode(struct omapfb2_device *fbdev,
+ struct omap_dss_device *display, char *mode_str)
{
int r;
u8 bpp;
@@ -2021,20 +2025,37 @@ static int omapfb_set_def_mode(struct omap_dss_device *display, char *mode_str)
if (r)
return r;
- display->panel.recommended_bpp = bpp;
+ fbdev->bpp_overrides[fbdev->num_bpp_overrides].dssdev = display;
+ fbdev->bpp_overrides[fbdev->num_bpp_overrides].bpp = bpp;
+ ++fbdev->num_bpp_overrides;
- if (!display->check_timings || !display->set_timings)
+ if (!display->driver->check_timings || !display->driver->set_timings)
return -EINVAL;
- r = display->check_timings(display, &timings);
+ r = display->driver->check_timings(display, &timings);
if (r)
return r;
- display->set_timings(display, &timings);
+ display->driver->set_timings(display, &timings);
return 0;
}
+static int omapfb_get_recommended_bpp(struct omapfb2_device *fbdev,
+ struct omap_dss_device *dssdev)
+{
+ int i;
+
+ BUG_ON(dssdev->driver->get_recommended_bpp == NULL);
+
+ for (i = 0; i < fbdev->num_bpp_overrides; ++i) {
+ if (dssdev == fbdev->bpp_overrides[i].dssdev)
+ return fbdev->bpp_overrides[i].bpp;
+ }
+
+ return dssdev->driver->get_recommended_bpp(dssdev);
+}
+
static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
{
char *str, *options, *this_opt;
@@ -2073,7 +2094,7 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev)
break;
}
- r = omapfb_set_def_mode(display, mode_str);
+ r = omapfb_set_def_mode(fbdev, display, mode_str);
if (r)
break;
}
@@ -2111,18 +2132,23 @@ static int omapfb_probe(struct platform_device *pdev)
fbdev->dev = &pdev->dev;
platform_set_drvdata(pdev, fbdev);
+ r = 0;
fbdev->num_displays = 0;
dssdev = NULL;
for_each_dss_dev(dssdev) {
omap_dss_get_device(dssdev);
+
if (!dssdev->driver) {
dev_err(&pdev->dev, "no driver for display\n");
- r = -EINVAL;
- goto cleanup;
+ r = -ENODEV;
}
+
fbdev->displays[fbdev->num_displays++] = dssdev;
}
+ if (r)
+ goto cleanup;
+
if (fbdev->num_displays == 0) {
dev_err(&pdev->dev, "no displays\n");
r = -EINVAL;
@@ -2167,35 +2193,28 @@ static int omapfb_probe(struct platform_device *pdev)
}
if (def_display) {
-#ifndef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
- u16 w, h;
-#endif
- r = def_display->enable(def_display);
- if (r)
+ struct omap_dss_driver *dssdrv = def_display->driver;
+
+ r = def_display->driver->enable(def_display);
+ if (r) {
dev_warn(fbdev->dev, "Failed to enable display '%s'\n",
def_display->name);
+ goto cleanup;
+ }
- /* set the update mode */
if (def_display->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) {
-#ifdef CONFIG_FB_OMAP2_FORCE_AUTO_UPDATE
- if (def_display->enable_te)
- def_display->enable_te(def_display, 1);
- if (def_display->set_update_mode)
- def_display->set_update_mode(def_display,
- OMAP_DSS_UPDATE_AUTO);
-#else /* MANUAL_UPDATE */
- if (def_display->enable_te)
- def_display->enable_te(def_display, 0);
- if (def_display->set_update_mode)
- def_display->set_update_mode(def_display,
+ u16 w, h;
+ if (dssdrv->enable_te)
+ dssdrv->enable_te(def_display, 1);
+ if (dssdrv->set_update_mode)
+ dssdrv->set_update_mode(def_display,
OMAP_DSS_UPDATE_MANUAL);
- def_display->get_resolution(def_display, &w, &h);
- def_display->update(def_display, 0, 0, w, h);
-#endif
+ dssdrv->get_resolution(def_display, &w, &h);
+ def_display->driver->update(def_display, 0, 0, w, h);
} else {
- if (def_display->set_update_mode)
- def_display->set_update_mode(def_display,
+ if (dssdrv->set_update_mode)
+ dssdrv->set_update_mode(def_display,
OMAP_DSS_UPDATE_AUTO);
}
}
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index f7c9c739e5e..cd54fdbfd8b 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -83,6 +83,12 @@ struct omapfb2_device {
struct omap_overlay *overlays[10];
unsigned num_managers;
struct omap_overlay_manager *managers[10];
+
+ unsigned num_bpp_overrides;
+ struct {
+ struct omap_dss_device *dssdev;
+ u8 bpp;
+ } bpp_overrides[10];
};
struct omapfb_colormode {
@@ -105,6 +111,9 @@ void omapfb_remove_sysfs(struct omapfb2_device *fbdev);
int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg);
+int omapfb_update_window(struct fb_info *fbi,
+ u32 x, u32 y, u32 w, u32 h);
+
int dss_mode_to_fb_mode(enum omap_color_mode dssmode,
struct fb_var_screeninfo *var);
diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
index 18b950706ca..4cd50497264 100644
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -400,6 +400,7 @@ static void __devexit e3d_pci_unregister(struct pci_dev *pdev)
static struct pci_device_id e3d_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a0), },
+ { PCI_DEVICE(0x1091, 0x7a0), },
{ PCI_DEVICE(PCI_VENDOR_ID_3DLABS, 0x7a2), },
{ .vendor = PCI_VENDOR_ID_3DLABS,
.device = PCI_ANY_ID,
diff --git a/drivers/virtio/virtio_pci.c b/drivers/virtio/virtio_pci.c
index 1b657321699..625447f645d 100644
--- a/drivers/virtio/virtio_pci.c
+++ b/drivers/virtio/virtio_pci.c
@@ -649,6 +649,7 @@ static int __devinit virtio_pci_probe(struct pci_dev *pci_dev,
goto out_req_regions;
pci_set_drvdata(pci_dev, vp_dev);
+ pci_set_master(pci_dev);
/* we use the subsystem vendor/device id as the virtio vendor/device
* id. this allows us to use the same PCI vendor/device id for all
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig
index 3195fb8b7d9..80b3b123dd7 100644
--- a/drivers/w1/masters/Kconfig
+++ b/drivers/w1/masters/Kconfig
@@ -60,7 +60,7 @@ config W1_MASTER_GPIO
config HDQ_MASTER_OMAP
tristate "OMAP HDQ driver"
- depends on ARCH_OMAP2430 || ARCH_OMAP34XX
+ depends on ARCH_OMAP2430 || ARCH_OMAP3
help
Say Y here if you want support for the 1-wire or HDQ Interface
on an OMAP processor.
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 050ee147592..3da3f48720a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -194,7 +194,7 @@ config EP93XX_WATCHDOG
config OMAP_WATCHDOG
tristate "OMAP Watchdog"
- depends on ARCH_OMAP16XX || ARCH_OMAP24XX || ARCH_OMAP34XX
+ depends on ARCH_OMAP16XX || ARCH_OMAP2 || ARCH_OMAP3
help
Support for TI OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog. Say 'Y'
here to enable the OMAP1610/OMAP1710/OMAP2420/OMAP3430 watchdog timer.
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index cab100acf98..fad3df2c127 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -1,6 +1,8 @@
+menu "Xen driver support"
+ depends on XEN
+
config XEN_BALLOON
bool "Xen memory balloon driver"
- depends on XEN
default y
help
The balloon driver allows the Xen domain to request more memory from
@@ -20,7 +22,6 @@ config XEN_SCRUB_PAGES
config XEN_DEV_EVTCHN
tristate "Xen /dev/xen/evtchn device"
- depends on XEN
default y
help
The evtchn driver allows a userspace process to trigger event
@@ -30,7 +31,6 @@ config XEN_DEV_EVTCHN
config XENFS
tristate "Xen filesystem"
- depends on XEN
default y
help
The xen filesystem provides a way for domains to share
@@ -53,11 +53,13 @@ config XEN_COMPAT_XENFS
config XEN_SYS_HYPERVISOR
bool "Create xen entries under /sys/hypervisor"
- depends on XEN && SYSFS
+ depends on SYSFS
select SYS_HYPERVISOR
default y
help
Create entries under /sys/hypervisor describing the Xen
hypervisor environment. When running native or in another
virtual environment, /sys/hypervisor will still be present,
- but will have no xen contents.
\ No newline at end of file
+ but will have no xen contents.
+
+endmenu
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index ce602dd09bc..2f8413794d0 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -649,9 +649,13 @@ void xen_evtchn_do_upcall(struct pt_regs *regs)
int bit_idx = __ffs(pending_bits);
int port = (word_idx * BITS_PER_LONG) + bit_idx;
int irq = evtchn_to_irq[port];
+ struct irq_desc *desc;
- if (irq != -1)
- handle_irq(irq, regs);
+ if (irq != -1) {
+ desc = irq_to_desc(irq);
+ if (desc)
+ generic_handle_irq_desc(irq, desc);
+ }
}
}